git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
Merge tag 'efi-urgent-for-v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 18 Oct 2021 03:30:49 +0000 (17:30 -1000)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 18 Oct 2021 03:30:49 +0000 (17:30 -1000)
Pull EFI fixes from Borislav Petkov:
 "Forwarded from Ard Biesheuvel through the tip tree. Ard will send
  stuff directly in the near future.

  Low priority fixes but fixes nonetheless:

   - update stub diagnostic print that is no longer accurate

   - avoid statically allocated buffer for CPER error record decoding

   - avoid sleeping on the efi_runtime semaphore when calling the
     ResetSystem EFI runtime service"

* tag 'efi-urgent-for-v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  efi: Change down_interruptible() in virt_efi_reset_system() to down_trylock()
  efi/cper: use stack buffer for error record decoding
  efi/libstub: Simplify "Exiting bootservices" message
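
For background on the down_trylock() change listed above: the fix replaces a
sleeping semaphore acquisition with a non-blocking one, since ResetSystem()
can be reached from contexts where sleeping is not allowed. The C sketch
below only illustrates that down_interruptible() -> down_trylock() pattern;
the example_* names and the warning text are illustrative stand-ins, not the
code actually touched by this merge.

/*
 * Minimal sketch, assuming a kernel build environment.  down_trylock()
 * returns 0 on success and 1 if the semaphore is already held, so the
 * caller bails out instead of sleeping.
 */
#include <linux/printk.h>
#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(example_runtime_lock);	/* binary semaphore */

static void example_reset_system(void)
{
	/*
	 * Previously down_interruptible() was used here, which may sleep;
	 * down_trylock() either takes the semaphore or fails immediately.
	 */
	if (down_trylock(&example_runtime_lock)) {
		pr_warn("could not get exclusive access to the firmware\n");
		return;
	}

	/* ... invoke the ResetSystem() EFI runtime service ... */

	up(&example_runtime_lock);
}

The trade-off is that a concurrent runtime-service call now makes the reset
attempt warn and return immediately rather than block, which is the safer
behaviour on a shutdown or reboot path.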

1505 files changed:
CREDITS
Documentation/admin-guide/README.rst
Documentation/admin-guide/cgroup-v2.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/core-api/irq/irq-domain.rst
Documentation/devicetree/bindings/arm/tegra.yaml
Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml
Documentation/devicetree/bindings/media/i2c/ovti,ov9282.yaml
Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml
Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
Documentation/devicetree/bindings/net/dsa/marvell.txt
Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
Documentation/devicetree/bindings/net/snps,dwmac.yaml
Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml [new file with mode: 0644]
Documentation/filesystems/ntfs3.rst
Documentation/gpu/amdgpu.rst
Documentation/gpu/drm-internals.rst
Documentation/hwmon/k10temp.rst
Documentation/networking/device_drivers/ethernet/intel/ice.rst
Documentation/networking/dsa/sja1105.rst
Documentation/process/changes.rst
Documentation/translations/zh_CN/admin-guide/README.rst
Documentation/translations/zh_TW/admin-guide/README.rst
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/alpha/include/asm/asm-prototypes.h
arch/alpha/include/asm/io.h
arch/alpha/include/asm/jensen.h
arch/alpha/include/asm/setup.h [new file with mode: 0644]
arch/alpha/include/uapi/asm/setup.h
arch/alpha/kernel/sys_jensen.c
arch/alpha/lib/Makefile
arch/alpha/lib/udiv-qrnnd.S [new file with mode: 0644]
arch/alpha/math-emu/Makefile
arch/alpha/math-emu/math.c
arch/alpha/math-emu/qrnnd.S [deleted file]
arch/arc/include/asm/pgtable.h
arch/arm/Kconfig
arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
arch/arm/boot/dts/at91-sama7g5ek.dts
arch/arm/boot/dts/bcm2711-rpi-4-b.dts
arch/arm/boot/dts/bcm2711.dtsi
arch/arm/boot/dts/bcm2835-common.dtsi
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/imx53-m53menlo.dts
arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
arch/arm/boot/dts/imx6qdl-pico.dtsi
arch/arm/boot/dts/imx6sx-sdb.dts
arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
arch/arm/boot/dts/omap3430-sdp.dts
arch/arm/boot/dts/qcom-apq8064.dtsi
arch/arm/boot/dts/sama7g5.dtsi
arch/arm/boot/dts/spear3xx.dtsi
arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
arch/arm/boot/dts/vexpress-v2m.dtsi
arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
arch/arm/boot/dts/vexpress-v2p-ca5s.dts
arch/arm/boot/dts/vexpress-v2p-ca9.dts
arch/arm/common/sharpsl_param.c
arch/arm/configs/gemini_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/oxnas_v6_defconfig
arch/arm/configs/shmobile_defconfig
arch/arm/kernel/signal.c
arch/arm/mach-at91/pm.c
arch/arm/mach-at91/pm_suspend.S
arch/arm/mach-dove/include/mach/uncompress.h
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-imx/pm-imx6.c
arch/arm/mach-imx/src.c
arch/arm/mach-omap1/include/mach/memory.h
arch/arm/mach-omap1/usb.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/net/bpf_jit_32.c
arch/arm64/Kconfig
arch/arm64/boot/dts/arm/foundation-v8.dtsi
arch/arm64/boot/dts/arm/fvp-base-revc.dts
arch/arm64/boot/dts/arm/juno-base.dtsi
arch/arm64/boot/dts/arm/juno-motherboard.dtsi
arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
arch/arm64/boot/dts/freescale/imx8mm-evk.dts
arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi
arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
arch/arm64/boot/dts/freescale/imx8mq-evk.dts
arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
arch/arm64/boot/dts/qcom/ipq8074.dtsi
arch/arm64/boot/dts/qcom/pm8150.dtsi
arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
arch/arm64/boot/dts/qcom/sc7280.dtsi
arch/arm64/boot/dts/qcom/sdm630.dtsi
arch/arm64/boot/dts/qcom/sdm845.dtsi
arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
arch/arm64/configs/defconfig
arch/arm64/include/asm/acpi.h
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/mte.h
arch/arm64/include/asm/string.h
arch/arm64/kernel/acpi.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/mte.c
arch/arm64/kernel/process.c
arch/arm64/kernel/signal.c
arch/arm64/kvm/hyp/nvhe/Makefile
arch/arm64/kvm/perf.c
arch/arm64/kvm/pmu-emul.c
arch/arm64/lib/strcmp.S
arch/arm64/lib/strncmp.S
arch/arm64/mm/hugetlbpage.c
arch/csky/Kconfig
arch/csky/include/asm/bitops.h
arch/csky/kernel/ptrace.c
arch/csky/kernel/signal.c
arch/ia64/Kconfig
arch/m68k/68000/entry.S
arch/m68k/Kconfig
arch/m68k/coldfire/entry.S
arch/m68k/include/asm/processor.h
arch/m68k/include/asm/raw_io.h
arch/m68k/include/asm/segment.h [deleted file]
arch/m68k/include/asm/thread_info.h
arch/m68k/include/asm/tlbflush.h
arch/m68k/include/asm/traps.h
arch/m68k/include/asm/uaccess.h
arch/m68k/kernel/asm-offsets.c
arch/m68k/kernel/entry.S
arch/m68k/kernel/process.c
arch/m68k/kernel/signal.c
arch/m68k/kernel/traps.c
arch/m68k/mac/misc.c
arch/m68k/mm/cache.c
arch/m68k/mm/init.c
arch/m68k/mm/kmap.c
arch/m68k/mm/memory.c
arch/m68k/mm/motorola.c
arch/m68k/mvme147/config.c
arch/m68k/mvme16x/config.c
arch/m68k/sun3/config.c
arch/m68k/sun3/mmu_emu.c
arch/m68k/sun3/sun3ints.c
arch/m68k/sun3x/prom.c
arch/mips/Kconfig
arch/mips/include/asm/mips-cps.h
arch/mips/kernel/signal.c
arch/mips/net/bpf_jit.c
arch/nios2/Kconfig.debug
arch/nios2/kernel/setup.c
arch/parisc/Kconfig
arch/parisc/include/asm/page.h
arch/parisc/lib/iomap.c
arch/powerpc/boot/Makefile
arch/powerpc/boot/dts/fsl/t1023rdb.dts
arch/powerpc/include/asm/asm-const.h
arch/powerpc/include/asm/book3s/32/kup.h
arch/powerpc/include/asm/code-patching.h
arch/powerpc/include/asm/interrupt.h
arch/powerpc/include/asm/security_features.h
arch/powerpc/kernel/dma-iommu.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/interrupt.c
arch/powerpc/kernel/interrupt_64.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/mce.c
arch/powerpc/kernel/security.c
arch/powerpc/kernel/signal.c
arch/powerpc/kernel/traps.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/lib/code-patching.c
arch/powerpc/net/bpf_jit.h
arch/powerpc/net/bpf_jit64.h
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/net/bpf_jit_comp32.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/platforms/pseries/eeh_pseries.c
arch/powerpc/platforms/pseries/msi.c
arch/powerpc/sysdev/xics/xics-common.c
arch/riscv/Kconfig
arch/riscv/include/asm/syscall.h
arch/riscv/include/asm/vdso.h
arch/riscv/include/uapi/asm/unistd.h
arch/riscv/kernel/syscall_table.c
arch/riscv/kernel/vdso.c
arch/riscv/kernel/vdso/vdso.lds.S
arch/riscv/mm/cacheflush.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/include/asm/ccwgroup.h
arch/s390/include/asm/pci.h
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/lib/string.c
arch/s390/net/bpf_jit_comp.c
arch/s390/pci/pci.c
arch/s390/pci/pci_event.c
arch/s390/pci/pci_mmio.c
arch/sh/boot/Makefile
arch/sh/include/asm/pgtable-3level.h
arch/sparc/kernel/ioport.c
arch/sparc/kernel/mdesc.c
arch/sparc/lib/iomap.c
arch/x86/Kconfig
arch/x86/Makefile_32.cpu
arch/x86/crypto/sm4-aesni-avx-asm_64.S
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/hyperv/hv_apic.c
arch/x86/include/asm/entry-common.h
arch/x86/include/asm/kvm_page_track.h
arch/x86/include/asm/kvmclock.h
arch/x86/include/asm/pkeys.h
arch/x86/include/asm/special_insns.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/xen/pci.h
arch/x86/include/asm/xen/swiotlb-xen.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/hpet.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/setup.c
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/sev-shared.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/emulate.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/ioapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/page_track.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/evmcs.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/lib/insn.c
arch/x86/mm/fault.c
arch/x86/mm/init_64.c
arch/x86/mm/kasan_init_64.c
arch/x86/mm/numa.c
arch/x86/mm/numa_emulation.c
arch/x86/mm/pat/memtype.c
arch/x86/net/bpf_jit_comp.c
arch/x86/pci/xen.c
arch/x86/platform/olpc/olpc.c
arch/x86/platform/pvh/enlighten.c
arch/x86/xen/Kconfig
arch/x86/xen/Makefile
arch/x86/xen/enlighten.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/enlighten_pvh.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/pci-swiotlb-xen.c
arch/x86/xen/smp_pv.c
arch/x86/xen/xen-ops.h
arch/xtensa/include/asm/kmem_layout.h
arch/xtensa/kernel/irq.c
arch/xtensa/kernel/setup.c
arch/xtensa/mm/mmu.c
arch/xtensa/platforms/xtfpga/setup.c
block/bdev.c
block/bfq-iosched.c
block/bio.c
block/blk-cgroup.c
block/blk-integrity.c
block/blk-mq-debugfs.c
block/blk-mq-tag.c
block/bsg.c
block/fops.c
block/genhd.c
drivers/Kconfig
drivers/acpi/arm64/gtdt.c
drivers/acpi/nfit/core.c
drivers/acpi/osl.c
drivers/acpi/x86/s2idle.c
drivers/android/binder.c
drivers/android/binder_internal.h
drivers/base/arch_numa.c
drivers/base/core.c
drivers/base/power/trace.c
drivers/base/swnode.c
drivers/base/test/Makefile
drivers/block/nbd.c
drivers/bus/Kconfig
drivers/bus/Makefile
drivers/bus/simple-pm-bus.c
drivers/bus/ti-sysc.c
drivers/clk/qcom/Kconfig
drivers/clk/qcom/gcc-sm6115.c
drivers/clk/renesas/r9a07g044-cpg.c
drivers/clk/renesas/rzg2l-cpg.c
drivers/clk/socfpga/clk-agilex.c
drivers/comedi/comedi_fops.c
drivers/cpufreq/cpufreq_governor_attr_set.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/vexpress-spc-cpufreq.c
drivers/crypto/ccp/ccp-ops.c
drivers/edac/dmc520_edac.c
drivers/edac/synopsys_edac.c
drivers/firmware/Kconfig
drivers/firmware/arm_ffa/bus.c
drivers/firmware/arm_scmi/Kconfig
drivers/firmware/arm_scmi/virtio.c
drivers/fpga/dfl.c
drivers/fpga/ice40-spi.c
drivers/fpga/machxo2-spi.c
drivers/gpio/gpio-74x164.c
drivers/gpio/gpio-aspeed-sgpio.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-rockchip.c
drivers/gpio/gpio-uniphier.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/Kconfig
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/include/dal_asic_id.h
drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h
drivers/gpu/drm/amd/pm/inc/smu_types.h
drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h
drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/etnaviv_iommu.c
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/hyperv/hyperv_drm.h
drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
drivers/gpu/drm/hyperv/hyperv_drm_proto.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_acpi.c
drivers/gpu/drm/i915/display/intel_audio.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_bw.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_dmc.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/display/intel_vbt_defs.h
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
drivers/gpu/drm/i915/gt/uc/intel_uc.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/kmb/kmb_drv.c
drivers/gpu/drm/kmb/kmb_drv.h
drivers/gpu/drm/kmb/kmb_plane.c
drivers/gpu/drm/kmb/kmb_plane.h
drivers/gpu/drm/kmb/kmb_regs.h
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.h
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/dsi/dsi.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
drivers/gpu/drm/msm/edp/edp_ctrl.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_gpu_devfreq.c
drivers/gpu/drm/msm/msm_submitqueue.c
drivers/gpu/drm/nouveau/dispnv50/crc.c
drivers/gpu/drm/nouveau/dispnv50/head.c
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nv84_fence.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/panel-abt-y030xx067a.c
drivers/gpu/drm/r128/ati_pcigart.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
drivers/gpu/drm/rcar-du/rcar_lvds.c
drivers/gpu/drm/rcar-du/rcar_lvds.h
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dc.h
drivers/gpu/drm/tegra/uapi.c
drivers/gpu/drm/ttm/ttm_pool.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/host1x/fence.c
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
drivers/hid/hid-apple.c
drivers/hid/hid-betopff.c
drivers/hid/hid-u2fzero.c
drivers/hid/wacom_wac.c
drivers/hv/ring_buffer.c
drivers/hwmon/k10temp.c
drivers/hwmon/ltc2947-core.c
drivers/hwmon/mlxreg-fan.c
drivers/hwmon/occ/common.c
drivers/hwmon/pmbus/ibm-cffps.c
drivers/hwmon/pmbus/mp2975.c
drivers/hwmon/tmp421.c
drivers/hwmon/w83791d.c
drivers/hwmon/w83792d.c
drivers/hwmon/w83793.c
drivers/hwtracing/coresight/coresight-syscfg.c
drivers/i2c/busses/i2c-mlxcpld.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/i2c-core-acpi.c
drivers/iio/accel/fxls8962af-core.c
drivers/iio/adc/ad7192.c
drivers/iio/adc/ad7780.c
drivers/iio/adc/ad7793.c
drivers/iio/adc/aspeed_adc.c
drivers/iio/adc/max1027.c
drivers/iio/adc/mt6577_auxadc.c
drivers/iio/adc/rzg2l_adc.c
drivers/iio/adc/ti-adc128s052.c
drivers/iio/common/ssp_sensors/ssp_spi.c
drivers/iio/dac/ti-dac5571.c
drivers/iio/imu/adis16475.c
drivers/iio/imu/adis16480.c
drivers/iio/light/opt3001.c
drivers/iio/test/Makefile
drivers/infiniband/core/cma.c
drivers/infiniband/core/cma_priv.h
drivers/infiniband/hw/hfi1/ipoib_tx.c
drivers/infiniband/hw/hns/hns_roce_cq.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/irdma/cm.c
drivers/infiniband/hw/irdma/hw.c
drivers/infiniband/hw/irdma/i40iw_if.c
drivers/infiniband/hw/irdma/main.h
drivers/infiniband/hw/irdma/user.h
drivers/infiniband/hw/irdma/utils.c
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/hw/qib/qib_sysfs.c
drivers/infiniband/hw/usnic/usnic_ib.h
drivers/infiniband/hw/usnic/usnic_ib_main.c
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/snvs_pwrkey.c
drivers/input/touchscreen.c
drivers/input/touchscreen/resistive-adc-touch.c
drivers/interconnect/qcom/sdm660.c
drivers/iommu/Kconfig
drivers/iommu/apple-dart.c
drivers/iommu/arm/arm-smmu/Makefile
drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
drivers/iommu/intel/dmar.c
drivers/ipack/devices/ipoctal.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-mbigen.c
drivers/irqchip/irq-renesas-rza1.c
drivers/isdn/capi/kcapi.c
drivers/isdn/hardware/mISDN/netjet.c
drivers/macintosh/smu.c
drivers/mcb/mcb-core.c
drivers/md/dm-clone-target.c
drivers/md/dm-rq.c
drivers/md/dm-verity-target.c
drivers/md/dm.c
drivers/md/md.c
drivers/media/platform/Kconfig
drivers/media/platform/s5p-jpeg/jpeg-core.c
drivers/media/platform/s5p-jpeg/jpeg-core.h
drivers/media/rc/ir_toy.c
drivers/misc/Kconfig
drivers/misc/bcm-vk/bcm_vk_tty.c
drivers/misc/cb710/sgbuf2.c
drivers/misc/eeprom/at25.c
drivers/misc/eeprom/eeprom_93xx46.c
drivers/misc/fastrpc.c
drivers/misc/gehc-achc.c
drivers/misc/genwqe/card_base.c
drivers/misc/habanalabs/common/command_submission.c
drivers/misc/habanalabs/common/hw_queue.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/misc/habanalabs/gaudi/gaudi_security.c
drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
drivers/misc/mei/hbm.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/pci-me.c
drivers/mmc/host/Kconfig
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mtd/nand/raw/qcom_nandc.c
drivers/net/dsa/b53/b53_mdio.c
drivers/net/dsa/b53/b53_mmap.c
drivers/net/dsa/b53/b53_priv.h
drivers/net/dsa/b53/b53_spi.c
drivers/net/dsa/b53/b53_srab.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/dsa_loop.c
drivers/net/dsa/hirschmann/hellcreek.c
drivers/net/dsa/lan9303-core.c
drivers/net/dsa/lan9303.h
drivers/net/dsa/lan9303_i2c.c
drivers/net/dsa/lan9303_mdio.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/microchip/ksz8795_spi.c
drivers/net/dsa/microchip/ksz8863_smi.c
drivers/net/dsa/microchip/ksz9477_i2c.c
drivers/net/dsa/microchip/ksz9477_spi.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/mt7530.c
drivers/net/dsa/mv88e6060.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/chip.h
drivers/net/dsa/mv88e6xxx/devlink.c
drivers/net/dsa/mv88e6xxx/devlink.h
drivers/net/dsa/mv88e6xxx/global1.c
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/dsa/mv88e6xxx/port.h
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/ocelot/felix.h
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/ocelot/seville_vsc9953.c
drivers/net/dsa/qca/ar9331.c
drivers/net/dsa/qca8k.c
drivers/net/dsa/realtek-smi-core.c
drivers/net/dsa/sja1105/sja1105_clocking.c
drivers/net/dsa/sja1105/sja1105_devlink.c
drivers/net/dsa/sja1105/sja1105_flower.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/dsa/sja1105/sja1105_mdio.c
drivers/net/dsa/sja1105/sja1105_ptp.c
drivers/net/dsa/sja1105/sja1105_ptp.h
drivers/net/dsa/sja1105/sja1105_spi.c
drivers/net/dsa/sja1105/sja1105_static_config.c
drivers/net/dsa/sja1105/sja1105_static_config.h
drivers/net/dsa/sja1105/sja1105_vl.c
drivers/net/dsa/sja1105/sja1105_vl.h
drivers/net/dsa/vitesse-vsc73xx-core.c
drivers/net/dsa/vitesse-vsc73xx-platform.c
drivers/net/dsa/vitesse-vsc73xx-spi.c
drivers/net/dsa/vitesse-vsc73xx.h
drivers/net/dsa/xrs700x/xrs700x.c
drivers/net/dsa/xrs700x/xrs700x.h
drivers/net/dsa/xrs700x/xrs700x_i2c.c
drivers/net/dsa/xrs700x/xrs700x_mdio.c
drivers/net/ethernet/3com/3c515.c
drivers/net/ethernet/8390/ne.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/amd/ni65.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/arc/Kconfig
drivers/net/ethernet/broadcom/bgmac-bcma.c
drivers/net/ethernet/broadcom/bgmac-platform.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/cadence/macb_pci.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/enetc/enetc_ierb.c
drivers/net/ethernet/freescale/enetc/enetc_ierb.h
drivers/net/ethernet/freescale/enetc/enetc_pf.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/google/gve/gve.h
drivers/net/ethernet/google/gve/gve_main.c
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns_mdio.c
drivers/net/ethernet/i825xx/82596.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_idc.c
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/cq.c
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
drivers/net/ethernet/micrel/Makefile
drivers/net/ethernet/micrel/ks8851_common.c
drivers/net/ethernet/microchip/encx24j600-regmap.c
drivers/net/ethernet/microchip/encx24j600.c
drivers/net/ethernet/microchip/encx24j600_hw.h
drivers/net/ethernet/microsoft/mana/hw_channel.c
drivers/net/ethernet/microsoft/mana/mana_en.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_devlink.c
drivers/net/ethernet/mscc/ocelot_mrp.c
drivers/net/ethernet/mscc/ocelot_net.c
drivers/net/ethernet/mscc/ocelot_vcap.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
drivers/net/ethernet/pensando/ionic/ionic_stats.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/rdc/r6040.c
drivers/net/ethernet/sfc/efx_channels.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/Kconfig
drivers/net/hamradio/6pack.c
drivers/net/hamradio/Kconfig
drivers/net/hamradio/dmascc.c
drivers/net/ipa/Kconfig
drivers/net/ipa/ipa_table.c
drivers/net/mdio/mdio-ipq4019.c
drivers/net/mdio/mdio-mscc-miim.c
drivers/net/mhi_net.c
drivers/net/pcs/pcs-xpcs-nxp.c
drivers/net/pcs/pcs-xpcs.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/dp83640_reg.h
drivers/net/phy/mdio_bus.c
drivers/net/phy/mdio_device.c
drivers/net/phy/mxl-gpy.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp.c
drivers/net/usb/Kconfig
drivers/net/usb/hso.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc95xx.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/Makefile
drivers/net/wireless/ath/ath10k/Kconfig
drivers/net/wireless/ath/ath5k/Kconfig
drivers/net/wireless/ath/ath5k/led.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/marvell/mwifiex/sta_tx.c
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
drivers/net/xen-netback/netback.c
drivers/nfc/st-nci/spi.c
drivers/nvdimm/pmem.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/configfs.c
drivers/nvmem/Kconfig
drivers/nvmem/core.c
drivers/of/base.c
drivers/of/device.c
drivers/of/property.c
drivers/pci/Kconfig
drivers/pci/controller/pci-hyperv.c
drivers/pci/hotplug/s390_pci_hpc.c
drivers/pci/msi.c
drivers/pci/pci-acpi.c
drivers/pci/quirks.c
drivers/pci/vpd.c
drivers/perf/arm_pmu.c
drivers/pinctrl/core.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-amd.h
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/pinctrl-rockchip.h
drivers/pinctrl/qcom/Kconfig
drivers/pinctrl/qcom/pinctrl-sc7280.c
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
drivers/platform/mellanox/mlxreg-io.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/dell/Kconfig
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/intel/hid.c
drivers/platform/x86/intel/int1092/intel_sar.c
drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c
drivers/platform/x86/intel/punit_ipc.c
drivers/platform/x86/intel_scu_ipc.c
drivers/platform/x86/lg-laptop.c
drivers/platform/x86/touchscreen_dmi.c
drivers/ptp/Kconfig
drivers/ptp/ptp_kvm_x86.c
drivers/ptp/ptp_pch.c
drivers/regulator/max14577-regulator.c
drivers/regulator/qcom-rpmh-regulator.c
drivers/rtc/rtc-cmos.c
drivers/s390/char/sclp_early.c
drivers/s390/cio/blacklist.c
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/css.c
drivers/s390/cio/css.h
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/vfio_ap_ops.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/arm/Kconfig
drivers/scsi/arm/acornscsi.c
drivers/scsi/arm/fas216.c
drivers/scsi/arm/queue.c
drivers/scsi/csiostor/csio_init.c
drivers/scsi/elx/efct/efct_lio.c
drivers/scsi/elx/efct/efct_scsi.c
drivers/scsi/elx/libefc/efc_device.c
drivers/scsi/elx/libefc/efc_fabric.c
drivers/scsi/libiscsi.c
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/ncr53c8xx.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sd_zbc.c
drivers/scsi/ses.c
drivers/scsi/sr_ioctl.c
drivers/scsi/st.c
drivers/scsi/ufs/ufshcd-pci.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/ufshpb.c
drivers/scsi/virtio_scsi.c
drivers/soc/canaan/Kconfig
drivers/soc/qcom/mdt_loader.c
drivers/soc/qcom/socinfo.c
drivers/soc/ti/omap_prm.c
drivers/spi/spi-atmel.c
drivers/spi/spi-bcm-qspi.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-mux.c
drivers/spi/spi-nxp-fspi.c
drivers/spi/spi-rockchip.c
drivers/spi/spi-tegra20-slink.c
drivers/spi/spi.c
drivers/spi/spidev.c
drivers/staging/greybus/uart.c
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
drivers/staging/media/hantro/hantro_drv.c
drivers/staging/media/sunxi/cedrus/cedrus_video.c
drivers/staging/r8188eu/hal/hal_intf.c
drivers/staging/r8188eu/os_dep/ioctl_linux.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
drivers/target/target_core_configfs.c
drivers/target/target_core_pr.c
drivers/tee/optee/core.c
drivers/tee/optee/device.c
drivers/tee/optee/optee_private.h
drivers/tee/optee/shm_pool.c
drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
drivers/thermal/qcom/tsens.c
drivers/thermal/thermal_core.c
drivers/thunderbolt/Makefile
drivers/tty/hvc/hvc_xen.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/8250/Kconfig
drivers/tty/serial/mvebu-uart.c
drivers/tty/synclink_gt.c
drivers/tty/tty_ldisc.c
drivers/usb/cdns3/cdns3-gadget.c
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/class/cdc-wdm.c
drivers/usb/common/Kconfig
drivers/usb/core/hcd.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/u_audio.c
drivers/usb/gadget/udc/r8a66597-udc.c
drivers/usb/host/bcma-hcd.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ohci-omap.c
drivers/usb/host/xhci-dbgtty.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/tusb6010.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/storage/unusual_devs.h
drivers/usb/storage/unusual_uas.h
drivers/usb/typec/tcpm/tcpci.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/tipd/core.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa_user/vduse_dev.c
drivers/vfio/pci/vfio_pci_core.c
drivers/vhost/net.c
drivers/vhost/vdpa.c
drivers/video/fbdev/Kconfig
drivers/video/fbdev/gbefb.c
drivers/virtio/virtio.c
drivers/watchdog/Kconfig
drivers/xen/Kconfig
drivers/xen/balloon.c
drivers/xen/gntdev.c
drivers/xen/privcmd.c
drivers/xen/swiotlb-xen.c
fs/9p/cache.c
fs/9p/fid.c
fs/9p/v9fs.c
fs/9p/vfs_addr.c
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/afs/callback.c
fs/afs/cell.c
fs/afs/dir.c
fs/afs/dir_edit.c
fs/afs/dir_silly.c
fs/afs/file.c
fs/afs/fs_probe.c
fs/afs/fsclient.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/protocol_afs.h [new file with mode: 0644]
fs/afs/protocol_yfs.h
fs/afs/rotate.c
fs/afs/server.c
fs/afs/super.c
fs/afs/write.c
fs/binfmt_elf.c
fs/btrfs/ctree.h
fs/btrfs/dir-item.c
fs/btrfs/extent-tree.c
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/space-info.c
fs/btrfs/tree-log.c
fs/btrfs/verity.c
fs/btrfs/volumes.c
fs/buffer.c
fs/ceph/caps.c
fs/cifs/cache.c
fs/cifs/cifs_debug.c
fs/cifs/cifs_fs_sb.h
fs/cifs/cifs_ioctl.h
fs/cifs/cifs_spnego.c
fs/cifs/cifs_spnego.h
fs/cifs/cifs_unicode.c
fs/cifs/cifsacl.c
fs/cifs/cifsacl.h
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/dns_resolve.c
fs/cifs/dns_resolve.h
fs/cifs/export.c
fs/cifs/file.c
fs/cifs/fscache.c
fs/cifs/fscache.h
fs/cifs/inode.c
fs/cifs/ioctl.c
fs/cifs/link.c
fs/cifs/misc.c
fs/cifs/netmisc.c
fs/cifs/ntlmssp.h
fs/cifs/readdir.c
fs/cifs/rfc1002pdu.h
fs/cifs/sess.c
fs/cifs/smb2file.c
fs/cifs/smb2glob.h
fs/cifs/smb2inode.c
fs/cifs/smb2misc.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/cifs/smb2status.h
fs/cifs/smb2transport.c
fs/cifs/smberr.h
fs/cifs/transport.c
fs/cifs/winucase.c
fs/cifs/xattr.c
fs/debugfs/inode.c
fs/erofs/inode.c
fs/erofs/zmap.c
fs/ext2/balloc.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/fast_commit.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/super.c
fs/fscache/object.c
fs/fscache/operation.c
fs/inode.c
fs/io-wq.c
fs/io_uring.c
fs/kernfs/dir.c
fs/ksmbd/auth.c
fs/ksmbd/connection.c
fs/ksmbd/crypto_ctx.c
fs/ksmbd/crypto_ctx.h
fs/ksmbd/glob.h
fs/ksmbd/misc.c
fs/ksmbd/misc.h
fs/ksmbd/oplock.c
fs/ksmbd/server.c
fs/ksmbd/smb2misc.c
fs/ksmbd/smb2ops.c
fs/ksmbd/smb2pdu.c
fs/ksmbd/smb2pdu.h
fs/ksmbd/smb_common.c
fs/ksmbd/smb_common.h
fs/ksmbd/smbacl.c
fs/ksmbd/transport_rdma.c
fs/ksmbd/transport_tcp.c
fs/ksmbd/vfs.c
fs/ksmbd/vfs.h
fs/lockd/svcxdr.h
fs/netfs/read_helper.c
fs/nfs_common/grace.c
fs/nfsd/filecache.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfsctl.c
fs/ntfs3/attrib.c
fs/ntfs3/attrlist.c
fs/ntfs3/bitfunc.c
fs/ntfs3/bitmap.c
fs/ntfs3/debug.h
fs/ntfs3/dir.c
fs/ntfs3/file.c
fs/ntfs3/frecord.c
fs/ntfs3/fslog.c
fs/ntfs3/fsntfs.c
fs/ntfs3/index.c
fs/ntfs3/inode.c
fs/ntfs3/lib/decompress_common.h
fs/ntfs3/lib/lib.h
fs/ntfs3/lznt.c
fs/ntfs3/namei.c
fs/ntfs3/ntfs.h
fs/ntfs3/ntfs_fs.h
fs/ntfs3/record.c
fs/ntfs3/run.c
fs/ntfs3/super.c
fs/ntfs3/upcase.c
fs/ntfs3/xattr.c
fs/ocfs2/dlmglue.c
fs/overlayfs/dir.c
fs/overlayfs/file.c
fs/qnx4/dir.c
fs/smbfs_common/smbfsctl.h
fs/vboxsf/super.c
fs/verity/enable.c
fs/verity/open.c
include/acpi/acpi_io.h
include/asm-generic/io.h
include/asm-generic/iomap.h
include/asm-generic/mshyperv.h
include/asm-generic/pci_iomap.h
include/asm-generic/vmlinux.lds.h
include/kunit/test.h
include/kvm/arm_pmu.h
include/linux/arm-smccc.h
include/linux/bpf.h
include/linux/buffer_head.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/compiler-clang.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/compiler_attributes.h
include/linux/cpumask.h
include/linux/dsa/mv88e6xxx.h [new file with mode: 0644]
include/linux/dsa/ocelot.h
include/linux/dsa/sja1105.h
include/linux/etherdevice.h
include/linux/fwnode.h
include/linux/irqdomain.h
include/linux/kvm_host.h
include/linux/mdio.h
include/linux/memblock.h
include/linux/migrate.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mmap_lock.h
include/linux/nvmem-consumer.h
include/linux/overflow.h
include/linux/packing.h
include/linux/perf/arm_pmu.h
include/linux/perf_event.h
include/linux/pkeys.h
include/linux/platform_data/usb-omap1.h
include/linux/qcom_scm.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/spi/spi.h
include/linux/tracehook.h
include/linux/uio.h
include/linux/usb/hcd.h
include/linux/workqueue.h
include/net/dsa.h
include/net/ip_fib.h
include/net/mac80211.h
include/net/netfilter/ipv6/nf_defrag_ipv6.h
include/net/netfilter/nf_tables.h
include/net/netns/netfilter.h
include/net/nexthop.h
include/net/pkt_sched.h
include/net/sock.h
include/scsi/scsi_device.h
include/soc/mscc/ocelot.h
include/soc/mscc/ocelot_ptp.h
include/soc/mscc/ocelot_vcap.h
include/sound/hda_codec.h
include/sound/rawmidi.h
include/trace/events/afs.h
include/trace/events/cachefiles.h
include/trace/events/erofs.h
include/uapi/linux/android/binder.h
include/uapi/linux/cifs/cifs_mount.h
include/uapi/linux/hyperv.h
include/uapi/linux/io_uring.h
include/uapi/linux/xfrm.h
include/uapi/misc/habanalabs.h
include/uapi/sound/asound.h
include/xen/xen-ops.h
init/do_mounts.c
init/main.c
ipc/sem.c
kernel/bpf/bpf_struct_ops.c
kernel/bpf/core.c
kernel/bpf/disasm.c
kernel/bpf/disasm.h
kernel/bpf/stackmap.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/cgroup/cpuset.c
kernel/dma/debug.c
kernel/dma/mapping.c
kernel/entry/common.c
kernel/events/core.c
kernel/irq/irqdomain.c
kernel/locking/rwbase_rt.c
kernel/module.c
kernel/printk/printk.c
kernel/rseq.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/time/posix-cpu-timers.c
kernel/trace/blktrace.c
kernel/trace/trace.c
kernel/trace/trace_eprobe.c
kernel/trace/trace_events_hist.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Kconfig.kasan
lib/Makefile
lib/bootconfig.c
lib/iov_iter.c
lib/kunit/executor_test.c
lib/packing.c
lib/pci_iomap.c
lib/zlib_inflate/inffast.c
mm/damon/dbgfs-test.h
mm/debug.c
mm/ksm.c
mm/memblock.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/shmem.c
mm/swap.c
mm/util.c
mm/workingset.c
net/bpf/test_run.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/caif/chnl_net.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/net-procfs.c
net/core/netclassid_cgroup.c
net/core/netprio_cgroup.c
net/core/rtnetlink.c
net/core/sock.c
net/dccp/minisocks.c
net/dsa/Kconfig
net/dsa/dsa.c
net/dsa/dsa2.c
net/dsa/dsa_priv.h
net/dsa/slave.c
net/dsa/switch.c
net/dsa/tag_dsa.c
net/dsa/tag_ocelot.c
net/dsa/tag_ocelot_8021q.c
net/dsa/tag_sja1105.c
net/ipv4/fib_semantics.c
net/ipv4/icmp.c
net/ipv4/inet_hashtables.c
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/nexthop.c
net/ipv4/tcp_input.c
net/ipv4/udp.c
net/ipv4/udp_tunnel_nic.c
net/ipv6/inet6_hashtables.c
net/ipv6/ioam6.c
net/ipv6/ioam6_iptunnel.c
net/ipv6/ip6_fib.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/route.c
net/ipv6/udp.c
net/l2tp/l2tp_core.c
net/mac80211/mesh_pathtbl.c
net/mac80211/mesh_ps.c
net/mac80211/rate.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mac80211/wpa.c
net/mctp/route.c
net/mptcp/mptcp_diag.c
net/mptcp/pm_netlink.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/mptcp/syncookies.c
net/mptcp/token.c
net/mptcp/token_test.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_masquerade.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_compat.c
net/netfilter/nft_quota.c
net/netfilter/xt_LOG.c
net/netfilter/xt_NFLOG.c
net/netlink/af_netlink.c
net/nfc/af_nfc.c
net/nfc/digital_core.c
net/nfc/digital_technology.c
net/nfc/nci/rsp.c
net/packet/af_packet.c
net/sched/cls_flower.c
net/sched/sch_api.c
net/sched/sch_fifo.c
net/sched/sch_mqprio.c
net/sched/sch_taprio.c
net/sctp/input.c
net/sctp/sm_make_chunk.c
net/smc/smc_cdc.c
net/smc/smc_clc.c
net/smc/smc_core.c
net/smc/smc_llc.c
net/smc/smc_tx.c
net/smc/smc_wr.h
net/sunrpc/auth_gss/svcauth_gss.c
net/tipc/socket.c
net/unix/af_unix.c
net/xfrm/xfrm_user.c
samples/bpf/Makefile
samples/bpf/bpf_insn.h
samples/bpf/xdp_redirect_map_multi.bpf.c
scripts/Makefile.clang
scripts/Makefile.gcc-plugins
scripts/Makefile.kasan
scripts/Makefile.modpost
scripts/checkkconfigsymbols.py
scripts/checksyscalls.sh
scripts/clang-tools/gen_compile_commands.py
scripts/min-tool-version.sh
scripts/recordmcount.pl
scripts/sorttable.c
security/selinux/hooks.c
security/selinux/nlmsgtab.c
security/smack/smack_lsm.c
sound/core/pcm_compat.c
sound/core/rawmidi.c
sound/core/seq_device.c
sound/drivers/pcsp/pcsp_lib.c
sound/firewire/motu/amdtp-motu.c
sound/firewire/oxfw/oxfw.c
sound/hda/hdac_controller.c
sound/pci/hda/hda_bind.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_controller.h
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_intel.h
sound/pci/hda/patch_cs8409.c
sound/pci/hda/patch_realtek.c
sound/pci/pcxhr/pcxhr_core.c
sound/soc/fsl/fsl_esai.c
sound/soc/fsl/fsl_micfil.c
sound/soc/fsl/fsl_sai.c
sound/soc/fsl/fsl_spdif.c
sound/soc/fsl/fsl_xcvr.c
sound/soc/intel/boards/sof_sdw.c
sound/soc/mediatek/Kconfig
sound/soc/mediatek/common/mtk-afe-fe-dai.c
sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
sound/soc/sof/core.c
sound/soc/sof/imx/imx8.c
sound/soc/sof/imx/imx8m.c
sound/soc/sof/loader.c
sound/soc/sof/trace.c
sound/soc/sof/xtensa/core.c
sound/usb/card.c
sound/usb/mixer.c
sound/usb/mixer.h
sound/usb/mixer_quirks.c
sound/usb/mixer_scarlett_gen2.c
sound/usb/quirks-table.h
sound/usb/quirks.c
tools/arch/x86/include/asm/unistd_32.h [deleted file]
tools/arch/x86/include/asm/unistd_64.h [deleted file]
tools/arch/x86/include/uapi/asm/unistd_32.h [new file with mode: 0644]
tools/arch/x86/include/uapi/asm/unistd_64.h [new file with mode: 0644]
tools/arch/x86/lib/insn.c
tools/bootconfig/include/linux/memblock.h
tools/include/linux/compiler-gcc.h
tools/include/linux/overflow.h
tools/include/uapi/sound/asound.h
tools/lib/bpf/libbpf.c
tools/lib/bpf/linker.c
tools/lib/bpf/strset.c
tools/lib/perf/evsel.c
tools/lib/perf/tests/test-evlist.c
tools/lib/perf/tests/test-evsel.c
tools/objtool/arch/x86/decode.c
tools/objtool/check.c
tools/objtool/elf.c
tools/objtool/include/objtool/elf.h
tools/objtool/orc_gen.c
tools/objtool/special.c
tools/perf/Documentation/jitdump-specification.txt
tools/perf/Documentation/perf-c2c.txt
tools/perf/Documentation/perf-intel-pt.txt
tools/perf/Documentation/perf-lock.txt
tools/perf/Documentation/perf-script-perl.txt
tools/perf/Documentation/perf-script-python.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/topdown.txt
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/arm/util/auxtrace.c
tools/perf/arch/arm/util/cs-etm.c
tools/perf/arch/arm/util/perf_regs.c
tools/perf/arch/arm/util/pmu.c
tools/perf/arch/arm/util/unwind-libdw.c
tools/perf/arch/arm/util/unwind-libunwind.c
tools/perf/arch/x86/util/iostat.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/pmu-events/arch/powerpc/power8/other.json
tools/perf/pmu-events/jevents.c
tools/perf/tests/attr/test-stat-default
tools/perf/tests/attr/test-stat-detailed-1
tools/perf/tests/attr/test-stat-detailed-2
tools/perf/tests/attr/test-stat-detailed-3
tools/perf/tests/code-reading.c
tools/perf/tests/dwarf-unwind.c
tools/perf/ui/browser.c
tools/perf/ui/browser.h
tools/perf/ui/browsers/annotate.c
tools/perf/util/bpf-event.c
tools/perf/util/config.c
tools/perf/util/machine.c
tools/perf/util/session.c
tools/testing/kunit/kunit.py
tools/testing/kunit/kunit_tool_test.py
tools/testing/selftests/arm64/signal/test_signals_utils.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/cgroup_helpers.c
tools/testing/selftests/bpf/cgroup_helpers.h
tools/testing/selftests/bpf/network_helpers.c
tools/testing/selftests/bpf/network_helpers.h
tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/task_pt_regs.c
tools/testing/selftests/bpf/progs/connect4_dropper.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_task_pt_regs.c
tools/testing/selftests/bpf/test_lwt_ip_encap.sh
tools/testing/selftests/drivers/dma-buf/udmabuf.c
tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/access_tracking_perf_test.c
tools/testing/selftests/kvm/demand_paging_test.c
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/include/test_util.h
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/kvm_page_table_test.c
tools/testing/selftests/kvm/lib/test_util.c
tools/testing/selftests/kvm/rseq_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/steal_time.c
tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
tools/testing/selftests/lib.mk
tools/testing/selftests/nci/nci_dev.c
tools/testing/selftests/net/af_unix/Makefile
tools/testing/selftests/net/af_unix/test_unix_oob.c
tools/testing/selftests/net/altnames.sh
tools/testing/selftests/net/ioam6.sh
tools/testing/selftests/net/ioam6_parser.c
tools/testing/selftests/netfilter/nft_nat_zones.sh [new file with mode: 0755]
tools/testing/selftests/netfilter/nft_zones_many.sh [new file with mode: 0755]
tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
tools/testing/selftests/powerpc/tm/tm-syscall.c
tools/usb/testusb.c
tools/vm/page-types.c
virt/kvm/kvm_main.c

diff --git a/CREDITS b/CREDITS
index 7ef7b136e71d2d9dbc224aa435cb824a1aa1c65e..d8f63e8329e8fccbb8cc53d8d15a5e18c5390d64 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -971,6 +971,7 @@ D: PowerPC
 N: Daniel Drake
 E: dsd@gentoo.org
 D: USBAT02 CompactFlash support in usb-storage
+D: ZD1211RW wireless driver
 S: UK
 
 N: Oleg Drokin
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 35314b63008c258209d4454a86692fed462e3c46..caa3c09a5c3f91f73fd386370eb415c5e71ee9eb 100644 (file)
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -259,7 +259,7 @@ Configuring the kernel
 Compiling the kernel
 --------------------
 
- - Make sure you have at least gcc 4.9 available.
+ - Make sure you have at least gcc 5.1 available.
    For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
 
    Please note that you can still run a.out user programs with this kernel.
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index babbe04c8d37d8a9c95c93a6916c47137672c816..4d8c27eca96be72ea0df121978ad04dd588e6ae3 100644 (file)
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1226,7 +1226,7 @@ PAGE_SIZE multiple when read back.
 
        Note that all fields in this file are hierarchical and the
        file modified event can be generated due to an event down the
-       hierarchy. For for the local events at the cgroup level see
+       hierarchy. For the local events at the cgroup level see
        memory.events.local.
 
          low
@@ -2170,19 +2170,19 @@ existing device files.
 
 Cgroup v2 device controller has no interface files and is implemented
 on top of cgroup BPF. To control access to device files, a user may
-create bpf programs of the BPF_CGROUP_DEVICE type and attach them
-to cgroups. On an attempt to access a device file, corresponding
-BPF programs will be executed, and depending on the return value
-the attempt will succeed or fail with -EPERM.
-
-A BPF_CGROUP_DEVICE program takes a pointer to the bpf_cgroup_dev_ctx
-structure, which describes the device access attempt: access type
-(mknod/read/write) and device (type, major and minor numbers).
-If the program returns 0, the attempt fails with -EPERM, otherwise
-it succeeds.
-
-An example of BPF_CGROUP_DEVICE program may be found in the kernel
-source tree in the tools/testing/selftests/bpf/progs/dev_cgroup.c file.
+create bpf programs of type BPF_PROG_TYPE_CGROUP_DEVICE and attach
+them to cgroups with BPF_CGROUP_DEVICE flag. On an attempt to access a
+device file, corresponding BPF programs will be executed, and depending
+on the return value the attempt will succeed or fail with -EPERM.
+
+A BPF_PROG_TYPE_CGROUP_DEVICE program takes a pointer to the
+bpf_cgroup_dev_ctx structure, which describes the device access attempt:
+access type (mknod/read/write) and device (type, major and minor numbers).
+If the program returns 0, the attempt fails with -EPERM, otherwise it
+succeeds.
+
+An example of BPF_PROG_TYPE_CGROUP_DEVICE program may be found in
+tools/testing/selftests/bpf/progs/dev_cgroup.c in the kernel source tree.
 
 
 RDMA
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 91ba391f9b328b46ecce8c3c187d8e8c9238f0fc..43dc35fe5bc038ee93bd32f0c7fe641038427bd1 100644 (file)
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
                        The VGA and EFI output is eventually overwritten by
                        the real console.
 
-                       The xen output can only be used by Xen PV guests.
+                       The xen option can only be used in Xen domains.
 
                        The sclp output can only be used on s390.
 
diff --git a/Documentation/core-api/irq/irq-domain.rst b/Documentation/core-api/irq/irq-domain.rst
index 6979b4af2c1f1df5ee2c4aacabb95cdfc5a67b68..9c0e8758037a1efbdcd9d9d26b387012444da67b 100644 (file)
--- a/Documentation/core-api/irq/irq-domain.rst
+++ b/Documentation/core-api/irq/irq-domain.rst
@@ -175,9 +175,10 @@ for IRQ numbers that are passed to struct device registrations.  In that
 case the Linux IRQ numbers cannot be dynamically assigned and the legacy
 mapping should be used.
 
-As the name implies, the *_legacy() functions are deprecated and only
+As the name implies, the \*_legacy() functions are deprecated and only
 exist to ease the support of ancient platforms. No new users should be
-added.
+added. Same goes for the \*_simple() functions when their use results
+in the legacy behaviour.
 
 The legacy map assumes a contiguous range of IRQ numbers has already
 been allocated for the controller and that the IRQ number can be
diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml
index b962fa6d649c35ab4da1c20ddc399d202a06833e..d79d36ac0c44af58d316483a1a0fedd91ed720f4 100644 (file)
--- a/Documentation/devicetree/bindings/arm/tegra.yaml
+++ b/Documentation/devicetree/bindings/arm/tegra.yaml
@@ -54,7 +54,7 @@ properties:
           - const: toradex,apalis_t30
           - const: nvidia,tegra30
       - items:
-          - const: toradex,apalis_t30-eval-v1.1
+          - const: toradex,apalis_t30-v1.1-eval
           - const: toradex,apalis_t30-eval
           - const: toradex,apalis_t30-v1.1
           - const: toradex,apalis_t30
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
index 07b20383cbca0db82f22861df8ed5263063c9222..b446d0f0f1b4f1e29d9139522f8c81fc2e0e0dd1 100644 (file)
--- a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
@@ -50,7 +50,6 @@ properties:
               data-lanes:
                 description: array of physical DSI data lane indexes.
                 minItems: 1
-                maxItems: 4
                 items:
                   - const: 1
                   - const: 2
@@ -71,7 +70,6 @@ properties:
               data-lanes:
                 description: array of physical DSI data lane indexes.
                 minItems: 1
-                maxItems: 4
                 items:
                   - const: 1
                   - const: 2
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
index 1c2daf7c24cc04173517c5a0e53c776dc9ff2dc0..911564468c5e0f86132b3600355d7b813893c827 100644 (file)
--- a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
@@ -18,7 +18,7 @@ properties:
     const: ti,sn65dsi86
 
   reg:
-    const: 0x2d
+    enum: [ 0x2c, 0x2d ]
 
   enable-gpios:
     maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index fbb59c9ddda65d9cbc8cccb073bd75e7b0a694d7..78044c340e2089144fdc2dabe6765e488c36dd2e 100644 (file)
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -9,7 +9,7 @@ function block.
 
 All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
 For a description of the MMSYS_CONFIG binding, see
-Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt.
+Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
 
 DISP function blocks
 ====================
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
index 2ed010f91e2d6db40f0cdc3ba05d7573a9591530..20ce88ab4b3a4513334ac9e4daa68620890780e4 100644 (file)
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
@@ -22,7 +22,7 @@ properties:
     items:
       - enum:
           # ili9341 240*320 Color on stm32f429-disco board
-        - st,sf-tc240t-9370-t
+          - st,sf-tc240t-9370-t
       - const: ilitek,ili9341
 
   reg: true
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml b/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
index 29de7807df54ec4713d48d972e2e0035d8a416b0..bcd41e491f1d1269237884797d3b4fdcfce58e41 100644 (file)
--- a/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
@@ -31,11 +31,11 @@ properties:
 
   clocks:
     minItems: 1
-    maxItems: 3
+    maxItems: 7
 
   clock-names:
     minItems: 1
-    maxItems: 3
+    maxItems: 7
 
 required:
   - compatible
@@ -72,6 +72,32 @@ allOf:
           contains:
             enum:
               - qcom,sdm660-a2noc
+    then:
+      properties:
+        clocks:
+          items:
+            - description: Bus Clock.
+            - description: Bus A Clock.
+            - description: IPA Clock.
+            - description: UFS AXI Clock.
+            - description: Aggregate2 UFS AXI Clock.
+            - description: Aggregate2 USB3 AXI Clock.
+            - description: Config NoC USB2 AXI Clock.
+        clock-names:
+          items:
+            - const: bus
+            - const: bus_a
+            - const: ipa
+            - const: ufs_axi
+            - const: aggre2_ufs_axi
+            - const: aggre2_usb3_axi
+            - const: cfg_noc_usb2_axi
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
               - qcom,sdm660-bimc
               - qcom,sdm660-cnoc
               - qcom,sdm660-gnoc
@@ -91,6 +117,7 @@ examples:
   - |
       #include <dt-bindings/clock/qcom,rpmcc.h>
       #include <dt-bindings/clock/qcom,mmcc-sdm660.h>
+      #include <dt-bindings/clock/qcom,gcc-sdm660.h>
 
       bimc: interconnect@1008000 {
               compatible = "qcom,sdm660-bimc";
@@ -123,9 +150,20 @@ examples:
               compatible = "qcom,sdm660-a2noc";
               reg = <0x01704000 0xc100>;
               #interconnect-cells = <1>;
-              clock-names = "bus", "bus_a";
+              clock-names = "bus",
+                            "bus_a",
+                            "ipa",
+                            "ufs_axi",
+                            "aggre2_ufs_axi",
+                            "aggre2_usb3_axi",
+                            "cfg_noc_usb2_axi";
               clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
-                       <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>;
+                       <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>,
+                       <&rpmcc RPM_SMD_IPA_CLK>,
+                       <&gcc GCC_UFS_AXI_CLK>,
+                       <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+                       <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+                       <&gcc GCC_CFG_NOC_USB2_AXI_CLK>;
       };
 
       mnoc: interconnect@1745000 {
index 3e5d82df90a2e1c263324f24b9a55757de9ae207..a2abed06a099b41c7b165c0650e3967ec43a4962 100644 (file)
@@ -31,7 +31,7 @@ properties:
     maxItems: 1
 
   port:
-    $ref: /schemas/graph.yaml#/properties/port
+    $ref: /schemas/graph.yaml#/$defs/port-base
     additionalProperties: false
 
     properties:
index ad42992c6da39775c196a299559ff35373cf0cac..bf115ab9d926d7ace7ea0b5566fe997bf32db33c 100644 (file)
@@ -38,7 +38,7 @@ properties:
 
   port:
     additionalProperties: false
-    $ref: /schemas/graph.yaml#/properties/port
+    $ref: /schemas/graph.yaml#/$defs/port-base
 
     properties:
       endpoint:
index 881f79532501f1449a8c61aae41baa93e9bc5f92..cf2ca2702cc9f69175201de1c57cf94f492be812 100644 (file)
@@ -38,7 +38,7 @@ properties:
 
   port:
     additionalProperties: false
-    $ref: /schemas/graph.yaml#/properties/port
+    $ref: /schemas/graph.yaml#/$defs/port-base
 
     properties:
       endpoint:
index 1edeabf39e6a724c7d0ec8c0adfa22c0e249c294..afcf70947f7e621977b5e4b01eac6722e2c01e27 100644 (file)
@@ -38,7 +38,7 @@ properties:
 
   port:
     additionalProperties: false
-    $ref: /schemas/graph.yaml#/properties/port
+    $ref: /schemas/graph.yaml#/$defs/port-base
 
     properties:
       endpoint:
index e6c9a2f77cc752316f22add398e7b15f441d318e..f300ced4cdf3679302e33cb1ac1e2e9d51b1146e 100644 (file)
@@ -20,9 +20,7 @@ properties:
       - snps,dwcmshc-sdhci
 
   reg:
-    minItems: 1
-    items:
-      - description: Offset and length of the register set for the device
+    maxItems: 1
 
   interrupts:
     maxItems: 1
index 7f2578d48e3f0bb769ca936ec7299c7103e19c36..9eb4bb529ad5c99ca3d124e5b184a339b35c989b 100644 (file)
@@ -19,7 +19,9 @@ properties:
       - const: allwinner,sun8i-v3s-emac
       - const: allwinner,sun50i-a64-emac
       - items:
-          - const: allwinner,sun50i-h6-emac
+          - enum:
+              - allwinner,sun20i-d1-emac
+              - allwinner,sun50i-h6-emac
           - const: allwinner,sun50i-a64-emac
 
   reg:
index 30c11fea491bda4855036f8b124f0c265d16682f..2363b412410c3714285fb0259c56c2e20816412a 100644 (file)
@@ -83,7 +83,7 @@ Example:
                #interrupt-cells = <2>;
 
                switch0: switch@0 {
-                       compatible = "marvell,mv88e6390";
+                       compatible = "marvell,mv88e6190";
                        reg = <0>;
                        reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
 
index 5629b2e4ccf89b36d212fcce47f7a7291af8e367..ee4afe361fac6d2325e99c2b0c614f3178435e1a 100644 (file)
@@ -34,7 +34,6 @@ properties:
 
   clocks:
     minItems: 3
-    maxItems: 5
     items:
       - description: MAC host clock
       - description: MAC apb clock
index 42689b7d03a2ff84c35d08891fc060e06e6100a0..c115c95ee584e6aa382fc70173b1428089e6ab8b 100644 (file)
@@ -21,6 +21,7 @@ select:
       contains:
         enum:
           - snps,dwmac
+          - snps,dwmac-3.40a
           - snps,dwmac-3.50a
           - snps,dwmac-3.610
           - snps,dwmac-3.70a
@@ -76,6 +77,7 @@ properties:
         - rockchip,rk3399-gmac
         - rockchip,rv1108-gmac
         - snps,dwmac
+        - snps,dwmac-3.40a
         - snps,dwmac-3.50a
         - snps,dwmac-3.610
         - snps,dwmac-3.70a
index 2911e565b26053bb2f00e55a6c475ef6f6003935..acea1cd444fd52490450405bceb5d063e955ab2c 100644 (file)
@@ -41,7 +41,6 @@ properties:
       - description: builtin MSI controller.
 
   interrupt-names:
-    minItems: 1
     items:
       - const: msi
 
index ca91201a99269750e80e497eb147979f42942fd3..d7e08b03e204f91dadb383b7ad5e7610c7f866a7 100644 (file)
@@ -171,7 +171,7 @@ examples:
       cs-gpios = <&gpio0 13 0>,
                  <&gpio0 14 0>;
       rx-sample-delay-ns = <3>;
-      spi-flash@1 {
+      flash@1 {
         compatible = "spi-nand";
         reg = <1>;
         rx-sample-delay-ns = <7>;
diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
new file mode 100644 (file)
index 0000000..b9ca8ef
--- /dev/null
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/samsung,exynos-ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC series UFS host controller Device Tree Bindings
+
+maintainers:
+  - Alim Akhtar <alim.akhtar@samsung.com>
+
+description: |
+  Each Samsung UFS host controller instance should have its own node.
+  This binding defines the Samsung-specific bindings in addition to what is
+  used in the common ufshcd bindings
+  [1] Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+
+properties:
+
+  compatible:
+    enum:
+      - samsung,exynos7-ufs
+
+  reg:
+    items:
+      - description: HCI register
+      - description: vendor specific register
+      - description: unipro register
+      - description: UFS protector register
+
+  reg-names:
+    items:
+      - const: hci
+      - const: vs_hci
+      - const: unipro
+      - const: ufsp
+
+  clocks:
+    items:
+      - description: ufs link core clock
+      - description: unipro main clock
+
+  clock-names:
+    items:
+      - const: core_clk
+      - const: sclk_unipro_main
+
+  interrupts:
+    maxItems: 1
+
+  phys:
+    maxItems: 1
+
+  phy-names:
+    const: ufs-phy
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - phys
+  - phy-names
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/exynos7-clk.h>
+
+    ufs: ufs@15570000 {
+       compatible = "samsung,exynos7-ufs";
+       reg = <0x15570000 0x100>,
+             <0x15570100 0x100>,
+             <0x15571000 0x200>,
+             <0x15572000 0x300>;
+       reg-names = "hci", "vs_hci", "unipro", "ufsp";
+       interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+       clocks = <&clock_fsys1 ACLK_UFS20_LINK>,
+                <&clock_fsys1 SCLK_UFSUNIPRO20_USER>;
+       clock-names = "core_clk", "sclk_unipro_main";
+       pinctrl-names = "default";
+       pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
+       phys = <&ufs_phy>;
+       phy-names = "ufs-phy";
+    };
+...
index ffe9ea0c1499896b23ee021ee881b805c6294726..d67ccd22c63b1eed83be1fa1f509247fdd9bdbb4 100644 (file)
 NTFS3
 =====
 
-
 Summary and Features
 ====================
 
-NTFS3 is fully functional NTFS Read-Write driver. The driver works with
-NTFS versions up to 3.1, normal/compressed/sparse files
-and journal replaying. File system type to use on mount is 'ntfs3'.
+NTFS3 is a fully functional NTFS Read-Write driver. The driver works with NTFS
+versions up to 3.1. The file system type to use on mount is *ntfs3*.
 
 - This driver implements NTFS read/write support for normal, sparse and
   compressed files.
-- Supports native journal replaying;
-- Supports extended attributes
-       Predefined extended attributes:
-       - 'system.ntfs_security' gets/sets security
-                       descriptor (SECURITY_DESCRIPTOR_RELATIVE)
-       - 'system.ntfs_attrib' gets/sets ntfs file/dir attributes.
-               Note: applied to empty files, this allows to switch type between
-               sparse(0x200), compressed(0x800) and normal;
+- Supports native journal replaying.
 - Supports NFS export of mounted NTFS volumes.
+- Supports extended attributes. Predefined extended attributes:
+
+       - *system.ntfs_security* gets/sets security
+
+               Descriptor: SECURITY_DESCRIPTOR_RELATIVE
+
+       - *system.ntfs_attrib* gets/sets ntfs file/dir attributes.
+
+         Note: Applied to empty files, this allows switching the type between
+         sparse (0x200), compressed (0x800) and normal.
 
 Mount Options
 =============
 
 The list below describes mount options supported by NTFS3 driver in addition to
-generic ones.
+generic ones. Every mount option can be negated by prefixing it with **no**.
+If an option is already listed here with a **no** prefix, the default is the
+variant without **no**.
 
-===============================================================================
+.. flat-table::
+   :widths: 1 5
+   :fill-cells:
 
-nls=name               This option informs the driver how to interpret path
-                       strings and translate them to Unicode and back. If
-                       this option is not set, the default codepage will be
-                       used (CONFIG_NLS_DEFAULT).
-                       Examples:
-                               'nls=utf8'
+   * - iocharset=name
+     - This option informs the driver how to interpret path strings and
+       translate them to Unicode and back. If this option is not set, the
+       default codepage will be used (CONFIG_NLS_DEFAULT).
 
-uid=
-gid=
-umask=                 Controls the default permissions for files/directories created
-                       after the NTFS volume is mounted.
+       Example: iocharset=utf8
 
-fmask=
-dmask=                 Instead of specifying umask which applies both to
-                       files and directories, fmask applies only to files and
-                       dmask only to directories.
+   * - uid=
+     - :rspan:`1`
+   * - gid=
 
-nohidden               Files with the Windows-specific HIDDEN (FILE_ATTRIBUTE_HIDDEN)
-                       attribute will not be shown under Linux.
+   * - umask=
+     - Controls the default permissions for files/directories created after
+       the NTFS volume is mounted.
 
-sys_immutable          Files with the Windows-specific SYSTEM
-                       (FILE_ATTRIBUTE_SYSTEM) attribute will be marked as system
-                       immutable files.
+   * - dmask=
+     - :rspan:`1` Instead of specifying umask which applies both to files and
+       directories, fmask applies only to files and dmask only to directories.
+   * - fmask=
 
-discard                        Enable support of the TRIM command for improved performance
-                       on delete operations, which is recommended for use with the
-                       solid-state drives (SSD).
+   * - noacsrules
+     - "No access rules" mount option sets access rights for files/folders to
+       777 and owner/group to root. This mount option absorbs all other
+       permissions.
 
-force                  Forces the driver to mount partitions even if 'dirty' flag
-                       (volume dirty) is set. Not recommended for use.
+       - Permissions change for files/folders will be reported as successful,
+        but they will remain 777.
 
-sparse                 Create new files as "sparse".
+       - Owner/group changes will be reported as successful, but they will
+        stay as root.
 
-showmeta               Use this parameter to show all meta-files (System Files) on
-                       a mounted NTFS partition.
-                       By default, all meta-files are hidden.
+   * - nohidden
+     - Files with the Windows-specific HIDDEN (FILE_ATTRIBUTE_HIDDEN) attribute
+       will not be shown under Linux.
 
-prealloc               Preallocate space for files excessively when file size is
-                       increasing on writes. Decreases fragmentation in case of
-                       parallel write operations to different files.
+   * - sys_immutable
+     - Files with the Windows-specific SYSTEM (FILE_ATTRIBUTE_SYSTEM) attribute
+       will be marked as system immutable files.
 
-no_acs_rules           "No access rules" mount option sets access rights for
-                       files/folders to 777 and owner/group to root. This mount
-                       option absorbs all other permissions:
-                       - permissions change for files/folders will be reported
-                               as successful, but they will remain 777;
-                       - owner/group change will be reported as successful, but
-                               they will stay as root
+   * - discard
+     - Enable support of the TRIM command for improved performance on delete
+       operations, which is recommended for use with the solid-state drives
+       (SSD).
 
-acl                    Support POSIX ACLs (Access Control Lists). Effective if
-                       supported by Kernel. Not to be confused with NTFS ACLs.
-                       The option specified as acl enables support for POSIX ACLs.
+   * - force
+     - Forces the driver to mount partitions even if the volume is marked
+       dirty. Not recommended for use.
 
-noatime                        All files and directories will not update their last access
-                       time attribute if a partition is mounted with this parameter.
-                       This option can speed up file system operation.
+   * - sparse
+     - Create new files as sparse.
 
-===============================================================================
+   * - showmeta
+     - Use this parameter to show all meta-files (System Files) on a mounted
+       NTFS partition. By default, all meta-files are hidden.
 
-ToDo list
-=========
+   * - prealloc
+     - Preallocate space for files excessively when file size is increasing on
+       writes. Decreases fragmentation in case of parallel write operations to
+       different files.
 
-- Full journaling support (currently journal replaying is supported) over JBD.
+   * - acl
+     - Support POSIX ACLs (Access Control Lists). Effective if supported by
+       Kernel. Not to be confused with NTFS ACLs. The option specified as acl
+       enables support for POSIX ACLs.
 
+Todo list
+=========
+- Full journaling support over JBD. Currently journal replaying is supported,
+  which is not necessarily as effective as JBD would be.
 
 References
 ==========
-https://www.paragon-software.com/home/ntfs-linux-professional/
-       - Commercial version of the NTFS driver for Linux.
+- Commercial version of the NTFS driver for Linux.
+       https://www.paragon-software.com/home/ntfs-linux-professional/
 
-almaz.alexandrovich@paragon-software.com
-       - Direct e-mail address for feedback and requests on the NTFS3 implementation.
+- Direct e-mail address for feedback and requests on the NTFS3 implementation.
+       almaz.alexandrovich@paragon-software.com
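As a companion to the option table above, here is a minimal C sketch of mounting with this driver through mount(2), using two of the documented options (iocharset and nohidden). The device and mount point are placeholders; this is illustrative only and not part of the patch.

#include <stdio.h>
#include <sys/mount.h>

/*
 * Sketch: mount an NTFS volume with the ntfs3 driver, selecting a codepage
 * and hiding files that carry the Windows HIDDEN attribute, as described in
 * the option table above.  Paths are placeholders.
 */
int main(void)
{
	if (mount("/dev/sdb1", "/mnt/win", "ntfs3", 0,
		  "iocharset=utf8,nohidden") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}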
index 364680cdad2e49eff419f67d4ddc7ce2b4a958e7..8ba72e898099ea3cba7db5d6756bbc3a85cb7948 100644 (file)
@@ -300,8 +300,8 @@ pcie_replay_count
 .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
    :doc: pcie_replay_count
 
-+GPU SmartShift Information
-============================
+GPU SmartShift Information
+==========================
 
 GPU SmartShift information via sysfs
 
index 06af044c882f41de769884a4c6d271d391022e12..607f78f0f189a3f8ec6d0ff86c20bd3c27463c0c 100644 (file)
@@ -111,15 +111,6 @@ Component Helper Usage
 .. kernel-doc:: drivers/gpu/drm/drm_drv.c
    :doc: component helper usage recommendations
 
-IRQ Helper Library
-~~~~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/drm_irq.c
-   :doc: irq helpers
-
-.. kernel-doc:: drivers/gpu/drm/drm_irq.c
-   :export:
-
 Memory Manager Initialization
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
index 8557e26281c3a9c8499a095cd0f1263478166037..91b99adc6c487fdfe78390eef8a9cfd66edbb036 100644 (file)
@@ -132,20 +132,3 @@ On Family 17h and Family 18h CPUs, additional temperature sensors may report
 Core Complex Die (CCD) temperatures. Up to 8 such temperatures are reported
 as temp{3..10}_input, labeled Tccd{1..8}. Actual support depends on the CPU
 variant.
-
-Various Family 17h and 18h CPUs report voltage and current telemetry
-information. The following attributes may be reported.
-
-Attribute      Label   Description
-===============        ======= ================
-in0_input      Vcore   Core voltage
-in1_input      Vsoc    SoC voltage
-curr1_input    Icore   Core current
-curr2_input    Isoc    SoC current
-===============        ======= ================
-
-Current values are raw (unscaled) as reported by the CPU. Core current is
-reported as multiples of 1A / LSB. SoC is reported as multiples of 0.25A
-/ LSB. The real current is board specific. Reported currents should be seen
-as rough guidance, and should be scaled using sensors3.conf as appropriate
-for a given board.
index e7d9cbff771bc238dcb5b7023fbfae0368260afb..67b7a701ce9e15a0bde78072569f1c31d7959295 100644 (file)
@@ -851,7 +851,7 @@ NOTES:
 - 0x88A8 traffic will not be received unless VLAN stripping is disabled with
   the following command::
 
-    # ethool -K <ethX> rxvlan off
+    # ethtool -K <ethX> rxvlan off
 
 - 0x88A8/0x8100 double VLANs cannot be used with 0x8100 or 0x8100/0x8100 VLANS
   configured on the same port. 0x88a8/0x8100 traffic will not be received if
index 564caeebe2b26940edca7984ba83fa733bdafe63..29b1bae0cf000674f327d1552f47e77c0cb46581 100644 (file)
@@ -296,7 +296,7 @@ not available.
 Device Tree bindings and board design
 =====================================
 
-This section references ``Documentation/devicetree/bindings/net/dsa/sja1105.txt``
+This section references ``Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml``
 and aims to showcase some potential switch caveats.
 
 RMII PHY role and out-of-band signaling
index d3a8557b66a1abb9ebcda48b0872e8727ce94850..e35ab74a0f804b0453e323b91ebacc6feeda0851 100644 (file)
@@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils.
 ====================== ===============  ========================================
         Program        Minimal version       Command to check the version
 ====================== ===============  ========================================
-GNU C                  4.9              gcc --version
+GNU C                  5.1              gcc --version
 Clang/LLVM (optional)  10.0.1           clang --version
 GNU make               3.81             make --version
 binutils               2.23             ld -v
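The raised floor is also enforced at build time; below is a paraphrased C sketch of that kind of preprocessor guard, in the spirit of the kernel's compiler headers rather than the exact hunk.

/*
 * Sketch of a compile-time floor on the GCC version; the macro name is
 * made up and the check is a paraphrase, not the kernel's exact code.
 */
#define SKETCH_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)

#if defined(__GNUC__) && !defined(__clang__) && SKETCH_GCC_VERSION < 50100
# error "gcc >= 5.1 is required to build this kernel"
#endif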
index 669a022f68175e93a462ccb256e12bbae7d182a5..980eb20521cfe3417ab349f5452f53b9c68cade8 100644 (file)
@@ -223,7 +223,7 @@ Linux内核5.x版本 <http://kernel.org/>
 编译内核
 ---------
 
- - 确保您至少有gcc 4.9可用。
+ - 确保您至少有gcc 5.1可用。
    有关更多信息,请参阅 :ref:`Documentation/process/changes.rst <changes>` 。
 
    请注意,您仍然可以使用此内核运行a.out用户程序。
index b752e50359e699e93850aa16cab485c00dd78e74..6ce97edbab37f58cbcb7aefa736555358bc2c79d 100644 (file)
@@ -226,7 +226,7 @@ Linux內核5.x版本 <http://kernel.org/>
 編譯內核
 ---------
 
- - 確保您至少有gcc 4.9可用。
+ - 確保您至少有gcc 5.1可用。
    有關更多信息,請參閱 :ref:`Documentation/process/changes.rst <changes>` 。
 
    請注意,您仍然可以使用此內核運行a.out用戶程序。
index eeb4c70b3d5b50a8e32b7ffe4e5e6cf253bbba68..8d118d7957d263aeeafa11b3ed795ed81def166e 100644 (file)
@@ -414,7 +414,8 @@ T:  git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
 F:     drivers/acpi/pmic/
 
 ACPI THERMAL DRIVER
-M:     Zhang Rui <rui.zhang@intel.com>
+M:     Rafael J. Wysocki <rafael@kernel.org>
+R:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
 S:     Supported
 W:     https://01.org/linux-acpi
@@ -810,7 +811,7 @@ F:  Documentation/devicetree/bindings/dma/altr,msgdma.yaml
 F:     drivers/dma/altera-msgdma.c
 
 ALTERA PIO DRIVER
-M:     Joyce Ooi <joyce.ooi@intel.com>
+M:     Mun Yew Tham <mun.yew.tham@intel.com>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
 F:     drivers/gpio/gpio-altera.c
@@ -977,12 +978,12 @@ L:        platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/amd-pmc.*
 
-AMD POWERPLAY
+AMD POWERPLAY AND SWSMU
 M:     Evan Quan <evan.quan@amd.com>
 L:     amd-gfx@lists.freedesktop.org
 S:     Supported
 T:     git https://gitlab.freedesktop.org/agd5f/linux.git
-F:     drivers/gpu/drm/amd/pm/powerplay/
+F:     drivers/gpu/drm/amd/pm/
 
 AMD PTDMA DRIVER
 M:     Sanjay R Mehta <sanju.mehta@amd.com>
@@ -1275,6 +1276,7 @@ F:        drivers/input/mouse/bcm5974.c
 
 APPLE DART IOMMU DRIVER
 M:     Sven Peter <sven@svenpeter.dev>
+R:     Alyssa Rosenzweig <alyssa@rosenzweig.io>
 L:     iommu@lists.linux-foundation.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/iommu/apple,dart.yaml
@@ -1711,6 +1713,8 @@ F:        drivers/*/*alpine*
 
 ARM/APPLE MACHINE SUPPORT
 M:     Hector Martin <marcan@marcan.st>
+M:     Sven Peter <sven@svenpeter.dev>
+R:     Alyssa Rosenzweig <alyssa@rosenzweig.io>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 W:     https://asahilinux.org
@@ -2236,6 +2240,7 @@ F:        arch/arm/mach-pxa/mioa701.c
 
 ARM/MStar/Sigmastar Armv7 SoC support
 M:     Daniel Palmer <daniel@thingy.jp>
+M:     Romain Perier <romain.perier@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 W:     http://linux-chenxing.org/
@@ -2712,6 +2717,7 @@ F:        drivers/power/reset/keystone-reset.c
 
 ARM/TEXAS INSTRUMENTS K3 ARCHITECTURE
 M:     Nishanth Menon <nm@ti.com>
+M:     Vignesh Raghavendra <vigneshr@ti.com>
 M:     Tero Kristo <kristo@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
@@ -2804,9 +2810,8 @@ F:        arch/arm/mach-pxa/include/mach/vpac270.h
 F:     arch/arm/mach-pxa/vpac270.c
 
 ARM/VT8500 ARM ARCHITECTURE
-M:     Tony Prisk <linux@prisktech.co.nz>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Maintained
+S:     Orphan
 F:     Documentation/devicetree/bindings/i2c/i2c-wmt.txt
 F:     arch/arm/mach-vt8500/
 F:     drivers/clocksource/timer-vt8500.c
@@ -2962,7 +2967,7 @@ F:        crypto/async_tx/
 F:     include/linux/async_tx.h
 
 AT24 EEPROM DRIVER
-M:     Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M:     Bartosz Golaszewski <brgl@bgdev.pl>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -3385,9 +3390,11 @@ F:       Documentation/networking/filter.rst
 F:     Documentation/userspace-api/ebpf/
 F:     arch/*/net/*
 F:     include/linux/bpf*
+F:     include/linux/btf*
 F:     include/linux/filter.h
 F:     include/trace/events/xdp.h
 F:     include/uapi/linux/bpf*
+F:     include/uapi/linux/btf*
 F:     include/uapi/linux/filter.h
 F:     kernel/bpf/
 F:     kernel/trace/bpf_trace.c
@@ -3821,7 +3828,6 @@ F:        drivers/scsi/mpi3mr/
 
 BROADCOM NETXTREME-E ROCE DRIVER
 M:     Selvin Xavier <selvin.xavier@broadcom.com>
-M:     Naresh Kumar PBS <nareshkumar.pbs@broadcom.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 W:     http://www.broadcom.com
@@ -4656,7 +4662,7 @@ W:        http://linux-cifs.samba.org/
 T:     git git://git.samba.org/sfrench/cifs-2.6.git
 F:     Documentation/admin-guide/cifs/
 F:     fs/cifs/
-F:     fs/cifs_common/
+F:     fs/smbfs_common/
 
 COMPACTPCI HOTPLUG CORE
 M:     Scott Murray <scott@spiteful.org>
@@ -7337,10 +7343,11 @@ F:      include/uapi/linux/fpga-dfl.h
 
 FPGA MANAGER FRAMEWORK
 M:     Moritz Fischer <mdf@kernel.org>
+M:     Wu Hao <hao.wu@intel.com>
+M:     Xu Yilun <yilun.xu@intel.com>
 R:     Tom Rix <trix@redhat.com>
 L:     linux-fpga@vger.kernel.org
 S:     Maintained
-W:     http://www.rocketboards.org
 Q:     http://patchwork.kernel.org/project/linux-fpga/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mdf/linux-fpga.git
 F:     Documentation/devicetree/bindings/fpga/
@@ -7434,7 +7441,7 @@ FREESCALE IMX / MXC FEC DRIVER
 M:     Joakim Zhang <qiangqing.zhang@nxp.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
-F:     Documentation/devicetree/bindings/net/fsl-fec.txt
+F:     Documentation/devicetree/bindings/net/fsl,fec.yaml
 F:     drivers/net/ethernet/freescale/fec.h
 F:     drivers/net/ethernet/freescale/fec_main.c
 F:     drivers/net/ethernet/freescale/fec_ptp.c
@@ -7986,7 +7993,7 @@ F:        include/linux/gpio/regmap.h
 
 GPIO SUBSYSTEM
 M:     Linus Walleij <linus.walleij@linaro.org>
-M:     Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M:     Bartosz Golaszewski <brgl@bgdev.pl>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
@@ -8608,9 +8615,8 @@ F:        Documentation/devicetree/bindings/iio/humidity/st,hts221.yaml
 F:     drivers/iio/humidity/hts221*
 
 HUAWEI ETHERNET DRIVER
-M:     Bin Luo <luobin9@huawei.com>
 L:     netdev@vger.kernel.org
-S:     Supported
+S:     Orphan
 F:     Documentation/networking/device_drivers/ethernet/huawei/hinic.rst
 F:     drivers/net/ethernet/huawei/hinic/
 
@@ -9302,7 +9308,7 @@ S:        Maintained
 F:     drivers/platform/x86/intel/atomisp2/led.c
 
 INTEL BIOS SAR INT1092 DRIVER
-M:     Shravan S <s.shravan@intel.com>
+M:     Shravan Sudhakar <s.shravan@intel.com>
 M:     Intel Corporation <linuxwwan@intel.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
@@ -9624,7 +9630,7 @@ F:        include/uapi/linux/isst_if.h
 F:     tools/power/x86/intel-speed-select/
 
 INTEL STRATIX10 FIRMWARE DRIVERS
-M:     Richard Gong <richard.gong@linux.intel.com>
+M:     Dinh Nguyen <dinguyen@kernel.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-devices-platform-stratix10-rsu
@@ -10194,8 +10200,8 @@ M:      Hyunchul Lee <hyc.lee@gmail.com>
 L:     linux-cifs@vger.kernel.org
 S:     Maintained
 T:     git git://git.samba.org/ksmbd.git
-F:     fs/cifs_common/
 F:     fs/ksmbd/
+F:     fs/smbfs_common/
 
 KERNEL UNIT TESTING FRAMEWORK (KUnit)
 M:     Brendan Higgins <brendanhiggins@google.com>
@@ -10274,7 +10280,6 @@ KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
 M:     Christian Borntraeger <borntraeger@de.ibm.com>
 M:     Janosch Frank <frankja@linux.ibm.com>
 R:     David Hildenbrand <david@redhat.com>
-R:     Cornelia Huck <cohuck@redhat.com>
 R:     Claudio Imbrenda <imbrenda@linux.ibm.com>
 L:     kvm@vger.kernel.org
 S:     Supported
@@ -11148,6 +11153,7 @@ S:      Maintained
 F:     Documentation/devicetree/bindings/net/dsa/marvell.txt
 F:     Documentation/networking/devlink/mv88e6xxx.rst
 F:     drivers/net/dsa/mv88e6xxx/
+F:     include/linux/dsa/mv88e6xxx.h
 F:     include/linux/platform_data/mv88e6xxx.h
 
 MARVELL ARMADA 3700 PHY DRIVERS
@@ -11367,7 +11373,7 @@ F:      Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml
 F:     drivers/iio/proximity/mb1232.c
 
 MAXIM MAX77650 PMIC MFD DRIVER
-M:     Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M:     Bartosz Golaszewski <brgl@bgdev.pl>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/*/*max77650.yaml
@@ -13255,9 +13261,9 @@ F:      Documentation/scsi/NinjaSCSI.rst
 F:     drivers/scsi/nsp32*
 
 NIOS2 ARCHITECTURE
-M:     Ley Foon Tan <ley.foon.tan@intel.com>
+M:     Dinh Nguyen <dinguyen@kernel.org>
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 F:     arch/nios2/
 
 NITRO ENCLAVES (NE)
@@ -14342,7 +14348,8 @@ F:      Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
 F:     drivers/pci/controller/pci-ixp4xx.c
 
 PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
-M:     Jonathan Derrick <jonathan.derrick@intel.com>
+M:     Nirmal Patel <nirmal.patel@linux.intel.com>
+R:     Jonathan Derrick <jonathan.derrick@linux.dev>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 F:     drivers/pci/controller/vmd.c
@@ -16295,6 +16302,7 @@ S390
 M:     Heiko Carstens <hca@linux.ibm.com>
 M:     Vasily Gorbik <gor@linux.ibm.com>
 M:     Christian Borntraeger <borntraeger@de.ibm.com>
+R:     Alexander Gordeev <agordeev@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -16373,7 +16381,6 @@ F:      drivers/s390/crypto/vfio_ap_ops.c
 F:     drivers/s390/crypto/vfio_ap_private.h
 
 S390 VFIO-CCW DRIVER
-M:     Cornelia Huck <cohuck@redhat.com>
 M:     Eric Farman <farman@linux.ibm.com>
 M:     Matthew Rosato <mjrosato@linux.ibm.com>
 R:     Halil Pasic <pasic@linux.ibm.com>
@@ -16650,13 +16657,6 @@ M:     Lubomir Rintel <lkundrak@v3.sk>
 S:     Supported
 F:     drivers/char/pcmcia/scr24x_cs.c
 
-SCSI CDROM DRIVER
-M:     Jens Axboe <axboe@kernel.dk>
-L:     linux-scsi@vger.kernel.org
-S:     Maintained
-W:     http://www.kernel.dk
-F:     drivers/scsi/sr*
-
 SCSI RDMA PROTOCOL (SRP) INITIATOR
 M:     Bart Van Assche <bvanassche@acm.org>
 L:     linux-rdma@vger.kernel.org
@@ -16955,7 +16955,6 @@ F:      drivers/misc/sgi-xp/
 
 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
 M:     Karsten Graul <kgraul@linux.ibm.com>
-M:     Guvenc Gulce <guvenc@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -17800,7 +17799,6 @@ F:      drivers/staging/nvec/
 
 STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
 M:     Jens Frederich <jfrederich@gmail.com>
-M:     Daniel Drake <dsd@laptop.org>
 M:     Jon Nettleton <jon.nettleton@gmail.com>
 S:     Maintained
 W:     http://wiki.laptop.org/go/DCON
@@ -17891,7 +17889,8 @@ M:      Olivier Moysan <olivier.moysan@foss.st.com>
 M:     Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
-F:     Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
+F:     Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+F:     Documentation/devicetree/bindings/sound/st,stm32-*.yaml
 F:     sound/soc/stm/
 
 STM32 TIMER/LPTIMER DRIVERS
@@ -17968,10 +17967,11 @@ F:    Documentation/admin-guide/svga.rst
 F:     arch/x86/boot/video*
 
 SWIOTLB SUBSYSTEM
-M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:     Christoph Hellwig <hch@infradead.org>
 L:     iommu@lists.linux-foundation.org
 S:     Supported
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
+W:     http://git.infradead.org/users/hch/dma-mapping.git
+T:     git git://git.infradead.org/users/hch/dma-mapping.git
 F:     arch/*/kernel/pci-swiotlb.c
 F:     include/linux/swiotlb.h
 F:     kernel/dma/swiotlb.c
@@ -17987,7 +17987,7 @@ F:      net/switchdev/
 SY8106A REGULATOR DRIVER
 M:     Icenowy Zheng <icenowy@aosc.io>
 S:     Maintained
-F:     Documentation/devicetree/bindings/regulator/sy8106a-regulator.txt
+F:     Documentation/devicetree/bindings/regulator/silergy,sy8106a.yaml
 F:     drivers/regulator/sy8106a-regulator.c
 
 SYNC FILE FRAMEWORK
@@ -18554,13 +18554,14 @@ T:    git git://linuxtv.org/media_tree.git
 F:     drivers/media/radio/radio-raremono.c
 
 THERMAL
-M:     Zhang Rui <rui.zhang@intel.com>
+M:     Rafael J. Wysocki <rafael@kernel.org>
 M:     Daniel Lezcano <daniel.lezcano@linaro.org>
 R:     Amit Kucheria <amitk@kernel.org>
+R:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 Q:     https://patchwork.kernel.org/project/linux-pm/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/thermal/linux.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git thermal
 F:     Documentation/devicetree/bindings/thermal/
 F:     drivers/thermal/
 F:     include/linux/cpu_cooling.h
@@ -18689,7 +18690,7 @@ F:      include/linux/clk/ti.h
 
 TI DAVINCI MACHINE SUPPORT
 M:     Sekhar Nori <nsekhar@ti.com>
-R:     Bartosz Golaszewski <bgolaszewski@baylibre.com>
+R:     Bartosz Golaszewski <brgl@bgdev.pl>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
@@ -19288,13 +19289,12 @@ S:    Maintained
 F:     drivers/usb/misc/chaoskey.c
 
 USB CYPRESS C67X00 DRIVER
-M:     Peter Korsgaard <jacmet@sunsite.dk>
 L:     linux-usb@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/usb/c67x00/
 
 USB DAVICOM DM9601 DRIVER
-M:     Peter Korsgaard <jacmet@sunsite.dk>
+M:     Peter Korsgaard <peter@korsgaard.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 W:     http://www.linux-usb.org/usbnet
@@ -20474,7 +20474,6 @@ F:      samples/bpf/xdpsock*
 F:     tools/lib/bpf/xsk*
 
 XEN BLOCK SUBSYSTEM
-M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 M:     Roger Pau Monné <roger.pau@citrix.com>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 S:     Supported
@@ -20522,7 +20521,7 @@ S:      Supported
 F:     drivers/net/xen-netback/*
 
 XEN PCI SUBSYSTEM
-M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:     Juergen Gross <jgross@suse.com>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 S:     Supported
 F:     arch/x86/pci/*xen*
@@ -20545,7 +20544,8 @@ S:      Supported
 F:     sound/xen/*
 
 XEN SWIOTLB SUBSYSTEM
-M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:     Juergen Gross <jgross@suse.com>
+M:     Stefano Stabellini <sstabellini@kernel.org>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:     iommu@lists.linux-foundation.org
 S:     Supported
@@ -20704,7 +20704,6 @@ S:      Maintained
 F:     mm/zbud.c
 
 ZD1211RW WIRELESS DRIVER
-M:     Daniel Drake <dsd@gentoo.org>
 M:     Ulrich Kunitz <kune@deine-taler.de>
 L:     linux-wireless@vger.kernel.org
 L:     zd1211-devs@lists.sourceforge.net (subscribers-only)
index 7cfe4ff36f44aa66938fea538109b6495a4de2d4..4d0c0ed9236e407bb0ebcc1ac0504a76b95c1a11 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc5
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
@@ -849,12 +849,6 @@ endif
 
 DEBUG_CFLAGS   :=
 
-# Workaround for GCC versions < 5.0
-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
-ifdef CONFIG_CC_IS_GCC
-DEBUG_CFLAGS   += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
-endif
-
 ifdef CONFIG_DEBUG_INFO
 
 ifdef CONFIG_DEBUG_INFO_SPLIT
index 02e5b673bdade31dfd1bdd9c38f497d1e1d4afc2..4e87783c90ad708339a3a869adfa81d75615b27b 100644 (file)
@@ -20,7 +20,7 @@ config ALPHA
        select NEED_SG_DMA_LENGTH
        select VIRT_TO_BUS
        select GENERIC_IRQ_PROBE
-       select GENERIC_PCI_IOMAP if PCI
+       select GENERIC_PCI_IOMAP
        select AUTO_IRQ_AFFINITY if SMP
        select GENERIC_IRQ_SHOW
        select ARCH_WANT_IPC_PARSE_VERSION
@@ -199,7 +199,6 @@ config ALPHA_EIGER
 
 config ALPHA_JENSEN
        bool "Jensen"
-       depends on BROKEN
        select HAVE_EISA
        help
          DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
index b34cc1f06720ff4c3b6fb843e25ea9879a2ad0d4..c8ae46fc2e745fec17dd88f2d877553695283f5f 100644 (file)
@@ -16,3 +16,4 @@ extern void __divlu(void);
 extern void __remlu(void);
 extern void __divqu(void);
 extern void __remqu(void);
+extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long , unsigned long);
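The prototype added above matches the helper this series moves into arch/alpha/lib/udiv-qrnnd.S further down. A hedged C sketch of a caller follows; the argument order (remainder pointer, high word, low word, divisor) is inferred from the assembly's register usage, and the wrapper name is invented for illustration.

/*
 * Sketch only: divide the two-word value hi:lo by d, returning the
 * quotient and storing the remainder through rem.  Argument order is
 * inferred from udiv-qrnnd.S (rem_ptr, n1, n0, d); div_2word_by_1() is a
 * hypothetical wrapper, not kernel code.
 */
extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long,
				  unsigned long, unsigned long);

static unsigned long div_2word_by_1(unsigned long hi, unsigned long lo,
				    unsigned long d, unsigned long *rem)
{
	/* As with GMP's udiv_qrnnd, keep hi < d so the quotient fits in one word. */
	return __udiv_qrnnd(rem, hi, lo, d);
}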
index 0fab5ac90775827345e7c0522a27bf6fd440b184..c9cb554fbe54ced5d8dd27e6b3430f37f71c9b34 100644 (file)
@@ -60,7 +60,7 @@ extern inline void set_hae(unsigned long new_hae)
  * Change virtual addresses to physical addresses and vv.
  */
 #ifdef USE_48_BIT_KSEG
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
 {
        return (unsigned long)address - IDENT_ADDR;
 }
@@ -70,7 +70,7 @@ static inline void * phys_to_virt(unsigned long address)
        return (void *) (address + IDENT_ADDR);
 }
 #else
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
 {
         unsigned long phys = (unsigned long)address;
 
@@ -106,7 +106,7 @@ static inline void * phys_to_virt(unsigned long address)
 extern unsigned long __direct_map_base;
 extern unsigned long __direct_map_size;
 
-static inline unsigned long __deprecated virt_to_bus(void *address)
+static inline unsigned long __deprecated virt_to_bus(volatile void *address)
 {
        unsigned long phys = virt_to_phys(address);
        unsigned long bus = phys + __direct_map_base;
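The io.h hunks above widen virt_to_phys()/virt_to_bus() to take a volatile void *, so callers holding volatile-qualified pointers (memory the hardware also touches) no longer have to cast the qualifier away. A minimal sketch under that assumption; the descriptor type and function below are invented for illustration.

#include <asm/io.h>

/* Hypothetical DMA descriptor that the device also writes, hence volatile. */
struct ring_desc {
	unsigned long addr;
	unsigned long flags;
};

static unsigned long desc_phys(volatile struct ring_desc *desc)
{
	/*
	 * With the volatile void * prototype above, no cast is needed here;
	 * previously the caller had to strip the volatile qualifier by hand.
	 */
	return virt_to_phys(desc);
}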
index 916895155a889a79b09c5d9912b8c208e58a3892..1c4131453db2ca100b2a7522289d4f8368f45756 100644 (file)
@@ -111,18 +111,18 @@ __EXTERN_INLINE void jensen_set_hae(unsigned long addr)
  * convinced that I need one of the newer machines.
  */
 
-static inline unsigned int jensen_local_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr)
 {
        return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
 }
 
-static inline void jensen_local_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr)
 {
        *(vuip)((addr << 9) + EISA_VL82C106) = b;
        mb();
 }
 
-static inline unsigned int jensen_bus_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr)
 {
        long result;
 
@@ -131,7 +131,7 @@ static inline unsigned int jensen_bus_inb(unsigned long addr)
        return __kernel_extbl(result, addr & 3);
 }
 
-static inline void jensen_bus_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr)
 {
        jensen_set_hae(0);
        *(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
diff --git a/arch/alpha/include/asm/setup.h b/arch/alpha/include/asm/setup.h
new file mode 100644 (file)
index 0000000..262aab9
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ALPHA_SETUP_H
+#define __ALPHA_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+/*
+ * We leave one page for the initial stack page, and one page for
+ * the initial process structure. Also, the console eats 3 MB for
+ * the initial bootloader (one of which we can reclaim later).
+ */
+#define BOOT_PCB       0x20000000
+#define BOOT_ADDR      0x20000000
+/* Remove when official MILO sources have ELF support: */
+#define BOOT_SIZE      (16*1024)
+
+#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
+#define KERNEL_START_PHYS      0x300000 /* Old bootloaders hardcoded this.  */
+#else
+#define KERNEL_START_PHYS      0x1000000 /* required: Wildfire/Titan/Marvel */
+#endif
+
+#define KERNEL_START   (PAGE_OFFSET+KERNEL_START_PHYS)
+#define SWAPPER_PGD    KERNEL_START
+#define INIT_STACK     (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
+#define EMPTY_PGT      (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
+#define EMPTY_PGE      (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
+#define ZERO_PGE       (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
+
+#define START_ADDR     (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
+
+/*
+ * This is setup by the secondary bootstrap loader.  Because
+ * the zero page is zeroed out as soon as the vm system is
+ * initialized, we need to copy things out into a more permanent
+ * place.
+ */
+#define PARAM                  ZERO_PGE
+#define COMMAND_LINE           ((char *)(absolute_pointer(PARAM + 0x0000)))
+#define INITRD_START           (*(unsigned long *) (PARAM+0x100))
+#define INITRD_SIZE            (*(unsigned long *) (PARAM+0x108))
+
+#endif
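Compared with the uapi header it replaces (next hunk), the functional change here is that COMMAND_LINE now goes through absolute_pointer(), which launders the fixed address so the compiler cannot apply object-size or array-bounds reasoning to it. A hedged sketch of the usual consumer, modeled on what arch setup code typically does with these macros; the function and buffer names are hypothetical.

#include <linux/init.h>
#include <linux/string.h>
#include <asm/setup.h>

static char boot_cmdline_copy[COMMAND_LINE_SIZE] __initdata;

/*
 * Sketch: copy the bootloader-provided command line out of ZERO_PGE early,
 * before the VM layer reuses that page.  COMMAND_LINE expands through
 * absolute_pointer(), so the compiler treats the constant address as opaque.
 */
static void __init save_boot_command_line(void)
{
	strscpy(boot_cmdline_copy, COMMAND_LINE, COMMAND_LINE_SIZE);
}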
index 13b7ee465b0e521c7ffd6ac621935d09018fee8d..f881ea5947cbc1d42241f9d9cb5e7178cdc50f17 100644 (file)
@@ -1,43 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ALPHA_SETUP_H
-#define __ALPHA_SETUP_H
+#ifndef _UAPI__ALPHA_SETUP_H
+#define _UAPI__ALPHA_SETUP_H
 
 #define COMMAND_LINE_SIZE      256
 
-/*
- * We leave one page for the initial stack page, and one page for
- * the initial process structure. Also, the console eats 3 MB for
- * the initial bootloader (one of which we can reclaim later).
- */
-#define BOOT_PCB       0x20000000
-#define BOOT_ADDR      0x20000000
-/* Remove when official MILO sources have ELF support: */
-#define BOOT_SIZE      (16*1024)
-
-#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
-#define KERNEL_START_PHYS      0x300000 /* Old bootloaders hardcoded this.  */
-#else
-#define KERNEL_START_PHYS      0x1000000 /* required: Wildfire/Titan/Marvel */
-#endif
-
-#define KERNEL_START   (PAGE_OFFSET+KERNEL_START_PHYS)
-#define SWAPPER_PGD    KERNEL_START
-#define INIT_STACK     (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
-#define EMPTY_PGT      (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
-#define EMPTY_PGE      (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
-#define ZERO_PGE       (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
-
-#define START_ADDR     (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
-
-/*
- * This is setup by the secondary bootstrap loader.  Because
- * the zero page is zeroed out as soon as the vm system is
- * initialized, we need to copy things out into a more permanent
- * place.
- */
-#define PARAM                  ZERO_PGE
-#define COMMAND_LINE           ((char*)(PARAM + 0x0000))
-#define INITRD_START           (*(unsigned long *) (PARAM+0x100))
-#define INITRD_SIZE            (*(unsigned long *) (PARAM+0x108))
-
-#endif
+#endif /* _UAPI__ALPHA_SETUP_H */
index e5d870ff225f4b327277ea382dd5c57aecf10d40..5c9c88428124447c51b035418c2d6e537449b5f8 100644 (file)
@@ -7,6 +7,11 @@
  *
  * Code supporting the Jensen.
  */
+#define __EXTERN_INLINE
+#include <asm/io.h>
+#include <asm/jensen.h>
+#undef  __EXTERN_INLINE
+
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 
 #include <asm/ptrace.h>
 
-#define __EXTERN_INLINE inline
-#include <asm/io.h>
-#include <asm/jensen.h>
-#undef  __EXTERN_INLINE
-
 #include <asm/dma.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
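The jensen.h and sys_jensen.c hunks work as a pair: the header switches its accessors from static inline to __EXTERN_INLINE, and sys_jensen.c now defines __EXTERN_INLINE as empty before including asm/io.h and asm/jensen.h, so this one translation unit emits out-of-line bodies the rest of the kernel can reference. A standalone sketch of the idiom with hypothetical names, assuming the GNU extern-inline semantics the kernel builds with.

/* demo_io.h: accessors are inline in every includer unless overridden. */
#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#endif

__EXTERN_INLINE unsigned int demo_inb(unsigned long addr)
{
	/* Under GNU extern-inline rules no out-of-line copy is emitted here. */
	return *(volatile unsigned char *)addr;
}

/*
 * demo_io.c: exactly one file defines __EXTERN_INLINE as empty *before*
 * the include, so the same definitions become ordinary external functions
 * in that object:
 *
 *     #define __EXTERN_INLINE
 *     #include "demo_io.h"
 *     #undef  __EXTERN_INLINE
 */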
index 854d5e79979e4ce929d7998bf1135a12880238f8..1cc74f7b50efbebe3c873db5a3cc55207f005248 100644 (file)
@@ -14,6 +14,7 @@ ev6-$(CONFIG_ALPHA_EV6) := ev6-
 ev67-$(CONFIG_ALPHA_EV67) := ev67-
 
 lib-y =        __divqu.o __remqu.o __divlu.o __remlu.o \
+       udiv-qrnnd.o \
        udelay.o \
        $(ev6-y)memset.o \
        $(ev6-y)memcpy.o \
diff --git a/arch/alpha/lib/udiv-qrnnd.S b/arch/alpha/lib/udiv-qrnnd.S
new file mode 100644 (file)
index 0000000..b887aa5
--- /dev/null
@@ -0,0 +1,165 @@
+ # Alpha 21064 __udiv_qrnnd
+ # Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc.
+
+ # This file is part of GCC.
+
+ # The GNU MP Library is free software; you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation; either version 2 of the License, or (at your
+ # option) any later version.
+
+ # In addition to the permissions in the GNU General Public License, the
+ # Free Software Foundation gives you unlimited permission to link the
+ # compiled version of this file with other programs, and to distribute
+ # those programs without any restriction coming from the use of this
+ # file.  (The General Public License restrictions do apply in other
+ # respects; for example, they cover modification of the file, and
+ # distribution when not linked into another program.)
+
+ # This file is distributed in the hope that it will be useful, but
+ # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ # or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+ # License for more details.
+
+ # You should have received a copy of the GNU General Public License
+ # along with GCC; see the file COPYING.  If not, write to the 
+ # Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ # MA 02111-1307, USA.
+#include <asm/export.h>
+
+        .set noreorder
+        .set noat
+
+       .text
+
+       .globl __udiv_qrnnd
+       .ent __udiv_qrnnd
+__udiv_qrnnd:
+       .frame $30,0,$26,0
+       .prologue 0
+
+#define cnt    $2
+#define tmp    $3
+#define rem_ptr        $16
+#define n1     $17
+#define n0     $18
+#define d      $19
+#define qb     $20
+#define AT     $at
+
+       ldiq    cnt,16
+       blt     d,$largedivisor
+
+$loop1:        cmplt   n0,0,tmp
+       addq    n1,n1,n1
+       bis     n1,tmp,n1
+       addq    n0,n0,n0
+       cmpule  d,n1,qb
+       subq    n1,d,tmp
+       cmovne  qb,tmp,n1
+       bis     n0,qb,n0
+       cmplt   n0,0,tmp
+       addq    n1,n1,n1
+       bis     n1,tmp,n1
+       addq    n0,n0,n0
+       cmpule  d,n1,qb
+       subq    n1,d,tmp
+       cmovne  qb,tmp,n1
+       bis     n0,qb,n0
+       cmplt   n0,0,tmp
+       addq    n1,n1,n1
+       bis     n1,tmp,n1
+       addq    n0,n0,n0
+       cmpule  d,n1,qb
+       subq    n1,d,tmp
+       cmovne  qb,tmp,n1
+       bis     n0,qb,n0
+       cmplt   n0,0,tmp
+       addq    n1,n1,n1
+       bis     n1,tmp,n1
+       addq    n0,n0,n0
+       cmpule  d,n1,qb
+       subq    n1,d,tmp
+       cmovne  qb,tmp,n1
+       bis     n0,qb,n0
+       subq    cnt,1,cnt
+       bgt     cnt,$loop1
+       stq     n1,0(rem_ptr)
+       bis     $31,n0,$0
+       ret     $31,($26),1
+
+$largedivisor:
+       and     n0,1,$4
+
+       srl     n0,1,n0
+       sll     n1,63,tmp
+       or      tmp,n0,n0
+       srl     n1,1,n1
+
+       and     d,1,$6
+       srl     d,1,$5
+       addq    $5,$6,$5
+
+$loop2:        cmplt   n0,0,tmp
+       addq    n1,n1,n1
+       bis     n1,tmp,n1
+       addq    n0,n0,n0
+       cmpule  $5,n1,qb
+       subq    n1,$5,tmp
+       cmovne  qb,tmp,n1
+       bis     n0,qb,n0
+       cmplt   n0,0,tmp
+       addq    n1,n1,n1
+       bis     n1,tmp,n1
+       addq    n0,n0,n0
+       cmpule  $5,n1,qb
+       subq    n1,$5,tmp
+       cmovne  qb,tmp,n1
+       bis     n0,qb,n0
+       cmplt   n0,0,tmp
+       addq    n1,n1,n1
+       bis     n1,tmp,n1
+       addq    n0,n0,n0
+       cmpule  $5,n1,qb
+       subq    n1,$5,tmp
+       cmovne  qb,tmp,n1
+       bis     n0,qb,n0
+       cmplt   n0,0,tmp
+       addq    n1,n1,n1
+       bis     n1,tmp,n1
+       addq    n0,n0,n0
+       cmpule  $5,n1,qb
+       subq    n1,$5,tmp
+       cmovne  qb,tmp,n1
+       bis     n0,qb,n0
+       subq    cnt,1,cnt
+       bgt     cnt,$loop2
+
+       addq    n1,n1,n1
+       addq    $4,n1,n1
+       bne     $6,$Odd
+       stq     n1,0(rem_ptr)
+       bis     $31,n0,$0
+       ret     $31,($26),1
+
+$Odd:
+       /* q' in n0. r' in n1 */
+       addq    n1,n0,n1
+
+       cmpult  n1,n0,tmp       # tmp := carry from addq
+       subq    n1,d,AT
+       addq    n0,tmp,n0
+       cmovne  tmp,AT,n1
+
+       cmpult  n1,d,tmp
+       addq    n0,1,AT
+       cmoveq  tmp,AT,n0
+       subq    n1,d,AT
+       cmoveq  tmp,AT,n1
+
+       stq     n1,0(rem_ptr)
+       bis     $31,n0,$0
+       ret     $31,($26),1
+
+       .end    __udiv_qrnnd
+EXPORT_SYMBOL(__udiv_qrnnd)
index 6eda0973e1832ffe7c490b539b79709ef6f0cfe2..3206402a879291d92fa717cd4f8da6dd7627c35b 100644 (file)
@@ -7,4 +7,4 @@ ccflags-y := -w
 
 obj-$(CONFIG_MATHEMU) += math-emu.o
 
-math-emu-objs := math.o qrnnd.o
+math-emu-objs := math.o
index f7cef66af88de6ebbda3057c47db09234ba99d9a..4212258f3cfdc8ef83f12e679003be42ee215cd5 100644 (file)
@@ -403,5 +403,3 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
 egress:
        return si_code;
 }
-
-EXPORT_SYMBOL(__udiv_qrnnd);
diff --git a/arch/alpha/math-emu/qrnnd.S b/arch/alpha/math-emu/qrnnd.S
deleted file mode 100644 (file)
index d6373ec..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
- # Alpha 21064 __udiv_qrnnd
- # Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc.
-
- # This file is part of GCC.
-
- # The GNU MP Library is free software; you can redistribute it and/or modify
- # it under the terms of the GNU General Public License as published by
- # the Free Software Foundation; either version 2 of the License, or (at your
- # option) any later version.
-
- # In addition to the permissions in the GNU General Public License, the
- # Free Software Foundation gives you unlimited permission to link the
- # compiled version of this file with other programs, and to distribute
- # those programs without any restriction coming from the use of this
- # file.  (The General Public License restrictions do apply in other
- # respects; for example, they cover modification of the file, and
- # distribution when not linked into another program.)
-
- # This file is distributed in the hope that it will be useful, but
- # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- # or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
- # License for more details.
-
- # You should have received a copy of the GNU General Public License
- # along with GCC; see the file COPYING.  If not, write to the 
- # Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- # MA 02111-1307, USA.
-
-        .set noreorder
-        .set noat
-
-       .text
-
-       .globl __udiv_qrnnd
-       .ent __udiv_qrnnd
-__udiv_qrnnd:
-       .frame $30,0,$26,0
-       .prologue 0
-
-#define cnt    $2
-#define tmp    $3
-#define rem_ptr        $16
-#define n1     $17
-#define n0     $18
-#define d      $19
-#define qb     $20
-#define AT     $at
-
-       ldiq    cnt,16
-       blt     d,$largedivisor
-
-$loop1:        cmplt   n0,0,tmp
-       addq    n1,n1,n1
-       bis     n1,tmp,n1
-       addq    n0,n0,n0
-       cmpule  d,n1,qb
-       subq    n1,d,tmp
-       cmovne  qb,tmp,n1
-       bis     n0,qb,n0
-       cmplt   n0,0,tmp
-       addq    n1,n1,n1
-       bis     n1,tmp,n1
-       addq    n0,n0,n0
-       cmpule  d,n1,qb
-       subq    n1,d,tmp
-       cmovne  qb,tmp,n1
-       bis     n0,qb,n0
-       cmplt   n0,0,tmp
-       addq    n1,n1,n1
-       bis     n1,tmp,n1
-       addq    n0,n0,n0
-       cmpule  d,n1,qb
-       subq    n1,d,tmp
-       cmovne  qb,tmp,n1
-       bis     n0,qb,n0
-       cmplt   n0,0,tmp
-       addq    n1,n1,n1
-       bis     n1,tmp,n1
-       addq    n0,n0,n0
-       cmpule  d,n1,qb
-       subq    n1,d,tmp
-       cmovne  qb,tmp,n1
-       bis     n0,qb,n0
-       subq    cnt,1,cnt
-       bgt     cnt,$loop1
-       stq     n1,0(rem_ptr)
-       bis     $31,n0,$0
-       ret     $31,($26),1
-
-$largedivisor:
-       and     n0,1,$4
-
-       srl     n0,1,n0
-       sll     n1,63,tmp
-       or      tmp,n0,n0
-       srl     n1,1,n1
-
-       and     d,1,$6
-       srl     d,1,$5
-       addq    $5,$6,$5
-
-$loop2:        cmplt   n0,0,tmp
-       addq    n1,n1,n1
-       bis     n1,tmp,n1
-       addq    n0,n0,n0
-       cmpule  $5,n1,qb
-       subq    n1,$5,tmp
-       cmovne  qb,tmp,n1
-       bis     n0,qb,n0
-       cmplt   n0,0,tmp
-       addq    n1,n1,n1
-       bis     n1,tmp,n1
-       addq    n0,n0,n0
-       cmpule  $5,n1,qb
-       subq    n1,$5,tmp
-       cmovne  qb,tmp,n1
-       bis     n0,qb,n0
-       cmplt   n0,0,tmp
-       addq    n1,n1,n1
-       bis     n1,tmp,n1
-       addq    n0,n0,n0
-       cmpule  $5,n1,qb
-       subq    n1,$5,tmp
-       cmovne  qb,tmp,n1
-       bis     n0,qb,n0
-       cmplt   n0,0,tmp
-       addq    n1,n1,n1
-       bis     n1,tmp,n1
-       addq    n0,n0,n0
-       cmpule  $5,n1,qb
-       subq    n1,$5,tmp
-       cmovne  qb,tmp,n1
-       bis     n0,qb,n0
-       subq    cnt,1,cnt
-       bgt     cnt,$loop2
-
-       addq    n1,n1,n1
-       addq    $4,n1,n1
-       bne     $6,$Odd
-       stq     n1,0(rem_ptr)
-       bis     $31,n0,$0
-       ret     $31,($26),1
-
-$Odd:
-       /* q' in n0. r' in n1 */
-       addq    n1,n0,n1
-
-       cmpult  n1,n0,tmp       # tmp := carry from addq
-       subq    n1,d,AT
-       addq    n0,tmp,n0
-       cmovne  tmp,AT,n1
-
-       cmpult  n1,d,tmp
-       addq    n0,1,AT
-       cmoveq  tmp,AT,n0
-       subq    n1,d,AT
-       cmoveq  tmp,AT,n1
-
-       stq     n1,0(rem_ptr)
-       bis     $31,n0,$0
-       ret     $31,($26),1
-
-       .end    __udiv_qrnnd
index 9320b04c04bf55ba13ba85c3b14a48e0244531af..4cf45a99fd792a5e1b528c96a610cfdd9120f14e 100644 (file)
@@ -26,11 +26,6 @@ extern char empty_zero_page[PAGE_SIZE];
 
 extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
 
-/* Macro to mark a page protection as uncacheable */
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
-
-extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
-
 /* to cope with aliasing VIPT cache */
 #define HAVE_ARCH_UNMAPPED_AREA
 
index fc196421b2cedb5ecf92eaeff32bcd907a2399d8..59baf6c132a749b7c0331d3e3597e398aef0dce0 100644 (file)
@@ -1989,8 +1989,6 @@ config ARCH_HIBERNATION_POSSIBLE
 
 endmenu
 
-source "drivers/firmware/Kconfig"
-
 if CRYPTO
 source "arch/arm/crypto/Kconfig"
 endif
index 614999dcb990358eda699dacbeaf9630affbd8da..cd4672501add7d91b1bd930f6b78840d3187cd01 100644 (file)
@@ -71,7 +71,6 @@
                        isc: isc@f0008000 {
                                pinctrl-names = "default";
                                pinctrl-0 = <&pinctrl_isc_base &pinctrl_isc_data_8bit &pinctrl_isc_data_9_10 &pinctrl_isc_data_11_12>;
-                               status = "okay";
                        };
 
                        qspi1: spi@f0024000 {
index 4cbed98cc2f43ce91065f626db64c1654ff4ebda..f3d6aaa3a78dc54e0fb706c36f98613a8cf86483 100644 (file)
 
                                        regulator-state-standby {
                                                regulator-on-in-suspend;
+                                               regulator-suspend-microvolt = <1350000>;
                                                regulator-mode = <4>;
                                        };
 
                                        regulator-state-mem {
                                                regulator-on-in-suspend;
+                                               regulator-suspend-microvolt = <1350000>;
                                                regulator-mode = <4>;
                                        };
                                };
        #address-cells = <1>;
        #size-cells = <0>;
        pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_gmac0_default &pinctrl_gmac0_txck_default &pinctrl_gmac0_phy_irq>;
+       pinctrl-0 = <&pinctrl_gmac0_default
+                    &pinctrl_gmac0_mdio_default
+                    &pinctrl_gmac0_txck_default
+                    &pinctrl_gmac0_phy_irq>;
        phy-mode = "rgmii-id";
        status = "okay";
 
        #address-cells = <1>;
        #size-cells = <0>;
        pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_gmac1_default &pinctrl_gmac1_phy_irq>;
+       pinctrl-0 = <&pinctrl_gmac1_default
+                    &pinctrl_gmac1_mdio_default
+                    &pinctrl_gmac1_phy_irq>;
        phy-mode = "rmii";
        status = "okay";
 
                         <PIN_PA15__G0_TXEN>,
                         <PIN_PA30__G0_RXCK>,
                         <PIN_PA18__G0_RXDV>,
-                        <PIN_PA22__G0_MDC>,
-                        <PIN_PA23__G0_MDIO>,
                         <PIN_PA25__G0_125CK>;
+               slew-rate = <0>;
+               bias-disable;
+       };
+
+       pinctrl_gmac0_mdio_default: gmac0_mdio_default {
+               pinmux = <PIN_PA22__G0_MDC>,
+                        <PIN_PA23__G0_MDIO>;
                bias-disable;
        };
 
        pinctrl_gmac0_txck_default: gmac0_txck_default {
                pinmux = <PIN_PA24__G0_TXCK>;
+               slew-rate = <0>;
                bias-pull-up;
        };
 
                         <PIN_PD25__G1_RX0>,
                         <PIN_PD26__G1_RX1>,
                         <PIN_PD27__G1_RXER>,
-                        <PIN_PD24__G1_RXDV>,
-                        <PIN_PD28__G1_MDC>,
+                        <PIN_PD24__G1_RXDV>;
+               slew-rate = <0>;
+               bias-disable;
+       };
+
+       pinctrl_gmac1_mdio_default: gmac1_mdio_default {
+               pinmux = <PIN_PD28__G1_MDC>,
                         <PIN_PD29__G1_MDIO>;
                bias-disable;
        };
                                 <PIN_PA8__SDMMC0_DAT5>,
                                 <PIN_PA9__SDMMC0_DAT6>,
                                 <PIN_PA10__SDMMC0_DAT7>;
+                       slew-rate = <0>;
                        bias-pull-up;
                };
 
                        pinmux = <PIN_PA0__SDMMC0_CK>,
                                 <PIN_PA2__SDMMC0_RSTN>,
                                 <PIN_PA11__SDMMC0_DS>;
+                       slew-rate = <0>;
                        bias-pull-up;
                };
        };
                                 <PIN_PC0__SDMMC1_DAT1>,
                                 <PIN_PC1__SDMMC1_DAT2>,
                                 <PIN_PC2__SDMMC1_DAT3>;
+                       slew-rate = <0>;
                        bias-pull-up;
                };
 
                                 <PIN_PB28__SDMMC1_RSTN>,
                                 <PIN_PC5__SDMMC1_1V8SEL>,
                                 <PIN_PC4__SDMMC1_CD>;
+                       slew-rate = <0>;
                        bias-pull-up;
                };
        };
                                 <PIN_PD6__SDMMC2_DAT1>,
                                 <PIN_PD7__SDMMC2_DAT2>,
                                 <PIN_PD8__SDMMC2_DAT3>;
+                       slew-rate = <0>;
                        bias-pull-up;
                };
 
                ck {
                        pinmux = <PIN_PD4__SDMMC2_CK>;
+                       slew-rate = <0>;
                        bias-pull-up;
                };
        };
        pinctrl-0 = <&pinctrl_sdmmc2_default>;
 };
 
+&shdwc {
+       atmel,shdwc-debouncer = <976>;
+       status = "okay";
+
+       input@0 {
+               reg = <0>;
+       };
+};
+
 &spdifrx {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_spdifrx_default>;
index f24bdd0870a521bb0412805f8fa0d1a7e3aa474c..72ce80fbf26626f45729b257914f2baab2078217 100644 (file)
@@ -40,8 +40,8 @@
                regulator-always-on;
                regulator-settling-time-us = <5000>;
                gpios = <&expgpio 4 GPIO_ACTIVE_HIGH>;
-               states = <1800000 0x1
-                         3300000 0x0>;
+               states = <1800000 0x1>,
+                        <3300000 0x0>;
                status = "okay";
        };
 
 };
 
 &pcie0 {
-       pci@1,0 {
+       pci@0,0 {
+               device_type = "pci";
                #address-cells = <3>;
                #size-cells = <2>;
                ranges;
 
                reg = <0 0 0 0 0>;
 
-               usb@1,0 {
-                       reg = <0x10000 0 0 0 0>;
+               usb@0,0 {
+                       reg = <0 0 0 0 0>;
                        resets = <&reset RASPBERRYPI_FIRMWARE_RESET_ID_USB>;
                };
        };
index b8a4096192aa92fb801ea28308dcb89e9b75d141..3b60297af7f60b09f4c625e121f7e2d938eb9b58 100644 (file)
                        status = "disabled";
                };
 
+               vec: vec@7ec13000 {
+                       compatible = "brcm,bcm2711-vec";
+                       reg = <0x7ec13000 0x1000>;
+                       clocks = <&clocks BCM2835_CLOCK_VEC>;
+                       interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+                       status = "disabled";
+               };
+
                dvp: clock@7ef00000 {
                        compatible = "brcm,brcm2711-dvp";
                        reg = <0x7ef00000 0x10>;
                                compatible = "brcm,genet-mdio-v5";
                                reg = <0xe14 0x8>;
                                reg-names = "mdio";
-                               #address-cells = <0x0>;
-                               #size-cells = <0x1>;
+                               #address-cells = <0x1>;
+                               #size-cells = <0x0>;
                        };
                };
        };
index 4119271c979d6f1b80e32e0ace0a4f76a3f4393d..c25e797b90600a26a5ed510084bef57e65ee80ae 100644 (file)
                        status = "okay";
                };
 
+               vec: vec@7e806000 {
+                       compatible = "brcm,bcm2835-vec";
+                       reg = <0x7e806000 0x1000>;
+                       clocks = <&clocks BCM2835_CLOCK_VEC>;
+                       interrupts = <2 27>;
+                       status = "disabled";
+               };
+
                pixelvalve@7e807000 {
                        compatible = "brcm,bcm2835-pixelvalve2";
                        reg = <0x7e807000 0x100>;
index 0f3be55201a5bea78b2d75b4455ec7a3e6c5ebfc..a3e06b6809476c3431e0a85ae9781d6bc37c600f 100644 (file)
                        status = "disabled";
                };
 
-               vec: vec@7e806000 {
-                       compatible = "brcm,bcm2835-vec";
-                       reg = <0x7e806000 0x1000>;
-                       clocks = <&clocks BCM2835_CLOCK_VEC>;
-                       interrupts = <2 27>;
-                       status = "disabled";
-               };
-
                usb: usb@7e980000 {
                        compatible = "brcm,bcm2835-usb";
                        reg = <0x7e980000 0x10000>;
index d3082b9774e4094f4c9afe99dff9704cc8969d07..4f88e96d81ddbdd42078409d0a94e43f3e194566 100644 (file)
@@ -56,6 +56,7 @@
        panel {
                compatible = "edt,etm0700g0dh6";
                pinctrl-0 = <&pinctrl_display_gpio>;
+               pinctrl-names = "default";
                enable-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>;
 
                port {
@@ -76,8 +77,7 @@
                regulator-name = "vbus";
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
-               gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>;
-               enable-active-high;
+               gpio = <&gpio1 2 0>;
        };
 };
 
index cb8b539eb29d107e70d0df38d830877ec15103df..e5c4dc65fbabf518ee273f610cc93abafe3a18e4 100644 (file)
@@ -5,6 +5,7 @@
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
 #include <dt-bindings/pwm/pwm.h>
 
 / {
                        led-cur = /bits/ 8 <0x20>;
                        max-cur = /bits/ 8 <0x60>;
                        reg = <0>;
+                       color = <LED_COLOR_ID_RED>;
                };
 
                chan@1 {
                        led-cur = /bits/ 8 <0x20>;
                        max-cur = /bits/ 8 <0x60>;
                        reg = <1>;
+                       color = <LED_COLOR_ID_GREEN>;
                };
 
                chan@2 {
                        led-cur = /bits/ 8 <0x20>;
                        max-cur = /bits/ 8 <0x60>;
                        reg = <2>;
+                       color = <LED_COLOR_ID_BLUE>;
                };
 
                chan@3 {
                        led-cur = /bits/ 8 <0x0>;
                        max-cur = /bits/ 8 <0x0>;
                        reg = <3>;
+                       color = <LED_COLOR_ID_WHITE>;
                };
        };
 
index 5de4ccb979163dd7bc54b26658a3f69571382559..f7a56d6b160c8f75c5412be21449f5db58d84df7 100644 (file)
        pinctrl-0 = <&pinctrl_enet>;
        phy-mode = "rgmii-id";
        phy-reset-gpios = <&gpio1 26 GPIO_ACTIVE_LOW>;
+       phy-handle = <&phy>;
        status = "okay";
+
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               phy: ethernet-phy@1 {
+                       reg = <1>;
+                       qca,clk-out-frequency = <125000000>;
+               };
+       };
 };
 
 &hdmi {
index 5a63ca6157229ccc48d175f8045ef9446b203720..99f4cf777a384b0286f3ad177534e4428a5aa37d 100644 (file)
                compatible = "micron,n25q256a", "jedec,spi-nor";
                spi-max-frequency = <29000000>;
                spi-rx-bus-width = <4>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                reg = <0>;
        };
 
                compatible = "micron,n25q256a", "jedec,spi-nor";
                spi-max-frequency = <29000000>;
                spi-rx-bus-width = <4>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                reg = <2>;
        };
 };
index 779cc536566d6156394c4dd5b2ae2578286bae6f..a3fde3316c7360002e324cfd7d013578819403f4 100644 (file)
                compatible = "micron,n25q256a", "jedec,spi-nor";
                spi-max-frequency = <29000000>;
                spi-rx-bus-width = <4>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                reg = <0>;
        };
 };
index c5b903718414992c105b2895b24e9c44476ab9fc..7d530ae3483b806e2c125facb086eabab6fff3a7 100644 (file)
 
        nand@1,0 {
                compatible = "ti,omap2-nand";
-               reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
+               reg = <1 0 4>; /* CS1, offset 0, IO size 4 */
                interrupt-parent = <&gpmc>;
                interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
                             <1 IRQ_TYPE_NONE>; /* termcount */
index 0b2bed6e7adfd10febce5bc2441d040d9969c692..d1c1c6aab2b8781b7f047d93293cb9fb2f7c9cb1 100644 (file)
                        clock-frequency = <19200000>;
                };
 
-               pxo_board {
+               pxo_board: pxo_board {
                        compatible = "fixed-clock";
                        #clock-cells = <0>;
                        clock-frequency = <27000000>;
                };
 
                gpu: adreno-3xx@4300000 {
-                       compatible = "qcom,adreno-3xx";
+                       compatible = "qcom,adreno-320.2", "qcom,adreno";
                        reg = <0x04300000 0x20000>;
                        reg-names = "kgsl_3d0_reg_memory";
                        interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "kgsl_3d0_irq";
                        clock-names =
-                           "core_clk",
-                           "iface_clk",
-                           "mem_clk",
-                           "mem_iface_clk";
+                           "core",
+                           "iface",
+                           "mem",
+                           "mem_iface";
                        clocks =
                            <&mmcc GFX3D_CLK>,
                            <&mmcc GFX3D_AHB_CLK>,
                            <&mmcc GFX3D_AXI_CLK>,
                            <&mmcc MMSS_IMEM_AHB_CLK>;
-                       qcom,chipid = <0x03020002>;
 
                        iommus = <&gfx3d 0
                                  &gfx3d 1
                        reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator";
                        clock-names = "iface_clk", "ref";
                        clocks = <&mmcc DSI_M_AHB_CLK>,
-                                <&cxo_board>;
+                                <&pxo_board>;
                };
 
 
index cc6be6db7b80b9c798927724d2927ad0a7baaedc..6c58c151c6d9e12bf50ca826f418433496cd30d9 100644 (file)
                #size-cells = <1>;
                ranges;
 
+               securam: securam@e0000000 {
+                       compatible = "microchip,sama7g5-securam", "atmel,sama5d2-securam", "mmio-sram";
+                       reg = <0xe0000000 0x4000>;
+                       clocks = <&pmc PMC_TYPE_PERIPHERAL 18>;
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       ranges = <0 0xe0000000 0x4000>;
+                       no-memory-wc;
+                       status = "okay";
+               };
+
                secumod: secumod@e0004000 {
                        compatible = "microchip,sama7g5-secumod", "atmel,sama5d2-secumod", "syscon";
                        reg = <0xe0004000 0x4000>;
                        clock-names = "td_slck", "md_slck", "main_xtal";
                };
 
+               shdwc: shdwc@e001d010 {
+                       compatible = "microchip,sama7g5-shdwc", "syscon";
+                       reg = <0xe001d010 0x10>;
+                       clocks = <&clk32k 0>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       atmel,wakeup-rtc-timer;
+                       atmel,wakeup-rtt-timer;
+                       status = "disabled";
+               };
+
                rtt: rtt@e001d020 {
                        compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
                        reg = <0xe001d020 0x30>;
                        clocks = <&clk32k 0>;
                };
 
+               chipid@e0020000 {
+                       compatible = "microchip,sama7g5-chipid";
+                       reg = <0xe0020000 0x8>;
+               };
+
                sdmmc0: mmc@e1204000 {
                        compatible = "microchip,sama7g5-sdhci", "microchip,sam9x60-sdhci";
                        reg = <0xe1204000 0x4000>;
                        };
                };
 
+               uddrc: uddrc@e3800000 {
+                       compatible = "microchip,sama7g5-uddrc";
+                       reg = <0xe3800000 0x4000>;
+                       status = "okay";
+               };
+
+               ddr3phy: ddr3phy@e3804000 {
+                       compatible = "microchip,sama7g5-ddr3phy";
+                       reg = <0xe3804000 0x1000>;
+                       status = "okay";
+               };
+
                gic: interrupt-controller@e8c11000 {
                        compatible = "arm,cortex-a7-gic";
                        #interrupt-cells = <3>;
index f266b7b034823a1063f4107fc6c0eda9e3e29767..cc88ebe7a60ced1725da1f3939f2fa0e462ef918 100644 (file)
@@ -47,7 +47,7 @@
                };
 
                gmac: eth@e0800000 {
-                       compatible = "st,spear600-gmac";
+                       compatible = "snps,dwmac-3.40a";
                        reg = <0xe0800000 0x8000>;
                        interrupts = <23 22>;
                        interrupt-names = "macirq", "eth_wake_irq";
index 2ad9fd7c94ecce45e437b3d96fc2fc6a8f72bb45..8af4b77fe655db89a1f929fab8be20bb8c6709e1 100644 (file)
@@ -17,6 +17,7 @@
  * TAKE CARE WHEN MAINTAINING THIS FILE TO PROPAGATE ANY RELEVANT
  * CHANGES TO vexpress-v2m.dtsi!
  */
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 
 / {
        v2m_fixed_3v3: fixed-regulator-0 {
        };
 
        bus@8000000 {
-               motherboard-bus {
-                       model = "V2M-P1";
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               #interrupt-cells = <1>;
+               interrupt-map-mask = <0 63>;
+               interrupt-map = <0  0 &gic GIC_SPI  0 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  1 &gic GIC_SPI  1 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  2 &gic GIC_SPI  2 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  3 &gic GIC_SPI  3 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  4 &gic GIC_SPI  4 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  5 &gic GIC_SPI  5 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  6 &gic GIC_SPI  6 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  7 &gic GIC_SPI  7 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  8 &gic GIC_SPI  8 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  9 &gic GIC_SPI  9 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+
+               motherboard-bus@8000000 {
                        arm,hbi = <0x190>;
                        arm,vexpress,site = <0>;
-                       arm,v2m-memory-map = "rs1";
                        compatible = "arm,vexpress,v2m-p1", "simple-bus";
                        #address-cells = <2>; /* SMB chipselect number and offset */
                        #size-cells = <1>;
-                       #interrupt-cells = <1>;
-                       ranges;
+                       ranges = <0 0 0x08000000 0x04000000>,
+                                <1 0 0x14000000 0x04000000>,
+                                <2 0 0x18000000 0x04000000>,
+                                <3 0 0x1c000000 0x04000000>,
+                                <4 0 0x0c000000 0x04000000>,
+                                <5 0 0x10000000 0x04000000>;
 
                        nor_flash: flash@0 {
                                compatible = "arm,vexpress-flash", "cfi-flash";
                                        clock-names = "apb_pclk";
                                };
 
-                               mmci@50000 {
+                               mmc@50000 {
                                        compatible = "arm,pl180", "arm,primecell";
                                        reg = <0x050000 0x1000>;
                                        interrupts = <9>, <10>;
                                        clock-names = "uartclk", "apb_pclk";
                                };
 
-                               wdt@f0000 {
+                               watchdog@f0000 {
                                        compatible = "arm,sp805", "arm,primecell";
                                        reg = <0x0f0000 0x1000>;
                                        interrupts = <0>;
index ec13ceb9ed362263fb7bd4428f75616d75f25384..f434fe5cf4a14ffac109fd48d230070a41049e11 100644 (file)
  * TAKE CARE WHEN MAINTAINING THIS FILE TO PROPAGATE ANY RELEVANT
  * CHANGES TO vexpress-v2m-rs1.dtsi!
  */
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 
 / {
-       bus@4000000 {
-               motherboard {
-                       model = "V2M-P1";
+       bus@40000000 {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges = <0x40000000 0x40000000 0x10000000>,
+                        <0x10000000 0x10000000 0x00020000>;
+
+               #interrupt-cells = <1>;
+               interrupt-map-mask = <0 63>;
+               interrupt-map = <0  0 &gic GIC_SPI  0 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  1 &gic GIC_SPI  1 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  2 &gic GIC_SPI  2 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  3 &gic GIC_SPI  3 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  4 &gic GIC_SPI  4 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  5 &gic GIC_SPI  5 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  6 &gic GIC_SPI  6 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  7 &gic GIC_SPI  7 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  8 &gic GIC_SPI  8 IRQ_TYPE_LEVEL_HIGH>,
+                               <0  9 &gic GIC_SPI  9 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+
+               motherboard-bus@40000000 {
                        arm,hbi = <0x190>;
                        arm,vexpress,site = <0>;
                        compatible = "arm,vexpress,v2m-p1", "simple-bus";
                        #address-cells = <2>; /* SMB chipselect number and offset */
                        #size-cells = <1>;
-                       #interrupt-cells = <1>;
-                       ranges;
+                       ranges = <0 0 0x40000000 0x04000000>,
+                                <1 0 0x44000000 0x04000000>,
+                                <2 0 0x48000000 0x04000000>,
+                                <3 0 0x4c000000 0x04000000>,
+                                <7 0 0x10000000 0x00020000>;
 
                        flash@0,00000000 {
                                compatible = "arm,vexpress-flash", "cfi-flash";
index e63c5c0bfb43f03683c5a01048f563719f96073e..679537e17ff5c7119ffb62ad158e2400090685c7 100644 (file)
        };
 
        bus@8000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0 0x08000000 0x04000000>,
-                        <1 0 0 0x14000000 0x04000000>,
-                        <2 0 0 0x18000000 0x04000000>,
-                        <3 0 0 0x1c000000 0x04000000>,
-                        <4 0 0 0x0c000000 0x04000000>,
-                        <5 0 0 0x10000000 0x04000000>;
-
-               #interrupt-cells = <1>;
-               interrupt-map-mask = <0 0 63>;
-               interrupt-map = <0 0  0 &gic 0  0 4>,
-                               <0 0  1 &gic 0  1 4>,
-                               <0 0  2 &gic 0  2 4>,
-                               <0 0  3 &gic 0  3 4>,
-                               <0 0  4 &gic 0  4 4>,
-                               <0 0  5 &gic 0  5 4>,
-                               <0 0  6 &gic 0  6 4>,
-                               <0 0  7 &gic 0  7 4>,
-                               <0 0  8 &gic 0  8 4>,
-                               <0 0  9 &gic 0  9 4>,
-                               <0 0 10 &gic 0 10 4>,
-                               <0 0 11 &gic 0 11 4>,
-                               <0 0 12 &gic 0 12 4>,
-                               <0 0 13 &gic 0 13 4>,
-                               <0 0 14 &gic 0 14 4>,
-                               <0 0 15 &gic 0 15 4>,
-                               <0 0 16 &gic 0 16 4>,
-                               <0 0 17 &gic 0 17 4>,
-                               <0 0 18 &gic 0 18 4>,
-                               <0 0 19 &gic 0 19 4>,
-                               <0 0 20 &gic 0 20 4>,
-                               <0 0 21 &gic 0 21 4>,
-                               <0 0 22 &gic 0 22 4>,
-                               <0 0 23 &gic 0 23 4>,
-                               <0 0 24 &gic 0 24 4>,
-                               <0 0 25 &gic 0 25 4>,
-                               <0 0 26 &gic 0 26 4>,
-                               <0 0 27 &gic 0 27 4>,
-                               <0 0 28 &gic 0 28 4>,
-                               <0 0 29 &gic 0 29 4>,
-                               <0 0 30 &gic 0 30 4>,
-                               <0 0 31 &gic 0 31 4>,
-                               <0 0 32 &gic 0 32 4>,
-                               <0 0 33 &gic 0 33 4>,
-                               <0 0 34 &gic 0 34 4>,
-                               <0 0 35 &gic 0 35 4>,
-                               <0 0 36 &gic 0 36 4>,
-                               <0 0 37 &gic 0 37 4>,
-                               <0 0 38 &gic 0 38 4>,
-                               <0 0 39 &gic 0 39 4>,
-                               <0 0 40 &gic 0 40 4>,
-                               <0 0 41 &gic 0 41 4>,
-                               <0 0 42 &gic 0 42 4>;
+               ranges = <0x8000000 0 0x8000000 0x18000000>;
        };
 
        site2: hsb@40000000 {
index 012d40a7228c14293588d99fc8e6466ab7542ee4..511e87cc2bc5e0aa3b8bb9b566e56c997ffc816c 100644 (file)
        };
 
        smb: bus@8000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0 0x08000000 0x04000000>,
-                        <1 0 0 0x14000000 0x04000000>,
-                        <2 0 0 0x18000000 0x04000000>,
-                        <3 0 0 0x1c000000 0x04000000>,
-                        <4 0 0 0x0c000000 0x04000000>,
-                        <5 0 0 0x10000000 0x04000000>;
-
-               #interrupt-cells = <1>;
-               interrupt-map-mask = <0 0 63>;
-               interrupt-map = <0 0  0 &gic 0  0 4>,
-                               <0 0  1 &gic 0  1 4>,
-                               <0 0  2 &gic 0  2 4>,
-                               <0 0  3 &gic 0  3 4>,
-                               <0 0  4 &gic 0  4 4>,
-                               <0 0  5 &gic 0  5 4>,
-                               <0 0  6 &gic 0  6 4>,
-                               <0 0  7 &gic 0  7 4>,
-                               <0 0  8 &gic 0  8 4>,
-                               <0 0  9 &gic 0  9 4>,
-                               <0 0 10 &gic 0 10 4>,
-                               <0 0 11 &gic 0 11 4>,
-                               <0 0 12 &gic 0 12 4>,
-                               <0 0 13 &gic 0 13 4>,
-                               <0 0 14 &gic 0 14 4>,
-                               <0 0 15 &gic 0 15 4>,
-                               <0 0 16 &gic 0 16 4>,
-                               <0 0 17 &gic 0 17 4>,
-                               <0 0 18 &gic 0 18 4>,
-                               <0 0 19 &gic 0 19 4>,
-                               <0 0 20 &gic 0 20 4>,
-                               <0 0 21 &gic 0 21 4>,
-                               <0 0 22 &gic 0 22 4>,
-                               <0 0 23 &gic 0 23 4>,
-                               <0 0 24 &gic 0 24 4>,
-                               <0 0 25 &gic 0 25 4>,
-                               <0 0 26 &gic 0 26 4>,
-                               <0 0 27 &gic 0 27 4>,
-                               <0 0 28 &gic 0 28 4>,
-                               <0 0 29 &gic 0 29 4>,
-                               <0 0 30 &gic 0 30 4>,
-                               <0 0 31 &gic 0 31 4>,
-                               <0 0 32 &gic 0 32 4>,
-                               <0 0 33 &gic 0 33 4>,
-                               <0 0 34 &gic 0 34 4>,
-                               <0 0 35 &gic 0 35 4>,
-                               <0 0 36 &gic 0 36 4>,
-                               <0 0 37 &gic 0 37 4>,
-                               <0 0 38 &gic 0 38 4>,
-                               <0 0 39 &gic 0 39 4>,
-                               <0 0 40 &gic 0 40 4>,
-                               <0 0 41 &gic 0 41 4>,
-                               <0 0 42 &gic 0 42 4>;
+               ranges = <0x8000000 0 0x8000000 0x18000000>;
        };
 
        site2: hsb@40000000 {
index 7aa64ae2577981469c29b61a9a11eb18587020c7..3b88209bacea2f8664dcc4653f6bbca2397de4cb 100644 (file)
        };
 
        smb: bus@8000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0x08000000 0x04000000>,
-                        <1 0 0x14000000 0x04000000>,
-                        <2 0 0x18000000 0x04000000>,
-                        <3 0 0x1c000000 0x04000000>,
-                        <4 0 0x0c000000 0x04000000>,
-                        <5 0 0x10000000 0x04000000>;
-
-               #interrupt-cells = <1>;
-               interrupt-map-mask = <0 0 63>;
-               interrupt-map = <0 0  0 &gic 0  0 4>,
-                               <0 0  1 &gic 0  1 4>,
-                               <0 0  2 &gic 0  2 4>,
-                               <0 0  3 &gic 0  3 4>,
-                               <0 0  4 &gic 0  4 4>,
-                               <0 0  5 &gic 0  5 4>,
-                               <0 0  6 &gic 0  6 4>,
-                               <0 0  7 &gic 0  7 4>,
-                               <0 0  8 &gic 0  8 4>,
-                               <0 0  9 &gic 0  9 4>,
-                               <0 0 10 &gic 0 10 4>,
-                               <0 0 11 &gic 0 11 4>,
-                               <0 0 12 &gic 0 12 4>,
-                               <0 0 13 &gic 0 13 4>,
-                               <0 0 14 &gic 0 14 4>,
-                               <0 0 15 &gic 0 15 4>,
-                               <0 0 16 &gic 0 16 4>,
-                               <0 0 17 &gic 0 17 4>,
-                               <0 0 18 &gic 0 18 4>,
-                               <0 0 19 &gic 0 19 4>,
-                               <0 0 20 &gic 0 20 4>,
-                               <0 0 21 &gic 0 21 4>,
-                               <0 0 22 &gic 0 22 4>,
-                               <0 0 23 &gic 0 23 4>,
-                               <0 0 24 &gic 0 24 4>,
-                               <0 0 25 &gic 0 25 4>,
-                               <0 0 26 &gic 0 26 4>,
-                               <0 0 27 &gic 0 27 4>,
-                               <0 0 28 &gic 0 28 4>,
-                               <0 0 29 &gic 0 29 4>,
-                               <0 0 30 &gic 0 30 4>,
-                               <0 0 31 &gic 0 31 4>,
-                               <0 0 32 &gic 0 32 4>,
-                               <0 0 33 &gic 0 33 4>,
-                               <0 0 34 &gic 0 34 4>,
-                               <0 0 35 &gic 0 35 4>,
-                               <0 0 36 &gic 0 36 4>,
-                               <0 0 37 &gic 0 37 4>,
-                               <0 0 38 &gic 0 38 4>,
-                               <0 0 39 &gic 0 39 4>,
-                               <0 0 40 &gic 0 40 4>,
-                               <0 0 41 &gic 0 41 4>,
-                               <0 0 42 &gic 0 42 4>;
+               ranges = <0 0x8000000 0x18000000>;
        };
 
        site2: hsb@40000000 {
index 4c58479558562a5fa347bbe46bbb6fee280e8f03..5916e4877eace701c1affb59ea6d446a43079679 100644 (file)
                };
        };
 
-       smb: bus@4000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0x40000000 0x04000000>,
-                        <1 0 0x44000000 0x04000000>,
-                        <2 0 0x48000000 0x04000000>,
-                        <3 0 0x4c000000 0x04000000>,
-                        <7 0 0x10000000 0x00020000>;
-
-               #interrupt-cells = <1>;
-               interrupt-map-mask = <0 0 63>;
-               interrupt-map = <0 0  0 &gic 0  0 4>,
-                               <0 0  1 &gic 0  1 4>,
-                               <0 0  2 &gic 0  2 4>,
-                               <0 0  3 &gic 0  3 4>,
-                               <0 0  4 &gic 0  4 4>,
-                               <0 0  5 &gic 0  5 4>,
-                               <0 0  6 &gic 0  6 4>,
-                               <0 0  7 &gic 0  7 4>,
-                               <0 0  8 &gic 0  8 4>,
-                               <0 0  9 &gic 0  9 4>,
-                               <0 0 10 &gic 0 10 4>,
-                               <0 0 11 &gic 0 11 4>,
-                               <0 0 12 &gic 0 12 4>,
-                               <0 0 13 &gic 0 13 4>,
-                               <0 0 14 &gic 0 14 4>,
-                               <0 0 15 &gic 0 15 4>,
-                               <0 0 16 &gic 0 16 4>,
-                               <0 0 17 &gic 0 17 4>,
-                               <0 0 18 &gic 0 18 4>,
-                               <0 0 19 &gic 0 19 4>,
-                               <0 0 20 &gic 0 20 4>,
-                               <0 0 21 &gic 0 21 4>,
-                               <0 0 22 &gic 0 22 4>,
-                               <0 0 23 &gic 0 23 4>,
-                               <0 0 24 &gic 0 24 4>,
-                               <0 0 25 &gic 0 25 4>,
-                               <0 0 26 &gic 0 26 4>,
-                               <0 0 27 &gic 0 27 4>,
-                               <0 0 28 &gic 0 28 4>,
-                               <0 0 29 &gic 0 29 4>,
-                               <0 0 30 &gic 0 30 4>,
-                               <0 0 31 &gic 0 31 4>,
-                               <0 0 32 &gic 0 32 4>,
-                               <0 0 33 &gic 0 33 4>,
-                               <0 0 34 &gic 0 34 4>,
-                               <0 0 35 &gic 0 35 4>,
-                               <0 0 36 &gic 0 36 4>,
-                               <0 0 37 &gic 0 37 4>,
-                               <0 0 38 &gic 0 38 4>,
-                               <0 0 39 &gic 0 39 4>,
-                               <0 0 40 &gic 0 40 4>,
-                               <0 0 41 &gic 0 41 4>,
-                               <0 0 42 &gic 0 42 4>;
-       };
-
        site2: hsb@e0000000 {
                compatible = "simple-bus";
                #address-cells = <1>;
index efeb5724d9e9a8755f708bc07f085181c6811447..6237ede2f0c70010c6bc5fd9c3688365a602a787 100644 (file)
@@ -40,7 +40,9 @@ EXPORT_SYMBOL(sharpsl_param);
 
 void sharpsl_save_param(void)
 {
-       memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
+       struct sharpsl_param_info *params = param_start(PARAM_BASE);
+
+       memcpy(&sharpsl_param, params, sizeof(*params));
 
        if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
                sharpsl_param.comadj=-1;
index d2d5f1cf815f207f30aa78d2511a0cb568f2a6c6..e6ff844821cfb8084a2f320182b22061476bb254 100644 (file)
@@ -76,6 +76,7 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_ILITEK_IL9322=y
 CONFIG_DRM_TVE200=y
+CONFIG_FB=y
 CONFIG_LOGO=y
 CONFIG_USB=y
 CONFIG_USB_MON=y
index ccee86d0045dd60fbd1fbde612e8b0d53f8f1ac6..5e4128dadd8d54b21d841b9d2d0920abeed58b76 100644 (file)
@@ -292,6 +292,7 @@ CONFIG_DRM_IMX_LDB=y
 CONFIG_DRM_IMX_HDMI=y
 CONFIG_DRM_ETNAVIV=y
 CONFIG_DRM_MXSFB=y
+CONFIG_FB=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_L4F00242T03=y
index ba67c4717dccec3f0b2748dd1b885dcf4844d293..33572998dbbe08f6857dc4efeebfaac6244e41b1 100644 (file)
@@ -197,7 +197,6 @@ CONFIG_PCI_EPF_TEST=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_OMAP_OCP2SCP=y
-CONFIG_SIMPLE_PM_BUS=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
@@ -456,6 +455,7 @@ CONFIG_PINCTRL_STMFX=y
 CONFIG_PINCTRL_PALMAS=y
 CONFIG_PINCTRL_OWL=y
 CONFIG_PINCTRL_S500=y
+CONFIG_PINCTRL_MSM=y
 CONFIG_PINCTRL_APQ8064=y
 CONFIG_PINCTRL_APQ8084=y
 CONFIG_PINCTRL_IPQ8064=y
@@ -725,6 +725,7 @@ CONFIG_DRM_PL111=m
 CONFIG_DRM_LIMA=m
 CONFIG_DRM_PANFROST=m
 CONFIG_DRM_ASPEED_GFX=m
+CONFIG_FB=y
 CONFIG_FB_EFI=y
 CONFIG_FB_WM8505=y
 CONFIG_FB_SH_MOBILE_LCDC=y
@@ -1122,6 +1123,7 @@ CONFIG_PHY_DM816X_USB=m
 CONFIG_OMAP_USB2=y
 CONFIG_TI_PIPE3=y
 CONFIG_TWL4030_USB=m
+CONFIG_RAS=y
 CONFIG_NVMEM_IMX_OCOTP=y
 CONFIG_ROCKCHIP_EFUSE=m
 CONFIG_NVMEM_SUNXI_SID=y
index cae0db6b4eaf30e2cf943a2678f2b66a442f2d4a..de37f7e9099910b952073904147df3f557e7a3e7 100644 (file)
@@ -46,7 +46,6 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=64
-CONFIG_SIMPLE_PM_BUS=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
index d9a27e4e09140b43e906528703cf686866b83104..18d2a960b2d21a04c3c5213e828dd34ee674dde0 100644 (file)
@@ -40,7 +40,6 @@ CONFIG_PCI_RCAR_GEN2=y
 CONFIG_PCIE_RCAR_HOST=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_SIMPLE_PM_BUS=y
 CONFIG_MTD=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
index d0a800be04869bc08534964351bf90fd3876a409..a41e27ace391fa98450e1abd9b9a58e12758cba2 100644 (file)
@@ -628,7 +628,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
                                uprobe_notify_resume(regs);
                        } else {
                                tracehook_notify_resume(regs);
-                               rseq_handle_notify_resume(NULL, regs);
                        }
                }
                local_irq_disable();
index d6cfe7c4bb00f7f6e6b1132b8f95370e86f30096..8711d6824c1fa558ff6bfef963e9f6ff464ef7fd 100644 (file)
@@ -47,12 +47,26 @@ struct at91_pm_bu {
        unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
 };
 
+/*
+ * struct at91_pm_sfrbu_regs: registers mapping for SFRBU
+ * @pswbu: power switch BU control registers
+ */
+struct at91_pm_sfrbu_regs {
+       struct {
+               u32 key;
+               u32 ctrl;
+               u32 state;
+               u32 softsw;
+       } pswbu;
+};
+
 /**
  * struct at91_soc_pm - AT91 SoC power management data structure
  * @config_shdwc_ws: wakeup sources configuration function for SHDWC
  * @config_pmc_ws: wakeup sources configuration function for PMC
  * @ws_ids: wakeup sources of_device_id array
  * @data: PM data to be used on last phase of suspend
+ * @sfrbu_regs: SFRBU registers mapping
  * @bu: backup unit mapped data (for backup mode)
  * @memcs: memory chip select
  */
@@ -62,6 +76,7 @@ struct at91_soc_pm {
        const struct of_device_id *ws_ids;
        struct at91_pm_bu *bu;
        struct at91_pm_data data;
+       struct at91_pm_sfrbu_regs sfrbu_regs;
        void *memcs;
 };
 
@@ -356,9 +371,36 @@ static int at91_suspend_finish(unsigned long val)
        return 0;
 }
 
+static void at91_pm_switch_ba_to_vbat(void)
+{
+       unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
+       unsigned int val;
+
+       /* Just for safety. */
+       if (!soc_pm.data.sfrbu)
+               return;
+
+       val = readl(soc_pm.data.sfrbu + offset);
+
+       /* Already on VBAT. */
+       if (!(val & soc_pm.sfrbu_regs.pswbu.state))
+               return;
+
+       val &= ~soc_pm.sfrbu_regs.pswbu.softsw;
+       val |= soc_pm.sfrbu_regs.pswbu.key | soc_pm.sfrbu_regs.pswbu.ctrl;
+       writel(val, soc_pm.data.sfrbu + offset);
+
+       /* Wait for update. */
+       val = readl(soc_pm.data.sfrbu + offset);
+       while (val & soc_pm.sfrbu_regs.pswbu.state)
+               val = readl(soc_pm.data.sfrbu + offset);
+}
+
 static void at91_pm_suspend(suspend_state_t state)
 {
        if (soc_pm.data.mode == AT91_PM_BACKUP) {
+               at91_pm_switch_ba_to_vbat();
+
                cpu_suspend(0, at91_suspend_finish);
 
                /* The SRAM is lost between suspend cycles */
@@ -589,18 +631,22 @@ static const struct of_device_id ramc_phy_ids[] __initconst = {
        { /* Sentinel. */ },
 };
 
-static __init void at91_dt_ramc(bool phy_mandatory)
+static __init int at91_dt_ramc(bool phy_mandatory)
 {
        struct device_node *np;
        const struct of_device_id *of_id;
        int idx = 0;
        void *standby = NULL;
        const struct ramc_info *ramc;
+       int ret;
 
        for_each_matching_node_and_match(np, ramc_ids, &of_id) {
                soc_pm.data.ramc[idx] = of_iomap(np, 0);
-               if (!soc_pm.data.ramc[idx])
-                       panic(pr_fmt("unable to map ramc[%d] cpu registers\n"), idx);
+               if (!soc_pm.data.ramc[idx]) {
+                       pr_err("unable to map ramc[%d] cpu registers\n", idx);
+                       ret = -ENOMEM;
+                       goto unmap_ramc;
+               }
 
                ramc = of_id->data;
                if (ramc) {
@@ -612,25 +658,42 @@ static __init void at91_dt_ramc(bool phy_mandatory)
                idx++;
        }
 
-       if (!idx)
-               panic(pr_fmt("unable to find compatible ram controller node in dtb\n"));
+       if (!idx) {
+               pr_err("unable to find compatible ram controller node in dtb\n");
+               ret = -ENODEV;
+               goto unmap_ramc;
+       }
 
        /* Look up the DDR PHY node, if any. */
        for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
                soc_pm.data.ramc_phy = of_iomap(np, 0);
-               if (!soc_pm.data.ramc_phy)
-                       panic(pr_fmt("unable to map ramc phy cpu registers\n"));
+               if (!soc_pm.data.ramc_phy) {
+                       pr_err("unable to map ramc phy cpu registers\n");
+                       ret = -ENOMEM;
+                       goto unmap_ramc;
+               }
        }
 
-       if (phy_mandatory && !soc_pm.data.ramc_phy)
-               panic(pr_fmt("DDR PHY is mandatory!\n"));
+       if (phy_mandatory && !soc_pm.data.ramc_phy) {
+               pr_err("DDR PHY is mandatory!\n");
+               ret = -ENODEV;
+               goto unmap_ramc;
+       }
 
        if (!standby) {
                pr_warn("ramc no standby function available\n");
-               return;
+               return 0;
        }
 
        at91_cpuidle_device.dev.platform_data = standby;
+
+       return 0;
+
+unmap_ramc:
+       while (idx)
+               iounmap(soc_pm.data.ramc[--idx]);
+
+       return ret;
 }
 
 static void at91rm9200_idle(void)
@@ -1017,6 +1080,8 @@ static void __init at91_pm_init(void (*pm_idle)(void))
 
 void __init at91rm9200_pm_init(void)
 {
+       int ret;
+
        if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
                return;
 
@@ -1028,7 +1093,9 @@ void __init at91rm9200_pm_init(void)
        soc_pm.data.standby_mode = AT91_PM_STANDBY;
        soc_pm.data.suspend_mode = AT91_PM_ULP0;
 
-       at91_dt_ramc(false);
+       ret = at91_dt_ramc(false);
+       if (ret)
+               return;
 
        /*
         * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
@@ -1046,13 +1113,17 @@ void __init sam9x60_pm_init(void)
        static const int iomaps[] __initconst = {
                [AT91_PM_ULP1]          = AT91_PM_IOMAP(SHDWC),
        };
+       int ret;
 
        if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
                return;
 
        at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
        at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
-       at91_dt_ramc(false);
+       ret = at91_dt_ramc(false);
+       if (ret)
+               return;
+
        at91_pm_init(NULL);
 
        soc_pm.ws_ids = sam9x60_ws_ids;
@@ -1061,6 +1132,8 @@ void __init sam9x60_pm_init(void)
 
 void __init at91sam9_pm_init(void)
 {
+       int ret;
+
        if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
                return;
 
@@ -1072,7 +1145,10 @@ void __init at91sam9_pm_init(void)
        soc_pm.data.standby_mode = AT91_PM_STANDBY;
        soc_pm.data.suspend_mode = AT91_PM_ULP0;
 
-       at91_dt_ramc(false);
+       ret = at91_dt_ramc(false);
+       if (ret)
+               return;
+
        at91_pm_init(at91sam9_idle);
 }
 
@@ -1081,12 +1157,16 @@ void __init sama5_pm_init(void)
        static const int modes[] __initconst = {
                AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
        };
+       int ret;
 
        if (!IS_ENABLED(CONFIG_SOC_SAMA5))
                return;
 
        at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
-       at91_dt_ramc(false);
+       ret = at91_dt_ramc(false);
+       if (ret)
+               return;
+
        at91_pm_init(NULL);
 }
 
@@ -1101,18 +1181,27 @@ void __init sama5d2_pm_init(void)
                [AT91_PM_BACKUP]        = AT91_PM_IOMAP(SHDWC) |
                                          AT91_PM_IOMAP(SFRBU),
        };
+       int ret;
 
        if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
                return;
 
        at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
        at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
-       at91_dt_ramc(false);
+       ret = at91_dt_ramc(false);
+       if (ret)
+               return;
+
        at91_pm_init(NULL);
 
        soc_pm.ws_ids = sama5d2_ws_ids;
        soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
        soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;
+
+       soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
+       soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
+       soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
+       soc_pm.sfrbu_regs.pswbu.state = BIT(3);
 }
 
 void __init sama7_pm_init(void)
@@ -1127,18 +1216,27 @@ void __init sama7_pm_init(void)
                [AT91_PM_BACKUP]        = AT91_PM_IOMAP(SFRBU) |
                                          AT91_PM_IOMAP(SHDWC),
        };
+       int ret;
 
        if (!IS_ENABLED(CONFIG_SOC_SAMA7))
                return;
 
        at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
 
-       at91_dt_ramc(true);
+       ret = at91_dt_ramc(true);
+       if (ret)
+               return;
+
        at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
        at91_pm_init(NULL);
 
        soc_pm.ws_ids = sama7g5_ws_ids;
        soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
+
+       soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
+       soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
+       soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
+       soc_pm.sfrbu_regs.pswbu.state = BIT(2);
 }
 
 static int __init at91_pm_modes_select(char *str)
index cbd61a3bcab1d0bae012f6b0a86c3b3aecfd7133..fdb4f63ecde4bc6a2d523c7ee3d41ff7a60b287c 100644 (file)
@@ -1014,31 +1014,55 @@ ENTRY(at91_pm_suspend_in_sram)
        mov     tmp1, #0
        mcr     p15, 0, tmp1, c7, c10, 4
 
+       /* Flush tlb. */
+       mov     r4, #0
+       mcr     p15, 0, r4, c8, c7, 0
+
+       ldr     tmp1, [r0, #PM_DATA_PMC_MCKR_OFFSET]
+       str     tmp1, .mckr_offset
+       ldr     tmp1, [r0, #PM_DATA_PMC_VERSION]
+       str     tmp1, .pmc_version
+       ldr     tmp1, [r0, #PM_DATA_MEMCTRL]
+       str     tmp1, .memtype
+       ldr     tmp1, [r0, #PM_DATA_MODE]
+       str     tmp1, .pm_mode
+
+       /*
+        * The ldrne instructions below preload their addresses in the TLB, as
+        * access to RAM may be limited while in self-refresh.
+        */
        ldr     tmp1, [r0, #PM_DATA_PMC]
        str     tmp1, .pmc_base
+       cmp     tmp1, #0
+       ldrne   tmp2, [tmp1, #0]
+
        ldr     tmp1, [r0, #PM_DATA_RAMC0]
        str     tmp1, .sramc_base
+       cmp     tmp1, #0
+       ldrne   tmp2, [tmp1, #0]
+
        ldr     tmp1, [r0, #PM_DATA_RAMC1]
        str     tmp1, .sramc1_base
+       cmp     tmp1, #0
+       ldrne   tmp2, [tmp1, #0]
+
+#ifndef CONFIG_SOC_SAM_V4_V5
+       /* The ldrne instructions below preload their addresses in the TLB */
        ldr     tmp1, [r0, #PM_DATA_RAMC_PHY]
        str     tmp1, .sramc_phy_base
-       ldr     tmp1, [r0, #PM_DATA_MEMCTRL]
-       str     tmp1, .memtype
-       ldr     tmp1, [r0, #PM_DATA_MODE]
-       str     tmp1, .pm_mode
-       ldr     tmp1, [r0, #PM_DATA_PMC_MCKR_OFFSET]
-       str     tmp1, .mckr_offset
-       ldr     tmp1, [r0, #PM_DATA_PMC_VERSION]
-       str     tmp1, .pmc_version
-       /* Both ldrne below are here to preload their address in the TLB */
+       cmp     tmp1, #0
+       ldrne   tmp2, [tmp1, #0]
+
        ldr     tmp1, [r0, #PM_DATA_SHDWC]
        str     tmp1, .shdwc
        cmp     tmp1, #0
        ldrne   tmp2, [tmp1, #0]
+
        ldr     tmp1, [r0, #PM_DATA_SFRBU]
        str     tmp1, .sfrbu
        cmp     tmp1, #0
        ldrne   tmp2, [tmp1, #0x10]
+#endif
 
        /* Activate the self-refresh mode */
        at91_sramc_self_refresh_ena
index 7a4bd8838036ffb7a7233325b6b6696170d173d4..ddf873f35e2b041ee2874a9b267bbe7437a86704 100644 (file)
@@ -11,7 +11,7 @@
 
 #define LSR_THRE       0x20
 
-static void putc(const char c)
+static inline void putc(const char c)
 {
        int i;
 
@@ -24,7 +24,7 @@ static void putc(const char c)
        *UART_THR = c;
 }
 
-static void flush(void)
+static inline void flush(void)
 {
 }
 
index 11dcc369ec14a3353d94504b10fa4c476124225c..c9d7c29d95e1e1eddf08196f7e42c34b7de496f4 100644 (file)
@@ -172,6 +172,9 @@ static void __init imx6q_init_machine(void)
                                imx_get_soc_revision());
 
        imx6q_enet_phy_init();
+
+       of_platform_default_populate(NULL, NULL, NULL);
+
        imx_anatop_init();
        cpu_is_imx6q() ?  imx6q_pm_init() : imx6dl_pm_init();
        imx6q_1588_init();
index 9244437cb1b9b5345e248ee542af658b4bea8a72..f2ecca339910a0e7b3cf339dd250b4be2b10b434 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/genalloc.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 #include <linux/of.h>
@@ -619,6 +620,7 @@ static void __init imx6_pm_common_init(const struct imx6_pm_socdata
 
 static void imx6_pm_stby_poweroff(void)
 {
+       gic_cpu_if_down(0);
        imx6_set_lpm(STOP_POWER_OFF);
        imx6q_suspend_finish(0);
 
index 95fd1fbb082607128aca4cf146d2e82a430150c2..59a8e8cc44693b0f4720f4945f1e1fd1d5183a7b 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/iopoll.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/platform_device.h>
 #include <linux/reset-controller.h>
 #include <linux/smp.h>
 #include <asm/smp_plat.h>
@@ -81,11 +82,6 @@ static const struct reset_control_ops imx_src_ops = {
        .reset = imx_src_reset_module,
 };
 
-static struct reset_controller_dev imx_reset_controller = {
-       .ops = &imx_src_ops,
-       .nr_resets = ARRAY_SIZE(sw_reset_bits),
-};
-
 static void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset)
 {
        writel_relaxed(enable, gpc_base + offset);
@@ -177,10 +173,6 @@ void __init imx_src_init(void)
        src_base = of_iomap(np, 0);
        WARN_ON(!src_base);
 
-       imx_reset_controller.of_node = np;
-       if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
-               reset_controller_register(&imx_reset_controller);
-
        /*
         * force warm reset sources to generate cold reset
         * for a more reliable restart
@@ -214,3 +206,33 @@ void __init imx7_src_init(void)
        if (!gpc_base)
                return;
 }
+
+static const struct of_device_id imx_src_dt_ids[] = {
+       { .compatible = "fsl,imx51-src" },
+       { /* sentinel */ }
+};
+
+static int imx_src_probe(struct platform_device *pdev)
+{
+       struct reset_controller_dev *rcdev;
+
+       rcdev = devm_kzalloc(&pdev->dev, sizeof(*rcdev), GFP_KERNEL);
+       if (!rcdev)
+               return -ENOMEM;
+
+       rcdev->ops = &imx_src_ops;
+       rcdev->dev = &pdev->dev;
+       rcdev->of_node = pdev->dev.of_node;
+       rcdev->nr_resets = ARRAY_SIZE(sw_reset_bits);
+
+       return devm_reset_controller_register(&pdev->dev, rcdev);
+}
+
+static struct platform_driver imx_src_driver = {
+       .driver = {
+               .name = "imx-src",
+               .of_match_table = imx_src_dt_ids,
+       },
+       .probe = imx_src_probe,
+};
+builtin_platform_driver(imx_src_driver);
index 36bc0000cb6ab8705e3dd1f865109141fdf8c462..ba3a350479c8d68123d62d0dcd35171f63e7835e 100644 (file)
@@ -9,16 +9,4 @@
 /* REVISIT: omap1 legacy drivers still rely on this */
 #include <mach/soc.h>
 
-/*
- * Bus address is physical address, except for OMAP-1510 Local Bus.
- * OMAP-1510 bus address is translated into a Local Bus address if the
- * OMAP bus type is lbus. We do the address translation based on the
- * device overriding the defaults used in the dma-mapping API.
- */
-
-/*
- * OMAP-1510 Local Bus address offset
- */
-#define OMAP1510_LB_OFFSET     UL(0x30000000)
-
 #endif
index 86d3b3c157af444901d69033a7e24a82bc0addd7..e60831c82b789fac6860517d1734d740e019ef4c 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-map-ops.h>
 #include <linux/io.h>
+#include <linux/delay.h>
 
 #include <asm/irq.h>
 
@@ -206,8 +207,6 @@ static inline void udc_device_init(struct omap_usb_config *pdata)
 
 #endif
 
-#if    IS_ENABLED(CONFIG_USB_OHCI_HCD)
-
 /* The dmamask must be set for OHCI to work */
 static u64 ohci_dmamask = ~(u32)0;
 
@@ -236,20 +235,15 @@ static struct platform_device ohci_device = {
 
 static inline void ohci_device_init(struct omap_usb_config *pdata)
 {
+       if (!IS_ENABLED(CONFIG_USB_OHCI_HCD))
+               return;
+
        if (cpu_is_omap7xx())
                ohci_resources[1].start = INT_7XX_USB_HHC_1;
        pdata->ohci_device = &ohci_device;
        pdata->ocpi_enable = &ocpi_enable;
 }
 
-#else
-
-static inline void ohci_device_init(struct omap_usb_config *pdata)
-{
-}
-
-#endif
-
 #if    defined(CONFIG_USB_OTG) && defined(CONFIG_ARCH_OMAP_OTG)
 
 static struct resource otg_resources[] = {
@@ -534,33 +528,87 @@ bad:
 }
 
 #ifdef CONFIG_ARCH_OMAP15XX
+/* OMAP-1510 OHCI has its own MMU for DMA */
+#define OMAP1510_LB_MEMSIZE    32      /* Should be same as SDRAM size */
+#define OMAP1510_LB_CLOCK_DIV  0xfffec10c
+#define OMAP1510_LB_MMU_CTL    0xfffec208
+#define OMAP1510_LB_MMU_LCK    0xfffec224
+#define OMAP1510_LB_MMU_LD_TLB 0xfffec228
+#define OMAP1510_LB_MMU_CAM_H  0xfffec22c
+#define OMAP1510_LB_MMU_CAM_L  0xfffec230
+#define OMAP1510_LB_MMU_RAM_H  0xfffec234
+#define OMAP1510_LB_MMU_RAM_L  0xfffec238
 
-/* ULPD_DPLL_CTRL */
-#define DPLL_IOB               (1 << 13)
-#define DPLL_PLL_ENABLE                (1 << 4)
-#define DPLL_LOCK              (1 << 0)
+/*
+ * Bus address is physical address, except for OMAP-1510 Local Bus.
+ * OMAP-1510 bus address is translated into a Local Bus address if the
+ * OMAP bus type is lbus.
+ */
+#define OMAP1510_LB_OFFSET        UL(0x30000000)
 
-/* ULPD_APLL_CTRL */
-#define APLL_NDPLL_SWITCH      (1 << 0)
+/*
+ * OMAP-1510 specific Local Bus clock on/off
+ */
+static int omap_1510_local_bus_power(int on)
+{
+       if (on) {
+               omap_writel((1 << 1) | (1 << 0), OMAP1510_LB_MMU_CTL);
+               udelay(200);
+       } else {
+               omap_writel(0, OMAP1510_LB_MMU_CTL);
+       }
 
-static int omap_1510_usb_ohci_notifier(struct notifier_block *nb,
-               unsigned long event, void *data)
+       return 0;
+}
+
+/*
+ * OMAP-1510 specific Local Bus initialization
+ * NOTE: This assumes 32MB memory size in OMAP1510_LB_MEMSIZE.
+ *       See also arch/mach-omap/memory.h for __virt_to_dma() and
+ *       __dma_to_virt() which need to match with the physical
+ *       Local Bus address below.
+ */
+static int omap_1510_local_bus_init(void)
 {
-       struct device *dev = data;
+       unsigned int tlb;
+       unsigned long lbaddr, physaddr;
+
+       omap_writel((omap_readl(OMAP1510_LB_CLOCK_DIV) & 0xfffffff8) | 0x4,
+              OMAP1510_LB_CLOCK_DIV);
+
+       /* Configure the Local Bus MMU table */
+       for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) {
+               lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET;
+               physaddr = tlb * 0x00100000 + PHYS_OFFSET;
+               omap_writel((lbaddr & 0x0fffffff) >> 22, OMAP1510_LB_MMU_CAM_H);
+               omap_writel(((lbaddr & 0x003ffc00) >> 6) | 0xc,
+                      OMAP1510_LB_MMU_CAM_L);
+               omap_writel(physaddr >> 16, OMAP1510_LB_MMU_RAM_H);
+               omap_writel((physaddr & 0x0000fc00) | 0x300, OMAP1510_LB_MMU_RAM_L);
+               omap_writel(tlb << 4, OMAP1510_LB_MMU_LCK);
+               omap_writel(0x1, OMAP1510_LB_MMU_LD_TLB);
+       }
 
-       if (event != BUS_NOTIFY_ADD_DEVICE)
-               return NOTIFY_DONE;
+       /* Enable the walking table */
+       omap_writel(omap_readl(OMAP1510_LB_MMU_CTL) | (1 << 3), OMAP1510_LB_MMU_CTL);
+       udelay(200);
 
-       if (strncmp(dev_name(dev), "ohci", 4) == 0 &&
-           dma_direct_set_offset(dev, PHYS_OFFSET, OMAP1510_LB_OFFSET,
-                       (u64)-1))
-               WARN_ONCE(1, "failed to set DMA offset\n");
-       return NOTIFY_OK;
+       return 0;
 }
 
-static struct notifier_block omap_1510_usb_ohci_nb = {
-       .notifier_call          = omap_1510_usb_ohci_notifier,
-};
+static void omap_1510_local_bus_reset(void)
+{
+       omap_1510_local_bus_power(1);
+       omap_1510_local_bus_init();
+}
+
+/* ULPD_DPLL_CTRL */
+#define DPLL_IOB               (1 << 13)
+#define DPLL_PLL_ENABLE                (1 << 4)
+#define DPLL_LOCK              (1 << 0)
+
+/* ULPD_APLL_CTRL */
+#define APLL_NDPLL_SWITCH      (1 << 0)
 
 static void __init omap_1510_usb_init(struct omap_usb_config *config)
 {
@@ -616,19 +664,19 @@ static void __init omap_1510_usb_init(struct omap_usb_config *config)
        }
 #endif
 
-#if    IS_ENABLED(CONFIG_USB_OHCI_HCD)
-       if (config->register_host) {
+       if (IS_ENABLED(CONFIG_USB_OHCI_HCD) && config->register_host) {
                int status;
 
-               bus_register_notifier(&platform_bus_type,
-                                     &omap_1510_usb_ohci_nb);
                ohci_device.dev.platform_data = config;
+               dma_direct_set_offset(&ohci_device.dev, PHYS_OFFSET,
+                                     OMAP1510_LB_OFFSET, (u64)-1);
                status = platform_device_register(&ohci_device);
                if (status)
                        pr_debug("can't register OHCI device, %d\n", status);
                /* hcd explicitly gates 48MHz */
+
+               config->lb_reset = omap_1510_local_bus_reset;
        }
-#endif
 }
 
 #else
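
The hunk above brings the OMAP-1510 Local Bus MMU setup back into usb.c and points the OHCI device at the same window through dma_direct_set_offset(), so a bus address is simply phys - PHYS_OFFSET + OMAP1510_LB_OFFSET. As a rough illustration only, the standalone sketch below replays the per-megabyte arithmetic of omap_1510_local_bus_init() and prints what would land in the CAM/RAM registers; the PHYS_OFFSET value is an assumption (0x10000000, the usual OMAP1 SDRAM base) and is not taken from this patch.

/*
 * Illustrative userspace sketch (not kernel code): reproduce the per-entry
 * arithmetic of omap_1510_local_bus_init() and print what would be written
 * to the Local Bus MMU CAM/RAM registers. PHYS_OFFSET is assumed to be
 * 0x10000000; adjust for the actual board.
 */
#include <stdio.h>

#define OMAP1510_LB_MEMSIZE     32              /* MB, must match SDRAM size */
#define OMAP1510_LB_OFFSET      0x30000000UL
#define PHYS_OFFSET             0x10000000UL    /* assumption, see above */

int main(void)
{
        unsigned int tlb;

        for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) {
                unsigned long lbaddr = tlb * 0x00100000UL + OMAP1510_LB_OFFSET;
                unsigned long physaddr = tlb * 0x00100000UL + PHYS_OFFSET;

                printf("entry %2u: LB 0x%08lx -> phys 0x%08lx "
                       "(CAM_H 0x%lx CAM_L 0x%05lx RAM_H 0x%04lx RAM_L 0x%05lx)\n",
                       tlb, lbaddr, physaddr,
                       (lbaddr & 0x0fffffff) >> 22,
                       ((lbaddr & 0x003ffc00) >> 6) | 0xc,
                       physaddr >> 16,
                       (physaddr & 0x0000fc00) | 0x300);
        }
        return 0;
}
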
index 7f13adf26e61198c52a5b4289dad9664b7e03463..02c253de9b6e78ff341ec870781dd3fd6b11fbca 100644 (file)
@@ -112,7 +112,6 @@ config ARCH_OMAP2PLUS
        select PM_GENERIC_DOMAINS
        select PM_GENERIC_DOMAINS_OF
        select RESET_CONTROLLER
-       select SIMPLE_PM_BUS
        select SOC_BUS
        select TI_SYSC
        select OMAP_IRQCHIP
index 12b26e04686faf178c27ad45897e1052411c0a05..0c2936c7a37998b769807063e3a5f0c1f0b26b1a 100644 (file)
@@ -3614,6 +3614,8 @@ int omap_hwmod_init_module(struct device *dev,
                oh->flags |= HWMOD_SWSUP_SIDLE_ACT;
        if (data->cfg->quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
                oh->flags |= HWMOD_SWSUP_MSTANDBY;
+       if (data->cfg->quirks & SYSC_QUIRK_CLKDM_NOAUTO)
+               oh->flags |= HWMOD_CLKDM_NOAUTO;
 
        error = omap_hwmod_check_module(dev, oh, data, sysc_fields,
                                        rev_offs, sysc_offs, syss_offs,
index a951276f05475ab8520a666257435b943b09d575..a903b26cde40970f89ca530bb621456ba202c8ed 100644 (file)
  *                        +-----+
  *                        |RSVD | JIT scratchpad
  * current ARM_SP =>      +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
+ *                        | ... | caller-saved registers
+ *                        +-----+
+ *                        | ... | arguments passed on stack
+ * ARM_SP during call =>  +-----+
  *                        |     |
  *                        | ... | Function call stack
  *                        |     |
  *
  * When popping registers off the stack at the end of a BPF function, we
  * reference them via the current ARM_FP register.
+ *
+ * Some eBPF operations are implemented via a call to a helper function.
+ * Such calls are "invisible" in the eBPF code, so it is up to the calling
+ * program to preserve any caller-saved ARM registers during the call. The
+ * JIT emits code to push and pop those registers onto the stack, immediately
+ * above the callee stack frame.
  */
 #define CALLEE_MASK    (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
                         1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
@@ -70,6 +80,8 @@
 #define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
 #define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)
 
+#define CALLER_MASK    (1 << ARM_R0 | 1 << ARM_R1 | 1 << ARM_R2 | 1 << ARM_R3)
+
 enum {
        /* Stack layout - these are offsets from (top of stack - 4) */
        BPF_R2_HI,
@@ -464,6 +476,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
 
 static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
 {
+       const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);
        const s8 *tmp = bpf2a32[TMP_REG_1];
 
 #if __LINUX_ARM_ARCH__ == 7
@@ -495,11 +508,17 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
                emit(ARM_MOV_R(ARM_R0, rm), ctx);
        }
 
+       /* Push caller-saved registers on stack */
+       emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx);
+
        /* Call appropriate function */
        emit_mov_i(ARM_IP, op == BPF_DIV ?
                   (u32)jit_udiv32 : (u32)jit_mod32, ctx);
        emit_blx_r(ARM_IP, ctx);
 
+       /* Restore caller-saved registers from stack */
+       emit(ARM_POP(CALLER_MASK & ~exclude_mask), ctx);
+
        /* Save return value */
        if (rd != ARM_R0)
                emit(ARM_MOV_R(rd, ARM_R0), ctx);
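
The JIT change above spills the AAPCS caller-saved registers around the out-of-line divide/modulo helpers. r0 and r1 are excluded from the push/pop because they carry the helper's operands on entry and its 32-bit result on return, so restoring them after the call would clobber the result. A minimal userspace sketch of the mask arithmetic only, with the register encodings assumed to be 0..3 as in the JIT headers:

/* Userspace sketch of the CALLER_MASK & ~exclude_mask computation. */
#include <stdio.h>

#define BIT(n)          (1u << (n))
enum { ARM_R0, ARM_R1, ARM_R2, ARM_R3 };

#define CALLER_MASK     (BIT(ARM_R0) | BIT(ARM_R1) | BIT(ARM_R2) | BIT(ARM_R3))

int main(void)
{
        /* r0/r1 hold the divide helper's operands and its result */
        unsigned int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);
        unsigned int push_mask = CALLER_MASK & ~exclude_mask;
        int reg;

        for (reg = ARM_R0; reg <= ARM_R3; reg++)
                printf("r%d: %s\n", reg,
                       (push_mask & BIT(reg)) ? "pushed/popped around the call"
                                              : "left alone (argument/result)");
        return 0;
}
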
index 077f2ec4eeb23ff82e12d563ab696198cd977b32..fee914c716aa262d12060cce3980471d510af283 100644 (file)
@@ -86,7 +86,7 @@ config ARM64
        select ARCH_SUPPORTS_LTO_CLANG_THIN
        select ARCH_SUPPORTS_CFI_CLANG
        select ARCH_SUPPORTS_ATOMIC_RMW
-       select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
+       select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
        select ARCH_SUPPORTS_NUMA_BALANCING
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
        select ARCH_WANT_DEFAULT_BPF_JIT
@@ -1931,8 +1931,6 @@ source "drivers/cpufreq/Kconfig"
 
 endmenu
 
-source "drivers/firmware/Kconfig"
-
 source "drivers/acpi/Kconfig"
 
 source "arch/arm64/kvm/Kconfig"
index 05ae893d1b2ee7aaf048b353fd829eb2ae5b96ad..fbf13f7c2baf5db494c41a7769cfe0ede723b446 100644 (file)
 
        bus@8000000 {
                compatible = "arm,vexpress,v2m-p1", "simple-bus";
-               arm,v2m-memory-map = "rs1";
                #address-cells = <2>; /* SMB chipselect number and offset */
                #size-cells = <1>;
 
index b8a21092db4d3f09e03e065b36343d84b329286b..269b649934b5abf4f4675c8c48f08c530aadb944 100644 (file)
                                remote-endpoint = <&clcd_pads>;
                        };
                };
-
-              panel-timing {
-                      clock-frequency = <63500127>;
-                      hactive = <1024>;
-                      hback-porch = <152>;
-                      hfront-porch = <48>;
-                      hsync-len = <104>;
-                      vactive = <768>;
-                      vback-porch = <23>;
-                      vfront-porch = <3>;
-                      vsync-len = <4>;
-              };
        };
 
        bus@8000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0 0x08000000 0x04000000>,
-                        <1 0 0 0x14000000 0x04000000>,
-                        <2 0 0 0x18000000 0x04000000>,
-                        <3 0 0 0x1c000000 0x04000000>,
-                        <4 0 0 0x0c000000 0x04000000>,
-                        <5 0 0 0x10000000 0x04000000>;
-
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 63>;
                interrupt-map = <0 0  0 &gic 0 0 GIC_SPI  0 IRQ_TYPE_LEVEL_HIGH>,
index 8e7a66943b0180d5077a190f872af96843c617da..6288e104a0893f65377ee70706c95137fe4e63ba 100644 (file)
@@ -27,8 +27,6 @@
                reg = <0x0 0x2b1f0000 0x0 0x1000>;
                interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
-               interrupt-names = "mhu_lpri_rx",
-                                 "mhu_hpri_rx";
                #mbox-cells = <1>;
                clocks = <&soc_refclk100mhz>;
                clock-names = "apb_pclk";
        };
 
        bus@8000000 {
-               compatible = "simple-bus";
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0 0x08000000 0x04000000>,
-                        <1 0 0 0x14000000 0x04000000>,
-                        <2 0 0 0x18000000 0x04000000>,
-                        <3 0 0 0x1c000000 0x04000000>,
-                        <4 0 0 0x0c000000 0x04000000>,
-                        <5 0 0 0x10000000 0x04000000>;
-
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 15>;
                interrupt-map = <0 0  0 &gic 0 GIC_SPI  68 IRQ_TYPE_LEVEL_HIGH>,
index 40d95c58b55e255454d7d8fa0bbc418c84ef7e01..fefd2b5f01762820531cafb6c7bd51c3749ab994 100644 (file)
        };
 
        bus@8000000 {
-               motherboard-bus {
+               compatible = "simple-bus";
+               #address-cells = <2>;
+               #size-cells = <1>;
+               ranges = <0 0x8000000 0 0x8000000 0x18000000>;
+
+               motherboard-bus@8000000 {
                        compatible = "arm,vexpress,v2p-p1", "simple-bus";
                        #address-cells = <2>;  /* SMB chipselect number and offset */
                        #size-cells = <1>;
-                       #interrupt-cells = <1>;
-                       ranges;
-                       model = "V2M-Juno";
+                       ranges = <0 0 0 0x08000000 0x04000000>,
+                                <1 0 0 0x14000000 0x04000000>,
+                                <2 0 0 0x18000000 0x04000000>,
+                                <3 0 0 0x1c000000 0x04000000>,
+                                <4 0 0 0x0c000000 0x04000000>,
+                                <5 0 0 0x10000000 0x04000000>;
                        arm,hbi = <0x252>;
                        arm,vexpress,site = <0>;
-                       arm,v2m-memory-map = "rs1";
 
                        flash@0 {
                                /* 2 * 32MiB NOR Flash memory mounted on CS0 */
                                        };
                                };
 
-                               mmci@50000 {
+                               mmc@50000 {
                                        compatible = "arm,pl180", "arm,primecell";
                                        reg = <0x050000 0x1000>;
                                        interrupts = <5>;
                                        clock-names = "KMIREFCLK", "apb_pclk";
                                };
 
-                               wdt@f0000 {
+                               watchdog@f0000 {
                                        compatible = "arm,sp805", "arm,primecell";
                                        reg = <0x0f0000 0x10000>;
                                        interrupts = <7>;
index 3050f45bade4a2540d22a4e09da7fed2055ddf32..258991ad7cc0af2ecc7a7f5ec793f98074a1164e 100644 (file)
        };
 
        bus@8000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0 0x08000000 0x04000000>,
-                        <1 0 0 0x14000000 0x04000000>,
-                        <2 0 0 0x18000000 0x04000000>,
-                        <3 0 0 0x1c000000 0x04000000>,
-                        <4 0 0 0x0c000000 0x04000000>,
-                        <5 0 0 0x10000000 0x04000000>;
-
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 63>;
                interrupt-map = <0 0  0 &gic GIC_SPI  0 IRQ_TYPE_LEVEL_HIGH>,
index b917d9d3f1c4c16550d7cde7e1dc47f18aefb270..33182d9e582677fc0bbdf3d698d46ef102b1731e 100644 (file)
@@ -6,7 +6,7 @@
  */
 / {
        bus@8000000 {
-               motherboard-bus {
+               motherboard-bus@8000000 {
                        arm,v2m-memory-map = "rs2";
 
                        iofpga-bus@300000000 {
index 4c4a381d2c75fd4b805e663f6683fd98872a4d91..5f6cab668aa079853c554e284077db1adb37387f 100644 (file)
        };
 
        bus@8000000 {
-               motherboard-bus {
-                       arm,v2m-memory-map = "rs1";
+               compatible = "simple-bus";
+               #address-cells = <2>;
+               #size-cells = <1>;
+               ranges = <0 0x8000000 0 0x8000000 0x18000000>;
+
+               motherboard-bus@8000000 {
                        compatible = "arm,vexpress,v2m-p1", "simple-bus";
                        #address-cells = <2>; /* SMB chipselect number and offset */
                        #size-cells = <1>;
-                       #interrupt-cells = <1>;
-                       ranges;
+                       ranges = <0 0 0 0x08000000 0x04000000>,
+                                <1 0 0 0x14000000 0x04000000>,
+                                <2 0 0 0x18000000 0x04000000>,
+                                <3 0 0 0x1c000000 0x04000000>,
+                                <4 0 0 0x0c000000 0x04000000>,
+                                <5 0 0 0x10000000 0x04000000>;
 
                        flash@0 {
                                compatible = "arm,vexpress-flash", "cfi-flash";
                                        clock-names = "apb_pclk";
                                };
 
-                               mmci@50000 {
+                               mmc@50000 {
                                        compatible = "arm,pl180", "arm,primecell";
                                        reg = <0x050000 0x1000>;
                                        interrupts = <9>, <10>;
                                        clock-names = "uartclk", "apb_pclk";
                                };
 
-                               wdt@f0000 {
+                               watchdog@f0000 {
                                        compatible = "arm,sp805", "arm,primecell";
                                        reg = <0x0f0000 0x1000>;
                                        interrupts = <0>;
index d859914500a706bbecc4d237b172c042fe8fc560..5b6d9d8e934db472ea29a67859174535beb6ecfe 100644 (file)
        };
 
        smb: bus@8000000 {
-               compatible = "simple-bus";
-
-               #address-cells = <2>;
-               #size-cells = <1>;
-               ranges = <0 0 0 0x08000000 0x04000000>,
-                        <1 0 0 0x14000000 0x04000000>,
-                        <2 0 0 0x18000000 0x04000000>,
-                        <3 0 0 0x1c000000 0x04000000>,
-                        <4 0 0 0x0c000000 0x04000000>,
-                        <5 0 0 0x10000000 0x04000000>;
-
-               #interrupt-cells = <1>;
-               interrupt-map-mask = <0 0 63>;
-               interrupt-map = <0 0  0 &gic GIC_SPI  0 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  1 &gic GIC_SPI  1 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  2 &gic GIC_SPI  2 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  3 &gic GIC_SPI  3 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  4 &gic GIC_SPI  4 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  5 &gic GIC_SPI  5 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  6 &gic GIC_SPI  6 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  7 &gic GIC_SPI  7 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  8 &gic GIC_SPI  8 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0  9 &gic GIC_SPI  9 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+               ranges = <0x8000000 0 0x8000000 0x18000000>;
        };
 };
index 343ecf0e8973a2faf4b61f820880c8f8b75694d6..06b36cc65865c831bc192581518fa64a7e7e5132 100644 (file)
                        interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <0>; /* fixed up by bootloader */
                        clocks = <&clockgen QORIQ_CLK_HWACCEL 1>;
-                       voltage-ranges = <1800 1800 3300 3300>;
+                       voltage-ranges = <1800 1800>;
                        sdhci,auto-cmd12;
-                       broken-cd;
+                       non-removable;
                        little-endian;
                        bus-width = <4>;
                        status = "disabled";
index 988f8ab679ad6c3ec0942bb38d39971b2edaa477..40f5e7a3b0644ed7c348008eacd3f5819f6c8c08 100644 (file)
@@ -91,7 +91,7 @@
                #size-cells = <1>;
                compatible = "jedec,spi-nor";
                spi-max-frequency = <80000000>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                spi-rx-bus-width = <4>;
        };
 };
index 4e2820d19244aa2769000630b2a83d37c076fc7c..a2b24d4d4e3e74e279f1c68b99cc2366311ef577 100644 (file)
@@ -48,7 +48,7 @@
                #size-cells = <1>;
                compatible = "jedec,spi-nor";
                spi-max-frequency = <80000000>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                spi-rx-bus-width = <4>;
        };
 };
index d0456daefda8830bbc46bbfacec716651090101c..9db9b90bf2bc9a46d9fd77fe8465f7ca4c5bf4a4 100644 (file)
                                regulator-min-microvolt = <850000>;
                                regulator-max-microvolt = <950000>;
                                regulator-boot-on;
+                               regulator-always-on;
                                regulator-ramp-delay = <3125>;
                                nxp,dvs-run-voltage = <950000>;
                                nxp,dvs-standby-voltage = <850000>;
index 05cb60991fb9963e006e0d2ecba4edf7a7ed7892..d52686f4c0598850bc6dc9ec8d5f69361e567558 100644 (file)
        pinctrl_hog: hoggrp {
                fsl,pins = <
                        MX8MM_IOMUXC_NAND_CE0_B_GPIO3_IO1       0x40000159 /* M2_GDIS# */
-                       MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12      0x40000041 /* M2_RST# */
+                       MX8MM_IOMUXC_GPIO1_IO13_GPIO1_IO13      0x40000041 /* M2_RST# */
                        MX8MM_IOMUXC_NAND_DATA01_GPIO3_IO7      0x40000119 /* M2_OFF# */
                        MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15      0x40000159 /* M2_WDIS# */
                        MX8MM_IOMUXC_SAI1_TXD2_GPIO4_IO14       0x40000041 /* AMP GPIO1 */
index 54eaf3d6055b11707c1fad1b1bd68729fa5f5bfe..3b2d627a03428937edc42f67e49684ab09d66ddb 100644 (file)
                #size-cells = <1>;
                compatible = "jedec,spi-nor";
                spi-max-frequency = <80000000>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                spi-rx-bus-width = <4>;
        };
 };
index e77db4996e58e823a58acd28dae4c1ba9eba9111..236f425e1570a2470a5b2f722fc8321ca13ece00 100644 (file)
        pinctrl_hog: hoggrp {
                fsl,pins = <
                        MX8MN_IOMUXC_NAND_CE0_B_GPIO3_IO1       0x40000159 /* M2_GDIS# */
-                       MX8MN_IOMUXC_GPIO1_IO12_GPIO1_IO12      0x40000041 /* M2_RST# */
+                       MX8MN_IOMUXC_GPIO1_IO13_GPIO1_IO13      0x40000041 /* M2_RST# */
                        MX8MN_IOMUXC_NAND_DATA01_GPIO3_IO7      0x40000119 /* M2_OFF# */
                        MX8MN_IOMUXC_GPIO1_IO15_GPIO1_IO15      0x40000159 /* M2_WDIS# */
                        MX8MN_IOMUXC_SAI2_RXFS_GPIO4_IO21       0x40000041 /* APP GPIO1 */
index aa78e0d8c72b26d68206e284923fb79ce0f36d7b..fc178eebf8aa443c0cc355e532f766439cc72850 100644 (file)
@@ -74,7 +74,7 @@
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <80000000>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                spi-rx-bus-width = <4>;
        };
 };
index 49f9db971f3b22e0ab549594e75b734c0469a781..b83df77195ece79761ad3106fa06818aa400683b 100644 (file)
                #size-cells = <1>;
                compatible = "micron,n25q256a", "jedec,spi-nor";
                spi-max-frequency = <29000000>;
+               spi-tx-bus-width = <1>;
+               spi-rx-bus-width = <4>;
        };
 };
 
index f593e4ff62e1c73fb092fdee4c5f59e5afb4e8a5..564746d5000d5aed30f6c29b5977083a254eb88c 100644 (file)
                #address-cells = <1>;
                #size-cells = <1>;
                reg = <0>;
-               spi-tx-bus-width = <4>;
+               spi-tx-bus-width = <1>;
                spi-rx-bus-width = <4>;
                m25p,fast-read;
                spi-max-frequency = <50000000>;
index a620ac0d0b19a28929615536bf5984d2623241b3..db333001df4d6f1d5cee9af1f86867542c1abb3f 100644 (file)
                                interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
                                phys = <&qusb_phy_0>, <&usb0_ssphy>;
                                phy-names = "usb2-phy", "usb3-phy";
-                               tx-fifo-resize;
                                snps,is-utmi-l1-suspend;
                                snps,hird-threshold = /bits/ 8 <0x0>;
                                snps,dis_u2_susphy_quirk;
                                interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
                                phys = <&qusb_phy_1>, <&usb1_ssphy>;
                                phy-names = "usb2-phy", "usb3-phy";
-                               tx-fifo-resize;
                                snps,is-utmi-l1-suspend;
                                snps,hird-threshold = /bits/ 8 <0x0>;
                                snps,dis_u2_susphy_quirk;
index c566a64b1373fc55b7d0e338d8342112dd9aabdc..0df76f7b1cc11fedc685eafb0a3201401c342bc6 100644 (file)
                #size-cells = <0>;
 
                pon: power-on@800 {
-                       compatible = "qcom,pm8916-pon";
+                       compatible = "qcom,pm8998-pon";
                        reg = <0x0800>;
+                       mode-bootloader = <0x2>;
+                       mode-recovery = <0x1>;
 
                        pon_pwrkey: pwrkey {
                                compatible = "qcom,pm8941-pwrkey";
index 8ac96f8e79d42bababaf79a9f35b5d88a629763b..28d5b5528516b029fe6df2587471b44121ebdac6 100644 (file)
        };
 };
 
+&pon_pwrkey {
+       status = "okay";
+};
+
+&pon_resin {
+       status = "okay";
+
+       linux,code = <KEY_VOLUMEDOWN>;
+};
+
 &qupv3_id_0 {
        status = "okay";
 };
index 0f2b3c00e4346725397416b247a198c91d00aa7b..70c88c37de321028c55c6af40e2e62138e49cbc3 100644 (file)
                        "Headphone Jack", "HPOL",
                        "Headphone Jack", "HPOR";
 
-               #sound-dai-cells = <0>;
                #address-cells = <1>;
                #size-cells = <0>;
 
                        };
                };
 
-               dai-link@2 {
+               dai-link@5 {
                        link-name = "MultiMedia2";
-                       reg = <2>;
+                       reg = <LPASS_DP_RX>;
                        cpu {
-                               sound-dai = <&lpass_cpu 2>;
+                               sound-dai = <&lpass_cpu LPASS_DP_RX>;
                        };
 
                        codec {
@@ -782,7 +781,7 @@ hp_i2c: &i2c9 {
                qcom,playback-sd-lines = <0>;
        };
 
-       hdmi-primary@0 {
+       hdmi@5 {
                reg = <LPASS_DP_RX>;
        };
 };
index 53a21d0861787b50e0812c93d90fb6b565dcd9ca..fd78f16181ddd47b9452c1a1c78a8e9d926d551c 100644 (file)
 
                cpufreq_hw: cpufreq@18591000 {
                        compatible = "qcom,cpufreq-epss";
-                       reg = <0 0x18591100 0 0x900>,
-                             <0 0x18592100 0 0x900>,
-                             <0 0x18593100 0 0x900>;
+                       reg = <0 0x18591000 0 0x1000>,
+                             <0 0x18592000 0 0x1000>,
+                             <0 0x18593000 0 0x1000>;
                        clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
                        clock-names = "xo", "alternate";
                        #freq-domain-cells = <1>;
index 9153e6616ba4bda13d3ea75aa38bf20fa428b01a..9c7f87e42fccd0dfd40cd3bce643b91c45b9f79d 100644 (file)
                        compatible = "qcom,sdm660-a2noc";
                        reg = <0x01704000 0xc100>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
+                       clock-names = "bus",
+                                     "bus_a",
+                                     "ipa",
+                                     "ufs_axi",
+                                     "aggre2_ufs_axi",
+                                     "aggre2_usb3_axi",
+                                     "cfg_noc_usb2_axi";
                        clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
-                                <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>;
+                                <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>,
+                                <&rpmcc RPM_SMD_IPA_CLK>,
+                                <&gcc GCC_UFS_AXI_CLK>,
+                                <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+                                <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+                                <&gcc GCC_CFG_NOC_USB2_AXI_CLK>;
                };
 
                mnoc: interconnect@1745000 {
index 6d7172e6f4c30558b06424b6be6ceb0c7c30fc45..b3b91192618449ed3d6b84d328c8bb9f42cc0782 100644 (file)
                        no-map;
                };
 
-               wlan_msa_mem: memory@8c400000 {
-                       reg = <0 0x8c400000 0 0x100000>;
+               ipa_fw_mem: memory@8c400000 {
+                       reg = <0 0x8c400000 0 0x10000>;
                        no-map;
                };
 
-               gpu_mem: memory@8c515000 {
-                       reg = <0 0x8c515000 0 0x2000>;
+               ipa_gsi_mem: memory@8c410000 {
+                       reg = <0 0x8c410000 0 0x5000>;
                        no-map;
                };
 
-               ipa_fw_mem: memory@8c517000 {
-                       reg = <0 0x8c517000 0 0x5a000>;
+               gpu_mem: memory@8c415000 {
+                       reg = <0 0x8c415000 0 0x2000>;
                        no-map;
                };
 
-               adsp_mem: memory@8c600000 {
-                       reg = <0 0x8c600000 0 0x1a00000>;
+               adsp_mem: memory@8c500000 {
+                       reg = <0 0x8c500000 0 0x1a00000>;
+                       no-map;
+               };
+
+               wlan_msa_mem: memory@8df00000 {
+                       reg = <0 0x8df00000 0 0x100000>;
                        no-map;
                };
 
index 385e5029437d368190d56d6aa02df8ca78577dab..2ba23aa582a18d3436b370952d7c4075c8d2e39e 100644 (file)
 #include "sdm850.dtsi"
 #include "pm8998.dtsi"
 
+/*
+ * Update following upstream (sdm845.dtsi) reserved
+ * memory mappings for firmware loading to succeed
+ * and enable the IPA device.
+ */
+/delete-node/ &ipa_fw_mem;
+/delete-node/ &ipa_gsi_mem;
+/delete-node/ &gpu_mem;
+/delete-node/ &adsp_mem;
+/delete-node/ &wlan_msa_mem;
+
 / {
        model = "Lenovo Yoga C630";
        compatible = "lenovo,yoga-c630", "qcom,sdm845";
                };
        };
 
+       /* Reserved memory changes for IPA */
+       reserved-memory {
+               wlan_msa_mem: memory@8c400000 {
+                       reg = <0 0x8c400000 0 0x100000>;
+                       no-map;
+               };
+
+               gpu_mem: memory@8c515000 {
+                       reg = <0 0x8c515000 0 0x2000>;
+                       no-map;
+               };
+
+               ipa_fw_mem: memory@8c517000 {
+                       reg = <0 0x8c517000 0 0x5a000>;
+                       no-map;
+               };
+
+               adsp_mem: memory@8c600000 {
+                       reg = <0 0x8c600000 0 0x1a00000>;
+                       no-map;
+               };
+       };
+
        sn65dsi86_refclk: sn65dsi86-refclk {
                compatible = "fixed-clock";
                #clock-cells = <0>;
index 156d96afbbfcff1385efe40f669c528b9deacc72..545197bc050134766df4b6998b82cc7f27647756 100644 (file)
@@ -245,7 +245,6 @@ CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_FW_LOADER_USER_HELPER=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
 CONFIG_HISILICON_LPC=y
-CONFIG_SIMPLE_PM_BUS=y
 CONFIG_FSL_MC_BUS=y
 CONFIG_TEGRA_ACONNECT=m
 CONFIG_GNSS=m
index 7535dc7cc5aa18ddd4e05b5f325406c19ce0cf06..bd68e1b7f29f34552b9e4decd2c430a6d3916c43 100644 (file)
@@ -50,9 +50,6 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
 void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
 #define acpi_os_ioremap acpi_os_ioremap
 
-void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size);
-#define acpi_os_memmap acpi_os_memmap
-
 typedef u64 phys_cpuid_t;
 #define PHYS_CPUID_INVALID INVALID_HWID
 
index 89faca0e740d03a306a70b49d19a91c4a3023886..bfa58409a4d4da3221f44ffc5d08bae2812b92cf 100644 (file)
@@ -525,6 +525,11 @@ alternative_endif
 #define EXPORT_SYMBOL_NOKASAN(name)    EXPORT_SYMBOL(name)
 #endif
 
+#ifdef CONFIG_KASAN_HW_TAGS
+#define EXPORT_SYMBOL_NOHWKASAN(name)
+#else
+#define EXPORT_SYMBOL_NOHWKASAN(name)  EXPORT_SYMBOL_NOKASAN(name)
+#endif
        /*
         * Emit a 64-bit absolute little endian symbol reference in a way that
         * ensures that it will be resolved at build time, even when building a
index 3f93b9e0b339b0f808ef1995b73605585e0e3cb0..02511650cffe52188a9696a22ea7cd589421b676 100644 (file)
@@ -99,11 +99,17 @@ void mte_check_tfsr_el1(void);
 
 static inline void mte_check_tfsr_entry(void)
 {
+       if (!system_supports_mte())
+               return;
+
        mte_check_tfsr_el1();
 }
 
 static inline void mte_check_tfsr_exit(void)
 {
+       if (!system_supports_mte())
+               return;
+
        /*
         * The asynchronous faults are sync'ed automatically with
         * TFSR_EL1 on kernel entry but for exit an explicit dsb()
index 3a3264ff47b9709df741eaece1c8eae995a5ffb3..95f7686b728d7e5689f46ce6ae37fe7724779be2 100644 (file)
@@ -12,11 +12,13 @@ extern char *strrchr(const char *, int c);
 #define __HAVE_ARCH_STRCHR
 extern char *strchr(const char *, int c);
 
+#ifndef CONFIG_KASAN_HW_TAGS
 #define __HAVE_ARCH_STRCMP
 extern int strcmp(const char *, const char *);
 
 #define __HAVE_ARCH_STRNCMP
 extern int strncmp(const char *, const char *, __kernel_size_t);
+#endif
 
 #define __HAVE_ARCH_STRLEN
 extern __kernel_size_t strlen(const char *);
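
With hardware tag-based KASAN the optimized assembly strcmp/strncmp are no longer advertised, so the generic C versions from lib/string.c are built and used instead (lib/string.c compiles them only when __HAVE_ARCH_STRCMP/__HAVE_ARCH_STRNCMP are not defined). A userspace mock of that selection mechanism, assuming nothing beyond the macro and config names shown in the hunks above:

/* Userspace mock: which strcmp a kernel built this way would end up using. */
#include <stdio.h>
#include <string.h>

/* #define CONFIG_KASAN_HW_TAGS */      /* uncomment to mimic an MTE kernel */

#ifndef CONFIG_KASAN_HW_TAGS
#define __HAVE_ARCH_STRCMP              /* arch claims to provide asm strcmp */
#endif

int main(void)
{
#ifdef __HAVE_ARCH_STRCMP
        puts("lib/string.c skips its C strcmp; the asm version is linked in");
#else
        puts("no arch strcmp claimed; the generic, instrumentable C version is built");
#endif
        return strcmp("kasan", "kasan");
}
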
index 1c9c2f7a1c04d40642698afccdaf36d2a86a0f46..f3851724fe35611eb9d113e9e4aec6ee395b456f 100644 (file)
@@ -273,8 +273,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
        return __pgprot(PROT_DEVICE_nGnRnE);
 }
 
-static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
-                                      acpi_size size, bool memory)
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 {
        efi_memory_desc_t *md, *region = NULL;
        pgprot_t prot;
@@ -300,11 +299,9 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
         * It is fine for AML to remap regions that are not represented in the
         * EFI memory map at all, as it only describes normal memory, and MMIO
         * regions that require a virtual mapping to make them accessible to
-        * the EFI runtime services. Determine the region default
-        * attributes by checking the requested memory semantics.
+        * the EFI runtime services.
         */
-       prot = memory ? __pgprot(PROT_NORMAL_NC) :
-                       __pgprot(PROT_DEVICE_nGnRnE);
+       prot = __pgprot(PROT_DEVICE_nGnRnE);
        if (region) {
                switch (region->type) {
                case EFI_LOADER_CODE:
@@ -364,16 +361,6 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
        return __ioremap(phys, size, prot);
 }
 
-void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
-{
-       return __acpi_os_ioremap(phys, size, false);
-}
-
-void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size)
-{
-       return __acpi_os_ioremap(phys, size, true);
-}
-
 /*
  * Claim Synchronous External Aborts as a firmware first notification.
  *
index f8a3067d10c6b8f459e418960192671fa17dfa46..6ec7036ef7e1808eca4b63973cdecf12b0d25c16 100644 (file)
@@ -1526,9 +1526,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
        /*
         * For reasons that aren't entirely clear, enabling KPTI on Cavium
         * ThunderX leads to apparent I-cache corruption of kernel text, which
-        * ends as well as you might imagine. Don't even try.
+        * ends as well as you might imagine. Don't even try. We cannot rely
+        * on the cpus_have_*cap() helpers here to detect the CPU erratum
+        * because cpucap detection order may change. However, since we know
+        * affected CPUs are always in a homogeneous configuration, it is
+        * safe to rely on this_cpu_has_cap() here.
         */
-       if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+       if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
                str = "ARM64_WORKAROUND_CAVIUM_27456";
                __kpti_forced = -1;
        }
index 5a294f20e9decd9d0c4aa6c7ab91f63bb6b2b4e6..ff4962750b3d0f66327167eaf0686a8efdfc9a29 100644 (file)
@@ -513,7 +513,7 @@ size_t sve_state_size(struct task_struct const *task)
 void sve_alloc(struct task_struct *task)
 {
        if (task->thread.sve_state) {
-               memset(task->thread.sve_state, 0, sve_state_size(current));
+               memset(task->thread.sve_state, 0, sve_state_size(task));
                return;
        }
 
index 9d314a3bad3ba25fc9e2eedd0eeafecdb11206a3..e5e801bc53122cdb75fa7418827b3a31b9d82eca 100644 (file)
@@ -142,12 +142,7 @@ void mte_enable_kernel_async(void)
 #ifdef CONFIG_KASAN_HW_TAGS
 void mte_check_tfsr_el1(void)
 {
-       u64 tfsr_el1;
-
-       if (!system_supports_mte())
-               return;
-
-       tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+       u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
 
        if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
                /*
@@ -199,6 +194,9 @@ void mte_thread_init_user(void)
 
 void mte_thread_switch(struct task_struct *next)
 {
+       if (!system_supports_mte())
+               return;
+
        mte_update_sctlr_user(next);
 
        /*
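
The MTE hunks move the system_supports_mte() test out of the out-of-line helper and into the inline entry/exit wrappers and mte_thread_switch(), so CPUs without MTE return before making any call at all. A generic sketch of that pattern with hypothetical names (feature_check_on_entry() stands in for mte_check_tfsr_entry(), do_feature_checks() for mte_check_tfsr_el1()):

/* Userspace sketch: cheap capability test in the inline wrapper. */
#include <stdbool.h>
#include <stdio.h>

static bool system_supports_feature(void)
{
        return false;           /* pretend the CPU lacks the feature */
}

static void do_feature_checks(void)
{
        puts("expensive per-entry/per-switch work");
}

static inline void feature_check_on_entry(void)
{
        if (!system_supports_feature())
                return;         /* early out: no call, no extra work */

        do_feature_checks();
}

int main(void)
{
        feature_check_on_entry();
        return 0;
}
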
index 19100fe8f7e499851251e2d1e9d201fa21aa9d7a..40adb8cdbf5af667d595abe5908cb81723d89722 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/nospec.h>
-#include <linux/sched.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
 #include <linux/unistd.h>
@@ -58,7 +57,7 @@
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
+unsigned long __stack_chk_guard __ro_after_init;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif
 
index 9fe70b12b34f62ff1c672a600bfcc585c70e7ab5..c287b9407f287f21ca18ab527bdd488485e9f72f 100644 (file)
@@ -940,10 +940,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
                        if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                                do_signal(regs);
 
-                       if (thread_flags & _TIF_NOTIFY_RESUME) {
+                       if (thread_flags & _TIF_NOTIFY_RESUME)
                                tracehook_notify_resume(regs);
-                               rseq_handle_notify_resume(NULL, regs);
-                       }
 
                        if (thread_flags & _TIF_FOREIGN_FPSTATE)
                                fpsimd_restore_current_state();
index 5df6193fc43044158f60db2606fb27d6902957c6..8d741f71377f4bf72b551eefcb1d847d4f761842 100644 (file)
@@ -54,7 +54,7 @@ $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
 #    runtime. Because the hypervisor is part of the kernel binary, relocations
 #    produce a kernel VA. We enumerate relocations targeting hyp at build time
 #    and convert the kernel VAs at those positions to hyp VAs.
-$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
+$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel FORCE
        $(call if_changed,hyprel)
 
 # 5) Compile hyp-reloc.S and link it into the existing partially linked object.
index f9bb3b14130eef9da9e6fe0214c65bc9d23f6861..c84fe24b2ea1e8f1f3a2ab6e1f6e6c9bb46801ed 100644 (file)
@@ -50,9 +50,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
 
 int kvm_perf_init(void)
 {
-       if (kvm_pmu_probe_pmuver() != ID_AA64DFR0_PMUVER_IMP_DEF && !is_protected_kvm_enabled())
-               static_branch_enable(&kvm_arm_pmu_available);
-
        return perf_register_guest_info_callbacks(&kvm_guest_cbs);
 }
 
index f5065f23b413faf606137b3ceabc1bc81b87a27b..2af3c37445e00899217ac82a6c9e7ddc474b7093 100644 (file)
@@ -740,7 +740,14 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
        kvm_pmu_create_perf_event(vcpu, select_idx);
 }
 
-int kvm_pmu_probe_pmuver(void)
+void kvm_host_pmu_init(struct arm_pmu *pmu)
+{
+       if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
+           !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
+               static_branch_enable(&kvm_arm_pmu_available);
+}
+
+static int kvm_pmu_probe_pmuver(void)
 {
        struct perf_event_attr attr = { };
        struct perf_event *event;
index d7bee210a798af7ab0a387279430cbd24b42c041..83bcad72ec97205f4e7dfd04d5d92ee304d81c13 100644 (file)
@@ -173,4 +173,4 @@ L(done):
        ret
 
 SYM_FUNC_END_PI(strcmp)
-EXPORT_SYMBOL_NOKASAN(strcmp)
+EXPORT_SYMBOL_NOHWKASAN(strcmp)
index 48d44f7fddb134c4cfdb4fc40b3a7c1f0f656b7e..e42bcfcd37e6f691a8b78e3c17a6330272cbefd0 100644 (file)
@@ -258,4 +258,4 @@ L(ret0):
        ret
 
 SYM_FUNC_END_PI(strncmp)
-EXPORT_SYMBOL_NOKASAN(strncmp)
+EXPORT_SYMBOL_NOHWKASAN(strncmp)
index 23505fc353247019e952a2b760dda82677d5fdfe..a8158c9489666819b5d2a70034c3c645e512bc1f 100644 (file)
@@ -43,7 +43,7 @@ void __init arm64_hugetlb_cma_reserve(void)
 #ifdef CONFIG_ARM64_4K_PAGES
        order = PUD_SHIFT - PAGE_SHIFT;
 #else
-       order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
+       order = CONT_PMD_SHIFT - PAGE_SHIFT;
 #endif
        /*
         * HugeTLB CMA reservation is required for gigantic
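
On arm64 CONT_PMD_SHIFT already includes PMD_SHIFT, so the old formula double-counted it and requested an absurdly large CMA order on non-4K page kernels. The sketch below works the numbers for the usual 16K and 64K geometries; the shift values are assumptions (32 contiguous PMD entries, typical configs) rather than anything stated in this patch.

/* Userspace sketch: gigantic-page CMA order, fixed vs. old formula. */
#include <stdio.h>

struct geom {
        const char *name;
        int page_shift;
        int pmd_shift;
        int cont_pmd_shift;     /* already includes pmd_shift on arm64 */
};

int main(void)
{
        const struct geom g[] = {
                { "16K pages", 14, 25, 30 },
                { "64K pages", 16, 29, 34 },
        };

        for (unsigned int i = 0; i < sizeof(g) / sizeof(g[0]); i++) {
                int fixed  = g[i].cont_pmd_shift - g[i].page_shift;
                int broken = g[i].cont_pmd_shift + g[i].pmd_shift - g[i].page_shift;

                printf("%s: order %d (gigantic page = 2^%d bytes); old formula gave order %d\n",
                       g[i].name, fixed, g[i].cont_pmd_shift, broken);
        }
        return 0;
}
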
index 9d4d898df76ba717e7808afc4209e0cc20c4dafc..823d3d5a9e11487c3037fbbc01af0141456c9335 100644 (file)
@@ -8,7 +8,7 @@ config CSKY
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_QUEUED_RWLOCKS
-       select ARCH_WANT_FRAME_POINTERS if !CPU_CK610
+       select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
        select COMMON_CLK
        select CLKSRC_MMIO
@@ -241,6 +241,7 @@ endchoice
 
 menuconfig HAVE_TCM
        bool "Tightly-Coupled/Sram Memory"
+       depends on !COMPILE_TEST
        help
          The implementation are not only used by TCM (Tightly-Coupled Meory)
          but also used by sram on SOC bus. It follow existed linux tcm
index 91818787d860925defd3fc71cb3cefe87fe02d17..02b72a0007671308878bae4de47eda256b442324 100644 (file)
@@ -74,7 +74,6 @@ static __always_inline unsigned long __fls(unsigned long x)
  * bug fix, why only could use atomic!!!!
  */
 #include <asm-generic/bitops/non-atomic.h>
-#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
 
 #include <asm-generic/bitops/le.h>
 #include <asm-generic/bitops/ext2-atomic.h>
index 0105ac81b4328adc090342543cd4601c048aed7d..1a5f54e0d272631137179c848715a7ab8a5791ff 100644 (file)
@@ -99,7 +99,8 @@ static int gpr_set(struct task_struct *target,
        if (ret)
                return ret;
 
-       regs.sr = task_pt_regs(target)->sr;
+       /* BIT(0) of regs.sr is Condition Code/Carry bit */
+       regs.sr = (regs.sr & BIT(0)) | (task_pt_regs(target)->sr & ~BIT(0));
 #ifdef CONFIG_CPU_HAS_HILO
        regs.dcsr = task_pt_regs(target)->dcsr;
 #endif
index 312f046d452d87e19f6da7230d4ab50db1140ed3..c7b763d2f526e661f16b5ad98c5c56bd4c7c257e 100644 (file)
@@ -52,10 +52,14 @@ static long restore_sigcontext(struct pt_regs *regs,
        struct sigcontext __user *sc)
 {
        int err = 0;
+       unsigned long sr = regs->sr;
 
        /* sc_pt_regs is structured the same as the start of pt_regs */
        err |= __copy_from_user(regs, &sc->sc_pt_regs, sizeof(struct pt_regs));
 
+       /* BIT(0) of regs->sr is Condition Code/Carry bit */
+       regs->sr = (sr & ~1) | (regs->sr & 1);
+
        /* Restore the floating-point state. */
        err |= restore_fpu_state(sc);
 
@@ -260,8 +264,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                do_signal(regs);
 
-       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+       if (thread_info_flags & _TIF_NOTIFY_RESUME)
                tracehook_notify_resume(regs);
-               rseq_handle_notify_resume(NULL, regs);
-       }
 }
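
The csky ptrace and sigreturn hunks above apply the same bit-merge: userspace may only influence bit 0 of the status register (the condition code/carry bit), while every other bit is kept from the kernel-maintained copy, whether the value arrives via ptrace or via a signal frame. A small userspace sketch of the idiom:

/* Userspace sketch of the (user & mask) | (kernel & ~mask) merge. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)  (1u << (n))

static uint32_t merge_sr(uint32_t user_sr, uint32_t kernel_sr)
{
        /* bit 0 = condition code/carry: user-controlled; rest: preserved */
        return (user_sr & BIT(0)) | (kernel_sr & ~BIT(0));
}

int main(void)
{
        uint32_t kernel_sr = 0x80000140;        /* made-up privileged bits */
        uint32_t user_sr   = 0x00000001;        /* tries to set carry only */

        printf("merged sr = 0x%08x\n", merge_sr(user_sr, kernel_sr));
        /* -> 0x80000141: carry taken from userspace, privileged bits kept */
        return 0;
}
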
index 045792cde4811ef33bbdf7d614d2c53b52397e37..1e33666fa679be428e11ea0e3f0917d53de8e33c 100644 (file)
@@ -388,8 +388,6 @@ config CRASH_DUMP
          help
            Generate crash dump after being started by kexec.
 
-source "drivers/firmware/Kconfig"
-
 endmenu
 
 menu "Power management and ACPI options"
index 259b3661b614168ff8ab377587c66b1478222218..997b549330156dc87bf48de2a2c2435b4fdc62d2 100644 (file)
@@ -15,7 +15,6 @@
 #include <asm/unistd.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/traps.h>
 #include <asm/asm-offsets.h>
 #include <asm/entry.h>
@@ -25,7 +24,6 @@
 .globl system_call
 .globl resume
 .globl ret_from_exception
-.globl ret_from_signal
 .globl sys_call_table
 .globl bad_interrupt
 .globl inthandler1
@@ -59,8 +57,6 @@ do_trace:
        subql   #4,%sp                  /* dummy return address */
        SAVE_SWITCH_STACK
        jbsr    syscall_trace_leave
-
-ret_from_signal:
        RESTORE_SWITCH_STACK
        addql   #4,%sp
        jra     ret_from_exception
index 774c35f47eeab5affbfba83c375452165cae16ba..0b50da08a9c56fc691aa1e918e5b641ae2dc7fae 100644 (file)
@@ -29,7 +29,6 @@ config M68K
        select NO_DMA if !MMU && !COLDFIRE
        select OLD_SIGACTION
        select OLD_SIGSUSPEND3
-       select SET_FS
        select UACCESS_MEMCPY if !MMU
        select VIRT_TO_BUS
        select ZONE_DMA
index d43a02795a4a445e18efea2d5eb386534ae8816e..9f337c70243a39f75828675170028f2ed2c18980 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/thread_info.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/asm-offsets.h>
 #include <asm/entry.h>
 
@@ -51,7 +50,6 @@ sw_usp:
 .globl system_call
 .globl resume
 .globl ret_from_exception
-.globl ret_from_signal
 .globl sys_call_table
 .globl inthandler
 
@@ -98,8 +96,6 @@ ENTRY(system_call)
        subql   #4,%sp                  /* dummy return address */
        SAVE_SWITCH_STACK
        jbsr    syscall_trace_leave
-
-ret_from_signal:
        RESTORE_SWITCH_STACK
        addql   #4,%sp
 
index 3750819ac5a13bde46a73d0b9ae6af21dd5aa42f..f4d82c619a5c48ec7b3a87df334089c0e44873e6 100644 (file)
@@ -9,7 +9,6 @@
 #define __ASM_M68K_PROCESSOR_H
 
 #include <linux/thread_info.h>
-#include <asm/segment.h>
 #include <asm/fpu.h>
 #include <asm/ptrace.h>
 
@@ -75,11 +74,37 @@ static inline void wrusp(unsigned long usp)
 #define TASK_UNMAPPED_BASE     0
 #endif
 
+/* Address spaces (or Function Codes in Motorola lingo) */
+#define USER_DATA     1
+#define USER_PROGRAM  2
+#define SUPER_DATA    5
+#define SUPER_PROGRAM 6
+#define CPU_SPACE     7
+
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
+/*
+ * Set the SFC/DFC registers for special MM operations.  For most normal
+ * operation these remain set to USER_DATA for the uaccess routines.
+ */
+static inline void set_fc(unsigned long val)
+{
+       WARN_ON_ONCE(in_interrupt());
+
+       __asm__ __volatile__ ("movec %0,%/sfc\n\t"
+                             "movec %0,%/dfc\n\t"
+                             : /* no outputs */ : "r" (val) : "memory");
+}
+#else
+static inline void set_fc(unsigned long val)
+{
+}
+#endif /* CONFIG_CPU_HAS_ADDRESS_SPACES */
+
 struct thread_struct {
        unsigned long  ksp;             /* kernel stack pointer */
        unsigned long  usp;             /* user stack pointer */
        unsigned short sr;              /* saved status register */
-       unsigned short fs;              /* saved fs (sfc, dfc) */
+       unsigned short fc;              /* saved fc (sfc, dfc) */
        unsigned long  crp[2];          /* cpu root pointer */
        unsigned long  esp0;            /* points to SR of stack frame */
        unsigned long  faddr;           /* info about last fault */
@@ -92,7 +117,7 @@ struct thread_struct {
 #define INIT_THREAD  {                                                 \
        .ksp    = sizeof(init_stack) + (unsigned long) init_stack,      \
        .sr     = PS_S,                                                 \
-       .fs     = __KERNEL_DS,                                          \
+       .fc     = USER_DATA,                                            \
 }
 
 /*
index 911826ea83ce1a3f9524b6a882051f85e7abbf6e..80eb2396d01ebf61895f8471c33b0e9728f25bcd 100644 (file)
  * two accesses to memory, which may be undesirable for some devices.
  */
 #define in_8(addr) \
-    ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
+    ({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; })
 #define in_be16(addr) \
-    ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
+    ({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; })
 #define in_be32(addr) \
-    ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
+    ({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; })
 #define in_le16(addr) \
-    ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
+    ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; })
 #define in_le32(addr) \
-    ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
+    ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; })
 
-#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
-#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
-#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
-#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
-#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
+#define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b))
+#define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w))
+#define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l))
+#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w))
+#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l))
 
 #define raw_inb in_8
 #define raw_inw in_be16
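
The extra (unsigned long) cast lets these accessors take either a pointer or a plain numeric address; a common reason for such an intermediate cast is to silence integer-to-pointer conversion warnings when callers pass computed I/O addresses, though the patch itself does not say so. A hedged userspace sketch of the same shape, using a local variable's address so it can actually run:

/* Userspace sketch (GNU C statement expressions, as in the kernel macros). */
#include <stdint.h>
#include <stdio.h>

#define in_8(addr) \
        ({ uint8_t __v = (*(volatile uint8_t *)(uintptr_t)(addr)); __v; })
#define out_8(addr, b) \
        ((void)((*(volatile uint8_t *)(uintptr_t)(addr)) = (b)))

int main(void)
{
        uint8_t fake_reg = 0;
        uintptr_t numeric_addr = (uintptr_t)&fake_reg;  /* stand-in for an I/O address */

        out_8(numeric_addr, 0x5a);              /* numeric address, no cast warning */
        out_8(&fake_reg, in_8(numeric_addr));   /* a pointer works too */
        printf("read back 0x%02x\n", (unsigned)in_8(&fake_reg));
        return 0;
}
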
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h
deleted file mode 100644 (file)
index 2b5e68a..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _M68K_SEGMENT_H
-#define _M68K_SEGMENT_H
-
-/* define constants */
-/* Address spaces (FC0-FC2) */
-#define USER_DATA     (1)
-#ifndef __USER_DS
-#define __USER_DS     (USER_DATA)
-#endif
-#define USER_PROGRAM  (2)
-#define SUPER_DATA    (5)
-#ifndef __KERNEL_DS
-#define __KERNEL_DS   (SUPER_DATA)
-#endif
-#define SUPER_PROGRAM (6)
-#define CPU_SPACE     (7)
-
-#ifndef __ASSEMBLY__
-
-typedef struct {
-       unsigned long seg;
-} mm_segment_t;
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
-/*
- * Get/set the SFC/DFC registers for MOVES instructions
- */
-#define USER_DS                MAKE_MM_SEG(__USER_DS)
-#define KERNEL_DS      MAKE_MM_SEG(__KERNEL_DS)
-
-static inline mm_segment_t get_fs(void)
-{
-       mm_segment_t _v;
-       __asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
-       return _v;
-}
-
-static inline void set_fs(mm_segment_t val)
-{
-       __asm__ __volatile__ ("movec %0,%/sfc\n\t"
-                             "movec %0,%/dfc\n\t"
-                             : /* no outputs */ : "r" (val.seg) : "memory");
-}
-
-#else
-#define USER_DS                MAKE_MM_SEG(TASK_SIZE)
-#define KERNEL_DS      MAKE_MM_SEG(0xFFFFFFFF)
-#define get_fs()       (current_thread_info()->addr_limit)
-#define set_fs(x)      (current_thread_info()->addr_limit = (x))
-#endif
-
-#define uaccess_kernel()       (get_fs().seg == KERNEL_DS.seg)
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _M68K_SEGMENT_H */
index 15a757073fa58ffcab231b5b7f4190479b2d363b..c952658ba79223b028d9dd7baba20d8a3992d119 100644 (file)
@@ -4,7 +4,6 @@
 
 #include <asm/types.h>
 #include <asm/page.h>
-#include <asm/segment.h>
 
 /*
  * On machines with 4k pages we default to an 8k thread size, though we
@@ -27,7 +26,6 @@
 struct thread_info {
        struct task_struct      *task;          /* main task structure */
        unsigned long           flags;
-       mm_segment_t            addr_limit;     /* thread address space */
        int                     preempt_count;  /* 0 => preemptable, <0 => BUG */
        __u32                   cpu;            /* should always be 0 on m68k */
        unsigned long           tp_value;       /* thread pointer */
@@ -37,7 +35,6 @@ struct thread_info {
 #define INIT_THREAD_INFO(tsk)                  \
 {                                              \
        .task           = &tsk,                 \
-       .addr_limit     = KERNEL_DS,            \
        .preempt_count  = INIT_PREEMPT_COUNT,   \
 }
 
index a6318ccd308fd3ae3bbf3e0a62f05aae7b6ed9d1..b882e2f4f5516f8d67c501596ec22df5c2f92610 100644 (file)
@@ -13,13 +13,12 @@ static inline void flush_tlb_kernel_page(void *addr)
        if (CPU_IS_COLDFIRE) {
                mmu_write(MMUOR, MMUOR_CNL);
        } else if (CPU_IS_040_OR_060) {
-               mm_segment_t old_fs = get_fs();
-               set_fs(KERNEL_DS);
+               set_fc(SUPER_DATA);
                __asm__ __volatile__(".chip 68040\n\t"
                                     "pflush (%0)\n\t"
                                     ".chip 68k"
                                     : : "a" (addr));
-               set_fs(old_fs);
+               set_fc(USER_DATA);
        } else if (CPU_IS_020_OR_030)
                __asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
 }
@@ -84,12 +83,8 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 
 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-       if (vma->vm_mm == current->active_mm) {
-               mm_segment_t old_fs = force_uaccess_begin();
-
+       if (vma->vm_mm == current->active_mm)
                __flush_tlb_one(addr);
-               force_uaccess_end(old_fs);
-       }
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
index 4aff3358fbaff75eb80952b2ea05977845614421..a9d5c1c870d312021ed9bea77ac5075326dc978d 100644 (file)
@@ -267,6 +267,10 @@ struct frame {
     } un;
 };
 
+#ifdef CONFIG_M68040
+asmlinkage void berr_040cleanup(struct frame *fp);
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _M68K_TRAPS_H */
index f98208ccbbcd1a3ed3594f092ff0c53b11a4165f..ba670523885c89ff72352f0147da5534b3773a1b 100644 (file)
@@ -9,13 +9,16 @@
  */
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <asm/segment.h>
 #include <asm/extable.h>
 
 /* We let the MMU do all checking */
 static inline int access_ok(const void __user *addr,
                            unsigned long size)
 {
+       /*
+        * XXX: for !CONFIG_CPU_HAS_ADDRESS_SPACES this really needs to check
+        * for TASK_SIZE!
+        */
        return 1;
 }
 
@@ -35,12 +38,9 @@ static inline int access_ok(const void __user *addr,
 #define        MOVES   "move"
 #endif
 
-extern int __put_user_bad(void);
-extern int __get_user_bad(void);
-
-#define __put_user_asm(res, x, ptr, bwl, reg, err)     \
+#define __put_user_asm(inst, res, x, ptr, bwl, reg, err) \
 asm volatile ("\n"                                     \
-       "1:     "MOVES"."#bwl"  %2,%1\n"                \
+       "1:     "inst"."#bwl"   %2,%1\n"                \
        "2:\n"                                          \
        "       .section .fixup,\"ax\"\n"               \
        "       .even\n"                                \
@@ -56,6 +56,31 @@ asm volatile ("\n"                                   \
        : "+d" (res), "=m" (*(ptr))                     \
        : #reg (x), "i" (err))
 
+#define __put_user_asm8(inst, res, x, ptr)                     \
+do {                                                           \
+       const void *__pu_ptr = (const void __force *)(ptr);     \
+                                                               \
+       asm volatile ("\n"                                      \
+               "1:     "inst".l %2,(%1)+\n"                    \
+               "2:     "inst".l %R2,(%1)\n"                    \
+               "3:\n"                                          \
+               "       .section .fixup,\"ax\"\n"               \
+               "       .even\n"                                \
+               "10:    movel %3,%0\n"                          \
+               "       jra 3b\n"                               \
+               "       .previous\n"                            \
+               "\n"                                            \
+               "       .section __ex_table,\"a\"\n"            \
+               "       .align 4\n"                             \
+               "       .long 1b,10b\n"                         \
+               "       .long 2b,10b\n"                         \
+               "       .long 3b,10b\n"                         \
+               "       .previous"                              \
+               : "+d" (res), "+a" (__pu_ptr)                   \
+               : "r" (x), "i" (-EFAULT)                        \
+               : "memory");                                    \
+} while (0)
+
 /*
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
@@ -68,51 +93,29 @@ asm volatile ("\n"                                  \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof (*(ptr))) {                                      \
        case 1:                                                         \
-               __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
+               __put_user_asm(MOVES, __pu_err, __pu_val, ptr, b, d, -EFAULT); \
                break;                                                  \
        case 2:                                                         \
-               __put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT); \
+               __put_user_asm(MOVES, __pu_err, __pu_val, ptr, w, r, -EFAULT); \
                break;                                                  \
        case 4:                                                         \
-               __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
+               __put_user_asm(MOVES, __pu_err, __pu_val, ptr, l, r, -EFAULT); \
                break;                                                  \
        case 8:                                                         \
-           {                                                           \
-               const void __user *__pu_ptr = (ptr);                    \
-               asm volatile ("\n"                                      \
-                       "1:     "MOVES".l       %2,(%1)+\n"             \
-                       "2:     "MOVES".l       %R2,(%1)\n"             \
-                       "3:\n"                                          \
-                       "       .section .fixup,\"ax\"\n"               \
-                       "       .even\n"                                \
-                       "10:    movel %3,%0\n"                          \
-                       "       jra 3b\n"                               \
-                       "       .previous\n"                            \
-                       "\n"                                            \
-                       "       .section __ex_table,\"a\"\n"            \
-                       "       .align 4\n"                             \
-                       "       .long 1b,10b\n"                         \
-                       "       .long 2b,10b\n"                         \
-                       "       .long 3b,10b\n"                         \
-                       "       .previous"                              \
-                       : "+d" (__pu_err), "+a" (__pu_ptr)              \
-                       : "r" (__pu_val), "i" (-EFAULT)                 \
-                       : "memory");                                    \
+               __put_user_asm8(MOVES, __pu_err, __pu_val, ptr);        \
                break;                                                  \
-           }                                                           \
        default:                                                        \
-               __pu_err = __put_user_bad();                            \
-               break;                                                  \
+               BUILD_BUG();                                            \
        }                                                               \
        __pu_err;                                                       \
 })
 #define put_user(x, ptr)       __put_user(x, ptr)
 
 
-#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({            \
+#define __get_user_asm(inst, res, x, ptr, type, bwl, reg, err) ({      \
        type __gu_val;                                                  \
        asm volatile ("\n"                                              \
-               "1:     "MOVES"."#bwl"  %2,%1\n"                        \
+               "1:     "inst"."#bwl"   %2,%1\n"                        \
                "2:\n"                                                  \
                "       .section .fixup,\"ax\"\n"                       \
                "       .even\n"                                        \
@@ -130,53 +133,57 @@ asm volatile ("\n"                                        \
        (x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val;  \
 })
 
+#define __get_user_asm8(inst, res, x, ptr)                             \
+do {                                                                   \
+       const void *__gu_ptr = (const void __force *)(ptr);             \
+       union {                                                         \
+               u64 l;                                                  \
+               __typeof__(*(ptr)) t;                                   \
+       } __gu_val;                                                     \
+                                                                       \
+       asm volatile ("\n"                                              \
+               "1:     "inst".l (%2)+,%1\n"                            \
+               "2:     "inst".l (%2),%R1\n"                            \
+               "3:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .even\n"                                        \
+               "10:    move.l  %3,%0\n"                                \
+               "       sub.l   %1,%1\n"                                \
+               "       sub.l   %R1,%R1\n"                              \
+               "       jra     3b\n"                                   \
+               "       .previous\n"                                    \
+               "\n"                                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  4\n"                                    \
+               "       .long   1b,10b\n"                               \
+               "       .long   2b,10b\n"                               \
+               "       .previous"                                      \
+               : "+d" (res), "=&r" (__gu_val.l),                       \
+                 "+a" (__gu_ptr)                                       \
+               : "i" (-EFAULT)                                         \
+               : "memory");                                            \
+       (x) = __gu_val.t;                                               \
+} while (0)
+
 #define __get_user(x, ptr)                                             \
 ({                                                                     \
        int __gu_err = 0;                                               \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
-               __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);    \
+               __get_user_asm(MOVES, __gu_err, x, ptr, u8, b, d, -EFAULT); \
                break;                                                  \
        case 2:                                                         \
-               __get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT);   \
+               __get_user_asm(MOVES, __gu_err, x, ptr, u16, w, r, -EFAULT); \
                break;                                                  \
        case 4:                                                         \
-               __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);   \
+               __get_user_asm(MOVES, __gu_err, x, ptr, u32, l, r, -EFAULT); \
                break;                                                  \
-       case 8: {                                                       \
-               const void __user *__gu_ptr = (ptr);                    \
-               union {                                                 \
-                       u64 l;                                          \
-                       __typeof__(*(ptr)) t;                           \
-               } __gu_val;                                             \
-               asm volatile ("\n"                                      \
-                       "1:     "MOVES".l       (%2)+,%1\n"             \
-                       "2:     "MOVES".l       (%2),%R1\n"             \
-                       "3:\n"                                          \
-                       "       .section .fixup,\"ax\"\n"               \
-                       "       .even\n"                                \
-                       "10:    move.l  %3,%0\n"                        \
-                       "       sub.l   %1,%1\n"                        \
-                       "       sub.l   %R1,%R1\n"                      \
-                       "       jra     3b\n"                           \
-                       "       .previous\n"                            \
-                       "\n"                                            \
-                       "       .section __ex_table,\"a\"\n"            \
-                       "       .align  4\n"                            \
-                       "       .long   1b,10b\n"                       \
-                       "       .long   2b,10b\n"                       \
-                       "       .previous"                              \
-                       : "+d" (__gu_err), "=&r" (__gu_val.l),          \
-                         "+a" (__gu_ptr)                               \
-                       : "i" (-EFAULT)                                 \
-                       : "memory");                                    \
-               (x) = __gu_val.t;                                       \
+       case 8:                                                         \
+               __get_user_asm8(MOVES, __gu_err, x, ptr);               \
                break;                                                  \
-       }                                                               \
        default:                                                        \
-               __gu_err = __get_user_bad();                            \
-               break;                                                  \
+               BUILD_BUG();                                            \
        }                                                               \
        __gu_err;                                                       \
 })
@@ -322,16 +329,19 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 
        switch (n) {
        case 1:
-               __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
+               __put_user_asm(MOVES, res, *(u8 *)from, (u8 __user *)to,
+                               b, d, 1);
                break;
        case 2:
-               __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
+               __put_user_asm(MOVES, res, *(u16 *)from, (u16 __user *)to,
+                               w, r, 2);
                break;
        case 3:
                __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
                break;
        case 4:
-               __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
+               __put_user_asm(MOVES, res, *(u32 *)from, (u32 __user *)to,
+                               l, r, 4);
                break;
        case 5:
                __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
@@ -380,8 +390,65 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 #define INLINE_COPY_FROM_USER
 #define INLINE_COPY_TO_USER
 
-#define user_addr_max() \
-       (uaccess_kernel() ? ~0UL : TASK_SIZE)
+#define HAVE_GET_KERNEL_NOFAULT
+
+#define __get_kernel_nofault(dst, src, type, err_label)                        \
+do {                                                                   \
+       type *__gk_dst = (type *)(dst);                                 \
+       type *__gk_src = (type *)(src);                                 \
+       int __gk_err = 0;                                               \
+                                                                       \
+       switch (sizeof(type)) {                                         \
+       case 1:                                                         \
+               __get_user_asm("move", __gk_err, *__gk_dst, __gk_src,   \
+                               u8, b, d, -EFAULT);                     \
+               break;                                                  \
+       case 2:                                                         \
+               __get_user_asm("move", __gk_err, *__gk_dst, __gk_src,   \
+                               u16, w, r, -EFAULT);                    \
+               break;                                                  \
+       case 4:                                                         \
+               __get_user_asm("move", __gk_err, *__gk_dst, __gk_src,   \
+                               u32, l, r, -EFAULT);                    \
+               break;                                                  \
+       case 8:                                                         \
+               __get_user_asm8("move", __gk_err, *__gk_dst, __gk_src); \
+               break;                                                  \
+       default:                                                        \
+               BUILD_BUG();                                            \
+       }                                                               \
+       if (unlikely(__gk_err))                                         \
+               goto err_label;                                         \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label)                        \
+do {                                                                   \
+       type __pk_src = *(type *)(src);                                 \
+       type *__pk_dst = (type *)(dst);                                 \
+       int __pk_err = 0;                                               \
+                                                                       \
+       switch (sizeof(type)) {                                         \
+       case 1:                                                         \
+               __put_user_asm("move", __pk_err, __pk_src, __pk_dst,    \
+                               b, d, -EFAULT);                         \
+               break;                                                  \
+       case 2:                                                         \
+               __put_user_asm("move", __pk_err, __pk_src, __pk_dst,    \
+                               w, r, -EFAULT);                         \
+               break;                                                  \
+       case 4:                                                         \
+               __put_user_asm("move", __pk_err, __pk_src, __pk_dst,    \
+                               l, r, -EFAULT);                         \
+               break;                                                  \
+       case 8:                                                         \
+               __put_user_asm8("move", __pk_err, __pk_src, __pk_dst);  \
+               break;                                                  \
+       default:                                                        \
+               BUILD_BUG();                                            \
+       }                                                               \
+       if (unlikely(__pk_err))                                         \
+               goto err_label;                                         \
+} while (0)
 
 extern long strncpy_from_user(char *dst, const char __user *src, long count);
 extern __must_check long strnlen_user(const char __user *str, long n);
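The new __get_kernel_nofault()/__put_kernel_nofault() hooks above jump to a caller-supplied label on a fault rather than returning an error code. A minimal sketch of a caller following that convention (illustrative names, assuming the usual pagefault_disable() bracketing done by generic maccess code):

static long read_kernel_word(u32 *dst, const u32 *src)
{
        pagefault_disable();
        __get_kernel_nofault(dst, src, u32, Efault);
        pagefault_enable();
        return 0;

Efault:
        /* the macro branched here because the access faulted */
        pagefault_enable();
        return -EFAULT;
}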
index ccea355052efad8994f9682eed160b9fde6ba57c..906d73230537440fde199e779bd484683a0e8d9d 100644 (file)
@@ -31,7 +31,7 @@ int main(void)
        DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
        DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
        DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
-       DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
+       DEFINE(THREAD_FC, offsetof(struct thread_struct, fc));
        DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
        DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
        DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
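For readers unfamiliar with asm-offsets: DEFINE() emits the structure offset as a magic string that kbuild turns into a #define in the generated asm-offsets.h, which is what lets entry.S address the renamed thread_struct.fc field as THREAD_FC. A sketch of the generic macro this relies on (assumed, from include/linux/kbuild.h):

/* Emits "->THREAD_FC <offset>" into the compiler's asm output; kbuild
 * post-processes that line into "#define THREAD_FC <offset>". */
#define DEFINE(sym, val) \
        asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))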
index 9dd76fbb7c6b2752b0a8eefb847a65e2f26251b1..9434fca68de5d018b9f301f9b8813f7b6826c667 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/traps.h>
 #include <asm/unistd.h>
 #include <asm/asm-offsets.h>
@@ -78,20 +77,38 @@ ENTRY(__sys_clone3)
 
 ENTRY(sys_sigreturn)
        SAVE_SWITCH_STACK
-       movel   %sp,%sp@-                 | switch_stack pointer
-       pea     %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+       movel   %sp,%a1                         | switch_stack pointer
+       lea     %sp@(SWITCH_STACK_SIZE),%a0     | pt_regs pointer
+       lea     %sp@(-84),%sp                   | leave a gap
+       movel   %a1,%sp@-
+       movel   %a0,%sp@-
        jbsr    do_sigreturn
-       addql   #8,%sp
-       RESTORE_SWITCH_STACK
-       rts
+       jra     1f                              | shared with rt_sigreturn()
 
 ENTRY(sys_rt_sigreturn)
        SAVE_SWITCH_STACK
-       movel   %sp,%sp@-                 | switch_stack pointer
-       pea     %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+       movel   %sp,%a1                         | switch_stack pointer
+       lea     %sp@(SWITCH_STACK_SIZE),%a0     | pt_regs pointer
+       lea     %sp@(-84),%sp                   | leave a gap
+       movel   %a1,%sp@-
+       movel   %a0,%sp@-
+       | stack contents:
+       |   [original pt_regs address] [original switch_stack address]
+       |   [gap] [switch_stack] [pt_regs] [exception frame]
        jbsr    do_rt_sigreturn
-       addql   #8,%sp
+
+1:
+       | stack contents now:
+       |   [original pt_regs address] [original switch_stack address]
+       |   [unused part of the gap] [moved switch_stack] [moved pt_regs]
+       |   [replacement exception frame]
+       | return value of do_{rt_,}sigreturn() points to moved switch_stack.
+
+       movel   %d0,%sp                         | discard the leftover junk
        RESTORE_SWITCH_STACK
+       | stack contents now is just [syscall return address] [pt_regs] [frame]
+       | return pt_regs.d0
+       movel   %sp@(PT_OFF_D0+4),%d0
        rts
 
 ENTRY(buserr)
@@ -182,25 +199,6 @@ do_trace_exit:
        addql   #4,%sp
        jra     .Lret_from_exception
 
-ENTRY(ret_from_signal)
-       movel   %curptr@(TASK_STACK),%a1
-       tstb    %a1@(TINFO_FLAGS+2)
-       jge     1f
-       jbsr    syscall_trace
-1:     RESTORE_SWITCH_STACK
-       addql   #4,%sp
-/* on 68040 complete pending writebacks if any */
-#ifdef CONFIG_M68040
-       bfextu  %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
-       subql   #7,%d0                          | bus error frame ?
-       jbne    1f
-       movel   %sp,%sp@-
-       jbsr    berr_040cleanup
-       addql   #4,%sp
-1:
-#endif
-       jra     .Lret_from_exception
-
 ENTRY(system_call)
        SAVE_ALL_SYS
 
@@ -338,7 +336,7 @@ resume:
 
        /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
        movec   %sfc,%d0
-       movew   %d0,%a0@(TASK_THREAD+THREAD_FS)
+       movew   %d0,%a0@(TASK_THREAD+THREAD_FC)
 
        /* save usp */
        /* it is better to use a movel here instead of a movew 8*) */
@@ -424,7 +422,7 @@ resume:
        movel   %a0,%usp
 
        /* restore fs (sfc,%dfc) */
-       movew   %a1@(TASK_THREAD+THREAD_FS),%a0
+       movew   %a1@(TASK_THREAD+THREAD_FC),%a0
        movec   %a0,%sfc
        movec   %a0,%dfc
 
index db49f90917112b8d76769050b5ed774e32b9550d..1ab692b952cd6235db7418cdb7b4cf6a83155f4e 100644 (file)
@@ -92,7 +92,7 @@ void show_regs(struct pt_regs * regs)
 
 void flush_thread(void)
 {
-       current->thread.fs = __USER_DS;
+       current->thread.fc = USER_DATA;
 #ifdef CONFIG_FPU
        if (!FPU_IS_EMU) {
                unsigned long zero = 0;
@@ -155,7 +155,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
         * Must save the current SFC/DFC value, NOT the value when
         * the parent was last descheduled - RGH  10-08-96
         */
-       p->thread.fs = get_fs().seg;
+       p->thread.fc = USER_DATA;
 
        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                /* kernel thread */
index 8f215e79e70e61c1a69b949aac19a6ff8b5a43d9..338817d0cb3fb100609c1c38dc899d27ff5508ba 100644 (file)
@@ -447,7 +447,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
 
        if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
                fpu_version = sc->sc_fpstate[0];
-               if (CPU_IS_020_OR_030 &&
+               if (CPU_IS_020_OR_030 && !regs->stkadj &&
                    regs->vector >= (VEC_FPBRUC * 4) &&
                    regs->vector <= (VEC_FPNAN * 4)) {
                        /* Clear pending exception in 68882 idle frame */
@@ -510,7 +510,7 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
                if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
                        context_size = fpstate[1];
                fpu_version = fpstate[0];
-               if (CPU_IS_020_OR_030 &&
+               if (CPU_IS_020_OR_030 && !regs->stkadj &&
                    regs->vector >= (VEC_FPBRUC * 4) &&
                    regs->vector <= (VEC_FPNAN * 4)) {
                        /* Clear pending exception in 68882 idle frame */
@@ -641,56 +641,35 @@ static inline void siginfo_build_tests(void)
 static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
                               void __user *fp)
 {
-       int fsize = frame_extra_sizes(formatvec >> 12);
-       if (fsize < 0) {
+       int extra = frame_extra_sizes(formatvec >> 12);
+       char buf[sizeof_field(struct frame, un)];
+
+       if (extra < 0) {
                /*
                 * user process trying to return with weird frame format
                 */
                pr_debug("user process returning with weird frame format\n");
-               return 1;
+               return -1;
        }
-       if (!fsize) {
-               regs->format = formatvec >> 12;
-               regs->vector = formatvec & 0xfff;
-       } else {
-               struct switch_stack *sw = (struct switch_stack *)regs - 1;
-               /* yes, twice as much as max(sizeof(frame.un.fmt<x>)) */
-               unsigned long buf[sizeof_field(struct frame, un) / 2];
-
-               /* that'll make sure that expansion won't crap over data */
-               if (copy_from_user(buf + fsize / 4, fp, fsize))
-                       return 1;
-
-               /* point of no return */
-               regs->format = formatvec >> 12;
-               regs->vector = formatvec & 0xfff;
-#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
-               __asm__ __volatile__ (
-#ifdef CONFIG_COLDFIRE
-                        "   movel %0,%/sp\n\t"
-                        "   bra ret_from_signal\n"
-#else
-                        "   movel %0,%/a0\n\t"
-                        "   subl %1,%/a0\n\t"     /* make room on stack */
-                        "   movel %/a0,%/sp\n\t"  /* set stack pointer */
-                        /* move switch_stack and pt_regs */
-                        "1: movel %0@+,%/a0@+\n\t"
-                        "   dbra %2,1b\n\t"
-                        "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
-                        "   lsrl  #2,%1\n\t"
-                        "   subql #1,%1\n\t"
-                        /* copy to the gap we'd made */
-                        "2: movel %4@+,%/a0@+\n\t"
-                        "   dbra %1,2b\n\t"
-                        "   bral ret_from_signal\n"
+       if (extra && copy_from_user(buf, fp, extra))
+               return -1;
+       regs->format = formatvec >> 12;
+       regs->vector = formatvec & 0xfff;
+       if (extra) {
+               void *p = (struct switch_stack *)regs - 1;
+               struct frame *new = (void *)regs - extra;
+               int size = sizeof(struct pt_regs)+sizeof(struct switch_stack);
+
+               memmove(p - extra, p, size);
+               memcpy(p - extra + size, buf, extra);
+               current->thread.esp0 = (unsigned long)&new->ptregs;
+#ifdef CONFIG_M68040
+               /* on 68040 complete pending writebacks if any */
+               if (new->ptregs.format == 7) // bus error frame
+                       berr_040cleanup(new);
 #endif
-                        : /* no outputs, it doesn't ever return */
-                        : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
-                          "n" (frame_offset), "a" (buf + fsize/4)
-                        : "a0");
-#undef frame_offset
        }
-       return 0;
+       return extra;
 }
 
 static inline int
@@ -698,7 +677,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
 {
        int formatvec;
        struct sigcontext context;
-       int err = 0;
 
        siginfo_build_tests();
 
@@ -707,7 +685,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
 
        /* get previous context */
        if (copy_from_user(&context, usc, sizeof(context)))
-               goto badframe;
+               return -1;
 
        /* restore passed registers */
        regs->d0 = context.sc_d0;
@@ -720,15 +698,10 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
        wrusp(context.sc_usp);
        formatvec = context.sc_formatvec;
 
-       err = restore_fpu_state(&context);
+       if (restore_fpu_state(&context))
+               return -1;
 
-       if (err || mangle_kernel_stack(regs, formatvec, fp))
-               goto badframe;
-
-       return 0;
-
-badframe:
-       return 1;
+       return mangle_kernel_stack(regs, formatvec, fp);
 }
 
 static inline int
@@ -745,7 +718,7 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
 
        err = __get_user(temp, &uc->uc_mcontext.version);
        if (temp != MCONTEXT_VERSION)
-               goto badframe;
+               return -1;
        /* restore passed registers */
        err |= __get_user(regs->d0, &gregs[0]);
        err |= __get_user(regs->d1, &gregs[1]);
@@ -774,22 +747,17 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
        err |= restore_altstack(&uc->uc_stack);
 
        if (err)
-               goto badframe;
+               return -1;
 
-       if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
-               goto badframe;
-
-       return 0;
-
-badframe:
-       return 1;
+       return mangle_kernel_stack(regs, temp, &uc->uc_extra);
 }
 
-asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
+asmlinkage void *do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 {
        unsigned long usp = rdusp();
        struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
        sigset_t set;
+       int size;
 
        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;
@@ -801,20 +769,22 @@ asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 
        set_current_blocked(&set);
 
-       if (restore_sigcontext(regs, &frame->sc, frame + 1))
+       size = restore_sigcontext(regs, &frame->sc, frame + 1);
+       if (size < 0)
                goto badframe;
-       return regs->d0;
+       return (void *)sw - size;
 
 badframe:
        force_sig(SIGSEGV);
-       return 0;
+       return sw;
 }
 
-asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
+asmlinkage void *do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 {
        unsigned long usp = rdusp();
        struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
        sigset_t set;
+       int size;
 
        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;
@@ -823,27 +793,34 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 
        set_current_blocked(&set);
 
-       if (rt_restore_ucontext(regs, sw, &frame->uc))
+       size = rt_restore_ucontext(regs, sw, &frame->uc);
+       if (size < 0)
                goto badframe;
-       return regs->d0;
+       return (void *)sw - size;
 
 badframe:
        force_sig(SIGSEGV);
-       return 0;
+       return sw;
+}
+
+static inline struct pt_regs *rte_regs(struct pt_regs *regs)
+{
+       return (void *)regs + regs->stkadj;
 }
 
 static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
                             unsigned long mask)
 {
+       struct pt_regs *tregs = rte_regs(regs);
        sc->sc_mask = mask;
        sc->sc_usp = rdusp();
        sc->sc_d0 = regs->d0;
        sc->sc_d1 = regs->d1;
        sc->sc_a0 = regs->a0;
        sc->sc_a1 = regs->a1;
-       sc->sc_sr = regs->sr;
-       sc->sc_pc = regs->pc;
-       sc->sc_formatvec = regs->format << 12 | regs->vector;
+       sc->sc_sr = tregs->sr;
+       sc->sc_pc = tregs->pc;
+       sc->sc_formatvec = tregs->format << 12 | tregs->vector;
        save_a5_state(sc, regs);
        save_fpu_state(sc, regs);
 }
@@ -851,6 +828,7 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
 static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
 {
        struct switch_stack *sw = (struct switch_stack *)regs - 1;
+       struct pt_regs *tregs = rte_regs(regs);
        greg_t __user *gregs = uc->uc_mcontext.gregs;
        int err = 0;
 
@@ -871,9 +849,9 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
        err |= __put_user(sw->a5, &gregs[13]);
        err |= __put_user(sw->a6, &gregs[14]);
        err |= __put_user(rdusp(), &gregs[15]);
-       err |= __put_user(regs->pc, &gregs[16]);
-       err |= __put_user(regs->sr, &gregs[17]);
-       err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
+       err |= __put_user(tregs->pc, &gregs[16]);
+       err |= __put_user(tregs->sr, &gregs[17]);
+       err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
        err |= rt_save_fpu_state(uc, regs);
        return err;
 }
@@ -890,13 +868,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
                        struct pt_regs *regs)
 {
        struct sigframe __user *frame;
-       int fsize = frame_extra_sizes(regs->format);
+       struct pt_regs *tregs = rte_regs(regs);
+       int fsize = frame_extra_sizes(tregs->format);
        struct sigcontext context;
        int err = 0, sig = ksig->sig;
 
        if (fsize < 0) {
                pr_debug("setup_frame: Unknown frame format %#x\n",
-                        regs->format);
+                        tregs->format);
                return -EFAULT;
        }
 
@@ -907,7 +886,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
 
        err |= __put_user(sig, &frame->sig);
 
-       err |= __put_user(regs->vector, &frame->code);
+       err |= __put_user(tregs->vector, &frame->code);
        err |= __put_user(&frame->sc, &frame->psc);
 
        if (_NSIG_WORDS > 1)
@@ -933,34 +912,28 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
 
        push_cache ((unsigned long) &frame->retcode);
 
-       /*
-        * Set up registers for signal handler.  All the state we are about
-        * to destroy is successfully copied to sigframe.
-        */
-       wrusp ((unsigned long) frame);
-       regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
-       adjustformat(regs);
-
        /*
         * This is subtle; if we build more than one sigframe, all but the
         * first one will see frame format 0 and have fsize == 0, so we won't
         * screw stkadj.
         */
-       if (fsize)
+       if (fsize) {
                regs->stkadj = fsize;
-
-       /* Prepare to skip over the extra stuff in the exception frame.  */
-       if (regs->stkadj) {
-               struct pt_regs *tregs =
-                       (struct pt_regs *)((ulong)regs + regs->stkadj);
+               tregs = rte_regs(regs);
                pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
-               /* This must be copied with decreasing addresses to
-                   handle overlaps.  */
                tregs->vector = 0;
                tregs->format = 0;
-               tregs->pc = regs->pc;
                tregs->sr = regs->sr;
        }
+
+       /*
+        * Set up registers for signal handler.  All the state we are about
+        * to destroy is successfully copied to sigframe.
+        */
+       wrusp ((unsigned long) frame);
+       tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+       adjustformat(regs);
+
        return 0;
 }
 
@@ -968,7 +941,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
                           struct pt_regs *regs)
 {
        struct rt_sigframe __user *frame;
-       int fsize = frame_extra_sizes(regs->format);
+       struct pt_regs *tregs = rte_regs(regs);
+       int fsize = frame_extra_sizes(tregs->format);
        int err = 0, sig = ksig->sig;
 
        if (fsize < 0) {
@@ -1018,34 +992,27 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 
        push_cache ((unsigned long) &frame->retcode);
 
-       /*
-        * Set up registers for signal handler.  All the state we are about
-        * to destroy is successfully copied to sigframe.
-        */
-       wrusp ((unsigned long) frame);
-       regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
-       adjustformat(regs);
-
        /*
         * This is subtle; if we build more than one sigframe, all but the
         * first one will see frame format 0 and have fsize == 0, so we won't
         * screw stkadj.
         */
-       if (fsize)
+       if (fsize) {
                regs->stkadj = fsize;
-
-       /* Prepare to skip over the extra stuff in the exception frame.  */
-       if (regs->stkadj) {
-               struct pt_regs *tregs =
-                       (struct pt_regs *)((ulong)regs + regs->stkadj);
+               tregs = rte_regs(regs);
                pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
-               /* This must be copied with decreasing addresses to
-                   handle overlaps.  */
                tregs->vector = 0;
                tregs->format = 0;
-               tregs->pc = regs->pc;
                tregs->sr = regs->sr;
        }
+
+       /*
+        * Set up registers for signal handler.  All the state we are about
+        * to destroy is successfully copied to sigframe.
+        */
+       wrusp ((unsigned long) frame);
+       tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+       adjustformat(regs);
        return 0;
 }
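The rewritten mangle_kernel_stack() above replaces the old hand-rolled copy loop with memmove()/memcpy(): the saved switch_stack + pt_regs block is slid down by the size of the extra exception-frame words, and the words copied from the user sigframe are appended behind it. A small self-contained sketch of just that relocation step (hypothetical helper, for illustration only):

#include <string.h>

static void relocate_frames(char *regs_block, size_t block_size,
                            const char *extra_words, size_t extra)
{
        /* slide switch_stack + pt_regs down to make room ... */
        memmove(regs_block - extra, regs_block, block_size);
        /* ... and append the extra exception-frame words behind it */
        memcpy(regs_block - extra + block_size, extra_words, extra);
}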
 
index 5b19fcdcd69e96e31cd3db25bce50f5ea0a227b0..9718ce94cc845ab36c393b6dda9a349247bc6da3 100644 (file)
@@ -181,9 +181,8 @@ static inline void access_error060 (struct frame *fp)
 static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
 {
        unsigned long mmusr;
-       mm_segment_t old_fs = get_fs();
 
-       set_fs(MAKE_MM_SEG(wbs));
+       set_fc(wbs);
 
        if (iswrite)
                asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
@@ -192,7 +191,7 @@ static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
 
        asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
 
-       set_fs(old_fs);
+       set_fc(USER_DATA);
 
        return mmusr;
 }
@@ -201,10 +200,8 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
                                   unsigned long wbd)
 {
        int res = 0;
-       mm_segment_t old_fs = get_fs();
 
-       /* set_fs can not be moved, otherwise put_user() may oops */
-       set_fs(MAKE_MM_SEG(wbs));
+       set_fc(wbs);
 
        switch (wbs & WBSIZ_040) {
        case BA_SIZE_BYTE:
@@ -218,9 +215,7 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
                break;
        }
 
-       /* set_fs can not be moved, otherwise put_user() may oops */
-       set_fs(old_fs);
-
+       set_fc(USER_DATA);
 
        pr_debug("do_040writeback1, res=%d\n", res);
 
index 90f4e9ca1276b5039ff95f9487bd3c996f6d5e00..4fab3479175865d47b579a965b1249fac4dbc4c3 100644 (file)
@@ -18,7 +18,6 @@
 
 #include <linux/uaccess.h>
 #include <asm/io.h>
-#include <asm/segment.h>
 #include <asm/setup.h>
 #include <asm/macintosh.h>
 #include <asm/mac_via.h>
index b486c0889eece6e81c597c3a8f30ea80e437d086..dde978e66f14fb311740b2f9b009a8d36e86e109 100644 (file)
@@ -49,24 +49,7 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
                if (mmusr & MMU_R_040)
                        return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
        } else {
-               unsigned short mmusr;
-               unsigned long *descaddr;
-
-               asm volatile ("ptestr %3,%2@,#7,%0\n\t"
-                             "pmove %%psr,%1"
-                             : "=a&" (descaddr), "=m" (mmusr)
-                             : "a" (vaddr), "d" (get_fs().seg));
-               if (mmusr & (MMU_I|MMU_B|MMU_L))
-                       return 0;
-               descaddr = phys_to_virt((unsigned long)descaddr);
-               switch (mmusr & MMU_NUM) {
-               case 1:
-                       return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
-               case 2:
-                       return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
-               case 3:
-                       return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
-               }
+               WARN_ON_ONCE(!CPU_IS_040_OR_060);
        }
        return 0;
 }
@@ -107,11 +90,9 @@ void flush_icache_user_range(unsigned long address, unsigned long endaddr)
 
 void flush_icache_range(unsigned long address, unsigned long endaddr)
 {
-       mm_segment_t old_fs = get_fs();
-
-       set_fs(KERNEL_DS);
+       set_fc(SUPER_DATA);
        flush_icache_user_range(address, endaddr);
-       set_fs(old_fs);
+       set_fc(USER_DATA);
 }
 EXPORT_SYMBOL(flush_icache_range);
 
index 5d749e188246d736e2eda5d393dc22b3449e4c7b..1b47bec15832066542b6cfa53e3e46489459c147 100644 (file)
@@ -72,12 +72,6 @@ void __init paging_init(void)
        if (!empty_zero_page)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);
-
-       /*
-        * Set up SFC/DFC registers (user data space).
-        */
-       set_fs (USER_DS);
-
        max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
        free_area_init(max_zone_pfn);
 }
index 1269d513b2217f84e725e8b6cda8503db104ee83..20ddf71b43d05a2c42819acbce516d2bc5a9444a 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/vmalloc.h>
 
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
index fe75aecfb238a3c7f0fdf24733ce1836cdb8de92..c2c03b0a1567f9d37c2fe417b19d5e9719ddb8b3 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/gfp.h>
 
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/traps.h>
 #include <asm/machdep.h>
index 3a653f0a4188d4af82046e8ff4283ff916c43c41..9f3f77785aa78a013ba26b31da01d12a2d5d645f 100644 (file)
@@ -467,7 +467,7 @@ void __init paging_init(void)
        /*
         * Set up SFC/DFC registers
         */
-       set_fs(KERNEL_DS);
+       set_fc(USER_DATA);
 
 #ifdef DEBUG
        printk ("before free_area_init\n");
index e1e90c49a49624ccf2cc81f427723de0c89dd28f..dfd6202fd403e92b208f5c8ba15c37be2912a298 100644 (file)
@@ -171,7 +171,6 @@ static int bcd2int (unsigned char b)
 
 int mvme147_hwclk(int op, struct rtc_time *t)
 {
-#warning check me!
        if (!op) {
                m147_rtc->ctrl = RTC_READ;
                t->tm_year = bcd2int (m147_rtc->bcd_year);
@@ -183,6 +182,9 @@ int mvme147_hwclk(int op, struct rtc_time *t)
                m147_rtc->ctrl = 0;
                if (t->tm_year < 70)
                        t->tm_year += 100;
+       } else {
+               /* FIXME Setting the time is not yet supported */
+               return -EOPNOTSUPP;
        }
        return 0;
 }
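In the m68k mach_hwclk convention used here, op == 0 means "read the RTC into *t" and a non-zero op means "set the RTC from *t"; returning -EOPNOTSUPP from the set path lets the RTC layer report the operation as unsupported instead of silently succeeding. A trivial hypothetical caller, just to show the contract:

static int example_read_rtc(struct rtc_time *t)
{
        return mvme147_hwclk(0, t);     /* op == 0: read */
}

static int example_set_rtc(struct rtc_time *t)
{
        return mvme147_hwclk(1, t);     /* op != 0: set, now -EOPNOTSUPP */
}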
index b59593c7cfb9dfbfd9073b18166bca1cdd5c0515..b4422c2dfbbf4f7c0a1131dd0dab5413e7cc50f0 100644 (file)
@@ -436,7 +436,6 @@ int bcd2int (unsigned char b)
 
 int mvme16x_hwclk(int op, struct rtc_time *t)
 {
-#warning check me!
        if (!op) {
                rtc->ctrl = RTC_READ;
                t->tm_year = bcd2int (rtc->bcd_year);
@@ -448,6 +447,9 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
                rtc->ctrl = 0;
                if (t->tm_year < 70)
                        t->tm_year += 100;
+       } else {
+               /* FIXME Setting the time is not yet supported */
+               return -EOPNOTSUPP;
        }
        return 0;
 }
index f7dd47232b6ca9207fc2e1ed28dfefecd18671aa..203f428a0344a73534ecd1ddc2068f108c282730 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/intersil.h>
 #include <asm/irq.h>
 #include <asm/sections.h>
-#include <asm/segment.h>
 #include <asm/sun3ints.h>
 
 char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
@@ -89,7 +88,7 @@ void __init sun3_init(void)
        sun3_reserved_pmeg[249] = 1;
        sun3_reserved_pmeg[252] = 1;
        sun3_reserved_pmeg[253] = 1;
-       set_fs(KERNEL_DS);
+       set_fc(USER_DATA);
 }
 
 /* Without this, Bad Things happen when something calls arch_reset. */
index 7aa879b7c7ff57423ff792a6275807e5e725c34d..7ec20817c0c9eaffe487a5347aecd1c280866b03 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/uaccess.h>
 #include <asm/page.h>
 #include <asm/sun3mmu.h>
-#include <asm/segment.h>
 #include <asm/oplib.h>
 #include <asm/mmu_context.h>
 #include <asm/dvma.h>
@@ -191,14 +190,13 @@ void __init mmu_emu_init(unsigned long bootmem_end)
        for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
                sun3_put_segmap(seg, SUN3_INVALID_PMEG);
 
-       set_fs(MAKE_MM_SEG(3));
+       set_fc(3);
        for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
                i = sun3_get_segmap(seg);
                for(j = 1; j < CONTEXTS_NUM; j++)
                        (*(romvec->pv_setctxt))(j, (void *)seg, i);
        }
-       set_fs(KERNEL_DS);
-
+       set_fc(USER_DATA);
 }
 
 /* erase the mappings for a dead context.  Uses the pg_dir for hints
index 41ae422119d32826c92b6d7bcd35608bd9735176..36cc280a4505f5d2456b6c917c8c4eb03225a859 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/sched.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
-#include <asm/segment.h>
 #include <asm/intersil.h>
 #include <asm/oplib.h>
 #include <asm/sun3ints.h>
index 74d2fe57524b36694d2e6c5d08e7d8ff5f2ee8c0..64c23bfaa90c5ba9006002c6e91969c776b6bf0c 100644 (file)
@@ -14,7 +14,6 @@
 #include <asm/traps.h>
 #include <asm/sun3xprom.h>
 #include <asm/idprom.h>
-#include <asm/segment.h>
 #include <asm/sun3ints.h>
 #include <asm/openprom.h>
 #include <asm/machines.h>
index 771ca53af06d2643a1515bbccfd20f72faf7ee29..6b8f591c5054ca49ea1886d54e855d2bfafb171d 100644 (file)
@@ -3316,8 +3316,6 @@ source "drivers/cpuidle/Kconfig"
 
 endmenu
 
-source "drivers/firmware/Kconfig"
-
 source "arch/mips/kvm/Kconfig"
 
 source "arch/mips/vdso/Kconfig"
index 35fb8ee6dd33ec27262680bc1a0b245ffd285a78..fd43d876892ec4c63227f4c788d80daa20b3c542 100644 (file)
@@ -10,8 +10,6 @@
 #include <linux/io.h>
 #include <linux/types.h>
 
-#include <asm/mips-boards/launch.h>
-
 extern unsigned long __cps_access_bad_size(void)
        __compiletime_error("Bad size for CPS accessor");
 
@@ -167,30 +165,11 @@ static inline uint64_t mips_cps_cluster_config(unsigned int cluster)
  */
 static inline unsigned int mips_cps_numcores(unsigned int cluster)
 {
-       unsigned int ncores;
-
        if (!mips_cm_present())
                return 0;
 
        /* Add one before masking to handle 0xff indicating no cores */
-       ncores = (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;
-
-       if (IS_ENABLED(CONFIG_SOC_MT7621)) {
-               struct cpulaunch *launch;
-
-               /*
-                * Ralink MT7621S SoC is single core, but the GCR_CONFIG method
-                * always reports 2 cores. Check the second core's LAUNCH_FREADY
-                * flag to detect if the second core is missing. This method
-                * only works before the core has been started.
-                */
-               launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH);
-               launch += 2; /* MT7621 has 2 VPEs per core */
-               if (!(launch->flags & LAUNCH_FREADY))
-                       ncores = 1;
-       }
-
-       return ncores;
+       return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;
 }
 
 /**
index f1e985109da013276857529bece31ed5bdbb8559..c9b2a75563e128ed91225f6ba2cea89808bd8e8b 100644 (file)
@@ -906,10 +906,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                do_signal(regs);
 
-       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+       if (thread_info_flags & _TIF_NOTIFY_RESUME)
                tracehook_notify_resume(regs);
-               rseq_handle_notify_resume(NULL, regs);
-       }
 
        user_enter();
 }
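The explicit rseq_handle_notify_resume() call is dropped here because the generic tracehook helper already performs the rseq fixup, so MIPS was running it twice per resume. A rough, simplified sketch of the assumed shape of that helper in this kernel (not part of this diff):

static inline void tracehook_notify_resume(struct pt_regs *regs)
{
        clear_thread_flag(TIF_NOTIFY_RESUME);
        /* task_work and memcg/blkcg housekeeping elided in this sketch */
        rseq_handle_notify_resume(NULL, regs);  /* rseq fixup happens here */
}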
index 0af88622c619253d1a5284739c709d24af32f9a7..cb6d22439f71b0e3460b5e1b40bad9459f33bc71 100644 (file)
@@ -662,6 +662,11 @@ static void build_epilogue(struct jit_ctx *ctx)
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
         func##_positive)
 
+static bool is_bad_offset(int b_off)
+{
+       return b_off > 0x1ffff || b_off < -0x20000;
+}
+
 static int build_body(struct jit_ctx *ctx)
 {
        const struct bpf_prog *prog = ctx->skf;
@@ -728,7 +733,10 @@ load_common:
                        /* Load return register on DS for failures */
                        emit_reg_move(r_ret, r_zero, ctx);
                        /* Return with error */
-                       emit_b(b_imm(prog->len, ctx), ctx);
+                       b_off = b_imm(prog->len, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_b(b_off, ctx);
                        emit_nop(ctx);
                        break;
                case BPF_LD | BPF_W | BPF_IND:
@@ -775,8 +783,10 @@ load_ind:
                        emit_jalr(MIPS_R_RA, r_s0, ctx);
                        emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
                        /* Check the error value */
-                       emit_bcond(MIPS_COND_NE, r_ret, 0,
-                                  b_imm(prog->len, ctx), ctx);
+                       b_off = b_imm(prog->len, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
                        emit_reg_move(r_ret, r_zero, ctx);
                        /* We are good */
                        /* X <- P[1:K] & 0xf */
@@ -855,8 +865,10 @@ load_ind:
                        /* A /= X */
                        ctx->flags |= SEEN_X | SEEN_A;
                        /* Check if r_X is zero */
-                       emit_bcond(MIPS_COND_EQ, r_X, r_zero,
-                                  b_imm(prog->len, ctx), ctx);
+                       b_off = b_imm(prog->len, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
                        emit_load_imm(r_ret, 0, ctx); /* delay slot */
                        emit_div(r_A, r_X, ctx);
                        break;
@@ -864,8 +876,10 @@ load_ind:
                        /* A %= X */
                        ctx->flags |= SEEN_X | SEEN_A;
                        /* Check if r_X is zero */
-                       emit_bcond(MIPS_COND_EQ, r_X, r_zero,
-                                  b_imm(prog->len, ctx), ctx);
+                       b_off = b_imm(prog->len, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
                        emit_load_imm(r_ret, 0, ctx); /* delay slot */
                        emit_mod(r_A, r_X, ctx);
                        break;
@@ -926,7 +940,10 @@ load_ind:
                        break;
                case BPF_JMP | BPF_JA:
                        /* pc += K */
-                       emit_b(b_imm(i + k + 1, ctx), ctx);
+                       b_off = b_imm(i + k + 1, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_b(b_off, ctx);
                        emit_nop(ctx);
                        break;
                case BPF_JMP | BPF_JEQ | BPF_K:
@@ -1056,12 +1073,16 @@ jmp_cmp:
                        break;
                case BPF_RET | BPF_A:
                        ctx->flags |= SEEN_A;
-                       if (i != prog->len - 1)
+                       if (i != prog->len - 1) {
                                /*
                                 * If this is not the last instruction
                                 * then jump to the epilogue
                                 */
-                               emit_b(b_imm(prog->len, ctx), ctx);
+                               b_off = b_imm(prog->len, ctx);
+                               if (is_bad_offset(b_off))
+                                       return -E2BIG;
+                               emit_b(b_off, ctx);
+                       }
                        emit_reg_move(r_ret, r_A, ctx); /* delay slot */
                        break;
                case BPF_RET | BPF_K:
@@ -1075,7 +1096,10 @@ jmp_cmp:
                                 * If this is not the last instruction
                                 * then jump to the epilogue
                                 */
-                               emit_b(b_imm(prog->len, ctx), ctx);
+                               b_off = b_imm(prog->len, ctx);
+                               if (is_bad_offset(b_off))
+                                       return -E2BIG;
+                               emit_b(b_off, ctx);
                                emit_nop(ctx);
                        }
                        break;
@@ -1133,8 +1157,10 @@ jmp_cmp:
                        /* Load *dev pointer */
                        emit_load_ptr(r_s0, r_skb, off, ctx);
                        /* error (0) in the delay slot */
-                       emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
-                                  b_imm(prog->len, ctx), ctx);
+                       b_off = b_imm(prog->len, ctx);
+                       if (is_bad_offset(b_off))
+                               return -E2BIG;
+                       emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
                        emit_reg_move(r_ret, r_zero, ctx);
                        if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
                                BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
@@ -1244,7 +1270,10 @@ void bpf_jit_compile(struct bpf_prog *fp)
 
        /* Generate the actual JIT code */
        build_prologue(&ctx);
-       build_body(&ctx);
+       if (build_body(&ctx)) {
+               module_memfree(ctx.target);
+               goto out;
+       }
        build_epilogue(&ctx);
 
        /* Update the icache */
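The is_bad_offset() checks exist because MIPS branch instructions encode a signed 16-bit displacement counted in instructions, so a branch can only reach roughly +/-128 KiB; offsets outside that window cannot be encoded, and build_body() now returns -E2BIG so bpf_jit_compile() frees the buffer and leaves the program to the interpreter instead of emitting a mangled branch. A standalone illustration of the bound (assumed rationale, mirroring the helper above):

#include <stdbool.h>

/* A 16-bit signed instruction count, scaled by 4 bytes per instruction:
 * -32768 * 4 = -0x20000 and 32767 * 4 = 0x1fffc.  The kernel helper uses
 * the slightly looser byte bound 0x1ffff, which is equivalent for the
 * word-aligned offsets the JIT produces. */
static bool branch_offset_encodable(long byte_off)
{
        return byte_off >= -32768L * 4 && byte_off <= 32767L * 4;
}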
index a8bc06e96ef58d6e84ac37b1cc28f2fdf9ef8a6d..ca1beb87f987c6e4161892a67fa8bd13a00f6252 100644 (file)
@@ -3,9 +3,10 @@
 config EARLY_PRINTK
        bool "Activate early kernel debugging"
        default y
+       depends on TTY
        select SERIAL_CORE_CONSOLE
        help
-         Enable early printk on console
+         Enable early printk on console.
          This is useful for kernel debugging when your machine crashes very
          early before the console code is initialized.
          You should normally say N here, unless you want to debug such a crash.
index cf8d687a2644a419663f7528c03ea06238a89a81..40bc8fb75e0b50fbb6fcc026169537e908d12390 100644 (file)
@@ -149,8 +149,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low,
 
 void __init setup_arch(char **cmdline_p)
 {
-       int dram_start;
-
        console_verbose();
 
        memory_start = memblock_start_of_DRAM();
index 4742b6f169b7202f5669c996a9841d0cef0f3cab..27a8b49af11fc9612e10623bf250b415bd6f5552 100644 (file)
@@ -384,6 +384,4 @@ config KEXEC_FILE
 
 endmenu
 
-source "drivers/firmware/Kconfig"
-
 source "drivers/parisc/Kconfig"
index d00313d1274e899e18eb3ba7c9ea0185fbb73512..0561568f7b4898d4959b97576f25ca08cde37ebe 100644 (file)
@@ -184,7 +184,7 @@ extern int npmem_ranges;
 #include <asm-generic/getorder.h>
 #include <asm/pdc.h>
 
-#define PAGE0   ((struct zeropage *)__PAGE_OFFSET)
+#define PAGE0   ((struct zeropage *)absolute_pointer(__PAGE_OFFSET))
 
 /* DEFINITION OF THE ZERO-PAGE (PAG0) */
 /* based on work by Jason Eckhardt (jason@equator.com) */
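absolute_pointer() is a <linux/compiler.h> helper added in this cycle; its assumed definition (not shown in this hunk) is a thin RELOC_HIDE() wrapper that hides the constant address from GCC, so accesses through PAGE0 at a fixed address stop triggering bogus array-bounds/stringop warnings:

/* Assumed definition, per <linux/compiler.h> of this era: */
#define absolute_pointer(val)   RELOC_HIDE((void *)(val), 0)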
index f03adb1999e77ed05f1282be1b24aa5c601403ce..367f6397bda7a4c05f0d577047e4a0d15df01859 100644 (file)
@@ -513,12 +513,15 @@ void ioport_unmap(void __iomem *addr)
        }
 }
 
+#ifdef CONFIG_PCI
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
        if (!INDIRECT_ADDR(addr)) {
                iounmap(addr);
        }
 }
+EXPORT_SYMBOL(pci_iounmap);
+#endif
 
 EXPORT_SYMBOL(ioread8);
 EXPORT_SYMBOL(ioread16);
@@ -544,4 +547,3 @@ EXPORT_SYMBOL(iowrite16_rep);
 EXPORT_SYMBOL(iowrite32_rep);
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
-EXPORT_SYMBOL(pci_iounmap);
index 6900d0ac242122de7b7f1e7a8c9ea164508f5bda..089ee3ea55c8ae64144e79f940b87a5abdd91abc 100644 (file)
@@ -35,7 +35,6 @@ endif
 BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                 -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
                 -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
-                -include $(srctree)/include/linux/compiler_attributes.h \
                 $(LINUXINCLUDE)
 
 ifdef CONFIG_PPC64_BOOT_WRAPPER
@@ -70,6 +69,7 @@ ifeq ($(call cc-option-yn, -fstack-protector),y)
 BOOTCFLAGS     += -fno-stack-protector
 endif
 
+BOOTCFLAGS     += -include $(srctree)/include/linux/compiler_attributes.h
 BOOTCFLAGS     += -I$(objtree)/$(obj) -I$(srctree)/$(obj)
 
 DTC_FLAGS      ?= -p 1024
index 5ba6fbfca2742b9d8f8f62a96e6a2874f201eaed..f82f85c65964cc477ad3b3b8a965e52ef1d490d0 100644 (file)
 
                        fm1mac3: ethernet@e4000 {
                                phy-handle = <&sgmii_aqr_phy3>;
-                               phy-connection-type = "sgmii-2500";
+                               phy-connection-type = "2500base-x";
                                sleep = <&rcpm 0x20000000>;
                        };
 
index 0ce2368bd20f975dde774f05dc19123fc53d77b1..dbfa5e1e3198af0faf0cf90e51cb144f27e678c4 100644 (file)
 #  define ASM_CONST(x)         __ASM_CONST(x)
 #endif
 
-/*
- * Inline assembly memory constraint
- *
- * GCC 4.9 doesn't properly handle pre update memory constraint "m<>"
- *
- */
-#if defined(GCC_VERSION) && GCC_VERSION < 50000
-#define UPD_CONSTR ""
-#else
 #define UPD_CONSTR "<>"
-#endif
 
 #endif /* _ASM_POWERPC_ASM_CONST_H */
index d4b145b279f6c51bc2d644720af92db03e488350..9f38040f0641dc1b87e036f77b961961b21eab80 100644 (file)
@@ -136,6 +136,14 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
        if (kuap_is_disabled())
                return;
 
+       if (unlikely(kuap != KUAP_NONE)) {
+               current->thread.kuap = KUAP_NONE;
+               kuap_lock(kuap, false);
+       }
+
+       if (likely(regs->kuap == KUAP_NONE))
+               return;
+
        current->thread.kuap = regs->kuap;
 
        kuap_unlock(regs->kuap, false);
index a95f63788c6b1423e957f7e89871987f541b62b1..4ba834599c4d4c68bea72c13dd2a07f3a0db3c91 100644 (file)
@@ -23,6 +23,7 @@
 #define BRANCH_ABSOLUTE        0x2
 
 bool is_offset_in_branch_range(long offset);
+bool is_offset_in_cond_branch_range(long offset);
 int create_branch(struct ppc_inst *instr, const u32 *addr,
                  unsigned long target, int flags);
 int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
index 6b800d3e2681f6082a163b24d63a2be06f2f275d..a1d238255f077df9391dd606e22c71d44cdb0bbf 100644 (file)
@@ -265,13 +265,16 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
        local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
-       if (is_implicit_soft_masked(regs)) {
-               // Adjust regs->softe soft implicit soft-mask, so
-               // arch_irq_disabled_regs(regs) behaves as expected.
+       if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
+               /*
+                * Adjust regs->softe to be soft-masked if it had not been
+                * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
+                * not yet set to disabled), or if it was in an implicit soft
+                * masked state. This makes arch_irq_disabled_regs(regs)
+                * behave as expected.
+                */
                regs->softe = IRQS_ALL_DISABLED;
        }
-       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
-               BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
 
        /* Don't do any per-CPU operations until interrupt state is fixed */
 
@@ -525,10 +528,9 @@ static __always_inline long ____##func(struct pt_regs *regs)
 /* kernel/traps.c */
 DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
 #ifdef CONFIG_PPC_BOOK3S_64
-DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
-#else
-DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
 #endif
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
 DECLARE_INTERRUPT_HANDLER(SMIException);
 DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
 DECLARE_INTERRUPT_HANDLER(unknown_exception);
index 792eefaf230b80b52aaac7d54ae5ca71e2ced11c..27574f218b371f3f42ce98bb69821a1eaa723fe7 100644 (file)
@@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature)
        return !!(powerpc_security_features & feature);
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+enum stf_barrier_type stf_barrier_type_get(void);
+#else
+static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
+#endif
 
 // Features indicating support for Spectre/Meltdown mitigations
 
index 111249fd619de8692323156c32063b2c51b2b7bc..038ce8d9061d166b8c85663adc2fa4dab08c23f1 100644 (file)
@@ -184,6 +184,15 @@ u64 dma_iommu_get_required_mask(struct device *dev)
        struct iommu_table *tbl = get_iommu_table_base(dev);
        u64 mask;
 
+       if (dev_is_pci(dev)) {
+               u64 bypass_mask = dma_direct_get_required_mask(dev);
+
+               if (dma_iommu_dma_supported(dev, bypass_mask)) {
+                       dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
+                       return bypass_mask;
+               }
+       }
+
        if (!tbl)
                return 0;
 
index 37859e62a8dcba2af7ecebc5f158127121844063..eaf1f72131a18f8f6a376c332619ab0e00ecda7a 100644 (file)
@@ -1243,7 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common)
        li      r10,MSR_RI
        mtmsrd  r10,1
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      machine_check_exception
+       bl      machine_check_exception_async
        b       interrupt_return_srr
 
 
@@ -1303,7 +1303,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
        subi    r12,r12,1
        sth     r12,PACA_IN_MCE(r13)
 
-       /* Invoke machine_check_exception to print MCE event and panic. */
+       /*
+        * Invoke machine_check_exception to print MCE event and panic.
+        * This is the NMI version of the handler because we are called from
+        * the early handler which is a true NMI.
+        */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      machine_check_exception
 
@@ -1665,27 +1669,30 @@ EXC_COMMON_BEGIN(program_check_common)
         */
 
        andi.   r10,r12,MSR_PR
-       bne     2f                      /* If userspace, go normal path */
+       bne     .Lnormal_stack          /* If userspace, go normal path */
 
        andis.  r10,r12,(SRR1_PROGTM)@h
-       bne     1f                      /* If TM, emergency             */
+       bne     .Lemergency_stack       /* If TM, emergency             */
 
        cmpdi   r1,-INT_FRAME_SIZE      /* check if r1 is in userspace  */
-       blt     2f                      /* normal path if not           */
+       blt     .Lnormal_stack          /* normal path if not           */
 
        /* Use the emergency stack                                      */
-1:     andi.   r10,r12,MSR_PR          /* Set CR0 correctly for label  */
+.Lemergency_stack:
+       andi.   r10,r12,MSR_PR          /* Set CR0 correctly for label  */
                                        /* 3 in EXCEPTION_PROLOG_COMMON */
        mr      r10,r1                  /* Save r1                      */
        ld      r1,PACAEMERGSP(r13)     /* Use emergency stack          */
        subi    r1,r1,INT_FRAME_SIZE    /* alloc stack frame            */
        __ISTACK(program_check)=0
        __GEN_COMMON_BODY program_check
-       b 3f
-2:
+       b .Ldo_program_check
+
+.Lnormal_stack:
        __ISTACK(program_check)=1
        __GEN_COMMON_BODY program_check
-3:
+
+.Ldo_program_check:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      program_check_exception
        REST_NVGPRS(r1) /* instruction emulation may change GPRs */
index a73f3f70a6576ff9ab4a1450a123e8d011837a93..de10a26972581d94129d962e73614f744c598706 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/switch_to.h>
 #include <asm/syscall.h>
 #include <asm/time.h>
+#include <asm/tm.h>
 #include <asm/unistd.h>
 
 #if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
@@ -136,6 +137,48 @@ notrace long system_call_exception(long r3, long r4, long r5,
         */
        irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
 
+       /*
+        * If system call is called with TM active, set _TIF_RESTOREALL to
+        * prevent RFSCV being used to return to userspace, because POWER9
+        * TM implementation has problems with this instruction returning to
+        * transactional state. Final register values are not relevant because
+        * the transaction will be aborted upon return anyway. Or in the case
+        * of unsupported_scv SIGILL fault, the return state does not much
+        * matter because it's an edge case.
+        */
+       if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+                       unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
+               current_thread_info()->flags |= _TIF_RESTOREALL;
+
+       /*
+        * If the system call was made with a transaction active, doom it and
+        * return without performing the system call. Unless it was an
+        * unsupported scv vector, in which case it's treated like an illegal
+        * instruction.
+        */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
+           !trap_is_unsupported_scv(regs)) {
+               /* Enable TM in the kernel, and disable EE (for scv) */
+               hard_irq_disable();
+               mtmsr(mfmsr() | MSR_TM);
+
+               /* tabort, this dooms the transaction, nothing else */
+               asm volatile(".long 0x7c00071d | ((%0) << 16)"
+                               :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
+
+               /*
+                * Userspace will never see the return value. Execution will
+                * resume after the tbegin. of the aborted transaction with the
+                * checkpointed register state. A context switch could occur
+                * or signal delivered to the process before resuming the
+                * doomed transaction context, but that should all be handled
+                * as expected.
+                */
+               return -ENOSYS;
+       }
+#endif // CONFIG_PPC_TRANSACTIONAL_MEM
+
        local_irq_enable();
 
        if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
index d4212d2ff0b54bc64b3167c8e2b34120d08eb70e..ec950b08a8dcc06571ba8f65076dd357bb16f118 100644 (file)
@@ -12,7 +12,6 @@
 #include <asm/mmu.h>
 #include <asm/ppc_asm.h>
 #include <asm/ptrace.h>
-#include <asm/tm.h>
 
        .section        ".toc","aw"
 SYS_CALL_TABLE:
@@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE:
        .globl system_call_vectored_\name
 system_call_vectored_\name:
 _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
-       extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
-       bne     tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
        SCV_INTERRUPT_TO_KERNEL
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
@@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real)
        .globl system_call_common
 system_call_common:
 _ASM_NOKPROBE_SYMBOL(system_call_common)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
-       extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
-       bne     tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
        std     r10,0(r1)
@@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
 RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
 #endif
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-tabort_syscall:
-_ASM_NOKPROBE_SYMBOL(tabort_syscall)
-       /* Firstly we need to enable TM in the kernel */
-       mfmsr   r10
-       li      r9, 1
-       rldimi  r10, r9, MSR_TM_LG, 63-MSR_TM_LG
-       mtmsrd  r10, 0
-
-       /* tabort, this dooms the transaction, nothing else */
-       li      r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
-       TABORT(R9)
-
-       /*
-        * Return directly to userspace. We have corrupted user register state,
-        * but userspace will never see that register state. Execution will
-        * resume after the tbegin of the aborted transaction with the
-        * checkpointed register state.
-        */
-       li      r9, MSR_RI
-       andc    r10, r10, r9
-       mtmsrd  r10, 1
-       mtspr   SPRN_SRR0, r11
-       mtspr   SPRN_SRR1, r12
-       RFI_TO_USER
-       b       .       /* prevent speculative execution */
-#endif
-
        /*
         * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
         * touched, no exit work created, then this can be used.
index 551b653228c47fff328a35a0cc24eba8973899df..c4f1d6b7d99235c350d9f9cdbb706f7834ce9f23 100644 (file)
@@ -229,6 +229,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
                return;
        }
 
+       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+               WARN_ON_ONCE(in_nmi() || in_hardirq());
+
        /*
         * After the stb, interrupts are unmasked and there are no interrupts
         * pending replay. The restart sequence makes this atomic with
@@ -321,6 +324,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
        if (mask)
                return;
 
+       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+               WARN_ON_ONCE(in_nmi() || in_hardirq());
+
        /*
         * From this point onward, we can take interrupts, preempt,
         * etc... unless we got hard-disabled. We check if an event
index 47a683cd00d246e6b8a5fe2f648802acd21cd8e4..fd829f7f25a47ee207cb7a7226b1ba384d27c36c 100644 (file)
@@ -249,6 +249,7 @@ void machine_check_queue_event(void)
 {
        int index;
        struct machine_check_event evt;
+       unsigned long msr;
 
        if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
                return;
@@ -262,8 +263,20 @@ void machine_check_queue_event(void)
        memcpy(&local_paca->mce_info->mce_event_queue[index],
               &evt, sizeof(evt));
 
-       /* Queue irq work to process this event later. */
-       irq_work_queue(&mce_event_process_work);
+       /*
+        * Queue irq work to process this event later. Before
+        * queuing the work, enable translation for non-radix LPARs,
+        * as irq_work_queue may try to access memory outside the RMO
+        * region.
+        */
+       if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
+               msr = mfmsr();
+               mtmsr(msr | MSR_IR | MSR_DR);
+               irq_work_queue(&mce_event_process_work);
+               mtmsr(msr);
+       } else {
+               irq_work_queue(&mce_event_process_work);
+       }
 }
 
 void mce_common_process_ue(struct pt_regs *regs,
index 1a998490fe60f04555c1a95422eb47a6d7c0e39a..15fb5ea1b9eafa7eae3a2e2eb0ba8cc61909b3a5 100644 (file)
@@ -263,6 +263,11 @@ static int __init handle_no_stf_barrier(char *p)
 
 early_param("no_stf_barrier", handle_no_stf_barrier);
 
+enum stf_barrier_type stf_barrier_type_get(void)
+{
+       return stf_enabled_flush_types;
+}
+
 /* This is the generic flag used by other architectures */
 static int __init handle_ssbd(char *p)
 {
index e600764a926c67b89254f6314ba91e3af69a1306..b93b87df499d7e5dfd4cf33f48f6c58d569451db 100644 (file)
@@ -293,10 +293,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
                do_signal(current);
        }
 
-       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+       if (thread_info_flags & _TIF_NOTIFY_RESUME)
                tracehook_notify_resume(regs);
-               rseq_handle_notify_resume(NULL, regs);
-       }
 }
 
 static unsigned long get_tm_stackpointer(struct task_struct *tsk)
index aac8c0412ff9f0829b79634a69c7f8a63cc5e2ef..11741703d26e0719b7b2d7488ebb54203fa2026d 100644 (file)
@@ -340,10 +340,16 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
                return false;
        }
 
-       show_signal_msg(signr, regs, code, addr);
+       /*
+        * Must not enable interrupts even for user-mode exception, because
+        * this can be called from machine check, which may be a NMI or IRQ
+        * which don't like interrupts being enabled. Could check for
+        * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
+        * reason why _exception() should enable irqs for an exception handler,
+        * the handlers themselves do that directly.
+        */
 
-       if (arch_irqs_disabled())
-               interrupt_cond_local_irq_enable(regs);
+       show_signal_msg(signr, regs, code, addr);
 
        current->thread.trap_nr = code;
 
@@ -790,24 +796,22 @@ void die_mce(const char *str, struct pt_regs *regs, long err)
         * do_exit() checks for in_interrupt() and panics in that case, so
         * exit the irq/nmi before calling die.
         */
-       if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
-               irq_exit();
-       else
+       if (in_nmi())
                nmi_exit();
+       else
+               irq_exit();
        die(str, regs, err);
 }
 
 /*
- * BOOK3S_64 does not call this handler as a non-maskable interrupt
+ * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
  * (it uses its own early real-mode handler to handle the MCE proper
  * and then raises irq_work to call this handler when interrupts are
- * enabled).
+ * enabled). The only time when this is not true is if the early handler
+ * is unrecoverable, then it does call this directly to try to get a
+ * message out.
  */
-#ifdef CONFIG_PPC_BOOK3S_64
-DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
-#else
-DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
-#endif
+static void __machine_check_exception(struct pt_regs *regs)
 {
        int recover = 0;
 
@@ -841,12 +845,19 @@ bail:
        /* Must die if the interrupt is not recoverable */
        if (regs_is_unrecoverable(regs))
                die_mce("Unrecoverable Machine check", regs, SIGBUS);
+}
 
 #ifdef CONFIG_PPC_BOOK3S_64
-       return;
-#else
-       return 0;
+DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
+{
+       __machine_check_exception(regs);
+}
 #endif
+DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
+{
+       __machine_check_exception(regs);
+
+       return 0;
 }
 
 DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
index 75079397c2a563c97072faa75b7715d7b719a6a5..90484425a1e6b283ddaa4a58383c65f37ffaf595 100644 (file)
@@ -2536,7 +2536,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
        /* The following code handles the fake_suspend = 1 case */
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)
-       stdu    r1, -PPC_MIN_STKFRM(r1)
+       stdu    r1, -TM_FRAME_SIZE(r1)
 
        /* Turn on TM. */
        mfmsr   r8
@@ -2551,10 +2551,42 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
        nop
 
+       /*
+        * It's possible that treclaim. may modify registers, if we have lost
+        * track of fake-suspend state in the guest due to it using rfscv.
+        * Save and restore registers in case this occurs.
+        */
+       mfspr   r3, SPRN_DSCR
+       mfspr   r4, SPRN_XER
+       mfspr   r5, SPRN_AMR
+       /* SPRN_TAR would need to be saved here if the kernel ever used it */
+       mfcr    r12
+       SAVE_NVGPRS(r1)
+       SAVE_GPR(2, r1)
+       SAVE_GPR(3, r1)
+       SAVE_GPR(4, r1)
+       SAVE_GPR(5, r1)
+       stw     r12, 8(r1)
+       std     r1, HSTATE_HOST_R1(r13)
+
        /* We have to treclaim here because that's the only way to do S->N */
        li      r3, TM_CAUSE_KVM_RESCHED
        TRECLAIM(R3)
 
+       GET_PACA(r13)
+       ld      r1, HSTATE_HOST_R1(r13)
+       REST_GPR(2, r1)
+       REST_GPR(3, r1)
+       REST_GPR(4, r1)
+       REST_GPR(5, r1)
+       lwz     r12, 8(r1)
+       REST_NVGPRS(r1)
+       mtspr   SPRN_DSCR, r3
+       mtspr   SPRN_XER, r4
+       mtspr   SPRN_AMR, r5
+       mtcr    r12
+       HMT_MEDIUM
+
        /*
         * We were in fake suspend, so we are not going to save the
         * register state as the guest checkpointed state (since
@@ -2582,7 +2614,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
        std     r5, VCPU_TFHAR(r9)
        std     r6, VCPU_TFIAR(r9)
 
-       addi    r1, r1, PPC_MIN_STKFRM
+       addi    r1, r1, TM_FRAME_SIZE
        ld      r0, PPC_LR_STKOFF(r1)
        mtlr    r0
        blr
index f9a3019e37b43cfffb66e23f882d6314dfd2ae39..c5ed9882383521ec65c30a96cbc09037e9624ae3 100644 (file)
@@ -228,6 +228,11 @@ bool is_offset_in_branch_range(long offset)
        return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
 }
 
+bool is_offset_in_cond_branch_range(long offset)
+{
+       return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
+}
+
 /*
  * Helper to check if a given instruction is a conditional branch
  * Derived from the conditional checks in analyse_instr()
@@ -280,7 +285,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
                offset = offset - (unsigned long)addr;
 
        /* Check we can represent the target in the instruction format */
-       if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
+       if (!is_offset_in_cond_branch_range(offset))
                return 1;
 
        /* Mask out the flags and target, so they don't step on each other. */
index 99fad093f43ec105775d1983edfa85a37e0d1f5c..7e9b978b768ed967c0ed1ae1f32e6b56cb6e76d6 100644 (file)
 #define EMIT(instr)            PLANT_INSTR(image, ctx->idx, instr)
 
 /* Long jump; (unconditional 'branch') */
-#define PPC_JMP(dest)          EMIT(PPC_INST_BRANCH |                        \
-                                    (((dest) - (ctx->idx * 4)) & 0x03fffffc))
+#define PPC_JMP(dest)                                                        \
+       do {                                                                  \
+               long offset = (long)(dest) - (ctx->idx * 4);                  \
+               if (!is_offset_in_branch_range(offset)) {                     \
+                       pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);                       \
+                       return -ERANGE;                                       \
+               }                                                             \
+               EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc));                \
+       } while (0)
+
 /* blr; (unconditional 'branch' with link) to absolute address */
 #define PPC_BL_ABS(dest)       EMIT(PPC_INST_BL |                            \
                                     (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
 /* "cond" here covers BO:BI fields. */
-#define PPC_BCC_SHORT(cond, dest)      EMIT(PPC_INST_BRANCH_COND |           \
-                                            (((cond) & 0x3ff) << 16) |       \
-                                            (((dest) - (ctx->idx * 4)) &     \
-                                             0xfffc))
+#define PPC_BCC_SHORT(cond, dest)                                            \
+       do {                                                                  \
+               long offset = (long)(dest) - (ctx->idx * 4);                  \
+               if (!is_offset_in_cond_branch_range(offset)) {                \
+                       pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);           \
+                       return -ERANGE;                                       \
+               }                                                             \
+               EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc));                                      \
+       } while (0)
+
 /* Sign-extended 32-bit immediate load */
 #define PPC_LI32(d, i)         do {                                          \
                if ((int)(uintptr_t)(i) >= -32768 &&                          \
 #define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
 #endif
 
-static inline bool is_nearbranch(int offset)
-{
-       return (offset < 32768) && (offset >= -32768);
-}
-
 /*
  * The fly in the ointment of code size changing from pass to pass is
  * avoided by padding the short branch case with a NOP.         If code size differs
@@ -91,7 +100,7 @@ static inline bool is_nearbranch(int offset)
  * state.
  */
 #define PPC_BCC(cond, dest)    do {                                          \
-               if (is_nearbranch((dest) - (ctx->idx * 4))) {                 \
+               if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) {    \
                        PPC_BCC_SHORT(cond, dest);                            \
                        EMIT(PPC_RAW_NOP());                                  \
                } else {                                                      \
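[Editor's note — illustrative, not part of the commit] The two range helpers mirror the PowerPC branch encodings: conditional branches carry a 16-bit signed, word-aligned displacement (about +/-32 KB), while unconditional branches get 26 bits (about +/-32 MB). A few boundary checks, assuming the helpers added above:

        /* Illustrative sanity checks only; not in the patch. */
        is_offset_in_cond_branch_range(0x7ffc);   /* true:  in range and word aligned          */
        is_offset_in_cond_branch_range(0x8000);   /* false: exceeds the 16-bit signed range    */
        is_offset_in_cond_branch_range(0x7ffe);   /* false: not word aligned                   */
        is_offset_in_branch_range(0x1fffffc);     /* true:  fits the 26-bit unconditional form */

This is why PPC_BCC keeps the NOP-padded short form when the displacement fits and falls back to the longer sequence when it does not.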
index 7b713edfa7e2617a9681a135233f078377b90e43..b63b35e45e558cbbff353893121171c69b1c925a 100644 (file)
  * with our redzone usage.
  *
  *             [       prev sp         ] <-------------
- *             [   nv gpr save area    ] 6*8           |
+ *             [   nv gpr save area    ] 5*8           |
  *             [    tail_call_cnt      ] 8             |
- *             [    local_tmp_var      ]             |
+ *             [    local_tmp_var      ] 16            |
  * fp (r31) -->        [   ebpf stack space    ] upto 512      |
  *             [     frame header      ] 32/112        |
  * sp (r1) --->        [    stack pointer      ] --------------
  */
 
 /* for gpr non volatile registers BPG_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE     (6*8)
+#define BPF_PPC_STACK_SAVE     (5*8)
 /* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS   16
+#define BPF_PPC_STACK_LOCALS   24
 /* stack frame excluding BPF stack, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME     (STACK_FRAME_MIN_SIZE + \
                                 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
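[Editor's note — illustrative] With the new sizes, the fixed area above the BPF stack is 5*8 (non-volatile GPR save) + 8 (tail_call_cnt) + 16 (local_tmp_var) = 64 bytes, so BPF_PPC_STACKFRAME works out to STACK_FRAME_MIN_SIZE + 64. Since 64 is a multiple of 16, the quadword alignment noted in the comment is preserved.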
index 53aefee3fe70be6526699687ad7136e9e69b0c38..fcbf7a917c566e5f73b24da3770e30d871f4182c 100644 (file)
@@ -210,7 +210,11 @@ skip_init_ctx:
                /* Now build the prologue, body code & epilogue for real. */
                cgctx.idx = 0;
                bpf_jit_build_prologue(code_base, &cgctx);
-               bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
+               if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
+                       bpf_jit_binary_free(bpf_hdr);
+                       fp = org_fp;
+                       goto out_addrs;
+               }
                bpf_jit_build_epilogue(code_base, &cgctx);
 
                if (bpf_jit_enable > 1)
index beb12cbc8c29940993725ad04e3d575a91f135cd..0da31d41d4131068fa5543cbe0dbdd30051f13fb 100644 (file)
@@ -200,7 +200,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
        }
 }
 
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 {
        /*
         * By now, the eBPF program has already setup parameters in r3-r6
@@ -261,7 +261,9 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
        bpf_jit_emit_common_epilogue(image, ctx);
 
        EMIT(PPC_RAW_BCTR());
+
        /* out: */
+       return 0;
 }
 
 /* Assemble the body code between the prologue & epilogue */
@@ -355,7 +357,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                                PPC_LI32(_R0, imm);
                                EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
                        }
-                       if (imm >= 0)
+                       if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
                                EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
                        else
                                EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));
@@ -623,7 +625,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                        EMIT(PPC_RAW_LI(dst_reg_h, 0));
                        break;
                case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
-                       EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg, src_reg));
+                       EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
                        break;
                case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
                        bpf_set_seen_register(ctx, tmp_reg);
@@ -1073,7 +1075,7 @@ cond_branch:
                                break;
                        case BPF_JMP32 | BPF_JSET | BPF_K:
                                /* andi does not sign-extend the immediate */
-                               if (imm >= -32768 && imm < 32768) {
+                               if (imm >= 0 && imm < 32768) {
                                        /* PPC_ANDI is _only/always_ dot-form */
                                        EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
                                } else {
@@ -1090,7 +1092,9 @@ cond_branch:
                 */
                case BPF_JMP | BPF_TAIL_CALL:
                        ctx->seen |= SEEN_TAILCALL;
-                       bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+                       ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+                       if (ret < 0)
+                               return ret;
                        break;
 
                default:
@@ -1103,7 +1107,7 @@ cond_branch:
                        return -EOPNOTSUPP;
                }
                if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
-                   !insn_is_zext(&insn[i + 1]))
+                   !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
                        EMIT(PPC_RAW_LI(dst_reg_h, 0));
        }
 
index b87a63dba9c8fb5129898c078b17d1c9eb3406c1..8b5157ccfebae55e9a6e0e2e42a05497d661be38 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/if_vlan.h>
 #include <asm/kprobes.h>
 #include <linux/bpf.h>
+#include <asm/security_features.h>
 
 #include "bpf_jit64.h"
 
@@ -35,9 +36,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  *             [       prev sp         ] <-------------
  *             [         ...           ]               |
  * sp (r1) --->        [    stack pointer      ] --------------
- *             [   nv gpr save area    ] 6*8
+ *             [   nv gpr save area    ] 5*8
  *             [    tail_call_cnt      ] 8
- *             [    local_tmp_var      ] 8
+ *             [    local_tmp_var      ] 16
  *             [   unused red zone     ] 208 bytes protected
  */
 static int bpf_jit_stack_local(struct codegen_context *ctx)
@@ -45,12 +46,12 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
        if (bpf_has_stack_frame(ctx))
                return STACK_FRAME_MIN_SIZE + ctx->stack_size;
        else
-               return -(BPF_PPC_STACK_SAVE + 16);
+               return -(BPF_PPC_STACK_SAVE + 24);
 }
 
 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
 {
-       return bpf_jit_stack_local(ctx) + 8;
+       return bpf_jit_stack_local(ctx) + 16;
 }
 
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
@@ -206,7 +207,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
        EMIT(PPC_RAW_BCTRL());
 }
 
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 {
        /*
         * By now, the eBPF program has already setup parameters in r3, r4 and r5
@@ -267,13 +268,38 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
        bpf_jit_emit_common_epilogue(image, ctx);
 
        EMIT(PPC_RAW_BCTR());
+
        /* out: */
+       return 0;
 }
 
+/*
+ * We always spill into the redzone, even if the bpf program has its own stack frame.
+ * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
+ */
+void bpf_stf_barrier(void);
+
+asm (
+"              .global bpf_stf_barrier         ;"
+"      bpf_stf_barrier:                        ;"
+"              std     21,-64(1)               ;"
+"              std     22,-56(1)               ;"
+"              sync                            ;"
+"              ld      21,-64(1)               ;"
+"              ld      22,-56(1)               ;"
+"              ori     31,31,0                 ;"
+"              .rept 14                        ;"
+"              b       1f                      ;"
+"      1:                                      ;"
+"              .endr                           ;"
+"              blr                             ;"
+);
+
 /* Assemble the body code between the prologue & epilogue */
 int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
                       u32 *addrs, bool extra_pass)
 {
+       enum stf_barrier_type stf_barrier = stf_barrier_type_get();
        const struct bpf_insn *insn = fp->insnsi;
        int flen = fp->len;
        int i, ret;
@@ -328,18 +354,25 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                        EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
-               case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
                case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
+                       if (!imm) {
+                               goto bpf_alu32_trunc;
+                       } else if (imm >= -32768 && imm < 32768) {
+                               EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
+                       } else {
+                               PPC_LI32(b2p[TMP_REG_1], imm);
+                               EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
+                       }
+                       goto bpf_alu32_trunc;
+               case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
                case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
-                       if (BPF_OP(code) == BPF_SUB)
-                               imm = -imm;
-                       if (imm) {
-                               if (imm >= -32768 && imm < 32768)
-                                       EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
-                               else {
-                                       PPC_LI32(b2p[TMP_REG_1], imm);
-                                       EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
-                               }
+                       if (!imm) {
+                               goto bpf_alu32_trunc;
+                       } else if (imm > -32768 && imm <= 32768) {
+                               EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
+                       } else {
+                               PPC_LI32(b2p[TMP_REG_1], imm);
+                               EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
                        }
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
@@ -389,8 +422,14 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
                        if (imm == 0)
                                return -EINVAL;
-                       else if (imm == 1)
-                               goto bpf_alu32_trunc;
+                       if (imm == 1) {
+                               if (BPF_OP(code) == BPF_DIV) {
+                                       goto bpf_alu32_trunc;
+                               } else {
+                                       EMIT(PPC_RAW_LI(dst_reg, 0));
+                                       break;
+                               }
+                       }
 
                        PPC_LI32(b2p[TMP_REG_1], imm);
                        switch (BPF_CLASS(code)) {
@@ -631,6 +670,29 @@ emit_clear:
                 * BPF_ST NOSPEC (speculation barrier)
                 */
                case BPF_ST | BPF_NOSPEC:
+                       if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
+                                       !security_ftr_enabled(SEC_FTR_STF_BARRIER))
+                               break;
+
+                       switch (stf_barrier) {
+                       case STF_BARRIER_EIEIO:
+                               EMIT(PPC_RAW_EIEIO() | 0x02000000);
+                               break;
+                       case STF_BARRIER_SYNC_ORI:
+                               EMIT(PPC_RAW_SYNC());
+                               EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
+                               EMIT(PPC_RAW_ORI(_R31, _R31, 0));
+                               break;
+                       case STF_BARRIER_FALLBACK:
+                               EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
+                               PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+                               EMIT(PPC_RAW_MTCTR(12));
+                               EMIT(PPC_RAW_BCTRL());
+                               EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
+                               break;
+                       case STF_BARRIER_NONE:
+                               break;
+                       }
                        break;
 
                /*
@@ -993,7 +1055,9 @@ cond_branch:
                 */
                case BPF_JMP | BPF_TAIL_CALL:
                        ctx->seen |= SEEN_TAILCALL;
-                       bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+                       ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+                       if (ret < 0)
+                               return ret;
                        break;
 
                default:
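[Editor's note — illustrative] The hard-coded -64(r1) and -56(r1) slots in bpf_stf_barrier line up with the frameless case of bpf_jit_stack_local(), which now returns -(BPF_PPC_STACK_SAVE + 24) = -(40 + 24) = -64: the fallback barrier spills r21/r22 into the same redzone scratch area the JIT reserves for its local temporaries, as the comment above the asm block notes.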
index bc15200852b7c3059dd44060a7ebc87e860f4c4f..09fafcf2d3a060d9ba1e28658447d659bc993819 100644 (file)
@@ -867,6 +867,10 @@ static int __init eeh_pseries_init(void)
        if (is_kdump_kernel() || reset_devices) {
                pr_info("Issue PHB reset ...\n");
                list_for_each_entry(phb, &hose_list, list_node) {
+                       // Skip if the slot is empty
+                       if (list_empty(&PCI_DN(phb->dn)->child_list))
+                               continue;
+
                        pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
                        config_addr = pseries_eeh_get_pe_config_addr(pdn);
 
index 1b305e4118622af70561daf8887b19ff2dcf101f..8627362f613ee51d931246731f7a85281ca2af3b 100644 (file)
@@ -507,12 +507,27 @@ static void pseries_msi_unmask(struct irq_data *d)
        irq_chip_unmask_parent(d);
 }
 
+static void pseries_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+       struct msi_desc *entry = irq_data_get_msi_desc(data);
+
+       /*
+        * Do not update the MSIx vector table. It's not strictly necessary
+        * because the table is initialized by the underlying hypervisor, PowerVM
+        * or QEMU/KVM. However, if the MSIx vector entry is cleared, any further
+        * activation will fail. This can happen in some drivers (e.g. IPR) which
+        * deactivate an IRQ used for testing MSI support.
+        */
+       entry->msg = *msg;
+}
+
 static struct irq_chip pseries_pci_msi_irq_chip = {
        .name           = "pSeries-PCI-MSI",
        .irq_shutdown   = pseries_msi_shutdown,
        .irq_mask       = pseries_msi_mask,
        .irq_unmask     = pseries_msi_unmask,
        .irq_eoi        = irq_chip_eoi_parent,
+       .irq_write_msi_msg      = pseries_msi_write_msg,
 };
 
 static struct msi_domain_info pseries_msi_domain_info = {
index 5c1a157a83b8989928dcd816d404852d1ff10a8e..244a727c6ba49c8eb59ba9de65e51694a27aa644 100644 (file)
@@ -348,9 +348,9 @@ static int xics_host_map(struct irq_domain *domain, unsigned int virq,
        if (xics_ics->check(xics_ics, hwirq))
                return -EINVAL;
 
-       /* No chip data for the XICS domain */
+       /* Let the ICS be the chip data for the XICS domain (for ICS native) */
        irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
-                           NULL, handle_fasteoi_irq, NULL, NULL);
+                           xics_ics, handle_fasteoi_irq, NULL, NULL);
 
        return 0;
 }
index c3f3fd583e04b63bae46331821ca1580fbebcb9c..6a6fa9e976d598b04dcb95febbac95d7119349d5 100644 (file)
@@ -236,7 +236,7 @@ config ARCH_RV32I
 config ARCH_RV64I
        bool "RV64I"
        select 64BIT
-       select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
+       select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
        select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
        select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
@@ -561,5 +561,3 @@ menu "Power management options"
 source "kernel/power/Kconfig"
 
 endmenu
-
-source "drivers/firmware/Kconfig"
index b933b1583c9fd24693cce86a3c7244b7b857c454..34fbb3ea21d5bb259b0c6c93aa2d36ebaabb67e4 100644 (file)
@@ -82,4 +82,5 @@ static inline int syscall_get_arch(struct task_struct *task)
 #endif
 }
 
+asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
 #endif /* _ASM_RISCV_SYSCALL_H */
index 893e47195e3055c341e3980ad023e83d6d28025e..208e31bc5d1c28e2b728400a035f92034013b6df 100644 (file)
 #ifdef CONFIG_MMU
 
 #include <linux/types.h>
-#include <generated/vdso-offsets.h>
+/*
+ * All systems with an MMU have a VDSO, but systems without an MMU don't
+ * support shared libraries and therefore don't have one.
+ */
+#ifdef CONFIG_MMU
+
+#define __VVAR_PAGES    1
 
-#ifndef CONFIG_GENERIC_TIME_VSYSCALL
-struct vdso_data {
-};
-#endif
+#ifndef __ASSEMBLY__
+#include <generated/vdso-offsets.h>
 
 #define VDSO_SYMBOL(base, name)                                                        \
        (void __user *)((unsigned long)(base) + __vdso_##name##_offset)
 
 #endif /* CONFIG_MMU */
 
-asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_VDSO_H */
index 4b989ae15d59f7b8e4765e3ad93b07a2923b2e6e..8062996c2dfd077eca892b3ce7bc0fa8a91f2281 100644 (file)
 #ifdef __LP64__
 #define __ARCH_WANT_NEW_STAT
 #define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_SYS_CLONE3
 #endif /* __LP64__ */
 
+#define __ARCH_WANT_SYS_CLONE3
+
 #include <asm-generic/unistd.h>
 
 /*
index a63c667c27b35cdcf028e44b3394ba14b6abb1e9..44b1420a22705a81eca174d748bfc2d2986302d7 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/linkage.h>
 #include <linux/syscalls.h>
 #include <asm-generic/syscalls.h>
-#include <asm/vdso.h>
 #include <asm/syscall.h>
 
 #undef __SYSCALL
index 25a3b8849599173846378f4aa50907a9b878beed..b70956d8040812423cf5c13ed03a6f3f629ae34b 100644 (file)
 #include <linux/binfmts.h>
 #include <linux/err.h>
 #include <asm/page.h>
+#include <asm/vdso.h>
+
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 #include <vdso/datapage.h>
 #else
-#include <asm/vdso.h>
+struct vdso_data {
+};
 #endif
 
 extern char vdso_start[], vdso_end[];
 
+enum vvar_pages {
+       VVAR_DATA_PAGE_OFFSET,
+       VVAR_NR_PAGES,
+};
+
+#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)
+
 static unsigned int vdso_pages __ro_after_init;
 static struct page **vdso_pagelist __ro_after_init;
 
@@ -38,7 +48,7 @@ static int __init vdso_init(void)
 
        vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
        vdso_pagelist =
-               kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+               kcalloc(vdso_pages + VVAR_NR_PAGES, sizeof(struct page *), GFP_KERNEL);
        if (unlikely(vdso_pagelist == NULL)) {
                pr_err("vdso: pagelist allocation failed\n");
                return -ENOMEM;
@@ -63,38 +73,41 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
        unsigned long vdso_base, vdso_len;
        int ret;
 
-       vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
+       BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
+
+       vdso_len = (vdso_pages + VVAR_NR_PAGES) << PAGE_SHIFT;
+
+       if (mmap_write_lock_killable(mm))
+               return -EINTR;
 
-       mmap_write_lock(mm);
        vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = vdso_base;
                goto end;
        }
 
-       /*
-        * Put vDSO base into mm struct. We need to do this before calling
-        * install_special_mapping or the perf counter mmap tracking code
-        * will fail to recognise it as a vDSO (since arch_vma_name fails).
-        */
-       mm->context.vdso = (void *)vdso_base;
+       mm->context.vdso = NULL;
+       ret = install_special_mapping(mm, vdso_base, VVAR_SIZE,
+               (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+       if (unlikely(ret))
+               goto end;
 
        ret =
-          install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+          install_special_mapping(mm, vdso_base + VVAR_SIZE,
+               vdso_pages << PAGE_SHIFT,
                (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
                vdso_pagelist);
 
-       if (unlikely(ret)) {
-               mm->context.vdso = NULL;
+       if (unlikely(ret))
                goto end;
-       }
 
-       vdso_base += (vdso_pages << PAGE_SHIFT);
-       ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
-               (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+       /*
+        * Put vDSO base into mm struct. We need to do this before calling
+        * install_special_mapping or the perf counter mmap tracking code
+        * will fail to recognise it as a vDSO (since arch_vma_name fails).
+        */
+       mm->context.vdso = (void *)vdso_base + VVAR_SIZE;
 
-       if (unlikely(ret))
-               mm->context.vdso = NULL;
 end:
        mmap_write_unlock(mm);
        return ret;
@@ -105,7 +118,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
        if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
                return "[vdso]";
        if (vma->vm_mm && (vma->vm_start ==
-                          (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+                          (long)vma->vm_mm->context.vdso - VVAR_SIZE))
                return "[vdso_data]";
        return NULL;
 }
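[Editor's note — illustrative sketch of the resulting layout] After this change the VVAR data page is mapped below the vDSO text within the reserved region:

        vdso_base               [vdso_data / VVAR]   VVAR_SIZE (one page)
        vdso_base + VVAR_SIZE   [vdso text]          vdso_pages << PAGE_SHIFT   <- mm->context.vdso

which is why arch_vma_name() now reports "[vdso_data]" for the VMA at context.vdso - VVAR_SIZE rather than at context.vdso + PAGE_SIZE.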
index e6f558bca71bb2128e6f1952b97e78a16cc09777..e9111f700af08062850cca8ba9e1684749aaa2fa 100644 (file)
@@ -3,12 +3,13 @@
  * Copyright (C) 2012 Regents of the University of California
  */
 #include <asm/page.h>
+#include <asm/vdso.h>
 
 OUTPUT_ARCH(riscv)
 
 SECTIONS
 {
-       PROVIDE(_vdso_data = . + PAGE_SIZE);
+       PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
        . = SIZEOF_HEADERS;
 
        .hash           : { *(.hash) }                  :text
index 094118663285d545c101ac518a3b517bcec2ca8b..89f81067e09edba0c9cacdafd41cc97ea2b0e19e 100644 (file)
@@ -16,6 +16,8 @@ static void ipi_remote_fence_i(void *info)
 
 void flush_icache_all(void)
 {
+       local_flush_icache_all();
+
        if (IS_ENABLED(CONFIG_RISCV_SBI))
                sbi_remote_fence_i(NULL);
        else
index 2bd90c51efd34981a1f5376f279d1bf9a189cb85..b86de61b8caa2f2909b4c569c3f574942c07652a 100644 (file)
@@ -685,16 +685,6 @@ config STACK_GUARD
          The minimum size for the stack guard should be 256 for 31 bit and
          512 for 64 bit.
 
-config WARN_DYNAMIC_STACK
-       def_bool n
-       prompt "Emit compiler warnings for function with dynamic stack usage"
-       help
-         This option enables the compiler option -mwarn-dynamicstack. If the
-         compiler supports this options generates warnings for functions
-         that dynamically allocate stack space using alloca.
-
-         Say N if you are unsure.
-
 endmenu
 
 menu "I/O subsystem"
index a3cf33ad009fc53e52464e68c30899e43ba4499b..450b351dfa8ef37c238443cbf5c4b84876aaeaba 100644 (file)
@@ -85,13 +85,6 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
 endif
 endif
 
-ifdef CONFIG_WARN_DYNAMIC_STACK
-  ifneq ($(call cc-option,-mwarn-dynamicstack),)
-    KBUILD_CFLAGS += -mwarn-dynamicstack
-    KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
-  endif
-endif
-
 ifdef CONFIG_EXPOLINE
   ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),)
     CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
index 37b6115ed80e8a0698defe66318ac0caf355d1d5..6aad18ee131d6634efd7188b9660d723f2ea36a6 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
 CONFIG_PREEMPT=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -503,6 +504,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -661,7 +663,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
@@ -720,6 +721,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
 CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
@@ -774,7 +777,6 @@ CONFIG_RANDOM32_SELFTEST=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_DMA_API_DEBUG=y
-CONFIG_STRING_SELFTEST=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
@@ -853,12 +855,12 @@ CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_MIN_HEAP=y
-CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_STRING_SELFTEST=y
 CONFIG_TEST_BITOPS=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_LIVEPATCH=m
index 56a1cc85c5d72dedd1c93e5d851cc6dfcb02e79d..f08b161c94463cd2b2a02d7d13c730d3dffb0ebf 100644 (file)
@@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -494,6 +495,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -648,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
@@ -708,6 +709,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
 CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
index 36dbf5043fc099db13ae4bcefdb47d4bb5e45582..aa995d91cd1da451e2915ad29031f0141e171a5d 100644 (file)
@@ -55,7 +55,7 @@ int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
                        int num_devices, const char *buf);
 
 extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
-extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv);
 
 extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
 extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
index e4803ec51110c65252f14e82bcb6e089661eb5cc..6b3c366af78eb5e9608cdd732d25155c9e05fb9a 100644 (file)
@@ -207,6 +207,8 @@ int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
 int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
 int zpci_deconfigure_device(struct zpci_dev *zdev);
+void zpci_device_reserved(struct zpci_dev *zdev);
+bool zpci_is_device_configured(struct zpci_dev *zdev);
 
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
 int zpci_unregister_ioat(struct zpci_dev *, u8);
index 16256e17a544a84f4dee803048b063d23f4c8f8e..10722455fd02e9b2f81f7f53af238f13acd3a4d9 100644 (file)
@@ -419,13 +419,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
-       set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+       set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
-       clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+       clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
index 752a0ffab9bf10c5f0e4cd10d47bb4b5e38f7c09..6a6dd5e1daf63999e895767789cd4eb05173ac31 100644 (file)
@@ -4066,7 +4066,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
                kvm_s390_patch_guest_per_regs(vcpu);
        }
 
-       clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
+       clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
 
        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
index ecd741ee3276e83a4f69f59d4a09e0ebe4072e58..52bc8fbaa60ac5bc12e8eec4f99f2bb6f2d40b82 100644 (file)
@@ -79,7 +79,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
 
 static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
 {
-       return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+       return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
 }
 
 static inline int kvm_is_ucontrol(struct kvm *kvm)
index cfcdf76d6a9573be23b9482504c054d9f6816285..a95ca6df4e5e6902fef19f87da7e6a6754b80b62 100644 (file)
@@ -259,14 +259,13 @@ EXPORT_SYMBOL(strcmp);
 #ifdef __HAVE_ARCH_STRRCHR
 char *strrchr(const char *s, int c)
 {
-       size_t len = __strend(s) - s;
-
-       if (len)
-              do {
-                      if (s[len] == (char) c)
-                              return (char *) s + len;
-              } while (--len > 0);
-       return NULL;
+       ssize_t len = __strend(s) - s;
+
+       do {
+               if (s[len] == (char)c)
+                       return (char *)s + len;
+       } while (--len >= 0);
+       return NULL;
 }
 EXPORT_SYMBOL(strrchr);
 #endif
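[Editor's note — illustrative, not part of the commit] The switch from size_t to ssize_t fixes two corner cases the old loop missed, because the "--len > 0" condition stopped before index 0 was ever examined:

        /* Hypothetical examples of the behaviour being fixed. */
        const char *s = "abc";
        strrchr(s, 'a');    /* old: NULL, since s[0] was never checked; new: s      */
        strrchr("", '\0');  /* old: NULL, since len == 0 skipped the loop; new: ""  */

The new "--len >= 0" condition walks all the way down to index 0, and starting at the terminator keeps strrchr(s, '\0') returning a pointer to the trailing NUL.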
index 88419263a89a967bb6b40e7af668b5be2159dae1..1a374d021e256d90ad06e77a1d945509ee902c2f 100644 (file)
@@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 
 #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)            \
 ({                                                             \
-       /* Branch instruction needs 6 bytes */                  \
-       int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
+       int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2;      \
        _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
        REG_SET_SEEN(b1);                                       \
        REG_SET_SEEN(b2);                                       \
@@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT4(0xb9080000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
-               if (!imm)
-                       break;
-               /* alfi %dst,imm */
-               EMIT6_IMM(0xc20b0000, dst_reg, imm);
+               if (imm != 0) {
+                       /* alfi %dst,imm */
+                       EMIT6_IMM(0xc20b0000, dst_reg, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
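[Editor's note — illustrative] These s390 JIT hunks all address the same BPF rule: a 32-bit ALU operation must zero-extend its destination even when the arithmetic itself is a no-op, so EMIT_ZERO can no longer be skipped together with the instruction. Roughly, in C terms:

        /* Hypothetical illustration of the required semantics. */
        u64 dst = 0xdeadbeef00000001ULL;
        /* BPF_ALU | BPF_ADD | BPF_K with imm == 0 must behave as: */
        dst = (u64)(u32)((u32)dst + 0);   /* 0x1; the old JIT left 0xdeadbeef in the upper half */

The same pattern repeats in the SUB, MUL, XOR and shift cases below, and in the DIV/MOD imm == 1 special case.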
@@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT4(0xb9090000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
-               if (!imm)
-                       break;
-               /* alfi %dst,-imm */
-               EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+               if (imm != 0) {
+                       /* alfi %dst,-imm */
+                       EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
                if (!imm)
                        break;
-               /* agfi %dst,-imm */
-               EMIT6_IMM(0xc2080000, dst_reg, -imm);
+               if (imm == -0x80000000) {
+                       /* algfi %dst,0x80000000 */
+                       EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
+               } else {
+                       /* agfi %dst,-imm */
+                       EMIT6_IMM(0xc2080000, dst_reg, -imm);
+               }
                break;
        /*
         * BPF_MUL
@@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT4(0xb90c0000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
-               if (imm == 1)
-                       break;
-               /* msfi %r5,imm */
-               EMIT6_IMM(0xc2010000, dst_reg, imm);
+               if (imm != 1) {
+                       /* msfi %r5,imm */
+                       EMIT6_IMM(0xc2010000, dst_reg, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
@@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                        if (BPF_OP(insn->code) == BPF_MOD)
                                /* lhgi %dst,0 */
                                EMIT4_IMM(0xa7090000, dst_reg, 0);
+                       else
+                               EMIT_ZERO(dst_reg);
                        break;
                }
                /* lhi %w0,0 */
@@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT4(0xb9820000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
-               if (!imm)
-                       break;
-               /* xilf %dst,imm */
-               EMIT6_IMM(0xc0070000, dst_reg, imm);
+               if (imm != 0) {
+                       /* xilf %dst,imm */
+                       EMIT6_IMM(0xc0070000, dst_reg, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
@@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
                break;
        case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
-               if (imm == 0)
-                       break;
-               /* sll %dst,imm(%r0) */
-               EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+               if (imm != 0) {
+                       /* sll %dst,imm(%r0) */
+                       EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
@@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
                break;
        case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
-               if (imm == 0)
-                       break;
-               /* srl %dst,imm(%r0) */
-               EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+               if (imm != 0) {
+                       /* srl %dst,imm(%r0) */
+                       EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
@@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
                break;
        case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
-               if (imm == 0)
-                       break;
-               /* sra %dst,imm(%r0) */
-               EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+               if (imm != 0) {
+                       /* sra %dst,imm(%r0) */
+                       EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
@@ -1820,7 +1826,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
        jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
        if (jit.addrs == NULL) {
                fp = orig_fp;
-               goto out;
+               goto free_addrs;
        }
        /*
         * Three initial passes:
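
[Note on the BPF JIT ALU hunks above] Most of the ALU changes in this file concern
32-bit semantics: a BPF_ALU (32-bit) operation must zero-extend the destination
register even when the immediate makes the arithmetic a no-op, so the
"if (!imm) break;" shortcuts are replaced with forms that still fall through to
EMIT_ZERO(). The 64-bit subtract case additionally avoids negating -0x80000000,
whose negation does not fit the signed 32-bit immediate of agfi; algfi with the
unsigned constant is emitted instead. A hedged C sketch of the intended register
semantics (interpreter-style illustration with made-up names, not the JIT itself):

        #include <stdint.h>

        /* BPF_ALU | BPF_ADD | BPF_K: operate on the low 32 bits, then
         * zero-extend into the 64-bit register - even when imm == 0. */
        static inline uint64_t alu32_add_k(uint64_t dst, int32_t imm)
        {
                return (uint64_t)(uint32_t)((uint32_t)dst + (uint32_t)imm);
        }

        /* BPF_ALU64 | BPF_SUB | BPF_K: dst -= sign-extended imm. Plain C
         * handles imm == INT32_MIN fine; the JIT needs its special case
         * because "-imm" would overflow agfi's signed immediate. */
        static inline uint64_t alu64_sub_k(uint64_t dst, int32_t imm)
        {
                return dst - (uint64_t)(int64_t)imm;
        }
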
index e7e6788d75a864605a99ff34d56393d1a9b35a5b..b833155ce838111981b424418703c584c47c8771 100644 (file)
@@ -92,7 +92,7 @@ void zpci_remove_reserved_devices(void)
        spin_unlock(&zpci_list_lock);
 
        list_for_each_entry_safe(zdev, tmp, &remove, entry)
-               zpci_zdev_put(zdev);
+               zpci_device_reserved(zdev);
 }
 
 int pci_domain_nr(struct pci_bus *bus)
@@ -751,6 +751,14 @@ error:
        return ERR_PTR(rc);
 }
 
+bool zpci_is_device_configured(struct zpci_dev *zdev)
+{
+       enum zpci_state state = zdev->state;
+
+       return state != ZPCI_FN_STATE_RESERVED &&
+               state != ZPCI_FN_STATE_STANDBY;
+}
+
 /**
  * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
  * @zdev: The zpci_dev to be configured
@@ -822,6 +830,31 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
        return 0;
 }
 
+/**
+ * zpci_device_reserved() - Mark device as reserved
+ * @zdev: the zpci_dev that was reserved
+ *
+ * Handle the case where a given zPCI function was reserved by another system.
+ * After a call to this function the zpci_dev can no longer be found via
+ * get_zdev_by_fid(), but it may still be accessible via existing
+ * references, though it will not be functional anymore.
+ */
+void zpci_device_reserved(struct zpci_dev *zdev)
+{
+       if (zdev->has_hp_slot)
+               zpci_exit_slot(zdev);
+       /*
+        * Remove device from zpci_list as it is going away. This also
+        * makes sure we ignore subsequent zPCI events for this device.
+        */
+       spin_lock(&zpci_list_lock);
+       list_del(&zdev->entry);
+       spin_unlock(&zpci_list_lock);
+       zdev->state = ZPCI_FN_STATE_RESERVED;
+       zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+       zpci_zdev_put(zdev);
+}
+
 void zpci_release_device(struct kref *kref)
 {
        struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
@@ -843,6 +876,12 @@ void zpci_release_device(struct kref *kref)
        case ZPCI_FN_STATE_STANDBY:
                if (zdev->has_hp_slot)
                        zpci_exit_slot(zdev);
+               spin_lock(&zpci_list_lock);
+               list_del(&zdev->entry);
+               spin_unlock(&zpci_list_lock);
+               zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+               fallthrough;
+       case ZPCI_FN_STATE_RESERVED:
                if (zdev->has_resources)
                        zpci_cleanup_bus_resources(zdev);
                zpci_bus_device_unregister(zdev);
@@ -851,10 +890,6 @@ void zpci_release_device(struct kref *kref)
        default:
                break;
        }
-
-       spin_lock(&zpci_list_lock);
-       list_del(&zdev->entry);
-       spin_unlock(&zpci_list_lock);
        zpci_dbg(3, "rem fid:%x\n", zdev->fid);
        kfree(zdev);
 }
index c856f80cb21b880d233fe6c6ec3c93ab4fcb688c..5b8d647523f969abe8fc0aa792e9390f631fe828 100644 (file)
@@ -140,7 +140,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
                        /* The 0x0304 event may immediately reserve the device */
                        if (!clp_get_state(zdev->fid, &state) &&
                            state == ZPCI_FN_STATE_RESERVED) {
-                               zpci_zdev_put(zdev);
+                               zpci_device_reserved(zdev);
                        }
                }
                break;
@@ -151,7 +151,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
        case 0x0308: /* Standby -> Reserved */
                if (!zdev)
                        break;
-               zpci_zdev_put(zdev);
+               zpci_device_reserved(zdev);
                break;
        default:
                break;
index ae683aa623ace069c7cd8e663632cd9608fb5734..c5b35ea129cfa5addd89efb09530dac88d39e088 100644 (file)
@@ -159,7 +159,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
 
        mmap_read_lock(current->mm);
        ret = -EINVAL;
-       vma = find_vma(current->mm, mmio_addr);
+       vma = vma_lookup(current->mm, mmio_addr);
        if (!vma)
                goto out_unlock_mmap;
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
@@ -298,7 +298,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
 
        mmap_read_lock(current->mm);
        ret = -EINVAL;
-       vma = find_vma(current->mm, mmio_addr);
+       vma = vma_lookup(current->mm, mmio_addr);
        if (!vma)
                goto out_unlock_mmap;
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
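
[Note on the vma_lookup() hunks above] find_vma() returns the first VMA whose vm_end
lies above the address, which may be a mapping that starts above it, so the old check
could accept an address that actually falls in a gap below an unrelated VMA.
vma_lookup() only returns a VMA that contains the address. Roughly (sketch of the
helper's behaviour; vma_lookup_sketch is an illustrative name):

        /* Behaves like: */
        static struct vm_area_struct *vma_lookup_sketch(struct mm_struct *mm,
                                                        unsigned long addr)
        {
                struct vm_area_struct *vma = find_vma(mm, addr);

                if (vma && addr < vma->vm_start)
                        vma = NULL;     /* addr is in a gap before this VMA */
                return vma;
        }
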
index 58592dfa5cb602ed58eb34dcdead6f1cc66755c6..c081e7e2d6e7fed5ae9e1426efe686af457263ce 100644 (file)
@@ -80,30 +80,30 @@ $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
 $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
        $(call if_changed,lzo)
 
-$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
        $(call if_changed,uimage,bzip2)
 
-$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
        $(call if_changed,uimage,gzip)
 
-$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
        $(call if_changed,uimage,lzma)
 
-$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
+$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
        $(call if_changed,uimage,xz)
 
-$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
        $(call if_changed,uimage,lzo)
 
-$(obj)/uImage.bin: $(obj)/vmlinux.bin
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
        $(call if_changed,uimage,none)
 
 OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
-$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
+$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE
        $(call if_changed,objcopy)
 
 OBJCOPYFLAGS_uImage.srec := -I binary -O srec
-$(obj)/uImage.srec: $(obj)/uImage
+$(obj)/uImage.srec: $(obj)/uImage FORCE
        $(call if_changed,objcopy)
 
 $(obj)/uImage: $(obj)/uImage.$(suffix-y)
index 56bf35c2f29c2b832170d67468c1a23d5f27383e..cdced80a7ffa3944535eeebb3a1fc49764a6bdd9 100644 (file)
@@ -34,7 +34,7 @@ typedef struct { unsigned long long pmd; } pmd_t;
 
 static inline pmd_t *pud_pgtable(pud_t pud)
 {
-       return (pmd_t *)pud_val(pud);
+       return (pmd_t *)(unsigned long)pud_val(pud);
 }
 
 /* only used by the stubbed out hugetlb gup code, should never be called */
index 8e1d72a16759494086c2e8fe85f3044e673f12ef..7ceae24b0ca991992b346140c49e92bb3009f762 100644 (file)
@@ -356,7 +356,9 @@ err_nomem:
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
 {
-       if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
+       size = PAGE_ALIGN(size);
+
+       if (!sparc_dma_free_resource(cpu_addr, size))
                return;
 
        dma_make_coherent(dma_addr, size);
index 8e645ddac58e2dea89d88efd156c4c3128d4e215..30f171b7b00c23aeb887a3c7fa13020e47420f45 100644 (file)
@@ -39,6 +39,7 @@ struct mdesc_hdr {
        u32     node_sz; /* node block size */
        u32     name_sz; /* name block size */
        u32     data_sz; /* data block size */
+       char    data[];
 } __attribute__((aligned(16)));
 
 struct mdesc_elem {
@@ -612,7 +613,7 @@ EXPORT_SYMBOL(mdesc_get_node_info);
 
 static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
 {
-       return (struct mdesc_elem *) (mdesc + 1);
+       return (struct mdesc_elem *) mdesc->data;
 }
 
 static void *name_block(struct mdesc_hdr *mdesc)
index c9da9f139694dafe211b5bb30e4d36892cabaf77..f3a8cd491ce0de2e5b0de17e65b368d2d86231d9 100644 (file)
@@ -19,8 +19,10 @@ void ioport_unmap(void __iomem *addr)
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
 
+#ifdef CONFIG_PCI
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
        /* nothing to do */
 }
 EXPORT_SYMBOL(pci_iounmap);
+#endif
index 4e001bbbb4256f412236fd6229e5073ec2b36ddc..d9830e7e1060f7c38697904d0a04cfd9a888ccd7 100644 (file)
@@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
 config ARCH_HIBERNATION_POSSIBLE
        def_bool y
 
+config ARCH_NR_GPIO
+       int
+       default 1024 if X86_64
+       default 512
+
 config ARCH_SUSPEND_POSSIBLE
        def_bool y
 
@@ -1400,7 +1405,7 @@ config HIGHMEM4G
 
 config HIGHMEM64G
        bool "64GB"
-       depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
+       depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
        select X86_PAE
        help
          Select this if you have a 32-bit processor and more than 4
@@ -1520,7 +1525,6 @@ config AMD_MEM_ENCRYPT
 
 config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
        bool "Activate AMD Secure Memory Encryption (SME) by default"
-       default y
        depends on AMD_MEM_ENCRYPT
        help
          Say yes to have system memory encrypted by default if running on
@@ -2605,7 +2609,6 @@ config PCI_OLPC
 config PCI_XEN
        def_bool y
        depends on PCI && XEN
-       select SWIOTLB_XEN
 
 config MMCONF_FAM10H
        def_bool y
@@ -2828,8 +2831,6 @@ config HAVE_ATOMIC_IOMAP
        def_bool y
        depends on X86_32
 
-source "drivers/firmware/Kconfig"
-
 source "arch/x86/kvm/Kconfig"
 
 source "arch/x86/Kconfig.assembler"
index e7355f8b51c2b8e1f51c885d275771247afa3c36..94834c4b5e5ef279f9f0da9521a76f9a82348556 100644 (file)
@@ -4,6 +4,12 @@
 
 tune           = $(call cc-option,-mtune=$(1),$(2))
 
+ifdef CONFIG_CC_IS_CLANG
+align          := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0)
+else
+align          := -falign-functions=0 -falign-jumps=0 -falign-loops=0
+endif
+
 cflags-$(CONFIG_M486SX)                += -march=i486
 cflags-$(CONFIG_M486)          += -march=i486
 cflags-$(CONFIG_M586)          += -march=i586
@@ -19,11 +25,11 @@ cflags-$(CONFIG_MK6)                += -march=k6
 # They make zero difference whatsoever to performance at this time.
 cflags-$(CONFIG_MK7)           += -march=athlon
 cflags-$(CONFIG_MK8)           += $(call cc-option,-march=k8,-march=athlon)
-cflags-$(CONFIG_MCRUSOE)       += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
-cflags-$(CONFIG_MEFFICEON)     += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCRUSOE)       += -march=i686 $(align)
+cflags-$(CONFIG_MEFFICEON)     += -march=i686 $(call tune,pentium3) $(align)
 cflags-$(CONFIG_MWINCHIPC6)    += $(call cc-option,-march=winchip-c6,-march=i586)
 cflags-$(CONFIG_MWINCHIP3D)    += $(call cc-option,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MCYRIXIII)     += $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCYRIXIII)     += $(call cc-option,-march=c3,-march=i486) $(align)
 cflags-$(CONFIG_MVIAC3_2)      += $(call cc-option,-march=c3-2,-march=i686)
 cflags-$(CONFIG_MVIAC7)                += -march=i686
 cflags-$(CONFIG_MCORE2)                += -march=i686 $(call tune,core2)
index fa2c3f50aecbdfb118a8f8b154751047ca9f4d3a..18d2f51991944a3635d48327add9c06359e908ba 100644 (file)
@@ -367,10 +367,11 @@ SYM_FUNC_START(sm4_aesni_avx_crypt8)
         *      %rdx: src (1..8 blocks)
         *      %rcx: num blocks (1..8)
         */
-       FRAME_BEGIN
-
        cmpq $5, %rcx;
        jb sm4_aesni_avx_crypt4;
+
+       FRAME_BEGIN
+
        vmovdqu (0 * 16)(%rdx), RA0;
        vmovdqu (1 * 16)(%rdx), RA1;
        vmovdqu (2 * 16)(%rdx), RA2;
index 2a57dbed48945c0c4d0c437f34b4e2737114cac4..6dfa8ddaa60f7615beef94d8c4b1ce0fcf32d857 100644 (file)
@@ -2465,6 +2465,7 @@ static int x86_pmu_event_init(struct perf_event *event)
        if (err) {
                if (event->destroy)
                        event->destroy(event);
+               event->destroy = NULL;
        }
 
        if (READ_ONCE(x86_pmu.attr_rdpmc) &&
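
[Note on the x86_pmu_event_init() hunk above] If initialisation fails after a destroy
callback has been installed, the driver releases its state here and must also clear
the pointer, otherwise the generic perf core's error path would invoke the same
callback a second time. A minimal sketch of the "run the cleanup once, then disarm it"
pattern (illustrative names, not the perf code):

        #include <stddef.h>

        struct event {
                void (*destroy)(struct event *ev);
        };

        static void init_failed(struct event *ev)
        {
                if (ev->destroy) {
                        ev->destroy(ev);
                        ev->destroy = NULL;     /* prevent a second release */
                }
        }
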
index 7011e87be6d030fafedf364e1284d69eba45027a..9a044438072ba2190dfa1453d18483b0815b8b9a 100644 (file)
@@ -263,6 +263,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
        INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
        INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
        INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
+       INTEL_EVENT_CONSTRAINT(0xef, 0xf),
        INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
        EVENT_CONSTRAINT_END
 };
index 90e682a92820dea099bc5cbb75a703581d828c0e..db2d92fb44daf820402f90bd4d320cb5c133477a 100644 (file)
@@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
 /*
  * IPI implementation on Hyper-V.
  */
-static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
+               bool exclude_self)
 {
        struct hv_send_ipi_ex **arg;
        struct hv_send_ipi_ex *ipi_arg;
@@ -121,14 +122,27 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
        ipi_arg->reserved = 0;
        ipi_arg->vp_set.valid_bank_mask = 0;
 
-       if (!cpumask_equal(mask, cpu_present_mask)) {
+       /*
+        * Use HV_GENERIC_SET_ALL and avoid converting cpumask to VP_SET
+        * when the IPI is sent to all currently present CPUs.
+        */
+       if (!cpumask_equal(mask, cpu_present_mask) || exclude_self) {
                ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
-               nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
-       }
-       if (nr_bank < 0)
-               goto ipi_mask_ex_done;
-       if (!nr_bank)
+               if (exclude_self)
+                       nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
+               else
+                       nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
+
+               /*
+                * 'nr_bank <= 0' means some CPUs in cpumask can't be
+                * represented in VP_SET. Return an error and fall back to
+                * native (architectural) method of sending IPIs.
+                */
+               if (nr_bank <= 0)
+                       goto ipi_mask_ex_done;
+       } else {
                ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
+       }
 
        status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
                              ipi_arg, NULL);
@@ -138,15 +152,25 @@ ipi_mask_ex_done:
        return hv_result_success(status);
 }
 
-static bool __send_ipi_mask(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask(const struct cpumask *mask, int vector,
+               bool exclude_self)
 {
-       int cur_cpu, vcpu;
+       int cur_cpu, vcpu, this_cpu = smp_processor_id();
        struct hv_send_ipi ipi_arg;
        u64 status;
+       unsigned int weight;
 
        trace_hyperv_send_ipi_mask(mask, vector);
 
-       if (cpumask_empty(mask))
+       weight = cpumask_weight(mask);
+
+       /*
+        * Do nothing if
+        *   1. the mask is empty
+        *   2. the mask only contains self when exclude_self is true
+        */
+       if (weight == 0 ||
+           (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
                return true;
 
        if (!hv_hypercall_pg)
@@ -172,6 +196,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
        ipi_arg.cpu_mask = 0;
 
        for_each_cpu(cur_cpu, mask) {
+               if (exclude_self && cur_cpu == this_cpu)
+                       continue;
                vcpu = hv_cpu_number_to_vp_number(cur_cpu);
                if (vcpu == VP_INVAL)
                        return false;
@@ -191,7 +217,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
        return hv_result_success(status);
 
 do_ex_hypercall:
-       return __send_ipi_mask_ex(mask, vector);
+       return __send_ipi_mask_ex(mask, vector, exclude_self);
 }
 
 static bool __send_ipi_one(int cpu, int vector)
@@ -208,7 +234,7 @@ static bool __send_ipi_one(int cpu, int vector)
                return false;
 
        if (vp >= 64)
-               return __send_ipi_mask_ex(cpumask_of(cpu), vector);
+               return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);
 
        status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
        return hv_result_success(status);
@@ -222,20 +248,13 @@ static void hv_send_ipi(int cpu, int vector)
 
 static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
 {
-       if (!__send_ipi_mask(mask, vector))
+       if (!__send_ipi_mask(mask, vector, false))
                orig_apic.send_IPI_mask(mask, vector);
 }
 
 static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 {
-       unsigned int this_cpu = smp_processor_id();
-       struct cpumask new_mask;
-       const struct cpumask *local_mask;
-
-       cpumask_copy(&new_mask, mask);
-       cpumask_clear_cpu(this_cpu, &new_mask);
-       local_mask = &new_mask;
-       if (!__send_ipi_mask(local_mask, vector))
+       if (!__send_ipi_mask(mask, vector, true))
                orig_apic.send_IPI_mask_allbutself(mask, vector);
 }
 
@@ -246,7 +265,7 @@ static void hv_send_ipi_allbutself(int vector)
 
 static void hv_send_ipi_all(int vector)
 {
-       if (!__send_ipi_mask(cpu_online_mask, vector))
+       if (!__send_ipi_mask(cpu_online_mask, vector, false))
                orig_apic.send_IPI_all(vector);
 }
 
index 14ebd21965691d17ea763e5f62494cf780946cae..43184640b579a45565919801e8a23a2301e69017 100644 (file)
@@ -25,7 +25,7 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
                 * For !SMAP hardware we patch out CLAC on entry.
                 */
                if (boot_cpu_has(X86_FEATURE_SMAP) ||
-                   (IS_ENABLED(CONFIG_64_BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
+                   (IS_ENABLED(CONFIG_64BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
                        mask |= X86_EFLAGS_AC;
 
                WARN_ON_ONCE(flags & mask);
index 87bd6025d91d4bb22df73bd821072904b337a5f3..6a5f3acf2b331291be6b5a2a1600361b9b2f2393 100644 (file)
@@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node {
                            struct kvm_page_track_notifier_node *node);
 };
 
-void kvm_page_track_init(struct kvm *kvm);
+int kvm_page_track_init(struct kvm *kvm);
 void kvm_page_track_cleanup(struct kvm *kvm);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
index eceea929909740613471b74567d8e24413b57339..6c576519210280483b93f67d2101aa4f1d26445d 100644 (file)
@@ -2,6 +2,20 @@
 #ifndef _ASM_X86_KVM_CLOCK_H
 #define _ASM_X86_KVM_CLOCK_H
 
+#include <linux/percpu.h>
+
 extern struct clocksource kvm_clock;
 
+DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+       return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+       return this_cpu_read(hv_clock_per_cpu);
+}
+
 #endif /* _ASM_X86_KVM_CLOCK_H */
index 5c7bcaa7962323ba29dc825c00d2d7d8a2234417..1d5f14aff5f6fd975143874cf1010b32c999ee11 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef _ASM_X86_PKEYS_H
 #define _ASM_X86_PKEYS_H
 
-#define ARCH_DEFAULT_PKEY      0
-
 /*
  * If more than 16 keys are ever supported, a thorough audit
  * will be necessary to ensure that the types that store key
index f3fbb84ff8a77fe5ab539fc7caa42491f55d39fd..68c257a3de0d393b9ba904526fb485bb5f12ab38 100644 (file)
@@ -275,7 +275,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
 {
        const struct { char _[64]; } *__src = src;
        struct { char _[64]; } __iomem *__dst = dst;
-       int zf;
+       bool zf;
 
        /*
         * ENQCMDS %(rdx), rax
index c9fa7be3df82ddb9495961b3e2f22b1ac07edafa..5c95d242f38d708e313f39cc781fa8455fdd16ed 100644 (file)
@@ -301,8 +301,8 @@ do {                                                                        \
        unsigned int __gu_low, __gu_high;                               \
        const unsigned int __user *__gu_ptr;                            \
        __gu_ptr = (const void __user *)(ptr);                          \
-       __get_user_asm(__gu_low, ptr, "l", "=r", label);                \
-       __get_user_asm(__gu_high, ptr+1, "l", "=r", label);             \
+       __get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);           \
+       __get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);        \
        (x) = ((unsigned long long)__gu_high << 32) | __gu_low;         \
 } while (0)
 #else
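
[Note on the 32-bit __get_user_asm_u64() hunk above] The double-word load reads the
value as two 32-bit halves, so the "+1" must be applied to the unsigned int pointer
__gu_ptr; applied to the caller's ptr it advances by the pointee size of the original
type instead of by 4 bytes. A small user-space illustration of the stride difference
(made-up variable names):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t v = 0x1122334455667788ull;
                uint64_t *p64 = &v;
                unsigned int *p32 = (unsigned int *)&v;

                /* p64 + 1 points 8 bytes past v; p32 + 1 points at the other
                 * 32-bit half (the upper half on little-endian x86). */
                printf("p64 stride %td, p32 stride %td\n",
                       (char *)(p64 + 1) - (char *)p64,
                       (char *)(p32 + 1) - (char *)p32);
                printf("low=%x high=%x\n", p32[0], p32[1]);
                return 0;
        }
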
index 3506d8c598c18b78596816ab99e98a5ab99d4ba6..4557f7cb0fa6ea175a99cb801886ecc273d9a16f 100644 (file)
@@ -14,16 +14,19 @@ static inline int pci_xen_hvm_init(void)
        return -1;
 }
 #endif
-#if defined(CONFIG_XEN_DOM0)
+#ifdef CONFIG_XEN_PV_DOM0
 int __init pci_xen_initial_domain(void);
-int xen_find_device_domain_owner(struct pci_dev *dev);
-int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
-int xen_unregister_device_domain_owner(struct pci_dev *dev);
 #else
 static inline int __init pci_xen_initial_domain(void)
 {
        return -1;
 }
+#endif
+#ifdef CONFIG_XEN_DOM0
+int xen_find_device_domain_owner(struct pci_dev *dev);
+int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
+int xen_unregister_device_domain_owner(struct pci_dev *dev);
+#else
 static inline int xen_find_device_domain_owner(struct pci_dev *dev)
 {
        return -1;
index 6b56d0d45d15304f4330cd926e4dab60df1023aa..66b4ddde7743029d97450fdf826dd10a777acce2 100644 (file)
@@ -3,14 +3,10 @@
 #define _ASM_X86_SWIOTLB_XEN_H
 
 #ifdef CONFIG_SWIOTLB_XEN
-extern int xen_swiotlb;
 extern int __init pci_xen_swiotlb_detect(void);
-extern void __init pci_xen_swiotlb_init(void);
 extern int pci_xen_swiotlb_init_late(void);
 #else
-#define xen_swiotlb (0)
-static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
-static inline void __init pci_xen_swiotlb_init(void) { }
+#define pci_xen_swiotlb_detect NULL
 static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
 #endif
 
index 0f8885949e8c4b602fefc2eaf918d11a2957c051..b3410f1ac21754c9ff94338a71da12e812d65686 100644 (file)
@@ -326,6 +326,7 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_SMAP
                cr4_set_bits(X86_CR4_SMAP);
 #else
+               clear_cpu_cap(c, X86_FEATURE_SMAP);
                cr4_clear_bits(X86_CR4_SMAP);
 #endif
        }
index 8cb7816d03b4cd27dd9a2143533e4d684e6a0823..193204aee880178f6d30412b4c068a033473e330 100644 (file)
@@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
 
 static void kill_me_now(struct callback_head *ch)
 {
+       struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
+
+       p->mce_count = 0;
        force_sig(SIGBUS);
 }
 
@@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
        int flags = MF_ACTION_REQUIRED;
        int ret;
 
+       p->mce_count = 0;
        pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
 
        if (!p->mce_ripv)
@@ -1290,17 +1294,34 @@ static void kill_me_maybe(struct callback_head *cb)
        }
 }
 
-static void queue_task_work(struct mce *m, int kill_current_task)
+static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
 {
-       current->mce_addr = m->addr;
-       current->mce_kflags = m->kflags;
-       current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
-       current->mce_whole_page = whole_page(m);
+       int count = ++current->mce_count;
 
-       if (kill_current_task)
-               current->mce_kill_me.func = kill_me_now;
-       else
-               current->mce_kill_me.func = kill_me_maybe;
+       /* First call, save all the details */
+       if (count == 1) {
+               current->mce_addr = m->addr;
+               current->mce_kflags = m->kflags;
+               current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+               current->mce_whole_page = whole_page(m);
+
+               if (kill_current_task)
+                       current->mce_kill_me.func = kill_me_now;
+               else
+                       current->mce_kill_me.func = kill_me_maybe;
+       }
+
+       /* Ten is likely overkill. Don't expect more than two faults before task_work() */
+       if (count > 10)
+               mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
+
+       /* Second or later call, make sure page address matches the one from first call */
+       if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
+               mce_panic("Consecutive machine checks to different user pages", m, msg);
+
+       /* Do not call task_work_add() more than once */
+       if (count > 1)
+               return;
 
        task_work_add(current, &current->mce_kill_me, TWA_RESUME);
 }
@@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
                /* If this triggers there is no way to recover. Die hard. */
                BUG_ON(!on_thread_stack() || !user_mode(regs));
 
-               queue_task_work(&m, kill_current_task);
+               queue_task_work(&m, msg, kill_current_task);
 
        } else {
                /*
@@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
                }
 
                if (m.kflags & MCE_IN_KERNEL_COPYIN)
-                       queue_task_work(&m, kill_current_task);
+                       queue_task_work(&m, msg, kill_current_task);
        }
 out:
        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
index 4b8813bafffdcf87cde891df773b53ed2f702de7..bb1c3f5f60c81d3969e0c84e269f3f4883dd0467 100644 (file)
@@ -527,12 +527,14 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
        rdt_domain_reconfigure_cdp(r);
 
        if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
-               kfree(d);
+               kfree(hw_dom);
                return;
        }
 
        if (r->mon_capable && domain_setup_mon_state(r, d)) {
-               kfree(d);
+               kfree(hw_dom->ctrl_val);
+               kfree(hw_dom->mbps_val);
+               kfree(hw_dom);
                return;
        }
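
[Note on the domain_add_cpu() hunk above] The domain being set up is embedded in the
larger hw_dom allocation, so the error paths must free the object that came back from
the allocator (plus the ctrl_val/mbps_val arrays on the later failure), not a pointer
to the embedded member. A generic sketch of the pattern (made-up names):

        #include <stdlib.h>

        struct domain { int id; };

        struct hw_domain {
                struct domain d;        /* embedded, not separately allocated */
                unsigned int *ctrl_val; /* allocated after hw_domain itself   */
        };

        static void setup_failed(struct hw_domain *hw_dom)
        {
                free(hw_dom->ctrl_val); /* free(NULL) is a no-op */
                free(hw_dom);           /* not free(&hw_dom->d)  */
        }
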
 
index 38837dad46e629052957a0d837c928e62045a8b0..391a4e2b86049ecf012be5593d281ce28daa5147 100644 (file)
@@ -714,12 +714,6 @@ static struct chipset early_qrk[] __initdata = {
         */
        { PCI_VENDOR_ID_INTEL, 0x0f00,
                PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
-       { PCI_VENDOR_ID_INTEL, 0x3e20,
-               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
-       { PCI_VENDOR_ID_INTEL, 0x3ec4,
-               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
-       { PCI_VENDOR_ID_INTEL, 0x8a12,
-               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
        { PCI_VENDOR_ID_BROADCOM, 0x4331,
          PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
        {}
index 445c57c9c5397f9b1af0f94efb4da614ba8f6080..831b25c5e70581aeb490e951a7bb8842a5dbc0be 100644 (file)
@@ -379,9 +379,14 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
                                     sizeof(fpu->state.fxsave)))
                        return -EFAULT;
 
-               /* Reject invalid MXCSR values. */
-               if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
-                       return -EINVAL;
+               if (IS_ENABLED(CONFIG_X86_64)) {
+                       /* Reject invalid MXCSR values. */
+                       if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
+                               return -EINVAL;
+               } else {
+                       /* Mask invalid bits out for historical reasons (broken hardware). */
+                       fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
+               }
 
                /* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */
                if (use_xsave())
index 42fc41dd0e1f17058724bbba75dfa1ede83994c8..882213df37130249b8712322810e5cbec3b92785 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/irq_remapping.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
+#include <asm/mwait.h>
 
 #undef  pr_fmt
 #define pr_fmt(fmt) "hpet: " fmt
@@ -916,6 +917,83 @@ static bool __init hpet_counting(void)
        return false;
 }
 
+static bool __init mwait_pc10_supported(void)
+{
+       unsigned int eax, ebx, ecx, mwait_substates;
+
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return false;
+
+       if (!cpu_feature_enabled(X86_FEATURE_MWAIT))
+               return false;
+
+       if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+               return false;
+
+       cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+
+       return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
+              (ecx & CPUID5_ECX_INTERRUPT_BREAK) &&
+              (mwait_substates & (0xF << 28));
+}
+
+/*
+ * Check whether the system supports PC10. If so force disable HPET as that
+ * stops counting in PC10. This check is overbroad as it does not take any
+ * of the following into account:
+ *
+ *     - ACPI tables
+ *     - Enablement of intel_idle
+ *     - Command line arguments which limit intel_idle C-state support
+ *
+ * That's perfectly fine. HPET is a piece of hardware designed by committee
+ * and the only reason why it is still in use on modern systems is the
+ * fact that it is impossible to reliably query TSC and CPU frequency via
+ * CPUID or firmware.
+ *
+ * If HPET is functional it is useful for calibrating TSC, but this can be
+ * done via PMTIMER as well which seems to be the last remaining timer on
+ * X86/INTEL platforms that has not been completely wrecked by feature
+ * creep.
+ *
+ * In theory HPET support should be removed altogether, but there are older
+ * systems out there which depend on it because TSC and APIC timer are
+ * dysfunctional in deeper C-states.
+ *
+ * It's only 20 years now that hardware people have been asked to provide
+ * reliable and discoverable facilities which can be used for timekeeping
+ * and per CPU timer interrupts.
+ *
+ * The probability that this problem is going to be solved in the
+ * foreseeable future is close to zero, so the kernel has to be cluttered
+ * with heuristics to keep up with the ever growing amount of hardware and
+ * firmware trainwrecks. Hopefully some day hardware people will understand
+ * that the approach of "This can be fixed in software" is not sustainable.
+ * Hope dies last...
+ */
+static bool __init hpet_is_pc10_damaged(void)
+{
+       unsigned long long pcfg;
+
+       /* Check whether PC10 substates are supported */
+       if (!mwait_pc10_supported())
+               return false;
+
+       /* Check whether PC10 is enabled in PKG C-state limit */
+       rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
+       if ((pcfg & 0xF) < 8)
+               return false;
+
+       if (hpet_force_user) {
+               pr_warn("HPET force enabled via command line, but dysfunctional in PC10.\n");
+               return false;
+       }
+
+       pr_info("HPET dysfunctional in PC10. Force disabled.\n");
+       boot_hpet_disable = true;
+       return true;
+}
+
 /**
  * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
  */
@@ -929,6 +1007,9 @@ int __init hpet_enable(void)
        if (!is_hpet_capable())
                return 0;
 
+       if (hpet_is_pc10_damaged())
+               return 0;
+
        hpet_set_mapping();
        if (!hpet_virt_address)
                return 0;
index ad273e5861c1b267cb815fdd54969c2b26ffc90d..73c74b961d0fd9b69b6dd0f9fab50b312b5fccd6 100644 (file)
@@ -49,18 +49,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
 static struct pvclock_vsyscall_time_info
                        hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
 static struct pvclock_wall_clock wall_clock __bss_decrypted;
-static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
 static struct pvclock_vsyscall_time_info *hvclock_mem;
-
-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
-{
-       return &this_cpu_read(hv_clock_per_cpu)->pvti;
-}
-
-static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
-{
-       return this_cpu_read(hv_clock_per_cpu);
-}
+DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
 
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
index 79f164141116a0b74b007d3ec97ce7225b31c55f..40ed44ead0631288b16f98e233464994eb09e062 100644 (file)
@@ -830,6 +830,20 @@ void __init setup_arch(char **cmdline_p)
 
        x86_init.oem.arch_setup();
 
+       /*
+        * Do some memory reservations *before* memory is added to memblock, so
+        * memblock allocations won't overwrite it.
+        *
+        * After this point, everything still needed from the boot loader or
+        * firmware or kernel text should be early reserved or marked not RAM in
+        * e820. All other memory is free game.
+        *
+        * This call needs to happen before e820__memory_setup() which calls the
+        * xen_memory_setup() on Xen dom0 which relies on the fact that those
+        * early reservations have happened already.
+        */
+       early_reserve_memory();
+
        iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
        e820__memory_setup();
        parse_setup_data();
@@ -876,18 +890,6 @@ void __init setup_arch(char **cmdline_p)
 
        parse_early_param();
 
-       /*
-        * Do some memory reservations *before* memory is added to
-        * memblock, so memblock allocations won't overwrite it.
-        * Do it after early param, so we could get (unlikely) panic from
-        * serial.
-        *
-        * After this point everything still needed from the boot loader or
-        * firmware or kernel text should be early reserved or marked not
-        * RAM in e820. All other memory is free game.
-        */
-       early_reserve_memory();
-
 #ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Memory used by the kernel cannot be hot-removed because Linux
index 78a32b956e8104237c15f72ebf58460578878dea..5afd985591939cc285f0b035cc2e2fc810cef4e2 100644 (file)
@@ -135,7 +135,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 
 static void __init pcpu_fc_free(void *ptr, size_t size)
 {
-       memblock_free(__pa(ptr), size);
+       memblock_free_ptr(ptr, size);
 }
 
 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
index 9f90f460a28cc0f49b161188933dba804249acdf..bf1033a62e4806a64a430075fc5b06540f554010 100644 (file)
@@ -130,6 +130,8 @@ static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
                } else {
                        ret = ES_VMM_ERROR;
                }
+       } else if (ghcb->save.sw_exit_info_1 & 0xffffffff) {
+               ret = ES_VMM_ERROR;
        } else {
                ret = ES_OK;
        }
index fe03bd978761eb34a1b5230303236c5e746a807d..751aa85a300129e9489e8c6ed61a1e4ff35117e0 100644 (file)
@@ -65,8 +65,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
        for (i = 0; i < nent; i++) {
                e = &entries[i];
 
-               if (e->function == function && (e->index == index ||
-                   !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
+               if (e->function == function &&
+                   (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
                        return e;
        }
 
index 2837110e66eda5952d8110f874e158c2a4bcf067..9a144ca8e146054209c446d9e66e426fca0334ee 100644 (file)
@@ -435,7 +435,6 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
        __FOP_RET(#op)
 
 asm(".pushsection .fixup, \"ax\"\n"
-    ".global kvm_fastop_exception \n"
     "kvm_fastop_exception: xor %esi, %esi; ret\n"
     ".popsection");
 
@@ -4206,7 +4205,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
        u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
 
        if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
-               return emulate_ud(ctxt);
+               return emulate_gp(ctxt, 0);
 
        return X86EMUL_CONTINUE;
 }
index 232a86a6faaf921845262d14163c696cc046be78..d5124b520f761cae5c602e6e7a7c0ca484ce7927 100644 (file)
@@ -939,7 +939,7 @@ static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_init(&hv_vcpu->stimer[i], i);
 
-       hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
+       hv_vcpu->vp_index = vcpu->vcpu_idx;
 
        return 0;
 }
@@ -1444,7 +1444,6 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
        switch (msr) {
        case HV_X64_MSR_VP_INDEX: {
                struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
-               int vcpu_idx = kvm_vcpu_get_idx(vcpu);
                u32 new_vp_index = (u32)data;
 
                if (!host || new_vp_index >= KVM_MAX_VCPUS)
@@ -1459,9 +1458,9 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
                 * VP index is changing, adjust num_mismatched_vp_indexes if
                 * it now matches or no longer matches vcpu_idx.
                 */
-               if (hv_vcpu->vp_index == vcpu_idx)
+               if (hv_vcpu->vp_index == vcpu->vcpu_idx)
                        atomic_inc(&hv->num_mismatched_vp_indexes);
-               else if (new_vp_index == vcpu_idx)
+               else if (new_vp_index == vcpu->vcpu_idx)
                        atomic_dec(&hv->num_mismatched_vp_indexes);
 
                hv_vcpu->vp_index = new_vp_index;
index 730da8537d058ddc6e57071ca31da24ab6c0868b..ed1c4e546d0495eeaa0f7e4ef85b964fc591fb81 100644 (file)
@@ -83,7 +83,7 @@ static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
 {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
-       return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu);
+       return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
 }
 
 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
index ff005fe738a4c1dae5e2b6b4648e1d5f34cfd24c..8c065da73f8e541430770fd860653e4dee4a470c 100644 (file)
@@ -319,8 +319,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
        unsigned index;
        bool mask_before, mask_after;
        union kvm_ioapic_redirect_entry *e;
-       unsigned long vcpu_bitmap;
        int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
+       DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
 
        switch (ioapic->ioregsel) {
        case IOAPIC_REG_VERSION:
@@ -384,9 +384,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                        irq.shorthand = APIC_DEST_NOSHORT;
                        irq.dest_id = e->fields.dest_id;
                        irq.msi_redir_hint = false;
-                       bitmap_zero(&vcpu_bitmap, 16);
+                       bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
                        kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
-                                                &vcpu_bitmap);
+                                                vcpu_bitmap);
                        if (old_dest_mode != e->fields.dest_mode ||
                            old_dest_id != e->fields.dest_id) {
                                /*
@@ -399,10 +399,10 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                                    kvm_lapic_irq_dest_mode(
                                        !!e->fields.dest_mode);
                                kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
-                                                        &vcpu_bitmap);
+                                                        vcpu_bitmap);
                        }
                        kvm_make_scan_ioapic_request_mask(ioapic->kvm,
-                                                         &vcpu_bitmap);
+                                                         vcpu_bitmap);
                } else {
                        kvm_make_scan_ioapic_request(ioapic->kvm);
                }
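
[Note on the ioapic_write_indirect() hunk above] A single unsigned long holds only
BITS_PER_LONG vCPU bits, so treating "&vcpu_bitmap" as a KVM_MAX_VCPUS-sized bitmap
could write past the stack variable once a destination vCPU index exceeds 63 (and the
old bitmap_zero(&vcpu_bitmap, 16) cleared only 16 bits). DECLARE_BITMAP sizes the
array from the bit count. A user-space sketch of what the macro provides (paraphrased
definitions; 288 is just an example vCPU limit):

        #include <stdio.h>

        #define BITS_PER_LONG           (8 * sizeof(unsigned long))
        #define BITS_TO_LONGS(nr)       (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
        #define DECLARE_BITMAP(name, bits)      unsigned long name[BITS_TO_LONGS(bits)]

        int main(void)
        {
                DECLARE_BITMAP(vcpu_bitmap, 288);

                /* 288 bits need 5 longs on a 64-bit build; a bare
                 * "unsigned long vcpu_bitmap" holds only 64 bits. */
                printf("%zu longs (%zu bytes)\n",
                       sizeof(vcpu_bitmap) / sizeof(vcpu_bitmap[0]),
                       sizeof(vcpu_bitmap));
                return 0;
        }
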
index 2d7e61122af81566d857cde0e8540aa852fb5795..1a64ba5b9437b14f59d07ff75bda3ce3a323bdde 100644 (file)
@@ -2027,8 +2027,8 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents)
        } while (!sp->unsync_children);
 }
 
-static void mmu_sync_children(struct kvm_vcpu *vcpu,
-                             struct kvm_mmu_page *parent)
+static int mmu_sync_children(struct kvm_vcpu *vcpu,
+                            struct kvm_mmu_page *parent, bool can_yield)
 {
        int i;
        struct kvm_mmu_page *sp;
@@ -2055,12 +2055,18 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
                }
                if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
                        kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+                       if (!can_yield) {
+                               kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+                               return -EINTR;
+                       }
+
                        cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
                        flush = false;
                }
        }
 
        kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+       return 0;
 }
 
 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
@@ -2146,9 +2152,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                        kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
                }
 
-               if (sp->unsync_children)
-                       kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-
                __clear_sp_write_flooding_count(sp);
 
 trace_get_page:
@@ -3684,7 +3687,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
                write_lock(&vcpu->kvm->mmu_lock);
                kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
 
-               mmu_sync_children(vcpu, sp);
+               mmu_sync_children(vcpu, sp, true);
 
                kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
                write_unlock(&vcpu->kvm->mmu_lock);
@@ -3700,7 +3703,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
                if (IS_VALID_PAE_ROOT(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = to_shadow_page(root);
-                       mmu_sync_children(vcpu, sp);
+                       mmu_sync_children(vcpu, sp, true);
                }
        }
 
index 269f11f92fd052d5dcf2f6ed53d6f55bad21eecd..21427e84a82ef6dd2097e08e570b9cdd730a40a8 100644 (file)
@@ -164,13 +164,13 @@ void kvm_page_track_cleanup(struct kvm *kvm)
        cleanup_srcu_struct(&head->track_srcu);
 }
 
-void kvm_page_track_init(struct kvm *kvm)
+int kvm_page_track_init(struct kvm *kvm)
 {
        struct kvm_page_track_notifier_head *head;
 
        head = &kvm->arch.track_notifier_head;
-       init_srcu_struct(&head->track_srcu);
        INIT_HLIST_HEAD(&head->track_notifier_list);
+       return init_srcu_struct(&head->track_srcu);
 }
 
 /*
index 7d03e9b7ccfa9796f91434498e7ebaacf16db4a4..913d52a7923e654576b1af8ad0400a55cd36e86a 100644 (file)
@@ -707,8 +707,27 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
                if (!is_shadow_present_pte(*it.sptep)) {
                        table_gfn = gw->table_gfn[it.level - 2];
                        access = gw->pt_access[it.level - 2];
-                       sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
-                                             false, access);
+                       sp = kvm_mmu_get_page(vcpu, table_gfn, addr,
+                                             it.level-1, false, access);
+                       /*
+                        * We must synchronize the pagetable before linking it
+                        * because the guest doesn't need to flush tlb when
+                        * the gpte is changed from non-present to present.
+                        * Otherwise, the guest may use the wrong mapping.
+                        *
+                        * For PG_LEVEL_4K, kvm_mmu_get_page() has already
+                        * synchronized it transiently via kvm_sync_page().
+                        *
+                        * For higher level pagetable, we synchronize it via
+                        * the slower mmu_sync_children().  If it needs to
+                        * break, some progress has been made; return
+                        * RET_PF_RETRY and retry on the next #PF.
+                        * KVM_REQ_MMU_SYNC is not necessary but it
+                        * expedites the process.
+                        */
+                       if (sp->unsync_children &&
+                           mmu_sync_children(vcpu, sp, false))
+                               return RET_PF_RETRY;
                }
 
                /*
@@ -1047,14 +1066,6 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
- *
- * Note:
- *   We should flush all tlbs if spte is dropped even though guest is
- *   responsible for it. Since if we don't, kvm_mmu_notifier_invalidate_page
- *   and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
- *   used by guest then tlbs are not flushed, so guest is allowed to access the
- *   freed pages.
- *   And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -1107,13 +1118,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                        return 0;
 
                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-                       /*
-                        * Update spte before increasing tlbs_dirty to make
-                        * sure no tlb flush is lost after spte is zapped; see
-                        * the comments in kvm_flush_remote_tlbs().
-                        */
-                       smp_wmb();
-                       vcpu->kvm->tlbs_dirty++;
+                       set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
                        continue;
                }
 
@@ -1128,12 +1133,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
                if (gfn != sp->gfns[i]) {
                        drop_spte(vcpu->kvm, &sp->spt[i]);
-                       /*
-                        * The same as above where we are doing
-                        * prefetch_invalid_gpte().
-                        */
-                       smp_wmb();
-                       vcpu->kvm->tlbs_dirty++;
+                       set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
                        continue;
                }
 
index 2545d0c61985bd47b1d79384bfc4133cc3ce3649..510b833cbd39915c021db25d357cf0bedf777a43 100644 (file)
@@ -545,7 +545,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
                (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
                (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
 
-       svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
        svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
        svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
        svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
@@ -579,7 +578,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
 }
 
 int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
-                        struct vmcb *vmcb12)
+                        struct vmcb *vmcb12, bool from_vmrun)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        int ret;
@@ -609,13 +608,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
        nested_vmcb02_prepare_save(svm, vmcb12);
 
        ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
-                                 nested_npt_enabled(svm), true);
+                                 nested_npt_enabled(svm), from_vmrun);
        if (ret)
                return ret;
 
        if (!npt_enabled)
                vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
 
+       if (!from_vmrun)
+               kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+
        svm_set_gif(svm, true);
 
        return 0;
@@ -681,7 +683,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
 
        svm->nested.nested_run_pending = 1;
 
-       if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
+       if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
                goto out_exit_err;
 
        if (nested_svm_vmrun_msrpm(svm))
index 75e0b21ad07c9e89a54131be45bdcca3fa88af4e..c36b5fe4c27ca5bf399db828927ab02e4f7d9ec7 100644 (file)
@@ -595,43 +595,50 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
        return 0;
 }
 
-static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
+                                   int *error)
 {
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_update_vmsa vmsa;
+       struct vcpu_svm *svm = to_svm(vcpu);
+       int ret;
+
+       /* Perform some pre-encryption checks against the VMSA */
+       ret = sev_es_sync_vmsa(svm);
+       if (ret)
+               return ret;
+
+       /*
+        * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
+        * the VMSA memory content (i.e. it will write the same memory region
+        * with the guest's key), so invalidate it first.
+        */
+       clflush_cache_range(svm->vmsa, PAGE_SIZE);
+
+       vmsa.reserved = 0;
+       vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
+       vmsa.address = __sme_pa(svm->vmsa);
+       vmsa.len = PAGE_SIZE;
+       return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+}
+
+static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
        struct kvm_vcpu *vcpu;
        int i, ret;
 
        if (!sev_es_guest(kvm))
                return -ENOTTY;
 
-       vmsa.reserved = 0;
-
        kvm_for_each_vcpu(i, vcpu, kvm) {
-               struct vcpu_svm *svm = to_svm(vcpu);
-
-               /* Perform some pre-encryption checks against the VMSA */
-               ret = sev_es_sync_vmsa(svm);
+               ret = mutex_lock_killable(&vcpu->mutex);
                if (ret)
                        return ret;
 
-               /*
-                * The LAUNCH_UPDATE_VMSA command will perform in-place
-                * encryption of the VMSA memory content (i.e it will write
-                * the same memory region with the guest's key), so invalidate
-                * it first.
-                */
-               clflush_cache_range(svm->vmsa, PAGE_SIZE);
+               ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
 
-               vmsa.handle = sev->handle;
-               vmsa.address = __sme_pa(svm->vmsa);
-               vmsa.len = PAGE_SIZE;
-               ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
-                                   &argp->error);
+               mutex_unlock(&vcpu->mutex);
                if (ret)
                        return ret;
-
-               svm->vcpu.arch.guest_state_protected = true;
        }
 
        return 0;
@@ -1397,8 +1404,10 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
        /* Bind ASID to this guest */
        ret = sev_bind_asid(kvm, start.handle, error);
-       if (ret)
+       if (ret) {
+               sev_decommission(start.handle);
                goto e_free_session;
+       }
 
        params.handle = start.handle;
        if (copy_to_user((void __user *)(uintptr_t)argp->data,
@@ -1464,7 +1473,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
        /* Pin guest memory */
        guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
-                                   PAGE_SIZE, &n, 0);
+                                   PAGE_SIZE, &n, 1);
        if (IS_ERR(guest_page)) {
                ret = PTR_ERR(guest_page);
                goto e_free_trans;
@@ -1501,6 +1510,20 @@ static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
        return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
 }
 
+static bool cmd_allowed_from_mirror(u32 cmd_id)
+{
+       /*
+        * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
+        * on their own vCPUs. Also allow the debugging and status commands.
+        */
+       if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
+           cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
+           cmd_id == KVM_SEV_DBG_ENCRYPT)
+               return true;
+
+       return false;
+}
+
 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 {
        struct kvm_sev_cmd sev_cmd;
@@ -1517,8 +1540,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 
        mutex_lock(&kvm->lock);
 
-       /* enc_context_owner handles all memory enc operations */
-       if (is_mirroring_enc_context(kvm)) {
+       /* Only the enc_context_owner handles some memory enc operations. */
+       if (is_mirroring_enc_context(kvm) &&
+           !cmd_allowed_from_mirror(sev_cmd.id)) {
                r = -EINVAL;
                goto out;
        }
@@ -1715,8 +1739,7 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
 {
        struct file *source_kvm_file;
        struct kvm *source_kvm;
-       struct kvm_sev_info *mirror_sev;
-       unsigned int asid;
+       struct kvm_sev_info source_sev, *mirror_sev;
        int ret;
 
        source_kvm_file = fget(source_fd);
@@ -1739,7 +1762,8 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
                goto e_source_unlock;
        }
 
-       asid = to_kvm_svm(source_kvm)->sev_info.asid;
+       memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
+              sizeof(source_sev));
 
        /*
         * The mirror kvm holds an enc_context_owner ref so its asid can't
@@ -1759,8 +1783,16 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
        /* Set enc_context_owner and copy its encryption context over */
        mirror_sev = &to_kvm_svm(kvm)->sev_info;
        mirror_sev->enc_context_owner = source_kvm;
-       mirror_sev->asid = asid;
        mirror_sev->active = true;
+       mirror_sev->asid = source_sev.asid;
+       mirror_sev->fd = source_sev.fd;
+       mirror_sev->es_active = source_sev.es_active;
+       mirror_sev->handle = source_sev.handle;
+       /*
+        * Do not copy ap_jump_table: the mirror does not share the same
+        * KVM context as the original, and the two may have different
+        * memory views.
+        */
 
        mutex_unlock(&kvm->lock);
        return 0;
index 05e8d4d279699ee003a028eaee2a5047a38f7587..989685098b3ea7d62f251bd3b1ac39de7e2391c6 100644 (file)
@@ -1566,6 +1566,8 @@ static void svm_clear_vintr(struct vcpu_svm *svm)
 
                svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
                        V_IRQ_INJECTION_BITS_MASK;
+
+               svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
        }
 
        vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
@@ -2222,6 +2224,10 @@ static int gp_interception(struct kvm_vcpu *vcpu)
        if (error_code)
                goto reinject;
 
+       /* All SVM instructions expect page aligned RAX */
+       if (svm->vmcb->save.rax & ~PAGE_MASK)
+               goto reinject;
+
        /* Decode the instruction for usage later */
        if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
                goto reinject;
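The added check rejects a non-page-aligned RAX before any SVM instruction is emulated: the low bits of an address select the offset within a page, so `rax & ~PAGE_MASK` is non-zero exactly when the address is not page aligned. A minimal standalone version of the same test, assuming 4 KiB pages:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* True if addr sits exactly on a page boundary. */
    bool page_aligned(uint64_t addr)
    {
        return (addr & ~PAGE_MASK) == 0;
    }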
@@ -4285,43 +4291,44 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        struct kvm_host_map map_save;
        int ret;
 
-       if (is_guest_mode(vcpu)) {
-               /* FED8h - SVM Guest */
-               put_smstate(u64, smstate, 0x7ed8, 1);
-               /* FEE0h - SVM Guest VMCB Physical Address */
-               put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
+       if (!is_guest_mode(vcpu))
+               return 0;
 
-               svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
-               svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-               svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+       /* FED8h - SVM Guest */
+       put_smstate(u64, smstate, 0x7ed8, 1);
+       /* FEE0h - SVM Guest VMCB Physical Address */
+       put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
 
-               ret = nested_svm_vmexit(svm);
-               if (ret)
-                       return ret;
+       svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+       svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+       svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
 
-               /*
-                * KVM uses VMCB01 to store L1 host state while L2 runs but
-                * VMCB01 is going to be used during SMM and thus the state will
-                * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
-                * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
-                * format of the area is identical to the guest save area offset
-                * by 0x400 (matches the offset of 'struct vmcb_save_area'
-                * within 'struct vmcb'). Note: HSAVE area may also be used by
-                * L1 hypervisor to save additional host context (e.g. KVM does
-                * that, see svm_prepare_guest_switch()) which must be
-                * preserved.
-                */
-               if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-                                &map_save) == -EINVAL)
-                       return 1;
+       ret = nested_svm_vmexit(svm);
+       if (ret)
+               return ret;
 
-               BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
+       /*
+        * KVM uses VMCB01 to store L1 host state while L2 runs but
+        * VMCB01 is going to be used during SMM and thus the state will
+        * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
+        * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
+        * format of the area is identical to the guest save area offset
+        * by 0x400 (matches the offset of 'struct vmcb_save_area'
+        * within 'struct vmcb'). Note: HSAVE area may also be used by
+        * L1 hypervisor to save additional host context (e.g. KVM does
+        * that, see svm_prepare_guest_switch()) which must be
+        * preserved.
+        */
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+                        &map_save) == -EINVAL)
+               return 1;
 
-               svm_copy_vmrun_state(map_save.hva + 0x400,
-                                    &svm->vmcb01.ptr->save);
+       BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
 
-               kvm_vcpu_unmap(vcpu, &map_save, true);
-       }
+       svm_copy_vmrun_state(map_save.hva + 0x400,
+                            &svm->vmcb01.ptr->save);
+
+       kvm_vcpu_unmap(vcpu, &map_save, true);
        return 0;
 }
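The flattened svm_enter_smm() keeps the BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400) check, which turns the layout assumption behind the `+ 0x400` arithmetic into a compile-time failure. The same idea in standard C11, with a made-up structure purely for illustration:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct save_area_example {
        uint8_t control[0x400];   /* must stay exactly 0x400 bytes */
        uint8_t save[0x298];
    };

    /* Compilation fails if someone breaks the layout assumption. */
    static_assert(offsetof(struct save_area_example, save) == 0x400,
                  "save area must start at offset 0x400");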
 
@@ -4329,50 +4336,54 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_host_map map, map_save;
-       int ret = 0;
+       u64 saved_efer, vmcb12_gpa;
+       struct vmcb *vmcb12;
+       int ret;
 
-       if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
-               u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
-               u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
-               u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
-               struct vmcb *vmcb12;
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+               return 0;
 
-               if (guest) {
-                       if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-                               return 1;
+       /* Non-zero if SMI arrived while vCPU was in guest mode. */
+       if (!GET_SMSTATE(u64, smstate, 0x7ed8))
+               return 0;
 
-                       if (!(saved_efer & EFER_SVME))
-                               return 1;
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+               return 1;
 
-                       if (kvm_vcpu_map(vcpu,
-                                        gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
-                               return 1;
+       saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+       if (!(saved_efer & EFER_SVME))
+               return 1;
 
-                       if (svm_allocate_nested(svm))
-                               return 1;
+       vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
+               return 1;
 
-                       vmcb12 = map.hva;
+       ret = 1;
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
+               goto unmap_map;
 
-                       nested_load_control_from_vmcb12(svm, &vmcb12->control);
+       if (svm_allocate_nested(svm))
+               goto unmap_save;
 
-                       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
-                       kvm_vcpu_unmap(vcpu, &map, true);
+       /*
+        * Restore L1 host state from L1 HSAVE area as VMCB01 was
+        * used during SMM (see svm_enter_smm())
+        */
 
-                       /*
-                        * Restore L1 host state from L1 HSAVE area as VMCB01 was
-                        * used during SMM (see svm_enter_smm())
-                        */
-                       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-                                        &map_save) == -EINVAL)
-                               return 1;
+       svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
 
-                       svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
-                                            map_save.hva + 0x400);
+       /*
+        * Enter the nested guest now
+        */
 
-                       kvm_vcpu_unmap(vcpu, &map_save, true);
-               }
-       }
+       vmcb12 = map.hva;
+       nested_load_control_from_vmcb12(svm, &vmcb12->control);
+       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
 
+unmap_save:
+       kvm_vcpu_unmap(vcpu, &map_save, true);
+unmap_map:
+       kvm_vcpu_unmap(vcpu, &map, true);
        return ret;
 }
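The rewritten svm_leave_smm() replaces the nested ifs with early returns plus the unmap_save/unmap_map labels, so both guest mappings are released on every exit path. A small sketch of that goto-based unwind pattern with ordinary userspace resources:

    #include <stdio.h>
    #include <stdlib.h>

    /* Acquire two resources; release them in reverse order on any failure. */
    int setup_both(void)
    {
        int ret = -1;

        FILE *a = fopen("/tmp/first", "r");
        if (!a)
            return -1;

        char *b = malloc(128);
        if (!b)
            goto close_a;

        /* ... use a and b ... */
        ret = 0;

        free(b);
    close_a:
        fclose(a);
        return ret;
    }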
 
index 524d943f3efc6a60b6b97a24d243094b694dc397..128a54b1fbf141214a2b47acd42d68babfa36d65 100644 (file)
@@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
        return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
 }
 
-int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
+int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
+                        u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
 void svm_leave_nested(struct vcpu_svm *svm);
 void svm_free_nested(struct vcpu_svm *svm);
 int svm_allocate_nested(struct vcpu_svm *svm);
index 0dab1b7b529f409b352891ae7addc21e5e3cd1f2..ba6f99f584ac33e1f0053bf0200cb4f6668aba98 100644 (file)
@@ -353,14 +353,20 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
        switch (msr_index) {
        case MSR_IA32_VMX_EXIT_CTLS:
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:
-               ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+               ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
                break;
        case MSR_IA32_VMX_ENTRY_CTLS:
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
-               ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+               ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
                break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:
-               ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+               ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
+               break;
+       case MSR_IA32_VMX_PINBASED_CTLS:
+               ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+               break;
+       case MSR_IA32_VMX_VMFUNC:
+               ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC;
                break;
        }
 
index ccb03d69546ce716b8ed5026fc89ce35beaa20e4..eedcebf5800412433c44a8b63698ad165f4d4097 100644 (file)
@@ -2583,8 +2583,13 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
         * Guest state is invalid and unrestricted guest is disabled,
         * which means L1 attempted VMEntry to L2 with invalid state.
         * Fail the VMEntry.
+        *
+        * However, when force loading the guest state (SMM exit or
+        * loading nested state after migration), it is possible to
+        * have invalid guest state now, which will be fixed later by
+        * restoring the L2 register state.
         */
-       if (CC(!vmx_guest_state_valid(vcpu))) {
+       if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
                *entry_failure_code = ENTRY_FAIL_DEFAULT;
                return -EINVAL;
        }
@@ -4351,6 +4356,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
        if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
                                vmcs12->vm_exit_msr_load_count))
                nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
+
+       to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
 }
 
 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
@@ -4899,14 +4906,7 @@ out_vmcs02:
        return -ENOMEM;
 }
 
-/*
- * Emulate the VMXON instruction.
- * Currently, we just remember that VMX is active, and do not save or even
- * inspect the argument to VMXON (the so-called "VMXON pointer") because we
- * do not currently need to store anything in that guest-allocated memory
- * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
- * argument is different from the VMXON pointer (which the spec says they do).
- */
+/* Emulate the VMXON instruction. */
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
        int ret;
@@ -5903,6 +5903,12 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
        case EXIT_REASON_VMFUNC:
                /* VM functions are emulated through L2->L0 vmexits. */
                return true;
+       case EXIT_REASON_BUS_LOCK:
+               /*
+                * At present, bus lock VM exit is never exposed to L1.
+                * Handle L2's bus locks in L0 directly.
+                */
+               return true;
        default:
                break;
        }
index 0c2c0d5ae8734b0da27563f5e9f97077b6d6d041..116b08904ac34655e13ecced704763e03e6fc14b 100644 (file)
@@ -1323,7 +1323,7 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
        vmx_prepare_switch_to_host(to_vmx(vcpu));
 }
 
-static bool emulation_required(struct kvm_vcpu *vcpu)
+bool vmx_emulation_required(struct kvm_vcpu *vcpu)
 {
        return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
 }
@@ -1367,7 +1367,7 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
        vmcs_writel(GUEST_RFLAGS, rflags);
 
        if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
-               vmx->emulation_required = emulation_required(vcpu);
+               vmx->emulation_required = vmx_emulation_required(vcpu);
 }
 
 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
@@ -1837,10 +1837,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                    &msr_info->data))
                        return 1;
                /*
-                * Enlightened VMCS v1 doesn't have certain fields, but buggy
-                * Hyper-V versions are still trying to use corresponding
-                * features when they are exposed. Filter out the essential
-                * minimum.
+                * Enlightened VMCS v1 doesn't have certain VMCS fields, but
+                * instead of simply ignoring the missing features, different
+                * Hyper-V versions either try to use them and fail, or do some
+                * sanity checking and refuse to boot. Filter all unsupported
+                * features out.
                 */
                if (!msr_info->host_initiated &&
                    vmx->nested.enlightened_vmcs_enabled)
@@ -3077,7 +3078,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        }
 
        /* depends on vcpu->arch.cr0 to be set to a new value */
-       vmx->emulation_required = emulation_required(vcpu);
+       vmx->emulation_required = vmx_emulation_required(vcpu);
 }
 
 static int vmx_get_max_tdp_level(void)
@@ -3330,7 +3331,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int
 {
        __vmx_set_segment(vcpu, var, seg);
 
-       to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
+       to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
 }
 
 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -6621,10 +6622,24 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                     vmx->loaded_vmcs->soft_vnmi_blocked))
                vmx->loaded_vmcs->entry_time = ktime_get();
 
-       /* Don't enter VMX if guest state is invalid, let the exit handler
-          start emulation until we arrive back to a valid state */
-       if (vmx->emulation_required)
+       /*
+        * Don't enter VMX if guest state is invalid; let the exit handler
+        * start emulation until we arrive back to a valid state.  Synthesize a
+        * consistency check VM-Exit due to invalid guest state and bail.
+        */
+       if (unlikely(vmx->emulation_required)) {
+
+               /* We don't emulate invalid state of a nested guest */
+               vmx->fail = is_guest_mode(vcpu);
+
+               vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
+               vmx->exit_reason.failed_vmentry = 1;
+               kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
+               vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
+               kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
+               vmx->exit_intr_info = 0;
                return EXIT_FASTPATH_NONE;
+       }
 
        trace_kvm_entry(vcpu);
 
@@ -6833,7 +6848,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
                 */
                tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
                if (tsx_ctrl)
-                       vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+                       tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
        }
 
        err = alloc_loaded_vmcs(&vmx->vmcs01);
index 4858c5fd95f27dd7b393c6c04c013bacae15a878..592217fd7d920229ff7c1bbc185a68d867254211 100644 (file)
@@ -248,12 +248,8 @@ struct vcpu_vmx {
         * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
         * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
         * be loaded into hardware if those conditions aren't met.
-        * nr_active_uret_msrs tracks the number of MSRs that need to be loaded
-        * into hardware when running the guest.  guest_uret_msrs[] is resorted
-        * whenever the number of "active" uret MSRs is modified.
         */
        struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
-       int                   nr_active_uret_msrs;
        bool                  guest_uret_msrs_loaded;
 #ifdef CONFIG_X86_64
        u64                   msr_host_kernel_gs_base;
@@ -359,6 +355,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
                        unsigned long fs_base, unsigned long gs_base);
 int vmx_get_cpl(struct kvm_vcpu *vcpu);
+bool vmx_emulation_required(struct kvm_vcpu *vcpu);
 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
index 28ef14155726461ec935c711ce8cc0ebe22c0bdf..aabd3a2ec1bc73eda8dcc86e90a65a3964c19468 100644 (file)
@@ -1332,6 +1332,13 @@ static const u32 msrs_to_save_all[] = {
        MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
        MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
        MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
+
+       MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
+       MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
+       MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
+       MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
+       MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
+       MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
 };
 
 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
@@ -2969,7 +2976,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                                       offsetof(struct compat_vcpu_info, time));
        if (vcpu->xen.vcpu_time_info_set)
                kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
-       if (v == kvm_get_vcpu(v->kvm, 0))
+       if (!v->vcpu_idx)
                kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
        return 0;
 }
@@ -7658,6 +7665,13 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
 
                /* Process a latched INIT or SMI, if any.  */
                kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+               /*
+                * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
+                * on SMM exit we still need to reload them from
+                * guest memory
+                */
+               vcpu->arch.pdptrs_from_userspace = false;
        }
 
        kvm_mmu_reset_context(vcpu);
@@ -10652,6 +10666,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        int r;
 
        vcpu->arch.last_vmentry_cpu = -1;
+       vcpu->arch.regs_avail = ~0;
+       vcpu->arch.regs_dirty = ~0;
 
        if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -10893,6 +10909,9 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
        kvm_rip_write(vcpu, 0xfff0);
 
+       vcpu->arch.cr3 = 0;
+       kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
+
        /*
         * CR0.CD/NW are set on RESET, preserved on INIT.  Note, some versions
         * of Intel's SDM list CD/NW as being set on INIT, but they contradict
@@ -11139,9 +11158,15 @@ void kvm_arch_free_vm(struct kvm *kvm)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
+       int ret;
+
        if (type)
                return -EINVAL;
 
+       ret = kvm_page_track_init(kvm);
+       if (ret)
+               return ret;
+
        INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
@@ -11174,7 +11199,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        kvm_apicv_init(kvm);
        kvm_hv_init_vm(kvm);
-       kvm_page_track_init(kvm);
        kvm_mmu_init_vm(kvm);
        kvm_xen_init_vm(kvm);
 
index 058f19b20465f7b084cefcaa020518c4a433eaac..c565def611e2492788f557f16d2e28d84b48f458 100644 (file)
        ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
 
 #define __get_next(t, insn)    \
-       ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
+       ({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
 
 #define __peek_nbyte_next(t, insn, n)  \
-       ({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })
+       ({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); })
 
 #define get_next(t, insn)      \
        ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
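The decoder macros now copy the next bytes with memcpy() instead of dereferencing a cast pointer: the instruction stream is byte-oriented, so `*(t *)p` may be a misaligned access and is a strict-aliasing violation, while a fixed-size memcpy() compiles down to the same load without the undefined behaviour. A standalone reader built the same way (le32toh() here is the glibc/BSD helper):

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    /* Read a little-endian u32 from an arbitrarily aligned buffer. */
    uint32_t read_le32(const unsigned char *p)
    {
        uint32_t v;

        memcpy(&v, p, sizeof(v));   /* safe for any alignment */
        return le32toh(v);
    }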
index b2eefdefc1083316ea27e6bfd8bb1ddbcea715ab..84a2c8c4af7358b67c3ab3257103a76559087e20 100644 (file)
@@ -710,7 +710,8 @@ oops:
 
 static noinline void
 kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
-                        unsigned long address, int signal, int si_code)
+                        unsigned long address, int signal, int si_code,
+                        u32 pkey)
 {
        WARN_ON_ONCE(user_mode(regs));
 
@@ -735,8 +736,12 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
 
                        set_signal_archinfo(address, error_code);
 
-                       /* XXX: hwpoison faults will set the wrong code. */
-                       force_sig_fault(signal, si_code, (void __user *)address);
+                       if (si_code == SEGV_PKUERR) {
+                               force_sig_pkuerr((void __user *)address, pkey);
+                       } else {
+                               /* XXX: hwpoison faults will set the wrong code. */
+                               force_sig_fault(signal, si_code, (void __user *)address);
+                       }
                }
 
                /*
@@ -798,7 +803,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
        struct task_struct *tsk = current;
 
        if (!user_mode(regs)) {
-               kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code);
+               kernelmode_fixup_or_oops(regs, error_code, address,
+                                        SIGSEGV, si_code, pkey);
                return;
        }
 
@@ -930,7 +936,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 {
        /* Kernel mode? Handle exceptions or die: */
        if (!user_mode(regs)) {
-               kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR);
+               kernelmode_fixup_or_oops(regs, error_code, address,
+                                        SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
                return;
        }
 
@@ -1396,7 +1403,8 @@ good_area:
                 */
                if (!user_mode(regs))
                        kernelmode_fixup_or_oops(regs, error_code, address,
-                                                SIGBUS, BUS_ADRERR);
+                                                SIGBUS, BUS_ADRERR,
+                                                ARCH_DEFAULT_PKEY);
                return;
        }
 
@@ -1416,7 +1424,8 @@ good_area:
                return;
 
        if (fatal_signal_pending(current) && !user_mode(regs)) {
-               kernelmode_fixup_or_oops(regs, error_code, address, 0, 0);
+               kernelmode_fixup_or_oops(regs, error_code, address,
+                                        0, 0, ARCH_DEFAULT_PKEY);
                return;
        }
 
@@ -1424,7 +1433,8 @@ good_area:
                /* Kernel mode? Handle exceptions or die: */
                if (!user_mode(regs)) {
                        kernelmode_fixup_or_oops(regs, error_code, address,
-                                                SIGSEGV, SEGV_MAPERR);
+                                                SIGSEGV, SEGV_MAPERR,
+                                                ARCH_DEFAULT_PKEY);
                        return;
                }
 
index a6e11763763fb3de079e0971c4d6e518492ea997..36098226a95731b5f91a2f2ab156bb9e25716aca 100644 (file)
@@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
                return 0;
 
        p4d = p4d_offset(pgd, addr);
-       if (p4d_none(*p4d))
+       if (!p4d_present(*p4d))
                return 0;
 
        pud = pud_offset(p4d, addr);
-       if (pud_none(*pud))
+       if (!pud_present(*pud))
                return 0;
 
        if (pud_large(*pud))
                return pfn_valid(pud_pfn(*pud));
 
        pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd))
+       if (!pmd_present(*pmd))
                return 0;
 
        if (pmd_large(*pmd))
index 1a50434c8a4dab44d6d23742d3bcd2dbbc1e2d99..ef885370719a605876b3375e22db72f8d025cb64 100644 (file)
@@ -49,8 +49,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
                        p = early_alloc(PMD_SIZE, nid, false);
                        if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
                                return;
-                       else if (p)
-                               memblock_free(__pa(p), PMD_SIZE);
+                       memblock_free_ptr(p, PMD_SIZE);
                }
 
                p = early_alloc(PAGE_SIZE, nid, true);
@@ -86,8 +85,7 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
                        p = early_alloc(PUD_SIZE, nid, false);
                        if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
                                return;
-                       else if (p)
-                               memblock_free(__pa(p), PUD_SIZE);
+                       memblock_free_ptr(p, PUD_SIZE);
                }
 
                p = early_alloc(PAGE_SIZE, nid, true);
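Dropping the `else if (p)` guard works because memblock_free_ptr() is expected to tolerate a NULL pointer, the same convention free(NULL) follows, so callers no longer need the check. A tiny sketch of such a NULL-tolerant helper (names are illustrative, not the memblock API):

    #include <stdlib.h>

    /* Like free(): a no-op when passed NULL, so callers need no NULL check. */
    void buf_free(void *p, size_t size)
    {
        (void)size;   /* a real allocator would return 'size' bytes to its pool */
        if (!p)
            return;
        free(p);
    }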
index a1b5c71099e61d50095666e18e16e6b921c5d664..1e9b93b088dbf0dc454d2ebb30dcab055d1f7f59 100644 (file)
@@ -355,7 +355,7 @@ void __init numa_reset_distance(void)
 
        /* numa_distance could be 1LU marking allocation failure, test cnt */
        if (numa_distance_cnt)
-               memblock_free(__pa(numa_distance), size);
+               memblock_free_ptr(numa_distance, size);
        numa_distance_cnt = 0;
        numa_distance = NULL;   /* enable table creation */
 }
index 737491b13728c3ba88146d8ba728abbc2cdd92db..e801e30089c436087681b8f09ad88e76d3046d56 100644 (file)
@@ -517,8 +517,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
        }
 
        /* free the copied physical distance table */
-       if (phys_dist)
-               memblock_free(__pa(phys_dist), phys_size);
+       memblock_free_ptr(phys_dist, phys_size);
        return;
 
 no_emu:
index 3112ca7786ed1462686fb8fbae96b621d3334195..4ba2a3ee4bce1258f0b86a47a47c823e3ff2eb49 100644 (file)
@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
        int err = 0;
 
        start = sanitize_phys(start);
-       end = sanitize_phys(end);
+
+       /*
+        * The end address passed into this function is exclusive, but
+        * sanitize_phys() expects an inclusive address.
+        */
+       end = sanitize_phys(end - 1) + 1;
        if (start >= end) {
                WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
                                start, end - 1, cattr_name(req_type));
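The memtype_reserve() fix accounts for the end address being exclusive while sanitize_phys() takes an inclusive one, so the value is converted with `end - 1` and converted back with `+ 1`; this also keeps a range that touches the top of the address space from being mangled. The same conversion around a hypothetical clamp helper (assumes end > 0):

    #include <stdint.h>

    #define MAX_VALID_ADDR 0x0000ffffffffffffULL   /* example limit */

    /* Clamp an *inclusive* address to the supported range. */
    static uint64_t clamp_inclusive(uint64_t addr)
    {
        return addr > MAX_VALID_ADDR ? MAX_VALID_ADDR : addr;
    }

    /* Callers pass an exclusive end, so convert before and after clamping. */
    uint64_t clamp_exclusive_end(uint64_t end)
    {
        return clamp_inclusive(end - 1) + 1;
    }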
index 0fe6aacef3db2bd2203d687e91c88ef09e173385..9ea57389c554b7313d726e780fc51fea6977a761 100644 (file)
@@ -1341,9 +1341,10 @@ st:                      if (is_imm8(insn->off))
                        if (insn->imm == (BPF_AND | BPF_FETCH) ||
                            insn->imm == (BPF_OR | BPF_FETCH) ||
                            insn->imm == (BPF_XOR | BPF_FETCH)) {
-                               u8 *branch_target;
                                bool is64 = BPF_SIZE(insn->code) == BPF_DW;
                                u32 real_src_reg = src_reg;
+                               u32 real_dst_reg = dst_reg;
+                               u8 *branch_target;
 
                                /*
                                 * Can't be implemented with a single x86 insn.
@@ -1354,11 +1355,13 @@ st:                     if (is_imm8(insn->off))
                                emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
                                if (src_reg == BPF_REG_0)
                                        real_src_reg = BPF_REG_AX;
+                               if (dst_reg == BPF_REG_0)
+                                       real_dst_reg = BPF_REG_AX;
 
                                branch_target = prog;
                                /* Load old value */
                                emit_ldx(&prog, BPF_SIZE(insn->code),
-                                        BPF_REG_0, dst_reg, insn->off);
+                                        BPF_REG_0, real_dst_reg, insn->off);
                                /*
                                 * Perform the (commutative) operation locally,
                                 * put the result in the AUX_REG.
@@ -1369,7 +1372,8 @@ st:                       if (is_imm8(insn->off))
                                      add_2reg(0xC0, AUX_REG, real_src_reg));
                                /* Attempt to swap in new value */
                                err = emit_atomic(&prog, BPF_CMPXCHG,
-                                                 dst_reg, AUX_REG, insn->off,
+                                                 real_dst_reg, AUX_REG,
+                                                 insn->off,
                                                  BPF_SIZE(insn->code));
                                if (WARN_ON(err))
                                        return err;
@@ -1383,11 +1387,10 @@ st:                     if (is_imm8(insn->off))
                                /* Restore R0 after clobbering RAX */
                                emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
                                break;
-
                        }
 
                        err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
-                                                 insn->off, BPF_SIZE(insn->code));
+                                         insn->off, BPF_SIZE(insn->code));
                        if (err)
                                return err;
                        break;
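For BPF_{AND,OR,XOR} with BPF_FETCH the JIT has to emit a load / modify / CMPXCHG retry loop, since x86 has no single fetching instruction for those operations, and the fix above makes the loop use real_dst_reg whenever R0 had to be moved aside. A C rendering of the same retry loop, using the GCC/Clang __atomic builtins:

    #include <stdint.h>

    /* Atomically OR 'val' into *addr and return the previous value,
     * via a compare-and-swap retry loop (what the JIT emits on x86). */
    uint64_t fetch_or_u64(uint64_t *addr, uint64_t val)
    {
        uint64_t old = __atomic_load_n(addr, __ATOMIC_RELAXED);

        while (!__atomic_compare_exchange_n(addr, &old, old | val,
                                            /*weak=*/1,
                                            __ATOMIC_SEQ_CST,
                                            __ATOMIC_RELAXED))
            ;   /* 'old' is refreshed on failure; retry */
        return old;
    }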
@@ -1744,7 +1747,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
 }
 
 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
-                          struct bpf_prog *p, int stack_size, bool mod_ret)
+                          struct bpf_prog *p, int stack_size, bool save_ret)
 {
        u8 *prog = *pprog;
        u8 *jmp_insn;
@@ -1777,11 +1780,15 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
        if (emit_call(&prog, p->bpf_func, prog))
                return -EINVAL;
 
-       /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return value
+       /*
+        * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return value
         * of the previous call which is then passed on the stack to
         * the next BPF program.
+        *
+        * BPF_TRAMP_FENTRY trampoline may need to return the return
+        * value of BPF_PROG_TYPE_STRUCT_OPS prog.
         */
-       if (mod_ret)
+       if (save_ret)
                emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
 
        /* replace 2 nops with JE insn, since jmp target is known */
@@ -1828,13 +1835,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
 }
 
 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
-                     struct bpf_tramp_progs *tp, int stack_size)
+                     struct bpf_tramp_progs *tp, int stack_size,
+                     bool save_ret)
 {
        int i;
        u8 *prog = *pprog;
 
        for (i = 0; i < tp->nr_progs; i++) {
-               if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
+               if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
+                                   save_ret))
                        return -EINVAL;
        }
        *pprog = prog;
@@ -1877,6 +1886,23 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
        return 0;
 }
 
+static bool is_valid_bpf_tramp_flags(unsigned int flags)
+{
+       if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
+           (flags & BPF_TRAMP_F_SKIP_FRAME))
+               return false;
+
+       /*
+        * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
+        * and it must be used alone.
+        */
+       if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
+           (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
+               return false;
+
+       return true;
+}
+
 /* Example:
  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
  * its 'struct btf_func_model' will be nr_args=2
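The new is_valid_bpf_tramp_flags() helper centralizes two rules: RESTORE_REGS and SKIP_FRAME must not be combined, and RET_FENTRY_RET must appear alone. The `(flags & F) && (flags & ~F)` test is the usual way to say "F excludes every other flag"; a standalone version with made-up flag names:

    #include <stdbool.h>

    #define FLAG_RESTORE   0x1u
    #define FLAG_SKIP      0x2u
    #define FLAG_EXCLUSIVE 0x4u   /* must be used on its own */

    bool flags_valid(unsigned int flags)
    {
        if ((flags & FLAG_RESTORE) && (flags & FLAG_SKIP))
            return false;

        /* FLAG_EXCLUSIVE combined with any other bit is rejected. */
        if ((flags & FLAG_EXCLUSIVE) && (flags & ~FLAG_EXCLUSIVE))
            return false;

        return true;
    }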
@@ -1949,17 +1975,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
        struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
        u8 **branches = NULL;
        u8 *prog;
+       bool save_ret;
 
        /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
        if (nr_args > 6)
                return -ENOTSUPP;
 
-       if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
-           (flags & BPF_TRAMP_F_SKIP_FRAME))
+       if (!is_valid_bpf_tramp_flags(flags))
                return -EINVAL;
 
-       if (flags & BPF_TRAMP_F_CALL_ORIG)
-               stack_size += 8; /* room for return value of orig_call */
+       /* room for return value of orig_call or fentry prog */
+       save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+       if (save_ret)
+               stack_size += 8;
 
        if (flags & BPF_TRAMP_F_IP_ARG)
                stack_size += 8; /* room for IP address argument */
@@ -2005,7 +2033,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
        }
 
        if (fentry->nr_progs)
-               if (invoke_bpf(m, &prog, fentry, stack_size))
+               if (invoke_bpf(m, &prog, fentry, stack_size,
+                              flags & BPF_TRAMP_F_RET_FENTRY_RET))
                        return -EINVAL;
 
        if (fmod_ret->nr_progs) {
@@ -2052,7 +2081,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
        }
 
        if (fexit->nr_progs)
-               if (invoke_bpf(m, &prog, fexit, stack_size)) {
+               if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
                        ret = -EINVAL;
                        goto cleanup;
                }
@@ -2072,9 +2101,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
                        ret = -EINVAL;
                        goto cleanup;
                }
-               /* restore original return value back into RAX */
-               emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
        }
+       /* restore return value of orig_call or fentry prog back into RAX */
+       if (save_ret)
+               emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
 
        EMIT1(0x5B); /* pop rbx */
        EMIT1(0xC9); /* leave */
index 3d41a09c2c14ca58f9303166103af28b2a0934db..5debe4ac6f8192b4c3ed89e10e8babd9919616f6 100644 (file)
@@ -113,7 +113,7 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
                                 false /* no mapping of GSI to PIRQ */);
 }
 
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
 static int xen_register_gsi(u32 gsi, int triggering, int polarity)
 {
        int rc, irq;
@@ -261,7 +261,7 @@ error:
        return irq;
 }
 
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
 static bool __read_mostly pci_seg_supported = true;
 
 static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
@@ -375,10 +375,10 @@ static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
                WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
        }
 }
-#else /* CONFIG_XEN_DOM0 */
+#else /* CONFIG_XEN_PV_DOM0 */
 #define xen_initdom_setup_msi_irqs     NULL
 #define xen_initdom_restore_msi_irqs   NULL
-#endif /* !CONFIG_XEN_DOM0 */
+#endif /* !CONFIG_XEN_PV_DOM0 */
 
 static void xen_teardown_msi_irqs(struct pci_dev *dev)
 {
@@ -555,7 +555,7 @@ int __init pci_xen_hvm_init(void)
        return 0;
 }
 
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
 int __init pci_xen_initial_domain(void)
 {
        int irq;
@@ -583,6 +583,9 @@ int __init pci_xen_initial_domain(void)
        }
        return 0;
 }
+#endif
+
+#ifdef CONFIG_XEN_DOM0
 
 struct xen_device_domain_owner {
        domid_t domain;
@@ -656,4 +659,4 @@ int xen_unregister_device_domain_owner(struct pci_dev *dev)
        return 0;
 }
 EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
-#endif
+#endif /* CONFIG_XEN_DOM0 */
index ee2beda590d0d0156e2dd0f0febf45b71452cdd3..1d4a00e767ece00edc2a0ed8b3fcdfe160ee7b54 100644 (file)
@@ -274,7 +274,7 @@ static struct olpc_ec_driver ec_xo1_driver = {
 
 static struct olpc_ec_driver ec_xo1_5_driver = {
        .ec_cmd = olpc_xo1_ec_cmd,
-#ifdef CONFIG_OLPC_XO1_5_SCI
+#ifdef CONFIG_OLPC_XO15_SCI
        /*
         * XO-1.5 EC wakeups are available when olpc-xo15-sci driver is
         * compiled in
index 9ac7457f52a3cd1d4c8e32585b3a18c44a415b5b..ed0442e354344fad84fce7fcc29c67934828a9f9 100644 (file)
 /*
  * PVH variables.
  *
- * pvh_bootparams and pvh_start_info need to live in the data segment since
+ * pvh_bootparams and pvh_start_info need to live in a data segment since
  * they are used after startup_{32|64}, which clear .bss, are invoked.
  */
-struct boot_params pvh_bootparams __section(".data");
-struct hvm_start_info pvh_start_info __section(".data");
+struct boot_params __initdata pvh_bootparams;
+struct hvm_start_info __initdata pvh_start_info;
 
-unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
+const unsigned int __initconst pvh_start_info_sz = sizeof(pvh_start_info);
 
-static u64 pvh_get_root_pointer(void)
+static u64 __init pvh_get_root_pointer(void)
 {
        return pvh_start_info.rsdp_paddr;
 }
@@ -107,7 +107,7 @@ void __init __weak xen_pvh_init(struct boot_params *boot_params)
        BUG();
 }
 
-static void hypervisor_specific_init(bool xen_guest)
+static void __init hypervisor_specific_init(bool xen_guest)
 {
        if (xen_guest)
                xen_pvh_init(&pvh_bootparams);
index afc1da68b06d80bcd8afda6989689f400f90af56..6bcd3d8ca6ac5fa46290b369e0119d9ea652c7d8 100644 (file)
@@ -43,13 +43,9 @@ config XEN_PV_SMP
        def_bool y
        depends on XEN_PV && SMP
 
-config XEN_DOM0
-       bool "Xen PV Dom0 support"
-       default y
-       depends on XEN_PV && PCI_XEN && SWIOTLB_XEN
-       depends on X86_IO_APIC && ACPI && PCI
-       help
-         Support running as a Xen PV Dom0 guest.
+config XEN_PV_DOM0
+       def_bool y
+       depends on XEN_PV && XEN_DOM0
 
 config XEN_PVHVM
        def_bool y
@@ -86,3 +82,12 @@ config XEN_PVH
        def_bool n
        help
          Support for running as a Xen PVH guest.
+
+config XEN_DOM0
+       bool "Xen Dom0 support"
+       default XEN_PV
+       depends on (XEN_PV && SWIOTLB_XEN) || (XEN_PVH && X86_64)
+       depends on X86_IO_APIC && ACPI && PCI
+       select X86_X2APIC if XEN_PVH && X86_64
+       help
+         Support running as a Xen Dom0 guest.
index 40b5779fce21cf85be58b626ecb4e7245ff75a4f..4953260e281c38d651b65cc0affaba99d7ecf645 100644 (file)
@@ -45,7 +45,7 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 
 obj-$(CONFIG_XEN_DEBUG_FS)     += debugfs.o
 
-obj-$(CONFIG_XEN_DOM0)         += vga.o
+obj-$(CONFIG_XEN_PV_DOM0)      += vga.o
 
 obj-$(CONFIG_SWIOTLB_XEN)      += pci-swiotlb-xen.o
 
index c79bd0af2e8c26d819fdda1b07072c52b256effa..95d970359e1746e230eadd5c8e0ae776ad3720ba 100644 (file)
@@ -3,6 +3,7 @@
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
 #include <linux/memblock.h>
 #endif
+#include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/kexec.h>
 #include <linux/slab.h>
 
 #include <xen/xen.h>
 #include <xen/features.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/version.h>
 #include <xen/page.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/cpu.h>
 #include <asm/e820/api.h> 
+#include <asm/setup.h>
 
 #include "xen-ops.h"
 #include "smp.h"
@@ -52,9 +56,6 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
 
-enum xen_domain_type xen_domain_type = XEN_NATIVE;
-EXPORT_SYMBOL_GPL(xen_domain_type);
-
 unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
 EXPORT_SYMBOL(machine_to_phys_mapping);
 unsigned long  machine_to_phys_nr;
@@ -69,10 +70,12 @@ __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 
 /*
- * NB: needs to live in .data because it's used by xen_prepare_pvh which runs
- * before clearing the bss.
+ * NB: These need to live in .data (or similar) because they're used by
+ * xen_prepare_pvh() which runs before clearing the bss.
  */
-uint32_t xen_start_flags __section(".data") = 0;
+enum xen_domain_type __ro_after_init xen_domain_type = XEN_NATIVE;
+EXPORT_SYMBOL_GPL(xen_domain_type);
+uint32_t __ro_after_init xen_start_flags;
 EXPORT_SYMBOL(xen_start_flags);
 
 /*
@@ -258,6 +261,45 @@ int xen_vcpu_setup(int cpu)
        return ((per_cpu(xen_vcpu, cpu) == NULL) ? -ENODEV : 0);
 }
 
+void __init xen_banner(void)
+{
+       unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
+       struct xen_extraversion extra;
+
+       HYPERVISOR_xen_version(XENVER_extraversion, &extra);
+
+       pr_info("Booting kernel on %s\n", pv_info.name);
+       pr_info("Xen version: %u.%u%s%s\n",
+               version >> 16, version & 0xffff, extra.extraversion,
+               xen_feature(XENFEAT_mmu_pt_update_preserve_ad)
+               ? " (preserve-AD)" : "");
+}
+
+/* Check if running on Xen version (major, minor) or later */
+bool xen_running_on_version_or_later(unsigned int major, unsigned int minor)
+{
+       unsigned int version;
+
+       if (!xen_domain())
+               return false;
+
+       version = HYPERVISOR_xen_version(XENVER_version, NULL);
+       if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
+               ((version >> 16) > major))
+               return true;
+       return false;
+}
+
+void __init xen_add_preferred_consoles(void)
+{
+       add_preferred_console("xenboot", 0, NULL);
+       if (!boot_params.screen_info.orig_video_isVGA)
+               add_preferred_console("tty", 0, NULL);
+       add_preferred_console("hvc", 0, NULL);
+       if (boot_params.screen_info.orig_video_isVGA)
+               add_preferred_console("tty", 0, NULL);
+}
+
 void xen_reboot(int reason)
 {
        struct sched_shutdown r = { .reason = reason };
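xen_running_on_version_or_later() relies on XENVER_version packing the major version in the upper 16 bits and the minor in the lower 16, so "at least (major, minor)" means either a strictly newer major or the same major with a minor that is not older. The comparison on its own, as a plain function:

    #include <stdbool.h>
    #include <stdint.h>

    /* 'packed' holds (major << 16) | minor. */
    bool version_at_least(uint32_t packed, unsigned int major, unsigned int minor)
    {
        unsigned int got_major = packed >> 16;
        unsigned int got_minor = packed & 0xffff;

        return got_major > major ||
               (got_major == major && got_minor >= minor);
    }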
index 753f63734c1345bdeb64b78e16d7363fad5e647a..a7b7d674f50058c22a9154b02f79f26aac6b616f 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/mm.h>
 #include <linux/page-flags.h>
 #include <linux/highmem.h>
-#include <linux/console.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
 #include <linux/edd.h>
@@ -109,17 +108,6 @@ struct tls_descs {
  */
 static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
 
-static void __init xen_banner(void)
-{
-       unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
-       struct xen_extraversion extra;
-       HYPERVISOR_xen_version(XENVER_extraversion, &extra);
-
-       pr_info("Booting paravirtualized kernel on %s\n", pv_info.name);
-       pr_info("Xen version: %d.%d%s (preserve-AD)\n",
-               version >> 16, version & 0xffff, extra.extraversion);
-}
-
 static void __init xen_pv_init_platform(void)
 {
        populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP));
@@ -142,22 +130,6 @@ static void __init xen_pv_guest_late_init(void)
 #endif
 }
 
-/* Check if running on Xen version (major, minor) or later */
-bool
-xen_running_on_version_or_later(unsigned int major, unsigned int minor)
-{
-       unsigned int version;
-
-       if (!xen_domain())
-               return false;
-
-       version = HYPERVISOR_xen_version(XENVER_version, NULL);
-       if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
-               ((version >> 16) > major))
-               return true;
-       return false;
-}
-
 static __read_mostly unsigned int cpuid_leaf5_ecx_val;
 static __read_mostly unsigned int cpuid_leaf5_edx_val;
 
@@ -755,8 +727,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
        preempt_enable();
 }
 
-static void xen_convert_trap_info(const struct desc_ptr *desc,
-                                 struct trap_info *traps)
+static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
+                                     struct trap_info *traps, bool full)
 {
        unsigned in, out, count;
 
@@ -766,17 +738,18 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
        for (in = out = 0; in < count; in++) {
                gate_desc *entry = (gate_desc *)(desc->address) + in;
 
-               if (cvt_gate_to_trap(in, entry, &traps[out]))
+               if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
                        out++;
        }
-       traps[out].address = 0;
+
+       return out;
 }
 
 void xen_copy_trap_info(struct trap_info *traps)
 {
        const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
 
-       xen_convert_trap_info(desc, traps);
+       xen_convert_trap_info(desc, traps, true);
 }
 
 /* Load a new IDT into Xen.  In principle this can be per-CPU, so we
@@ -786,6 +759,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
 {
        static DEFINE_SPINLOCK(lock);
        static struct trap_info traps[257];
+       unsigned out;
 
        trace_xen_cpu_load_idt(desc);
 
@@ -793,7 +767,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
 
        memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
 
-       xen_convert_trap_info(desc, traps);
+       out = xen_convert_trap_info(desc, traps, false);
+       memset(&traps[out], 0, sizeof(traps[0]));
 
        xen_mc_flush();
        if (HYPERVISOR_set_trap_table(traps))
@@ -1214,6 +1189,11 @@ static void __init xen_dom0_set_legacy_features(void)
        x86_platform.legacy.rtc = 1;
 }
 
+static void __init xen_domu_set_legacy_features(void)
+{
+       x86_platform.legacy.rtc = 0;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage __visible void __init xen_start_kernel(void)
 {
@@ -1356,9 +1336,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
        boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;
 
        if (!xen_initial_domain()) {
-               add_preferred_console("xenboot", 0, NULL);
                if (pci_xen)
                        x86_init.pci.arch_init = pci_xen_init;
+               x86_platform.set_legacy_features =
+                               xen_domu_set_legacy_features;
        } else {
                const struct dom0_vga_console_info *info =
                        (void *)((char *)xen_start_info +
@@ -1399,11 +1380,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
 #endif
        }
 
-       if (!boot_params.screen_info.orig_video_isVGA)
-               add_preferred_console("tty", 0, NULL);
-       add_preferred_console("hvc", 0, NULL);
-       if (boot_params.screen_info.orig_video_isVGA)
-               add_preferred_console("tty", 0, NULL);
+       xen_add_preferred_consoles();
 
 #ifdef CONFIG_PCI
        /* PCI BIOS service won't work from a PV guest. */
index 0d5e34b9e6f93985a243cc69c632ae0d8f9e8ce1..bcae606bbc5cfd3145aefb4f3bf2c11b5e07afe0 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/acpi.h>
+#include <linux/export.h>
 
 #include <xen/hvc-console.h>
 
 /*
  * PVH variables.
  *
- * The variable xen_pvh needs to live in the data segment since it is used
+ * The variable xen_pvh needs to live in a data segment since it is used
  * after startup_{32|64} is invoked, which will clear the .bss segment.
  */
-bool xen_pvh __section(".data") = 0;
+bool __ro_after_init xen_pvh;
+EXPORT_SYMBOL_GPL(xen_pvh);
 
 void __init xen_pvh_init(struct boot_params *boot_params)
 {
@@ -36,6 +38,10 @@ void __init xen_pvh_init(struct boot_params *boot_params)
        pfn = __pa(hypercall_page);
        wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
 
+       if (xen_initial_domain())
+               x86_init.oem.arch_setup = xen_add_preferred_consoles;
+       x86_init.oem.banner = xen_banner;
+
        xen_efi_init(boot_params);
 }
 
index 1df5f01529e59856fcde44d615650a07d1c90c83..3359c23573c50167dbc9f55030e5ea7c82f62f54 100644 (file)
@@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
        if (pinned) {
                struct page *page = pfn_to_page(pfn);
 
-               if (static_branch_likely(&xen_struct_pages_ready))
+               pinned = false;
+               if (static_branch_likely(&xen_struct_pages_ready)) {
+                       pinned = PagePinned(page);
                        SetPagePinned(page);
+               }
 
                xen_mc_batch();
 
                __set_pfn_prot(pfn, PAGE_KERNEL_RO);
 
-               if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+               if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
                        __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
                xen_mc_issue(PARAVIRT_LAZY_MMU);
@@ -2395,7 +2398,7 @@ static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
 
 int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
                  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
-                 unsigned int domid, bool no_translate, struct page **pages)
+                 unsigned int domid, bool no_translate)
 {
        int err = 0;
        struct remap_data rmd;
index 54f9aa7e8457399906070ee3b41be9470cfd3b02..46df59aeaa06a685c2e58c489d2b648f24d284cb 100644 (file)
@@ -18,7 +18,7 @@
 #endif
 #include <linux/export.h>
 
-int xen_swiotlb __read_mostly;
+static int xen_swiotlb __read_mostly;
 
 /*
  * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
@@ -56,7 +56,7 @@ int __init pci_xen_swiotlb_detect(void)
        return xen_swiotlb;
 }
 
-void __init pci_xen_swiotlb_init(void)
+static void __init pci_xen_swiotlb_init(void)
 {
        if (xen_swiotlb) {
                xen_swiotlb_init_early();
index 96afadf9878ef14ca00625445b362d4a4c07996a..7ed56c6075b0cc8d7e1fd97288fac2761983e2bc 100644 (file)
@@ -290,8 +290,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 
        gdt = get_cpu_gdt_rw(cpu);
 
-       memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
-
        /*
         * Bring up the CPU in cpu_bringup_and_idle() with the stack
         * pointing just below where pt_regs would be if it were a normal
@@ -308,8 +306,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 
        xen_copy_trap_info(ctxt->trap_ctxt);
 
-       ctxt->ldt_ents = 0;
-
        BUG_ON((unsigned long)gdt & ~PAGE_MASK);
 
        gdt_mfn = arbitrary_virt_to_mfn(gdt);
index 8d7ec49a35fbb686c5ec93842ce681390039a1e1..8bc8b72a205d460743a76bfac00e88e4921c5015 100644 (file)
@@ -51,6 +51,7 @@ void __init xen_remap_memory(void);
 phys_addr_t __init xen_find_free_area(phys_addr_t size);
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
+void xen_banner(void);
 void xen_enable_sysenter(void);
 void xen_enable_syscall(void);
 void xen_vcpu_restore(void);
@@ -109,7 +110,7 @@ static inline void xen_uninit_lock_cpu(int cpu)
 
 struct dom0_vga_console_info;
 
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
 void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
 #else
 static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
@@ -118,6 +119,8 @@ static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
 }
 #endif
 
+void xen_add_preferred_consoles(void);
+
 void __init xen_init_apic(void);
 
 #ifdef CONFIG_XEN_EFI
index 7cbf68ca71060dec42583ac60cd2e5f47b68ed09..6fc05cba61a27563273cde66d2f99ffe56fd1ab1 100644 (file)
@@ -78,7 +78,7 @@
 #endif
 #define XCHAL_KIO_SIZE                 0x10000000
 
-#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
 #define XCHAL_KIO_PADDR                        xtensa_get_kio_paddr()
 #ifndef __ASSEMBLY__
 extern unsigned long xtensa_kio_paddr;
index 764b54bef701943c1719f4982773b994684a84a7..15051a8a153998efe65196aecbfbf6dc91af1e02 100644 (file)
@@ -143,7 +143,7 @@ unsigned xtensa_get_ext_irq_no(unsigned irq)
 
 void __init init_IRQ(void)
 {
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
        irqchip_init();
 #else
 #ifdef CONFIG_HAVE_SMP
index ed184106e4cf945a0631f7f43d72a700b568507a..ee9082a142feb49cf187c014df9d6df8b605a939 100644 (file)
@@ -63,7 +63,7 @@ extern unsigned long initrd_end;
 extern int initrd_below_start_ok;
 #endif
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 void *dtb_start = __dtb_start;
 #endif
 
@@ -125,7 +125,7 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
 
 #endif /* CONFIG_BLK_DEV_INITRD */
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 
 static int __init parse_tag_fdt(const bp_tag_t *tag)
 {
@@ -135,7 +135,7 @@ static int __init parse_tag_fdt(const bp_tag_t *tag)
 
 __tagtable(BP_TAG_FDT, parse_tag_fdt);
 
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
 
 static int __init parse_tag_cmdline(const bp_tag_t* tag)
 {
@@ -183,7 +183,7 @@ static int __init parse_bootparam(const bp_tag_t *tag)
 }
 #endif
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 
 #if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
 unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
@@ -232,7 +232,7 @@ void __init early_init_devtree(void *params)
                strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 }
 
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
 
 /*
  * Initialize architecture. (Early stage)
@@ -253,7 +253,7 @@ void __init init_arch(bp_tag_t *bp_start)
        if (bp_start)
                parse_bootparam(bp_start);
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
        early_init_devtree(dtb_start);
 #endif
 
index 7e4d97dc8bd8fb3ed37863a713d7bd1291f8b1f9..38acda4f04e85d5db3d91bb0a0f97819bf6dc7a9 100644 (file)
@@ -101,7 +101,7 @@ void init_mmu(void)
 
 void init_kio(void)
 {
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
        /*
         * Update the IO area mapping in case xtensa_kio_paddr has changed
         */
index 4f7d6142d41fa4c0847b8a68bf82e7067a95fce8..538e6748e85a7dbb912bd7df2e04860ff51dbb03 100644 (file)
@@ -51,8 +51,12 @@ void platform_power_off(void)
 
 void platform_restart(void)
 {
-       /* Flush and reset the mmu, simulate a processor reset, and
-        * jump to the reset vector. */
+       /* Try software reset first. */
+       WRITE_ONCE(*(u32 *)XTFPGA_SWRST_VADDR, 0xdead);
+
+       /* If software reset did not work, flush and reset the mmu,
+        * simulate a processor reset, and jump to the reset vector.
+        */
        cpu_reset();
        /* control never gets here */
 }
@@ -66,7 +70,7 @@ void __init platform_calibrate_ccount(void)
 
 #endif
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 
 static void __init xtfpga_clk_setup(struct device_node *np)
 {
@@ -284,4 +288,4 @@ static int __init xtavnet_init(void)
  */
 arch_initcall(xtavnet_init);
 
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
index cf2780cb44a74fddb4624aecb22161203b62d6b7..485a258b0ab37ee7cab7b9e28411dd0032f19830 100644 (file)
@@ -490,7 +490,6 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
        bdev = I_BDEV(inode);
        mutex_init(&bdev->bd_fsfreeze_mutex);
        spin_lock_init(&bdev->bd_size_lock);
-       bdev->bd_disk = disk;
        bdev->bd_partno = partno;
        bdev->bd_inode = inode;
        bdev->bd_stats = alloc_percpu(struct disk_stats);
@@ -498,6 +497,7 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
                iput(inode);
                return NULL;
        }
+       bdev->bd_disk = disk;
        return bdev;
 }
 
index dd13c2bbc29c1164b6d01432b28c13545cad8a90..480e1a13485966104362f265d842887640775781 100644 (file)
@@ -2662,15 +2662,6 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
         * are likely to increase the throughput.
         */
        bfqq->new_bfqq = new_bfqq;
-       /*
-        * The above assignment schedules the following redirections:
-        * each time some I/O for bfqq arrives, the process that
-        * generated that I/O is disassociated from bfqq and
-        * associated with new_bfqq. Here we increases new_bfqq->ref
-        * in advance, adding the number of processes that are
-        * expected to be associated with new_bfqq as they happen to
-        * issue I/O.
-        */
        new_bfqq->ref += process_refs;
        return new_bfqq;
 }
@@ -2733,10 +2724,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 {
        struct bfq_queue *in_service_bfqq, *new_bfqq;
 
-       /* if a merge has already been setup, then proceed with that first */
-       if (bfqq->new_bfqq)
-               return bfqq->new_bfqq;
-
        /*
         * Check delayed stable merge for rotational or non-queueing
         * devs. For this branch to be executed, bfqq must not be
@@ -2838,6 +2825,9 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
        if (bfq_too_late_for_merging(bfqq))
                return NULL;
 
+       if (bfqq->new_bfqq)
+               return bfqq->new_bfqq;
+
        if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
                return NULL;
 
index 5df3dd282e40e011257fccbddb395c757a6620bb..a6fb6a0b429550d0559924afac6cb7326b2669d2 100644 (file)
@@ -1466,7 +1466,7 @@ again:
        if (!bio_integrity_endio(bio))
                return;
 
-       if (bio->bi_bdev)
+       if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED))
                rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
 
        if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
index 3c88a79a319bbb0bad48fc147bf5e2ac7ed2a9ec..38b9f7684952a71607a0c7869fcd885995ed80e9 100644 (file)
@@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q)
        if (preloaded)
                radix_tree_preload_end();
 
-       ret = blk_iolatency_init(q);
-       if (ret)
-               goto err_destroy_all;
-
        ret = blk_ioprio_init(q);
        if (ret)
                goto err_destroy_all;
@@ -1194,6 +1190,12 @@ int blkcg_init_queue(struct request_queue *q)
        if (ret)
                goto err_destroy_all;
 
+       ret = blk_iolatency_init(q);
+       if (ret) {
+               blk_throtl_exit(q);
+               goto err_destroy_all;
+       }
+
        return 0;
 
 err_destroy_all:
@@ -1364,10 +1366,14 @@ enomem:
        /* alloc failed, nothing's initialized yet, free everything */
        spin_lock_irq(&q->queue_lock);
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
+               struct blkcg *blkcg = blkg->blkcg;
+
+               spin_lock(&blkcg->lock);
                if (blkg->pd[pol->plid]) {
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
+               spin_unlock(&blkcg->lock);
        }
        spin_unlock_irq(&q->queue_lock);
        ret = -ENOMEM;
@@ -1399,12 +1405,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
        __clear_bit(pol->plid, q->blkcg_pols);
 
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
+               struct blkcg *blkcg = blkg->blkcg;
+
+               spin_lock(&blkcg->lock);
                if (blkg->pd[pol->plid]) {
                        if (pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
+               spin_unlock(&blkcg->lock);
        }
 
        spin_unlock_irq(&q->queue_lock);
index 69a12177dfb62f6d1f1fc3f1f1166ca52cceae5c..16d5d5338392a22ac6fd41bc93025943b22a7138 100644 (file)
@@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register);
  */
 void blk_integrity_unregister(struct gendisk *disk)
 {
+       struct blk_integrity *bi = &disk->queue->integrity;
+
+       if (!bi->profile)
+               return;
+
+       /* ensure all bios are off the integrity workqueue */
+       blk_flush_integrity();
        blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
-       memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+       memset(bi, 0, sizeof(*bi));
 }
 EXPORT_SYMBOL(blk_integrity_unregister);
 
index 4b66d2776edac02dd73c16a85e51d629d4f0dd1a..3b38d15723de182d66151cde2d14a9d519a43691 100644 (file)
@@ -129,6 +129,7 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(PCI_P2PDMA),
        QUEUE_FLAG_NAME(ZONE_RESETALL),
        QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
+       QUEUE_FLAG_NAME(HCTX_ACTIVE),
        QUEUE_FLAG_NAME(NOWAIT),
 };
 #undef QUEUE_FLAG_NAME
index 86f87346232a6b9a7f704dc4e0d41378ee7bd056..ff5caeb825429d91c3f6bf58af68d91fc50b222d 100644 (file)
@@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 
        spin_lock_irqsave(&tags->lock, flags);
        rq = tags->rqs[bitnr];
-       if (!rq || !refcount_inc_not_zero(&rq->ref))
+       if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
                rq = NULL;
        spin_unlock_irqrestore(&tags->lock, flags);
        return rq;
index 351095193788247d9b31b2f4a66568cac26ea6c6..882f56bff14f830234ee38d1ddfbe6aebf81ce91 100644 (file)
@@ -165,13 +165,20 @@ static const struct file_operations bsg_fops = {
        .llseek         =       default_llseek,
 };
 
+static void bsg_device_release(struct device *dev)
+{
+       struct bsg_device *bd = container_of(dev, struct bsg_device, device);
+
+       ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+       kfree(bd);
+}
+
 void bsg_unregister_queue(struct bsg_device *bd)
 {
        if (bd->queue->kobj.sd)
                sysfs_remove_link(&bd->queue->kobj, "bsg");
        cdev_device_del(&bd->cdev, &bd->device);
-       ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
-       kfree(bd);
+       put_device(&bd->device);
 }
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
 
@@ -193,11 +200,13 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
        if (ret < 0) {
                if (ret == -ENOSPC)
                        dev_err(parent, "bsg: too many bsg devices\n");
-               goto out_kfree;
+               kfree(bd);
+               return ERR_PTR(ret);
        }
        bd->device.devt = MKDEV(bsg_major, ret);
        bd->device.class = bsg_class;
        bd->device.parent = parent;
+       bd->device.release = bsg_device_release;
        dev_set_name(&bd->device, "%s", name);
        device_initialize(&bd->device);
 
@@ -205,7 +214,7 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
        bd->cdev.owner = THIS_MODULE;
        ret = cdev_device_add(&bd->cdev, &bd->device);
        if (ret)
-               goto out_ida_remove;
+               goto out_put_device;
 
        if (q->kobj.sd) {
                ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
@@ -217,10 +226,8 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
 
 out_device_del:
        cdev_device_del(&bd->cdev, &bd->device);
-out_ida_remove:
-       ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
-out_kfree:
-       kfree(bd);
+out_put_device:
+       put_device(&bd->device);
        return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(bsg_register_queue);
index ffce6f6c68ddbdb4cf8c6e7764e3bd8cff5d683b..1e970c247e0ebb6bb0cabe77be5a74f60bbdade8 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/falloc.h>
 #include <linux/suspend.h>
+#include <linux/fs.h>
 #include "blk.h"
 
 static struct inode *bdev_file_inode(struct file *file)
@@ -553,7 +554,8 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                             loff_t len)
 {
-       struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+       struct inode *inode = bdev_file_inode(file);
+       struct block_device *bdev = I_BDEV(inode);
        loff_t end = start + len - 1;
        loff_t isize;
        int error;
@@ -580,10 +582,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
        if ((start | len) & (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;
 
+       filemap_invalidate_lock(inode->i_mapping);
+
        /* Invalidate the page cache, including dirty pages. */
        error = truncate_bdev_range(bdev, file->f_mode, start, end);
        if (error)
-               return error;
+               goto fail;
 
        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
@@ -600,17 +604,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                                             GFP_KERNEL, 0);
                break;
        default:
-               return -EOPNOTSUPP;
+               error = -EOPNOTSUPP;
        }
-       if (error)
-               return error;
 
-       /*
-        * Invalidate the page cache again; if someone wandered in and dirtied
-        * a page, we just discard it - userspace has no way of knowing whether
-        * the write happened before or after discard completing...
-        */
-       return truncate_bdev_range(bdev, file->f_mode, start, end);
+ fail:
+       filemap_invalidate_unlock(inode->i_mapping);
+       return error;
 }
 
 const struct file_operations def_blk_fops = {
index 7b6e5e1cf95642adaa6a6fb53e902b6466d1f0f7..496e8458c3574343807bd25459bca4bfd729d0fd 100644 (file)
@@ -1268,6 +1268,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
 
 out_destroy_part_tbl:
        xa_destroy(&disk->part_tbl);
+       disk->part0->bd_disk = NULL;
        iput(disk->part0->bd_inode);
 out_free_bdi:
        bdi_put(disk->bdi);
index 30d2db37cc8735195bae14633cc08f9b94f1ea76..0d399ddaa185a7cdf5974f7f294f50032747f577 100644 (file)
@@ -17,6 +17,8 @@ source "drivers/bus/Kconfig"
 
 source "drivers/connector/Kconfig"
 
+source "drivers/firmware/Kconfig"
+
 source "drivers/gnss/Kconfig"
 
 source "drivers/mtd/Kconfig"
index 0a0a982f9c28d44e5cf48b170662c0df40852272..c0e77c1c8e09d6fa15094b2fff28d1c7bef0aed4 100644 (file)
@@ -36,7 +36,7 @@ struct acpi_gtdt_descriptor {
 
 static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata;
 
-static inline void *next_platform_timer(void *platform_timer)
+static inline __init void *next_platform_timer(void *platform_timer)
 {
        struct acpi_gtdt_header *gh = platform_timer;
 
index a3ef6cce644cc071e5d58e5698cc1bf52947b6df..7dd80acf92c78c08870ff26c035d46b64cfff5cc 100644 (file)
@@ -3007,6 +3007,18 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
                ndr_desc->target_node = NUMA_NO_NODE;
        }
 
+       /* Fallback to address based numa information if node lookup failed */
+       if (ndr_desc->numa_node == NUMA_NO_NODE) {
+               ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
+               dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
+                       NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+       }
+       if (ndr_desc->target_node == NUMA_NO_NODE) {
+               ndr_desc->target_node = phys_to_target_node(spa->address);
+               dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
+                       NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+       }
+
        /*
         * Persistence domain bits are hierarchical, if
         * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
index a43f1521efe6d967f73e25c4c950a7c598834c8d..45c5c0e45e3327a3c6a1fe1acf81f2b7c709f379 100644 (file)
@@ -284,8 +284,7 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
 #define should_use_kmap(pfn)   page_is_ram(pfn)
 #endif
 
-static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
-                             bool memory)
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
 {
        unsigned long pfn;
 
@@ -295,8 +294,7 @@ static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
                        return NULL;
                return (void __iomem __force *)kmap(pfn_to_page(pfn));
        } else
-               return memory ? acpi_os_memmap(pg_off, pg_sz) :
-                               acpi_os_ioremap(pg_off, pg_sz);
+               return acpi_os_ioremap(pg_off, pg_sz);
 }
 
 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
@@ -311,10 +309,9 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
 }
 
 /**
- * __acpi_os_map_iomem - Get a virtual address for a given physical address range.
+ * acpi_os_map_iomem - Get a virtual address for a given physical address range.
  * @phys: Start of the physical address range to map.
  * @size: Size of the physical address range to map.
- * @memory: true if remapping memory, false if IO
  *
  * Look up the given physical address range in the list of existing ACPI memory
  * mappings.  If found, get a reference to it and return a pointer to it (its
@@ -324,8 +321,8 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
  * During early init (when acpi_permanent_mmap has not been set yet) this
  * routine simply calls __acpi_map_table() to get the job done.
  */
-static void __iomem __ref
-*__acpi_os_map_iomem(acpi_physical_address phys, acpi_size size, bool memory)
+void __iomem __ref
+*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
 {
        struct acpi_ioremap *map;
        void __iomem *virt;
@@ -356,7 +353,7 @@ static void __iomem __ref
 
        pg_off = round_down(phys, PAGE_SIZE);
        pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-       virt = acpi_map(phys, size, memory);
+       virt = acpi_map(phys, size);
        if (!virt) {
                mutex_unlock(&acpi_ioremap_lock);
                kfree(map);
@@ -375,17 +372,11 @@ out:
        mutex_unlock(&acpi_ioremap_lock);
        return map->virt + (phys - map->phys);
 }
-
-void __iomem *__ref
-acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
-{
-       return __acpi_os_map_iomem(phys, size, false);
-}
 EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
 
 void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
-       return (void *)__acpi_os_map_iomem(phys, size, true);
+       return (void *)acpi_os_map_iomem(phys, size);
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
index bd92b549fd5a4cae9f96f2cc84b9337ab976daa0..1c48358b43ba30306a84894bfadb4444e7896176 100644 (file)
@@ -371,7 +371,7 @@ static int lps0_device_attach(struct acpi_device *adev,
                return 0;
 
        if (acpi_s2idle_vendor_amd()) {
-               /* AMD0004, AMDI0005:
+               /* AMD0004, AMD0005, AMDI0005:
                 * - Should use rev_id 0x0
                 * - function mask > 0x3: Should use AMD method, but has off by one bug
                 * - function mask = 0x3: Should use Microsoft method
@@ -390,6 +390,7 @@ static int lps0_device_attach(struct acpi_device *adev,
                                        ACPI_LPS0_DSM_UUID_MICROSOFT, 0,
                                        &lps0_dsm_guid_microsoft);
                if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
+                                                !strcmp(hid, "AMD0005") ||
                                                 !strcmp(hid, "AMDI0005"))) {
                        lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
                        acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
index d9030cb6b1e4802aeac5d33cf59f484d701969e4..9edacc8b97688ed381854ab2ea521e81fbe13090 100644 (file)
@@ -1852,6 +1852,7 @@ static void binder_deferred_fd_close(int fd)
 }
 
 static void binder_transaction_buffer_release(struct binder_proc *proc,
+                                             struct binder_thread *thread,
                                              struct binder_buffer *buffer,
                                              binder_size_t failed_at,
                                              bool is_failure)
@@ -2011,8 +2012,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
                                                &proc->alloc, &fd, buffer,
                                                offset, sizeof(fd));
                                WARN_ON(err);
-                               if (!err)
+                               if (!err) {
                                        binder_deferred_fd_close(fd);
+                                       /*
+                                        * Need to make sure the thread goes
+                                        * back to userspace to complete the
+                                        * deferred close
+                                        */
+                                       if (thread)
+                                               thread->looper_need_return = true;
+                               }
                        }
                } break;
                default:
@@ -3038,9 +3047,8 @@ static void binder_transaction(struct binder_proc *proc,
        if (reply) {
                binder_enqueue_thread_work(thread, tcomplete);
                binder_inner_proc_lock(target_proc);
-               if (target_thread->is_dead || target_proc->is_frozen) {
-                       return_error = target_thread->is_dead ?
-                               BR_DEAD_REPLY : BR_FROZEN_REPLY;
+               if (target_thread->is_dead) {
+                       return_error = BR_DEAD_REPLY;
                        binder_inner_proc_unlock(target_proc);
                        goto err_dead_proc_or_thread;
                }
@@ -3105,7 +3113,7 @@ err_bad_parent:
 err_copy_data_failed:
        binder_free_txn_fixups(t);
        trace_binder_transaction_failed_buffer_release(t->buffer);
-       binder_transaction_buffer_release(target_proc, t->buffer,
+       binder_transaction_buffer_release(target_proc, NULL, t->buffer,
                                          buffer_offset, true);
        if (target_node)
                binder_dec_node_tmpref(target_node);
@@ -3184,7 +3192,9 @@ err_invalid_target_handle:
  * Cleanup buffer and free it.
  */
 static void
-binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+binder_free_buf(struct binder_proc *proc,
+               struct binder_thread *thread,
+               struct binder_buffer *buffer)
 {
        binder_inner_proc_lock(proc);
        if (buffer->transaction) {
@@ -3212,7 +3222,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
                binder_node_inner_unlock(buf_node);
        }
        trace_binder_transaction_buffer_release(buffer);
-       binder_transaction_buffer_release(proc, buffer, 0, false);
+       binder_transaction_buffer_release(proc, thread, buffer, 0, false);
        binder_alloc_free_buf(&proc->alloc, buffer);
 }
 
@@ -3414,7 +3424,7 @@ static int binder_thread_write(struct binder_proc *proc,
                                     proc->pid, thread->pid, (u64)data_ptr,
                                     buffer->debug_id,
                                     buffer->transaction ? "active" : "finished");
-                       binder_free_buf(proc, buffer);
+                       binder_free_buf(proc, thread, buffer);
                        break;
                }
 
@@ -4107,7 +4117,7 @@ retry:
                        buffer->transaction = NULL;
                        binder_cleanup_transaction(t, "fd fixups failed",
                                                   BR_FAILED_REPLY);
-                       binder_free_buf(proc, buffer);
+                       binder_free_buf(proc, thread, buffer);
                        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
                                     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
                                     proc->pid, thread->pid,
@@ -4648,6 +4658,22 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
        return 0;
 }
 
+static bool binder_txns_pending_ilocked(struct binder_proc *proc)
+{
+       struct rb_node *n;
+       struct binder_thread *thread;
+
+       if (proc->outstanding_txns > 0)
+               return true;
+
+       for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
+               thread = rb_entry(n, struct binder_thread, rb_node);
+               if (thread->transaction_stack)
+                       return true;
+       }
+       return false;
+}
+
 static int binder_ioctl_freeze(struct binder_freeze_info *info,
                               struct binder_proc *target_proc)
 {
@@ -4679,8 +4705,13 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
                        (!target_proc->outstanding_txns),
                        msecs_to_jiffies(info->timeout_ms));
 
-       if (!ret && target_proc->outstanding_txns)
-               ret = -EAGAIN;
+       /* Check pending transactions that wait for reply */
+       if (ret >= 0) {
+               binder_inner_proc_lock(target_proc);
+               if (binder_txns_pending_ilocked(target_proc))
+                       ret = -EAGAIN;
+               binder_inner_proc_unlock(target_proc);
+       }
 
        if (ret < 0) {
                binder_inner_proc_lock(target_proc);
@@ -4696,6 +4727,7 @@ static int binder_ioctl_get_freezer_info(
 {
        struct binder_proc *target_proc;
        bool found = false;
+       __u32 txns_pending;
 
        info->sync_recv = 0;
        info->async_recv = 0;
@@ -4705,7 +4737,9 @@ static int binder_ioctl_get_freezer_info(
                if (target_proc->pid == info->pid) {
                        found = true;
                        binder_inner_proc_lock(target_proc);
-                       info->sync_recv |= target_proc->sync_recv;
+                       txns_pending = binder_txns_pending_ilocked(target_proc);
+                       info->sync_recv |= target_proc->sync_recv |
+                                       (txns_pending << 1);
                        info->async_recv |= target_proc->async_recv;
                        binder_inner_proc_unlock(target_proc);
                }
index 810c0b84d3f81374438c42b25cb7b9983ee96feb..402c4d4362a83f08d1215edec9c16b3cafccaa1c 100644 (file)
@@ -378,6 +378,8 @@ struct binder_ref {
  *                        binder transactions
  *                        (protected by @inner_lock)
  * @sync_recv:            process received sync transactions since last frozen
+ *                        bit 0: received sync transaction after being frozen
+ *                        bit 1: new pending sync transaction during freezing
  *                        (protected by @inner_lock)
  * @async_recv:           process received async transactions since last frozen
  *                        (protected by @inner_lock)
index 46c503486e9629a1786c63f14f80405da8f468a7..00fb4120a5b3a8166882216b91316d86ab0c9950 100644 (file)
@@ -264,7 +264,7 @@ void __init numa_free_distance(void)
        size = numa_distance_cnt * numa_distance_cnt *
                sizeof(numa_distance[0]);
 
-       memblock_free(__pa(numa_distance), size);
+       memblock_free_ptr(numa_distance, size);
        numa_distance_cnt = 0;
        numa_distance = NULL;
 }
index e65dd803a453cae9aab09c62685422c53e4b64b8..249da496581a0dac798502d32dc8ebf016a7c63c 100644 (file)
@@ -95,12 +95,29 @@ int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
 
        list_add(&link->s_hook, &sup->consumers);
        list_add(&link->c_hook, &con->suppliers);
+       pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
+                con, sup);
 out:
        mutex_unlock(&fwnode_link_lock);
 
        return ret;
 }
 
+/**
+ * __fwnode_link_del - Delete a link between two fwnode_handles.
+ * @link: the fwnode_link to be deleted
+ *
+ * The fwnode_link_lock needs to be held when this function is called.
+ */
+static void __fwnode_link_del(struct fwnode_link *link)
+{
+       pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",
+                link->consumer, link->supplier);
+       list_del(&link->s_hook);
+       list_del(&link->c_hook);
+       kfree(link);
+}
+
 /**
  * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
  * @fwnode: fwnode whose supplier links need to be deleted
@@ -112,11 +129,8 @@ static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
        struct fwnode_link *link, *tmp;
 
        mutex_lock(&fwnode_link_lock);
-       list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
-               list_del(&link->s_hook);
-               list_del(&link->c_hook);
-               kfree(link);
-       }
+       list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
+               __fwnode_link_del(link);
        mutex_unlock(&fwnode_link_lock);
 }
 
@@ -131,11 +145,8 @@ static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
        struct fwnode_link *link, *tmp;
 
        mutex_lock(&fwnode_link_lock);
-       list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
-               list_del(&link->s_hook);
-               list_del(&link->c_hook);
-               kfree(link);
-       }
+       list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
+               __fwnode_link_del(link);
        mutex_unlock(&fwnode_link_lock);
 }
 
@@ -676,7 +687,8 @@ struct device_link *device_link_add(struct device *consumer,
 {
        struct device_link *link;
 
-       if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
+       if (!consumer || !supplier || consumer == supplier ||
+           flags & ~DL_ADD_VALID_FLAGS ||
            (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
            (flags & DL_FLAG_SYNC_STATE_ONLY &&
             (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
@@ -975,6 +987,7 @@ int device_links_check_suppliers(struct device *dev)
 {
        struct device_link *link;
        int ret = 0;
+       struct fwnode_handle *sup_fw;
 
        /*
         * Device waiting for supplier to become available is not allowed to
@@ -983,10 +996,11 @@ int device_links_check_suppliers(struct device *dev)
        mutex_lock(&fwnode_link_lock);
        if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
            !fw_devlink_is_permissive()) {
-               dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n",
-                       list_first_entry(&dev->fwnode->suppliers,
-                       struct fwnode_link,
-                       c_hook)->supplier);
+               sup_fw = list_first_entry(&dev->fwnode->suppliers,
+                                         struct fwnode_link,
+                                         c_hook)->supplier;
+               dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwP\n",
+                             sup_fw);
                mutex_unlock(&fwnode_link_lock);
                return -EPROBE_DEFER;
        }
@@ -1001,8 +1015,9 @@ int device_links_check_suppliers(struct device *dev)
                if (link->status != DL_STATE_AVAILABLE &&
                    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
                        device_links_missing_supplier(dev);
-                       dev_dbg(dev, "probe deferral - supplier %s not ready\n",
-                               dev_name(link->supplier));
+                       dev_err_probe(dev, -EPROBE_DEFER,
+                                     "supplier %s not ready\n",
+                                     dev_name(link->supplier));
                        ret = -EPROBE_DEFER;
                        break;
                }
@@ -1722,6 +1737,25 @@ static int fw_devlink_create_devlink(struct device *con,
        struct device *sup_dev;
        int ret = 0;
 
+       /*
+        * In some cases, a device P might also be a supplier to its child node
+        * C. However, this would defer the probe of C until the probe of P
+        * completes successfully. This is perfectly fine in the device driver
+        * model. device_add() doesn't guarantee probe completion of the device
+        * by the time it returns.
+        *
+        * However, there are a few drivers that assume C will finish probing
+        * as soon as it's added and before P finishes probing. So, we provide
+        * a flag to let fw_devlink know not to delay the probe of C until the
+        * probe of P completes successfully.
+        *
+        * When such a flag is set, we can't create device links where P is the
+        * supplier of C as that would delay the probe of C.
+        */
+       if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
+           fwnode_is_ancestor_of(sup_handle, con->fwnode))
+               return -EINVAL;
+
        sup_dev = get_dev_from_fwnode(sup_handle);
        if (sup_dev) {
                /*
@@ -1772,14 +1806,21 @@ static int fw_devlink_create_devlink(struct device *con,
         * be broken by applying logic. Check for these types of cycles and
         * break them so that devices in the cycle probe properly.
         *
-        * If the supplier's parent is dependent on the consumer, then
-        * the consumer-supplier dependency is a false dependency. So,
-        * treat it as an invalid link.
+        * If the supplier's parent is dependent on the consumer, then the
+        * consumer and supplier have a cyclic dependency. Since fw_devlink
+        * can't tell which of the inferred dependencies are incorrect, don't
+        * enforce probe ordering between any of the devices in this cyclic
+        * dependency. Do this by relaxing all the fw_devlink device links in
+        * this cycle and by treating the fwnode link between the consumer and
+        * the supplier as an invalid dependency.
         */
        sup_dev = fwnode_get_next_parent_dev(sup_handle);
        if (sup_dev && device_is_dependent(con, sup_dev)) {
-               dev_dbg(con, "Not linking to %pfwP - False link\n",
-                       sup_handle);
+               dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
+                        sup_handle, dev_name(sup_dev));
+               device_links_write_lock();
+               fw_devlink_relax_cycle(con, sup_dev);
+               device_links_write_unlock();
                ret = -EINVAL;
        } else {
                /*
@@ -1858,9 +1899,7 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
                if (!own_link || ret == -EAGAIN)
                        continue;
 
-               list_del(&link->s_hook);
-               list_del(&link->c_hook);
-               kfree(link);
+               __fwnode_link_del(link);
        }
 }
 
@@ -1912,9 +1951,7 @@ static void __fw_devlink_link_to_suppliers(struct device *dev,
                if (!own_link || ret == -EAGAIN)
                        continue;
 
-               list_del(&link->s_hook);
-               list_del(&link->c_hook);
-               kfree(link);
+               __fwnode_link_del(link);
 
                /* If no device link was created, nothing more to do. */
                if (ret)
index a97f33d0c59f93846c46521823903d5aeb62426a..94665037f4a35aff03273c61c3703a44244c7841 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/rtc.h>
 #include <linux/suspend.h>
+#include <linux/init.h>
 
 #include <linux/mc146818rtc.h>
 
@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
        const char *file = *(const char **)(tracedata + 2);
        unsigned int user_hash_value, file_hash_value;
 
+       if (!x86_platform.legacy.rtc)
+               return;
+
        user_hash_value = user % USERHASH;
        file_hash_value = hash_string(lineno, file, FILEHASH);
        set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
 
 static int __init early_resume_init(void)
 {
+       if (!x86_platform.legacy.rtc)
+               return 0;
+
        hash_value_early_read = read_magic_time();
        register_pm_notifier(&pm_trace_nb);
        return 0;
@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
        unsigned int val = hash_value_early_read;
        unsigned int user, file, dev;
 
+       if (!x86_platform.legacy.rtc)
+               return 0;
+
        user = val % USERHASH;
        val = val / USERHASH;
        file = val % FILEHASH;
index 7bd0f3cfb7eb43f60879b6d44bd0e222eb9411a9..c46f6a8e14d2393f3ee2b730976ed2f3ad0f60ec 100644 (file)
@@ -1116,6 +1116,9 @@ int device_create_managed_software_node(struct device *dev,
        to_swnode(fwnode)->managed = true;
        set_secondary_fwnode(dev, fwnode);
 
+       if (device_is_registered(dev))
+               software_node_notify(dev);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(device_create_managed_software_node);
index 64b2f3d744d51aa91d98d64012adb8fce6bae54b..7f76fee6f989d85d64bd29bb894003b0d5a2509b 100644 (file)
@@ -2,4 +2,4 @@
 obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE)  += test_async_driver_probe.o
 
 obj-$(CONFIG_DRIVER_PE_KUNIT_TEST) += property-entry-test.o
-CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
+CFLAGS_property-entry-test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
index 5170a630778dc08ef4d4d16680693def45a57f61..1183f7872b7134f154d8a6f6827e3b4e1fcffb90 100644 (file)
@@ -97,13 +97,18 @@ struct nbd_config {
 
        atomic_t recv_threads;
        wait_queue_head_t recv_wq;
-       loff_t blksize;
+       unsigned int blksize_bits;
        loff_t bytesize;
 #if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbg_dir;
 #endif
 };
 
+static inline unsigned int nbd_blksize(struct nbd_config *config)
+{
+       return 1u << config->blksize_bits;
+}
+
 struct nbd_device {
        struct blk_mq_tag_set tag_set;
 
@@ -146,7 +151,7 @@ static struct dentry *nbd_dbg_dir;
 
 #define NBD_MAGIC 0x68797548
 
-#define NBD_DEF_BLKSIZE 1024
+#define NBD_DEF_BLKSIZE_BITS 10
 
 static unsigned int nbds_max = 16;
 static int max_part = 16;
@@ -317,12 +322,12 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
                loff_t blksize)
 {
        if (!blksize)
-               blksize = NBD_DEF_BLKSIZE;
+               blksize = 1u << NBD_DEF_BLKSIZE_BITS;
        if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
                return -EINVAL;
 
        nbd->config->bytesize = bytesize;
-       nbd->config->blksize = blksize;
+       nbd->config->blksize_bits = __ffs(blksize);
 
        if (!nbd->task_recv)
                return 0;
@@ -1337,7 +1342,7 @@ static int nbd_start_device(struct nbd_device *nbd)
                args->index = i;
                queue_work(nbd->recv_workq, &args->work);
        }
-       return nbd_set_size(nbd, config->bytesize, config->blksize);
+       return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
 }
 
 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
@@ -1406,11 +1411,11 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
        case NBD_SET_BLKSIZE:
                return nbd_set_size(nbd, config->bytesize, arg);
        case NBD_SET_SIZE:
-               return nbd_set_size(nbd, arg, config->blksize);
+               return nbd_set_size(nbd, arg, nbd_blksize(config));
        case NBD_SET_SIZE_BLOCKS:
-               if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
+               if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
                        return -EINVAL;
-               return nbd_set_size(nbd, bytesize, config->blksize);
+               return nbd_set_size(nbd, bytesize, nbd_blksize(config));
        case NBD_SET_TIMEOUT:
                nbd_set_cmd_timeout(nbd, arg);
                return 0;
@@ -1476,7 +1481,7 @@ static struct nbd_config *nbd_alloc_config(void)
        atomic_set(&config->recv_threads, 0);
        init_waitqueue_head(&config->recv_wq);
        init_waitqueue_head(&config->conn_wait);
-       config->blksize = NBD_DEF_BLKSIZE;
+       config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
        atomic_set(&config->live_connections, 0);
        try_module_get(THIS_MODULE);
        return config;
@@ -1604,7 +1609,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
        debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
        debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
        debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
-       debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
+       debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
        debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
 
        return 0;
@@ -1826,7 +1831,7 @@ nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
 {
        struct nbd_config *config = nbd->config;
-       u64 bsize = config->blksize;
+       u64 bsize = nbd_blksize(config);
        u64 bytes = config->bytesize;
 
        if (info->attrs[NBD_ATTR_SIZE_BYTES])
@@ -1835,7 +1840,7 @@ static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
        if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
                bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
 
-       if (bytes != config->bytesize || bsize != config->blksize)
+       if (bytes != config->bytesize || bsize != nbd_blksize(config))
                return nbd_set_size(nbd, bytes, bsize);
        return 0;
 }
index a5b96f3aad677be64b8313744612fe2469150aa0..a4cf3d692dc30137023e8d2b361e804cc593b505 100644 (file)
@@ -152,18 +152,6 @@ config QCOM_EBI2
          Interface 2, which can be used to connect things like NAND Flash,
          SRAM, ethernet adapters, FPGAs and LCD displays.
 
-config SIMPLE_PM_BUS
-       tristate "Simple Power-Managed Bus Driver"
-       depends on OF && PM
-       help
-         Driver for transparent busses that don't need a real driver, but
-         where the bus controller is part of a PM domain, or under the control
-         of a functional clock, and thus relies on runtime PM for managing
-         this PM domain and/or clock.
-         An example of such a bus controller is the Renesas Bus State
-         Controller (BSC, sometimes called "LBSC within Bus Bridge", or
-         "External Bus Interface") as found on several Renesas ARM SoCs.
-
 config SUN50I_DE2_BUS
        bool "Allwinner A64 DE2 Bus Driver"
          default ARM64
index 1c29c5e8ffb83ff08576c0018923dc11de1c4095..52c2f35a26a99d587e11f9751bbaf341be703dc6 100644 (file)
@@ -27,7 +27,7 @@ obj-$(CONFIG_OMAP_OCP2SCP)    += omap-ocp2scp.o
 obj-$(CONFIG_QCOM_EBI2)                += qcom-ebi2.o
 obj-$(CONFIG_SUN50I_DE2_BUS)   += sun50i-de2.o
 obj-$(CONFIG_SUNXI_RSB)                += sunxi-rsb.o
-obj-$(CONFIG_SIMPLE_PM_BUS)    += simple-pm-bus.o
+obj-$(CONFIG_OF)               += simple-pm-bus.o
 obj-$(CONFIG_TEGRA_ACONNECT)   += tegra-aconnect.o
 obj-$(CONFIG_TEGRA_GMI)                += tegra-gmi.o
 obj-$(CONFIG_TI_PWMSS)         += ti-pwmss.o
index 01a3d0cd08edc7271215e0e61c5a1d5497ac33ac..6b8d6257ed8a49cbddce1b3c5ba03588ddea8654 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
-
 static int simple_pm_bus_probe(struct platform_device *pdev)
 {
-       const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
-       struct device_node *np = pdev->dev.of_node;
+       const struct device *dev = &pdev->dev;
+       const struct of_dev_auxdata *lookup = dev_get_platdata(dev);
+       struct device_node *np = dev->of_node;
+       const struct of_device_id *match;
+
+       /*
+        * Allow user to use driver_override to bind this driver to a
+        * transparent bus device which has a different compatible string
+        * that's not listed in simple_pm_bus_of_match. We don't want to do any
+        * of the simple-pm-bus tasks for these devices, so return early.
+        */
+       if (pdev->driver_override)
+               return 0;
+
+       match = of_match_device(dev->driver->of_match_table, dev);
+       /*
+        * These are transparent bus devices (not simple-pm-bus matches) that
+        * have their child nodes populated automatically.  So, don't need to
+        * do anything more. We only match with the device if this driver is
+        * the most specific match because we don't want to incorrectly bind to
+        * a device that has a more specific driver.
+        */
+       if (match && match->data) {
+               if (of_property_match_string(np, "compatible", match->compatible) == 0)
+                       return 0;
+               else
+                       return -ENODEV;
+       }
 
        dev_dbg(&pdev->dev, "%s\n", __func__);
 
@@ -31,14 +56,25 @@ static int simple_pm_bus_probe(struct platform_device *pdev)
 
 static int simple_pm_bus_remove(struct platform_device *pdev)
 {
+       const void *data = of_device_get_match_data(&pdev->dev);
+
+       if (pdev->driver_override || data)
+               return 0;
+
        dev_dbg(&pdev->dev, "%s\n", __func__);
 
        pm_runtime_disable(&pdev->dev);
        return 0;
 }
 
+#define ONLY_BUS       ((void *) 1) /* Match if the device is only a bus. */
+
 static const struct of_device_id simple_pm_bus_of_match[] = {
        { .compatible = "simple-pm-bus", },
+       { .compatible = "simple-bus",   .data = ONLY_BUS },
+       { .compatible = "simple-mfd",   .data = ONLY_BUS },
+       { .compatible = "isa",          .data = ONLY_BUS },
+       { .compatible = "arm,amba-bus", .data = ONLY_BUS },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
index a51c2a8feed90c52eec79a80bc4922edd6259ece..6a8b7fb5be58df3aeb0270854aace22c2b956899 100644 (file)
@@ -1464,6 +1464,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        /* Quirks that need to be set based on detected module */
        SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
                   SYSC_MODULE_QUIRK_AESS),
+       /* Errata i893 handling for dra7 dcan1 and 2 */
+       SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
+                  SYSC_QUIRK_CLKDM_NOAUTO),
        SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
                   SYSC_QUIRK_CLKDM_NOAUTO),
        SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
@@ -2954,6 +2957,7 @@ static int sysc_init_soc(struct sysc *ddata)
                        break;
                case SOC_AM3:
                        sysc_add_disabled(0x48310000);  /* rng */
+                       break;
                default:
                        break;
                }
index 0a5596797b9346ca1bccc7d4ecfe17797cc001ca..9ef007b3cf9b41689ac9799c41e4fbb2f238ff4a 100644 (file)
@@ -564,6 +564,7 @@ config SM_GCC_6125
 
 config SM_GCC_6350
        tristate "SM6350 Global Clock Controller"
+       select QCOM_GDSC
        help
          Support for the global clock controller on SM6350 devices.
          Say Y if you want to use peripheral devices such as UART,
index bc09736ece76c9b883ad1e66f4b682c914ef1d06..68fe9f6f0d2f3bc8428e33357d3cdaa09355307d 100644 (file)
@@ -3242,7 +3242,7 @@ static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
 };
 
 static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
-       .gdscr = 0x7d060,
+       .gdscr = 0x7d07c,
        .pd = {
                .name = "hlos1_vote_turing_mmu_tbu0",
        },
index 4c94b94c41253a3fafa58674d5b87e8df4e9be94..1490446985e2e887abe47ebd42f2afc083844792 100644 (file)
@@ -186,6 +186,8 @@ static struct rzg2l_reset r9a07g044_resets[] = {
 
 static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
        MOD_CLK_BASE + R9A07G044_GIC600_GICCLK,
+       MOD_CLK_BASE + R9A07G044_IA55_CLK,
+       MOD_CLK_BASE + R9A07G044_DMAC_ACLK,
 };
 
 const struct rzg2l_cpg_info r9a07g044_cpg_info = {
index 3b3b2c3347f3763fb0cdd04392f23a4d4add426c..761922ea5db7616129ddd44b9b8644da66662900 100644 (file)
@@ -391,7 +391,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
 
        value = readl(priv->base + CLK_MON_R(clock->off));
 
-       return !(value & bitmask);
+       return value & bitmask;
 }
 
 static const struct clk_ops rzg2l_mod_clock_ops = {
index 242e94c0cf8a387f267d6cfcf543a58395c48d94..bf8cd928c2283de16b85c56ec3adef3c8f36b918 100644 (file)
@@ -165,13 +165,6 @@ static const struct clk_parent_data mpu_mux[] = {
          .name = "boot_clk", },
 };
 
-static const struct clk_parent_data s2f_usr0_mux[] = {
-       { .fw_name = "f2s-free-clk",
-         .name = "f2s-free-clk", },
-       { .fw_name = "boot_clk",
-         .name = "boot_clk", },
-};
-
 static const struct clk_parent_data emac_mux[] = {
        { .fw_name = "emaca_free_clk",
          .name = "emaca_free_clk", },
@@ -312,8 +305,6 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
          4, 0x44, 28, 1, 0, 0, 0},
        { AGILEX_CS_TIMER_CLK, "cs_timer_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
          5, 0, 0, 0, 0x30, 1, 0},
-       { AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x24,
-         6, 0, 0, 0, 0, 0, 0},
        { AGILEX_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
          0, 0, 0, 0, 0x94, 26, 0},
        { AGILEX_EMAC1_CLK, "emac1_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
index df77b6bf5c641d56da651d46d388fdfcb3c9424e..763cea8418f8e84ee606aa4d5ab986e7f6de988b 100644 (file)
@@ -3090,6 +3090,7 @@ static int compat_insnlist(struct file *file, unsigned long arg)
        mutex_lock(&dev->mutex);
        rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
        mutex_unlock(&dev->mutex);
+       kfree(insns);
        return rc;
 }
 
index 66b05a326910eaa01c3e1c50e551f5e9e4cf530b..a6f365b9cc1ad8b026d906dcff015885cebdef40 100644 (file)
@@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
        if (count)
                return count;
 
-       kobject_put(&attr_set->kobj);
        mutex_destroy(&attr_set->update_lock);
+       kobject_put(&attr_set->kobj);
        return 0;
 }
 EXPORT_SYMBOL_GPL(gov_attr_set_put);
index 1097f826ad70eab9197310f3258c13c69d687a62..8c176b7dae415bbb112b5721deb67faffa7c1de5 100644 (file)
@@ -3205,11 +3205,15 @@ static int __init intel_pstate_init(void)
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return -ENODEV;
 
-       if (no_load)
-               return -ENODEV;
-
        id = x86_match_cpu(hwp_support_ids);
        if (id) {
+               bool hwp_forced = intel_pstate_hwp_is_enabled();
+
+               if (hwp_forced)
+                       pr_info("HWP enabled by BIOS\n");
+               else if (no_load)
+                       return -ENODEV;
+
                copy_cpu_funcs(&core_funcs);
                /*
                 * Avoid enabling HWP for processors without EPP support,
@@ -3219,8 +3223,7 @@ static int __init intel_pstate_init(void)
                 * If HWP is enabled already, though, there is no choice but to
                 * deal with it.
                 */
-               if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
-                   intel_pstate_hwp_is_enabled()) {
+               if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
                        hwp_active++;
                        hwp_mode_bdw = id->driver_data;
                        intel_pstate.attr = hwp_cpufreq_attrs;
@@ -3235,7 +3238,11 @@ static int __init intel_pstate_init(void)
 
                        goto hwp_cpu_matched;
                }
+               pr_info("HWP not enabled\n");
        } else {
+               if (no_load)
+                       return -ENODEV;
+
                id = x86_match_cpu(intel_pstate_cpu_ids);
                if (!id) {
                        pr_info("CPU model not supported\n");
@@ -3314,10 +3321,9 @@ static int __init intel_pstate_setup(char *str)
        else if (!strcmp(str, "passive"))
                default_driver = &intel_cpufreq;
 
-       if (!strcmp(str, "no_hwp")) {
-               pr_info("HWP disabled\n");
+       if (!strcmp(str, "no_hwp"))
                no_hwp = 1;
-       }
+
        if (!strcmp(str, "force"))
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
index 284b6bd040b1af3bb214aba57bf1ee7fdac4c1b8..d295f405c4bb08cb371741fb98c44a61e8f0b814 100644 (file)
@@ -451,7 +451,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
 static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
 {
        struct device *cpu_dev;
-       int cur_cluster = cpu_to_cluster(policy->cpu);
 
        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
index bb88198c874e0ed2b8ed55a03fb93a9d9bef0aa0..aa4e1a5006919da78f1e5392c686479da2d579b9 100644 (file)
@@ -778,7 +778,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
                                    in_place ? DMA_BIDIRECTIONAL
                                             : DMA_TO_DEVICE);
                if (ret)
-                       goto e_ctx;
+                       goto e_aad;
 
                if (in_place) {
                        dst = src;
@@ -863,7 +863,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
        op.u.aes.size = 0;
        ret = cmd_q->ccp->vdata->perform->aes(&op);
        if (ret)
-               goto e_dst;
+               goto e_final_wa;
 
        if (aes->action == CCP_AES_ACTION_ENCRYPT) {
                /* Put the ciphered tag after the ciphertext. */
@@ -873,17 +873,19 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
                ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
                                           DMA_BIDIRECTIONAL);
                if (ret)
-                       goto e_tag;
+                       goto e_final_wa;
                ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
-               if (ret)
-                       goto e_tag;
+               if (ret) {
+                       ccp_dm_free(&tag);
+                       goto e_final_wa;
+               }
 
                ret = crypto_memneq(tag.address, final_wa.address,
                                    authsize) ? -EBADMSG : 0;
                ccp_dm_free(&tag);
        }
 
-e_tag:
+e_final_wa:
        ccp_dm_free(&final_wa);
 
 e_dst:
index fc1153ab1ebbc933c9cfd83576764bfe348cdd7b..b8a7d9594afd42babdb0e93e6cfd94b41f562f6c 100644 (file)
@@ -464,7 +464,7 @@ static void dmc520_init_csrow(struct mem_ctl_info *mci)
                        dimm->grain     = pvt->mem_width_in_bytes;
                        dimm->dtype     = dt;
                        dimm->mtype     = mt;
-                       dimm->edac_mode = EDAC_FLAG_SECDED;
+                       dimm->edac_mode = EDAC_SECDED;
                        dimm->nr_pages  = pages_per_rank / csi->nr_channels;
                }
        }
index 7e7146b22c160739b75ef70a8f9811254d980200..7d08627e738b3d7bfc4339cd90583db8915229f6 100644 (file)
@@ -782,7 +782,7 @@ static void init_csrows(struct mem_ctl_info *mci)
 
                for (j = 0; j < csi->nr_channels; j++) {
                        dimm            = csi->channels[j]->dimm;
-                       dimm->edac_mode = EDAC_FLAG_SECDED;
+                       dimm->edac_mode = EDAC_SECDED;
                        dimm->mtype     = p_data->get_mtype(priv->baseaddr);
                        dimm->nr_pages  = (size >> PAGE_SHIFT) / csi->nr_channels;
                        dimm->grain     = SYNPS_EDAC_ERR_GRAIN;
index 220a58cf0a441ca2be1e5f9dc5480e06b90fd5bb..cda7d7162cbbd0dbc1ce6222227c0488f5ff7adc 100644 (file)
@@ -203,10 +203,7 @@ config INTEL_STRATIX10_RSU
          Say Y here if you want Intel RSU support.
 
 config QCOM_SCM
-       tristate "Qcom SCM driver"
-       depends on ARM || ARM64
-       depends on HAVE_ARM_SMCCC
-       select RESET_CONTROLLER
+       tristate
 
 config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
        bool "Qualcomm download mode enabled by default"
index 00fe595a5bc8972ffc90363cda045b234d96dc01..641a91819088050759f995bbf346449a28f6a53b 100644 (file)
@@ -49,6 +49,13 @@ static int ffa_device_probe(struct device *dev)
        return ffa_drv->probe(ffa_dev);
 }
 
+static void ffa_device_remove(struct device *dev)
+{
+       struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
+
+       ffa_drv->remove(to_ffa_dev(dev));
+}
+
 static int ffa_device_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
        struct ffa_device *ffa_dev = to_ffa_dev(dev);
@@ -86,6 +93,7 @@ struct bus_type ffa_bus_type = {
        .name           = "arm_ffa",
        .match          = ffa_device_match,
        .probe          = ffa_device_probe,
+       .remove         = ffa_device_remove,
        .uevent         = ffa_device_uevent,
        .dev_groups     = ffa_device_attributes_groups,
 };
@@ -127,7 +135,7 @@ static void ffa_release_device(struct device *dev)
 
 static int __ffa_devices_unregister(struct device *dev, void *data)
 {
-       ffa_release_device(dev);
+       device_unregister(dev);
 
        return 0;
 }
index 7f4d2435503b8ad2e650bce50c0cfc8e1cf616b9..3d7081e848536a5b587f0ad2254d1a692fc7c47a 100644 (file)
@@ -68,7 +68,7 @@ config ARM_SCMI_TRANSPORT_SMC
 
 config ARM_SCMI_TRANSPORT_VIRTIO
        bool "SCMI transport based on VirtIO"
-       depends on VIRTIO
+       depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL
        select ARM_SCMI_HAVE_TRANSPORT
        select ARM_SCMI_HAVE_MSG
        help
index 224577f869286c75ae2a277b738c2613b79ecfa1..11e8efb7137512fb292bb6055501829bec0cc881 100644 (file)
@@ -110,18 +110,16 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch,
        if (vioch->is_rx) {
                scmi_vio_feed_vq_rx(vioch, msg);
        } else {
-               unsigned long flags;
-
-               spin_lock_irqsave(&vioch->lock, flags);
+               /* Here IRQs are assumed to be already disabled by the caller */
+               spin_lock(&vioch->lock);
                list_add(&msg->list, &vioch->free_list);
-               spin_unlock_irqrestore(&vioch->lock, flags);
+               spin_unlock(&vioch->lock);
        }
 }
 
 static void scmi_vio_complete_cb(struct virtqueue *vqueue)
 {
        unsigned long ready_flags;
-       unsigned long flags;
        unsigned int length;
        struct scmi_vio_channel *vioch;
        struct scmi_vio_msg *msg;
@@ -140,7 +138,8 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
                        goto unlock_ready_out;
                }
 
-               spin_lock_irqsave(&vioch->lock, flags);
+               /* IRQs are already disabled here, no need to irqsave */
+               spin_lock(&vioch->lock);
                if (cb_enabled) {
                        virtqueue_disable_cb(vqueue);
                        cb_enabled = false;
@@ -151,7 +150,7 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
                                goto unlock_out;
                        cb_enabled = true;
                }
-               spin_unlock_irqrestore(&vioch->lock, flags);
+               spin_unlock(&vioch->lock);
 
                if (msg) {
                        msg->rx_len = length;
@@ -161,11 +160,18 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
                        scmi_finalize_message(vioch, msg);
                }
 
+               /*
+                * Release ready_lock and re-enable IRQs between loop iterations
+                * to allow virtio_chan_free() to possibly kick in and set the
+                * flag vioch->ready to false even in between processing of
+                * messages, so as to force outstanding messages to be ignored
+                * when system is shutting down.
+                */
                spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
        }
 
 unlock_out:
-       spin_unlock_irqrestore(&vioch->lock, flags);
+       spin_unlock(&vioch->lock);
 unlock_ready_out:
        spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
 }
@@ -384,8 +390,11 @@ static int scmi_vio_probe(struct virtio_device *vdev)
        struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];
 
        /* Only one SCMI VirtiO device allowed */
-       if (scmi_vdev)
-               return -EINVAL;
+       if (scmi_vdev) {
+               dev_err(dev,
+                       "One SCMI Virtio device was already initialized: only one allowed.\n");
+               return -EBUSY;
+       }
 
        have_vq_rx = scmi_vio_have_vq_rx(vdev);
        vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;
@@ -428,16 +437,25 @@ static int scmi_vio_probe(struct virtio_device *vdev)
        }
 
        vdev->priv = channels;
-       scmi_vdev = vdev;
+       /* Ensure initialized scmi_vdev is visible */
+       smp_store_mb(scmi_vdev, vdev);
 
        return 0;
 }
 
 static void scmi_vio_remove(struct virtio_device *vdev)
 {
+       /*
+        * Once we get here, virtio_chan_free() will have already been called by
+        * the SCMI core for any existing channel and, as a consequence, all the
+        * virtio channels will have been already marked NOT ready, causing any
+        * outstanding message on any vqueue to be ignored by complete_cb: now
+        * we can just stop processing buffers and destroy the vqueues.
+        */
        vdev->config->reset(vdev);
        vdev->config->del_vqs(vdev);
-       scmi_vdev = NULL;
+       /* Ensure scmi_vdev is visible as NULL */
+       smp_store_mb(scmi_vdev, NULL);
 }
 
 static int scmi_vio_validate(struct virtio_device *vdev)
@@ -476,7 +494,7 @@ static int __init virtio_scmi_init(void)
        return register_virtio_driver(&virtio_scmi_driver);
 }
 
-static void __exit virtio_scmi_exit(void)
+static void virtio_scmi_exit(void)
 {
        unregister_virtio_driver(&virtio_scmi_driver);
 }
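
Two patterns recur in this file: dropping the irqsave spinlock variants where the callback is documented to run with interrupts already disabled, and publishing or clearing the single scmi_vdev pointer with smp_store_mb() so the update is visible to other CPUs. A condensed sketch of both, under the stated assumption that the completion callback really runs with IRQs off:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static void *example_vdev;		/* single shared instance pointer */
static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_free_list);

/* Runs in a context where IRQs are already disabled by the caller,
 * so the plain lock suffices and irqsave/irqrestore would be redundant.
 */
static void example_complete_cb(struct list_head *msg)
{
	spin_lock(&example_lock);
	list_add(msg, &example_free_list);
	spin_unlock(&example_lock);
}

static int example_probe(void *vdev)
{
	if (example_vdev)
		return -EBUSY;		/* only one instance allowed */

	/* Store plus full barrier: the fully initialized pointer is
	 * visible before anyone can act on it.
	 */
	smp_store_mb(example_vdev, vdev);
	return 0;
}

static void example_remove(void)
{
	smp_store_mb(example_vdev, NULL);
}
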
index c99b78ee008aa3d50c4b3cf8952495bbb7976d51..f86666cf2c6a867f53a10bc407074a6be1c3d3f6 100644 (file)
@@ -1019,16 +1019,18 @@ create_feature_instance(struct build_feature_devs_info *binfo,
 {
        unsigned int irq_base, nr_irqs;
        struct dfl_feature_info *finfo;
+       u8 revision = 0;
        int ret;
-       u8 revision;
        u64 v;
 
-       v = readq(binfo->ioaddr + ofst);
-       revision = FIELD_GET(DFH_REVISION, v);
+       if (fid != FEATURE_ID_AFU) {
+               v = readq(binfo->ioaddr + ofst);
+               revision = FIELD_GET(DFH_REVISION, v);
 
-       /* read feature size and id if inputs are invalid */
-       size = size ? size : feature_size(v);
-       fid = fid ? fid : feature_id(v);
+               /* read feature size and id if inputs are invalid */
+               size = size ? size : feature_size(v);
+               fid = fid ? fid : feature_id(v);
+       }
 
        if (binfo->len - ofst < size)
                return -EINVAL;
index 69dec5af23c366f7acb2e6198b2357d0e3316acb..029d3cdb918d12e83e332ae3d2ec7096ee201324 100644 (file)
@@ -192,12 +192,19 @@ static const struct of_device_id ice40_fpga_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, ice40_fpga_of_match);
 
+static const struct spi_device_id ice40_fpga_spi_ids[] = {
+       { .name = "ice40-fpga-mgr", },
+       {},
+};
+MODULE_DEVICE_TABLE(spi, ice40_fpga_spi_ids);
+
 static struct spi_driver ice40_fpga_driver = {
        .probe = ice40_fpga_probe,
        .driver = {
                .name = "ice40spi",
                .of_match_table = of_match_ptr(ice40_fpga_of_match),
        },
+       .id_table = ice40_fpga_spi_ids,
 };
 
 module_spi_driver(ice40_fpga_driver);
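
Adding a spi_device_id table alongside the OF match table lets the SPI core bind, and emit module aliases for, devices instantiated without a matching compatible string. The structure is the same for any SPI driver; a stripped-down sketch with placeholder names:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

static int example_spi_probe(struct spi_device *spi)
{
	return 0;
}

static const struct of_device_id example_of_ids[] = {
	{ .compatible = "vendor,example" },	/* placeholder compatible */
	{ }
};
MODULE_DEVICE_TABLE(of, example_of_ids);

/* Name-based IDs used when the device is not described via DT/ACPI */
static const struct spi_device_id example_spi_ids[] = {
	{ .name = "example" },
	{ }
};
MODULE_DEVICE_TABLE(spi, example_spi_ids);

static struct spi_driver example_spi_driver = {
	.driver = {
		.name           = "example",
		.of_match_table = example_of_ids,
	},
	.probe    = example_spi_probe,
	.id_table = example_spi_ids,
};
module_spi_driver(example_spi_driver);

MODULE_LICENSE("GPL");
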
index 1afb41aa20d7151ff82ceb05d656e244b83b2079..ea2ec3c6815cb3ee0f12bdfa04958f9769211aad 100644 (file)
@@ -225,8 +225,10 @@ static int machxo2_write_init(struct fpga_manager *mgr,
                goto fail;
 
        get_status(spi, &status);
-       if (test_bit(FAIL, &status))
+       if (test_bit(FAIL, &status)) {
+               ret = -EINVAL;
                goto fail;
+       }
        dump_status_reg(&status);
 
        spi_message_init(&msg);
@@ -313,6 +315,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
        dump_status_reg(&status);
        if (!test_bit(DONE, &status)) {
                machxo2_cleanup(mgr);
+               ret = -EINVAL;
                goto fail;
        }
 
@@ -335,6 +338,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
                        break;
                if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) {
                        machxo2_cleanup(mgr);
+                       ret = -EINVAL;
                        goto fail;
                }
        } while (1);
index 05637d58515269b75c5a279291744ca2b6ad2481..4a55cdf089d62a33a793b4eb402ff08a19df720f 100644 (file)
@@ -174,6 +174,13 @@ static int gen_74x164_remove(struct spi_device *spi)
        return 0;
 }
 
+static const struct spi_device_id gen_74x164_spi_ids[] = {
+       { .name = "74hc595" },
+       { .name = "74lvc594" },
+       {},
+};
+MODULE_DEVICE_TABLE(spi, gen_74x164_spi_ids);
+
 static const struct of_device_id gen_74x164_dt_ids[] = {
        { .compatible = "fairchild,74hc595" },
        { .compatible = "nxp,74lvc594" },
@@ -188,6 +195,7 @@ static struct spi_driver gen_74x164_driver = {
        },
        .probe          = gen_74x164_probe,
        .remove         = gen_74x164_remove,
+       .id_table       = gen_74x164_spi_ids,
 };
 module_spi_driver(gen_74x164_driver);
 
index 10f303d15225619d5aa07a36854d6522e12660f9..3d6ef37a7702a6ae775c1fb863e5882062dec372 100644 (file)
@@ -395,7 +395,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
                reg = ioread32(bank_reg(data, bank, reg_irq_status));
 
                for_each_set_bit(p, &reg, 32)
-                       generic_handle_domain_irq(gc->irq.domain, i * 32 + p);
+                       generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2);
        }
 
        chained_irq_exit(ic, desc);
index 0a9d746a0fe0a22fd654e3c90de1ce7e7532d9ac..d26bff29157b5d9b5004dbfc59457d99e2ca5dbc 100644 (file)
@@ -476,10 +476,19 @@ static struct platform_device *gpio_mockup_pdevs[GPIO_MOCKUP_MAX_GC];
 
 static void gpio_mockup_unregister_pdevs(void)
 {
+       struct platform_device *pdev;
+       struct fwnode_handle *fwnode;
        int i;
 
-       for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++)
-               platform_device_unregister(gpio_mockup_pdevs[i]);
+       for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++) {
+               pdev = gpio_mockup_pdevs[i];
+               if (!pdev)
+                       continue;
+
+               fwnode = dev_fwnode(&pdev->dev);
+               platform_device_unregister(pdev);
+               fwnode_remove_software_node(fwnode);
+       }
 }
 
 static __init char **gpio_mockup_make_line_names(const char *label,
@@ -508,6 +517,7 @@ static int __init gpio_mockup_register_chip(int idx)
        struct property_entry properties[GPIO_MOCKUP_MAX_PROP];
        struct platform_device_info pdevinfo;
        struct platform_device *pdev;
+       struct fwnode_handle *fwnode;
        char **line_names = NULL;
        char chip_label[32];
        int prop = 0, base;
@@ -536,13 +546,18 @@ static int __init gpio_mockup_register_chip(int idx)
                                        "gpio-line-names", line_names, ngpio);
        }
 
+       fwnode = fwnode_create_software_node(properties, NULL);
+       if (IS_ERR(fwnode))
+               return PTR_ERR(fwnode);
+
        pdevinfo.name = "gpio-mockup";
        pdevinfo.id = idx;
-       pdevinfo.properties = properties;
+       pdevinfo.fwnode = fwnode;
 
        pdev = platform_device_register_full(&pdevinfo);
        kfree_strarray(line_names, ngpio);
        if (IS_ERR(pdev)) {
+               fwnode_remove_software_node(fwnode);
                pr_err("error registering device");
                return PTR_ERR(pdev);
        }
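
Passing the properties through a software node means the fwnode outlives platform_device_register_full() and must be removed explicitly, both when registration fails and after the device is later unregistered. A minimal sketch of that create/attach/remove sequence (error handling trimmed, names illustrative):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static struct platform_device *example_register(const struct property_entry *props)
{
	struct platform_device_info pdevinfo = { .name = "example", .id = 0 };
	struct platform_device *pdev;
	struct fwnode_handle *fwnode;

	fwnode = fwnode_create_software_node(props, NULL);
	if (IS_ERR(fwnode))
		return ERR_CAST(fwnode);

	pdevinfo.fwnode = fwnode;
	pdev = platform_device_register_full(&pdevinfo);
	if (IS_ERR(pdev))
		fwnode_remove_software_node(fwnode);	/* node is not auto-freed */
	return pdev;
}

static void example_unregister(struct platform_device *pdev)
{
	struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);

	platform_device_unregister(pdev);
	fwnode_remove_software_node(fwnode);	/* drop our node last */
}
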
index f5cfc0698799aa87d6b9bc0d13ef3d8b06c4b3b0..d2fe76f3f34fd4930c62bc1b5213422bb04386b9 100644 (file)
@@ -468,15 +468,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
        mutex_lock(&chip->i2c_lock);
        ret = regmap_read(chip->regmap, inreg, &reg_val);
        mutex_unlock(&chip->i2c_lock);
-       if (ret < 0) {
-               /*
-                * NOTE:
-                * diagnostic already emitted; that's all we should
-                * do unless gpio_*_value_cansleep() calls become different
-                * from their nonsleeping siblings (and report faults).
-                */
-               return 0;
-       }
+       if (ret < 0)
+               return ret;
 
        return !!(reg_val & bit);
 }
@@ -566,21 +559,21 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
 
        mutex_lock(&chip->i2c_lock);
 
-       /* Disable pull-up/pull-down */
-       ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
-       if (ret)
-               goto exit;
-
        /* Configure pull-up/pull-down */
        if (config == PIN_CONFIG_BIAS_PULL_UP)
                ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, bit);
        else if (config == PIN_CONFIG_BIAS_PULL_DOWN)
                ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, 0);
+       else
+               ret = 0;
        if (ret)
                goto exit;
 
-       /* Enable pull-up/pull-down */
-       ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
+       /* Disable/Enable pull-up/pull-down */
+       if (config == PIN_CONFIG_BIAS_DISABLE)
+               ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
+       else
+               ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
 
 exit:
        mutex_unlock(&chip->i2c_lock);
@@ -594,7 +587,9 @@ static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
 
        switch (pinconf_to_config_param(config)) {
        case PIN_CONFIG_BIAS_PULL_UP:
+       case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
        case PIN_CONFIG_BIAS_PULL_DOWN:
+       case PIN_CONFIG_BIAS_DISABLE:
                return pca953x_gpio_set_pull_up_down(chip, offset, config);
        default:
                return -ENOTSUPP;
index 036b2d95950355d83eb5efa74cf60ac37f34e57e..ce63cbd14d69a0444b277f638d70c9303ad58523 100644 (file)
@@ -141,7 +141,7 @@ static int rockchip_gpio_get_direction(struct gpio_chip *chip,
        u32 data;
 
        data = rockchip_gpio_readl_bit(bank, offset, bank->gpio_regs->port_ddr);
-       if (data & BIT(offset))
+       if (data)
                return GPIO_LINE_DIRECTION_OUT;
 
        return GPIO_LINE_DIRECTION_IN;
@@ -195,7 +195,7 @@ static int rockchip_gpio_set_debounce(struct gpio_chip *gc,
        unsigned int cur_div_reg;
        u64 div;
 
-       if (!IS_ERR(bank->db_clk)) {
+       if (bank->gpio_type == GPIO_TYPE_V2 && !IS_ERR(bank->db_clk)) {
                div_debounce_support = true;
                freq = clk_get_rate(bank->db_clk);
                max_debounce = (GENMASK(23, 0) + 1) * 2 * 1000000 / freq;
@@ -689,6 +689,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
        struct device_node *pctlnp = of_get_parent(np);
        struct pinctrl_dev *pctldev = NULL;
        struct rockchip_pin_bank *bank = NULL;
+       struct rockchip_pin_output_deferred *cfg;
        static int gpio;
        int id, ret;
 
@@ -716,12 +717,33 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       /*
+        * Prevent clashes with a deferred output setting
+        * being added right at this moment.
+        */
+       mutex_lock(&bank->deferred_lock);
+
        ret = rockchip_gpiolib_register(bank);
        if (ret) {
                clk_disable_unprepare(bank->clk);
+               mutex_unlock(&bank->deferred_lock);
                return ret;
        }
 
+       while (!list_empty(&bank->deferred_output)) {
+               cfg = list_first_entry(&bank->deferred_output,
+                                      struct rockchip_pin_output_deferred, head);
+               list_del(&cfg->head);
+
+               ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
+               if (ret)
+                       dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, cfg->arg);
+
+               kfree(cfg);
+       }
+
+       mutex_unlock(&bank->deferred_lock);
+
        platform_set_drvdata(pdev, bank);
        dev_info(dev, "probed %pOF\n", np);
 
index f99f3c10bed036b5ec6878d24746f398fae1f0b2..39dca147d587a93ddca942ccba90529e816f66ba 100644 (file)
@@ -184,7 +184,7 @@ static void uniphier_gpio_irq_mask(struct irq_data *data)
 
        uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0);
 
-       return irq_chip_mask_parent(data);
+       irq_chip_mask_parent(data);
 }
 
 static void uniphier_gpio_irq_unmask(struct irq_data *data)
@@ -194,7 +194,7 @@ static void uniphier_gpio_irq_unmask(struct irq_data *data)
 
        uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask);
 
-       return irq_chip_unmask_parent(data);
+       irq_chip_unmask_parent(data);
 }
 
 static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)
index 411525ac4cc45c020642592946d215ccd81363ee..47712b6903b51fecb1d5d9f4f1648f80de68d417 100644 (file)
@@ -313,9 +313,11 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
 
        ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
        if (ret)
-               gpiochip_free_own_desc(desc);
+               dev_warn(chip->parent,
+                        "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
+                        pin, ret);
 
-       return ret ? ERR_PTR(ret) : desc;
+       return desc;
 }
 
 static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
index dc3c6b3a00e531f2e325b59de35db3804bf6b6f3..269437b01328006936a9c0fded1e6b46a249beac 100644 (file)
@@ -758,7 +758,7 @@ enum amd_hw_ip_block_type {
        MAX_HWIP
 };
 
-#define HWIP_MAX_INSTANCE      8
+#define HWIP_MAX_INSTANCE      10
 
 struct amd_powerplay {
        void *pp_handle;
@@ -1087,6 +1087,7 @@ struct amdgpu_device {
 
        bool                            no_hw_access;
        struct pci_saved_state          *pci_state;
+       pci_channel_state_t             pci_channel_state;
 
        struct amdgpu_reset_control     *reset_cntl;
 };
index 3003ee1c94873a195ac2daf2a56b2899b74c4940..1d41c2c00623ba209b32f4a034e960ecdadb5c2e 100644 (file)
@@ -192,6 +192,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
                kgd2kfd_suspend(adev->kfd.dev, run_pm);
 }
 
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
+{
+       int r = 0;
+
+       if (adev->kfd.dev)
+               r = kgd2kfd_resume_iommu(adev->kfd.dev);
+
+       return r;
+}
+
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
 {
        int r = 0;
index ec028cf963f5a70404df5e26bc5371fa94ab43b8..3bc52b2c604fbe50e79cc8c124fb74920451e843 100644 (file)
@@ -137,6 +137,7 @@ int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
 
 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
                        const void *ih_ring_entry);
@@ -327,6 +328,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
                         const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
@@ -365,6 +367,11 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
 }
 
+static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+{
+       return 0;
+}
+
 static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
        return 0;
index 2d6b2d77b73848e2e43144ca9ce94a5ec8c62a20..054c1a224defb8e6bce04523fd46978aab228108 100644 (file)
@@ -563,6 +563,7 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
 
        dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
        sg_free_table(ttm->sg);
+       kfree(ttm->sg);
        ttm->sg = NULL;
 }
 
index 277128846dd128cd78a890c7b772cd99c023caeb..463b9c0283f7ea3862441f5923139e632d11554a 100644 (file)
@@ -1544,20 +1544,18 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
        struct dentry *ent;
        int r, i;
 
-
-
        ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
                                  &fops_ib_preempt);
-       if (!ent) {
+       if (IS_ERR(ent)) {
                DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
-               return -EIO;
+               return PTR_ERR(ent);
        }
 
        ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
                                  &fops_sclk_set);
-       if (!ent) {
+       if (IS_ERR(ent)) {
                DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
-               return -EIO;
+               return PTR_ERR(ent);
        }
 
        /* Register debugfs entries for amdgpu_ttm */
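
Since the debugfs creation helpers return ERR_PTR values rather than NULL on failure, a NULL check can never trigger; the hunks switch the checks to IS_ERR()/PTR_ERR(). In isolation the pattern is:

#include <linux/debugfs.h>
#include <linux/err.h>

static int example_debugfs_init(struct dentry *root, void *priv,
				const struct file_operations *fops)
{
	struct dentry *ent;

	ent = debugfs_create_file("example_node", 0600, root, priv, fops);
	if (IS_ERR(ent))		/* never NULL: failures come back as ERR_PTR() */
		return PTR_ERR(ent);

	return 0;
}
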
index 41c6b3aacd37921ab5c392ee84240561fb2624bb..af9bdf16eefd48761fdd9e1ce24627d0c3e083e4 100644 (file)
@@ -2432,6 +2432,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (!adev->gmc.xgmi.pending_reset)
                amdgpu_amdkfd_device_init(adev);
 
+       r = amdgpu_amdkfd_resume_iommu(adev);
+       if (r)
+               goto init_failed;
+
        amdgpu_fru_get_product_info(adev);
 
 init_failed:
@@ -3148,6 +3152,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
 {
        int r;
 
+       r = amdgpu_amdkfd_resume_iommu(adev);
+       if (r)
+               return r;
+
        r = amdgpu_device_ip_resume_phase1(adev);
        if (r)
                return r;
@@ -4601,6 +4609,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
                                dev_warn(tmp_adev->dev, "asic atom init failed!");
                        } else {
                                dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+                               r = amdgpu_amdkfd_resume_iommu(tmp_adev);
+                               if (r)
+                                       goto out;
+
                                r = amdgpu_device_ip_resume_phase1(tmp_adev);
                                if (r)
                                        goto out;
@@ -5387,6 +5399,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
+       adev->pci_channel_state = state;
+
        switch (state) {
        case pci_channel_io_normal:
                return PCI_ERS_RESULT_CAN_RECOVER;
@@ -5529,6 +5543,10 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
 
        DRM_INFO("PCI error: resume callback!!\n");
 
+       /* Only continue execution for the case of pci_channel_io_frozen */
+       if (adev->pci_channel_state != pci_channel_io_frozen)
+               return;
+
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
 
index 7a731673191168bee5042115b729812a35607d20..dc50c05f23fc2d6490d045bf8ea444a2ab340748 100644 (file)
@@ -837,6 +837,28 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
        return 0;
 }
 
+/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
+static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
+{
+       u64 micro_tile_mode;
+
+       /* Zero swizzle mode means linear */
+       if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
+               return 0;
+
+       micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
+       switch (micro_tile_mode) {
+       case 0: /* DISPLAY */
+       case 3: /* RENDER */
+               return 0;
+       default:
+               drm_dbg_kms(afb->base.dev,
+                           "Micro tile mode %llu not supported for scanout\n",
+                           micro_tile_mode);
+               return -EINVAL;
+       }
+}
+
 static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
                                 unsigned int *width, unsigned int *height)
 {
@@ -1103,6 +1125,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
                                    const struct drm_mode_fb_cmd2 *mode_cmd,
                                    struct drm_gem_object *obj)
 {
+       struct amdgpu_device *adev = drm_to_adev(dev);
        int ret, i;
 
        /*
@@ -1122,6 +1145,14 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
        if (ret)
                return ret;
 
+       if (!dev->mode_config.allow_fb_modifiers) {
+               drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
+                             "GFX9+ requires FB check based on format modifier\n");
+               ret = check_tiling_flags_gfx6(rfb);
+               if (ret)
+                       return ret;
+       }
+
        if (dev->mode_config.allow_fb_modifiers &&
            !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
                ret = convert_tiling_flags_to_modifier(rfb);
index e7f06bd0f0cd959f1b6f188183e8c15647484d84..1916ec84dd71f8a4bd787d13322165aadc71a023 100644 (file)
@@ -31,6 +31,8 @@
 /* delay 0.1 second to enable gfx off feature */
 #define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)
 
+#define GFX_OFF_NO_DELAY 0
+
 /*
  * GPU GFX IP block helpers function.
  */
@@ -558,6 +560,8 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
 
 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 {
+       unsigned long delay = GFX_OFF_DELAY_ENABLE;
+
        if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
                return;
 
@@ -573,8 +577,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 
                adev->gfx.gfx_off_req_count--;
 
-               if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
-                       schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+               if (adev->gfx.gfx_off_req_count == 0 &&
+                   !adev->gfx.gfx_off_state) {
+                       /* If going to s2idle, no need to wait */
+                       if (adev->in_s0ix)
+                               delay = GFX_OFF_NO_DELAY;
+                       schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+                                             delay);
+               }
        } else {
                if (adev->gfx.gfx_off_req_count == 0) {
                        cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
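
When the last GFXOFF reference is dropped, re-enabling is deferred through a delayed work item, and the new code simply shortens that delay to zero on the way into s0ix. A condensed sketch of the conditional-delay idiom, assuming an already initialized delayed_work:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define EXAMPLE_OFF_DELAY	msecs_to_jiffies(100)
#define EXAMPLE_OFF_NO_DELAY	0

static void example_request_off(struct delayed_work *off_work, bool entering_s0ix)
{
	unsigned long delay = EXAMPLE_OFF_DELAY;

	/* No point debouncing if the system is about to suspend anyway. */
	if (entering_s0ix)
		delay = EXAMPLE_OFF_NO_DELAY;

	schedule_delayed_work(off_work, delay);
}
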
index c7797eac83c3a09cccbca9a9df03692aa318c6e4..9ff600a38559d8f37b761797b012f1b39c75219e 100644 (file)
@@ -598,7 +598,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
                break;
        default:
                adev->gmc.tmz_enabled = false;
-               dev_warn(adev->dev,
+               dev_info(adev->dev,
                         "Trusted Memory Zone (TMZ) feature not supported\n");
                break;
        }
index dc44c946a2442a430559a5625e28f7011abbf4ff..98732518543e539e601866a4beda3666732408db 100644 (file)
@@ -757,7 +757,7 @@ Out:
        return res;
 }
 
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void)
+uint32_t amdgpu_ras_eeprom_max_record_count(void)
 {
        return RAS_MAX_RECORD_COUNT;
 }
index f95fc61b30219c9b6d27a2ff1befe21a41294b32..6bb00578bfbbcdc2cb99e85304007d85ab09560e 100644 (file)
@@ -120,7 +120,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
 int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
                             struct eeprom_table_record *records, const u32 num);
 
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void);
+uint32_t amdgpu_ras_eeprom_max_record_count(void);
 
 void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
 
index 7b634a1517f9c1e90c0f8c61f4e8d9735fe279e5..0554576d36955832c2483043f5aaeff08fea45e4 100644 (file)
@@ -428,8 +428,8 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
        ent = debugfs_create_file(name,
                                  S_IFREG | S_IRUGO, root,
                                  ring, &amdgpu_debugfs_ring_fops);
-       if (!ent)
-               return -ENOMEM;
+       if (IS_ERR(ent))
+               return PTR_ERR(ent);
 
        i_size_write(ent->d_inode, ring->ring_size + 12);
        ring->ent = ent;
index 38dade421d465e6f4dd0841d6082f998c80e2e34..94126dc3968882fc4bda5d1a045db6f6655c2b5b 100644 (file)
@@ -515,6 +515,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                goto out;
        }
 
+       if (bo->type == ttm_bo_type_device &&
+           new_mem->mem_type == TTM_PL_VRAM &&
+           old_mem->mem_type != TTM_PL_VRAM) {
+               /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+                * accesses the BO after it's moved.
+                */
+               abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+       }
+
        if (adev->mman.buffer_funcs_enabled) {
                if (((old_mem->mem_type == TTM_PL_SYSTEM &&
                      new_mem->mem_type == TTM_PL_VRAM) ||
@@ -545,15 +554,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                        return r;
        }
 
-       if (bo->type == ttm_bo_type_device &&
-           new_mem->mem_type == TTM_PL_VRAM &&
-           old_mem->mem_type != TTM_PL_VRAM) {
-               /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
-                * accesses the BO after it's moved.
-                */
-               abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-       }
-
 out:
        /* update statistics */
        atomic64_add(bo->base.size, &adev->num_bytes_moved);
index 603c259b073b4035dacc44f1f9d1647b1d043eb9..025184a556ee6ab539503b6808d4fe261ecd186e 100644 (file)
@@ -3599,7 +3599,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
 
        /* set static priority for a queue/ring */
        gfx_v9_0_mqd_set_priority(ring, mqd);
-       mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+       mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
 
        /* map_queues packet doesn't need activate the queue,
         * so only kiq need set this field.
index 41c3a0d70b7c0b309af25d8495c07c0c94edbad8..e47104a1f55967958ad4982af315b955f9865d0e 100644 (file)
@@ -1098,6 +1098,8 @@ static int gmc_v10_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       gmc_v10_0_gart_disable(adev);
+
        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
@@ -1106,7 +1108,6 @@ static int gmc_v10_0_hw_fini(void *handle)
 
        amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
-       gmc_v10_0_gart_disable(adev);
 
        return 0;
 }
index d90c16a6b2b8085d701249d0a0eaf024a8ba21cb..5551359d5dfdc77f55e9c868a47e550032926674 100644 (file)
@@ -1794,6 +1794,8 @@ static int gmc_v9_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       gmc_v9_0_gart_disable(adev);
+
        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
@@ -1802,7 +1804,6 @@ static int gmc_v9_0_hw_fini(void *handle)
 
        amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
-       gmc_v9_0_gart_disable(adev);
 
        return 0;
 }
index 779f5c911e1123188d075f4973877a5aec2d2bc1..e32efcfb0c8b14196165860d8ada47211b4b33a8 100644 (file)
@@ -868,6 +868,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
                        msleep(1000);
        }
 
+       /* TODO: check whether we can submit a doorbell request to raise
+        * a doorbell fence to exit gfxoff.
+        */
+       if (adev->in_s0ix)
+               amdgpu_gfx_off_ctrl(adev, false);
+
        sdma_v5_2_soft_reset(adev);
        /* unhalt the MEs */
        sdma_v5_2_enable(adev, true);
@@ -876,6 +882,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
 
        /* start the gfx rings and rlc compute queues */
        r = sdma_v5_2_gfx_resume(adev);
+       if (adev->in_s0ix)
+               amdgpu_gfx_off_ctrl(adev, true);
        if (r)
                return r;
        r = sdma_v5_2_rlc_resume(adev);
index 16a57b70cc1a97a3a485b7aa297d3c2414205587..4a416231b24c8b42a1269227c5e7789702f9016e 100644 (file)
@@ -468,6 +468,7 @@ static const struct kfd_device_info navi10_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 145,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -487,6 +488,7 @@ static const struct kfd_device_info navi12_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 145,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -506,6 +508,7 @@ static const struct kfd_device_info navi14_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 145,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -525,6 +528,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 4,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -544,6 +548,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -562,7 +567,8 @@ static const struct kfd_device_info vangogh_device_info = {
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .needs_iommu_device = false,
        .supports_cwsr = true,
-       .needs_pci_atomics = false,
+       .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 1,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 2,
@@ -582,6 +588,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -601,6 +608,7 @@ static const struct kfd_device_info beige_goby_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 1,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -619,7 +627,8 @@ static const struct kfd_device_info yellow_carp_device_info = {
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .needs_iommu_device = false,
        .supports_cwsr = true,
-       .needs_pci_atomics = false,
+       .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 1,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 2,
@@ -708,20 +717,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
        if (!kfd)
                return NULL;
 
-       /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
-        * 32 and 64-bit requests are possible and must be
-        * supported.
-        */
-       kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
-       if (device_info->needs_pci_atomics &&
-           !kfd->pci_atomic_requested) {
-               dev_info(kfd_device,
-                        "skipped device %x:%x, PCI rejects atomics\n",
-                        pdev->vendor, pdev->device);
-               kfree(kfd);
-               return NULL;
-       }
-
        kfd->kgd = kgd;
        kfd->device_info = device_info;
        kfd->pdev = pdev;
@@ -821,6 +816,23 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
                        - kfd->vm_info.first_vmid_kfd + 1;
 
+       /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+        * 32 and 64-bit requests are possible and must be
+        * supported.
+        */
+       kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd);
+       if (!kfd->pci_atomic_requested &&
+           kfd->device_info->needs_pci_atomics &&
+           (!kfd->device_info->no_atomic_fw_version ||
+            kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
+               dev_info(kfd_device,
+                        "skipped device %x:%x, PCI rejects atomics %d<%d\n",
+                        kfd->pdev->vendor, kfd->pdev->device,
+                        kfd->mec_fw_version,
+                        kfd->device_info->no_atomic_fw_version);
+               return false;
+       }
+
        /* Verify module parameters regarding mapped process number*/
        if ((hws_max_conc_proc < 0)
                        || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
@@ -959,7 +971,6 @@ out:
 void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
        if (kfd->init_complete) {
-               svm_migrate_fini((struct amdgpu_device *)kfd->kgd);
                device_queue_manager_uninit(kfd->dqm);
                kfd_interrupt_exit(kfd);
                kfd_topology_remove_device(kfd);
@@ -1057,31 +1068,29 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
        return ret;
 }
 
-static int kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
 {
        int err = 0;
 
        err = kfd_iommu_resume(kfd);
-       if (err) {
+       if (err)
                dev_err(kfd_device,
                        "Failed to resume IOMMU for device %x:%x\n",
                        kfd->pdev->vendor, kfd->pdev->device);
-               return err;
-       }
+       return err;
+}
+
+static int kfd_resume(struct kfd_dev *kfd)
+{
+       int err = 0;
 
        err = kfd->dqm->ops.start(kfd->dqm);
-       if (err) {
+       if (err)
                dev_err(kfd_device,
                        "Error starting queue manager for device %x:%x\n",
                        kfd->pdev->vendor, kfd->pdev->device);
-               goto dqm_start_error;
-       }
 
        return err;
-
-dqm_start_error:
-       kfd_iommu_suspend(kfd);
-       return err;
 }
 
 static inline void kfd_queue_work(struct workqueue_struct *wq,
index dab290a4d19d1020c2739f5c63e13c5a71d9da77..4a16e3c257b9268b4eb6ff2788a4685610ceaca6 100644 (file)
@@ -891,9 +891,16 @@ int svm_migrate_init(struct amdgpu_device *adev)
        pgmap->ops = &svm_migrate_pgmap_ops;
        pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
        pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+
+       /* Device manager releases device-specific resources, memory region and
+        * pgmap when driver disconnects from device.
+        */
        r = devm_memremap_pages(adev->dev, pgmap);
        if (IS_ERR(r)) {
                pr_err("failed to register HMM device memory\n");
+
+               /* Disable SVM support capability */
+               pgmap->type = 0;
                devm_release_mem_region(adev->dev, res->start,
                                        res->end - res->start + 1);
                return PTR_ERR(r);
@@ -908,12 +915,3 @@ int svm_migrate_init(struct amdgpu_device *adev)
 
        return 0;
 }
-
-void svm_migrate_fini(struct amdgpu_device *adev)
-{
-       struct dev_pagemap *pgmap = &adev->kfd.dev->pgmap;
-
-       devm_memunmap_pages(adev->dev, pgmap);
-       devm_release_mem_region(adev->dev, pgmap->range.start,
-                               pgmap->range.end - pgmap->range.start + 1);
-}
index 0de76b5d49739afcd283dd1b9e3aa96c43dd6c8e..2f5b3394c9ed9ad0f8c67d1f98f8bb960e9c5ce0 100644 (file)
@@ -47,7 +47,6 @@ unsigned long
 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
 
 int svm_migrate_init(struct amdgpu_device *adev);
-void svm_migrate_fini(struct amdgpu_device *adev);
 
 #else
 
@@ -55,10 +54,6 @@ static inline int svm_migrate_init(struct amdgpu_device *adev)
 {
        return 0;
 }
-static inline void svm_migrate_fini(struct amdgpu_device *adev)
-{
-       /* empty */
-}
 
 #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
 
index ab83b0de6b22d5d152dd5f3023e48f80013007d0..6d8f9bb2d9057310a2cb3ca47017592c94fae420 100644 (file)
@@ -207,6 +207,7 @@ struct kfd_device_info {
        bool supports_cwsr;
        bool needs_iommu_device;
        bool needs_pci_atomics;
+       uint32_t no_atomic_fw_version;
        unsigned int num_sdma_engines;
        unsigned int num_xgmi_sdma_engines;
        unsigned int num_sdma_queues_per_engine;
index 9fc8021bb0ab25a430e4957b3e6a16c636d937ad..9d0f65a90002d89478ca5832f617304e42fd7134 100644 (file)
@@ -118,6 +118,13 @@ static void svm_range_remove_notifier(struct svm_range *prange)
                mmu_interval_notifier_remove(&prange->notifier);
 }
 
+static bool
+svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
+{
+       return dma_addr && !dma_mapping_error(dev, dma_addr) &&
+              !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
+}
+
 static int
 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
                      unsigned long offset, unsigned long npages,
@@ -139,8 +146,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 
        addr += offset;
        for (i = 0; i < npages; i++) {
-               if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
-                             "leaking dma mapping\n"))
+               if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
                        dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
 
                page = hmm_pfn_to_page(hmm_pfns[i]);
@@ -209,7 +215,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
                return;
 
        for (i = offset; i < offset + npages; i++) {
-               if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
+               if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
                        continue;
                pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
                dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
@@ -1165,7 +1171,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        unsigned long last_start;
        int last_domain;
        int r = 0;
-       int64_t i;
+       int64_t i, j;
 
        last_start = prange->start + offset;
 
@@ -1178,7 +1184,11 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        for (i = offset; i < offset + npages; i++) {
                last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
                dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
-               if ((prange->start + i) < prange->last &&
+
+               /* Collect all pages in the same address range and memory domain
+                * that can be mapped with a single call to update mapping.
+                */
+               if (i < offset + npages - 1 &&
                    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
                        continue;
 
@@ -1201,6 +1211,10 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                                                NULL, dma_addr,
                                                &vm->last_update,
                                                &table_freed);
+
+               for (j = last_start - prange->start; j <= i; j++)
+                       dma_addr[j] |= last_domain;
+
                if (r) {
                        pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
                        goto out;
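
The new helper treats an address as a valid system-memory DMA mapping only when it is non-zero, not a dma_mapping_error() value, and not tagged with the VRAM-domain bit, so VRAM pages are no longer unmapped as if they were DMA-mapped system pages. A reduced version of the check, with the domain bit as a stand-in constant:

#include <linux/dma-mapping.h>

#define EXAMPLE_VRAM_DOMAIN	(1ULL << 0)	/* placeholder for the real flag */

static bool example_is_sysmem_mapping(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
	       !(dma_addr & EXAMPLE_VRAM_DOMAIN);
}

static void example_unmap(struct device *dev, dma_addr_t *addrs, unsigned long n)
{
	unsigned long i;

	for (i = 0; i < n; i++) {
		if (!example_is_sysmem_mapping(dev, addrs[i]))
			continue;	/* skip VRAM-tagged or unmapped slots */
		dma_unmap_page(dev, addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
	}
}
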
index 7dffc04a557ea12fd01f3f5454ab51edcf3738b6..127667e549c199730cef24109417f39b960a44a8 100644 (file)
@@ -25,6 +25,8 @@ config DRM_AMD_DC_HDCP
 
 config DRM_AMD_DC_SI
        bool "AMD DC support for Southern Islands ASICs"
+       depends on DRM_AMDGPU_SI
+       depends on DRM_AMD_DC
        default n
        help
          Choose this option to enable new AMD DC support for SI asics
index 9b1fc54555ee63130dcbaf49f2750ddf325cfae7..1ea31dcc7a8b0af245272277c535a7a701edf0f8 100644 (file)
@@ -998,6 +998,8 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
 
+       memset(pa_config, 0, sizeof(*pa_config));
+
        logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
 
@@ -1113,6 +1115,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+       init_data.asic_id.chip_id = adev->pdev->device;
 
        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
@@ -1717,6 +1720,7 @@ static int dm_late_init(void *handle)
                linear_lut[i] = 0xFFFF * i / 15;
 
        params.set = 0;
+       params.backlight_ramping_override = false;
        params.backlight_ramping_start = 0xCCCC;
        params.backlight_ramping_reduction = 0xCCCCCCCC;
        params.backlight_lut_array_size = 16;
@@ -6024,21 +6028,23 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
                return 0;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-       work = kzalloc(sizeof(*work), GFP_ATOMIC);
-       if (!work)
-               return -ENOMEM;
+       if (dm->vblank_control_workqueue) {
+               work = kzalloc(sizeof(*work), GFP_ATOMIC);
+               if (!work)
+                       return -ENOMEM;
 
-       INIT_WORK(&work->work, vblank_control_worker);
-       work->dm = dm;
-       work->acrtc = acrtc;
-       work->enable = enable;
+               INIT_WORK(&work->work, vblank_control_worker);
+               work->dm = dm;
+               work->acrtc = acrtc;
+               work->enable = enable;
 
-       if (acrtc_state->stream) {
-               dc_stream_retain(acrtc_state->stream);
-               work->stream = acrtc_state->stream;
-       }
+               if (acrtc_state->stream) {
+                       dc_stream_retain(acrtc_state->stream);
+                       work->stream = acrtc_state->stream;
+               }
 
-       queue_work(dm->vblank_control_workqueue, &work->work);
+               queue_work(dm->vblank_control_workqueue, &work->work);
+       }
 #endif
 
        return 0;
@@ -6792,14 +6798,15 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
-                                           struct dc_state *dc_state)
+                                           struct dc_state *dc_state,
+                                           struct dsc_mst_fairness_vars *vars)
 {
        struct dc_stream_state *stream = NULL;
        struct drm_connector *connector;
        struct drm_connector_state *new_con_state;
        struct amdgpu_dm_connector *aconnector;
        struct dm_connector_state *dm_conn_state;
-       int i, j, clock, bpp;
+       int i, j, clock;
        int vcpi, pbn_div, pbn = 0;
 
        for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -6838,9 +6845,15 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
                }
 
                pbn_div = dm_mst_get_pbn_divider(stream->link);
-               bpp = stream->timing.dsc_cfg.bits_per_pixel;
                clock = stream->timing.pix_clk_100hz / 10;
-               pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+               /* pbn is calculated by compute_mst_dsc_configs_for_state*/
+               for (j = 0; j < dc_state->stream_count; j++) {
+                       if (vars[j].aconnector == aconnector) {
+                               pbn = vars[j].pbn;
+                               break;
+                       }
+               }
+
                vcpi = drm_dp_mst_atomic_enable_dsc(state,
                                                    aconnector->port,
                                                    pbn, pbn_div,
@@ -7519,6 +7532,32 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
        }
 }
 
+static void amdgpu_set_panel_orientation(struct drm_connector *connector)
+{
+       struct drm_encoder *encoder;
+       struct amdgpu_encoder *amdgpu_encoder;
+       const struct drm_display_mode *native_mode;
+
+       if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+           connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+               return;
+
+       encoder = amdgpu_dm_connector_to_encoder(connector);
+       if (!encoder)
+               return;
+
+       amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+       native_mode = &amdgpu_encoder->native_mode;
+       if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
+               return;
+
+       drm_connector_set_panel_orientation_with_quirk(connector,
+                                                      DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+                                                      native_mode->hdisplay,
+                                                      native_mode->vdisplay);
+}
+
 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
                                              struct edid *edid)
 {
@@ -7547,6 +7586,8 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
                 * restored here.
                 */
                amdgpu_dm_update_freesync_caps(connector, edid);
+
+               amdgpu_set_panel_orientation(connector);
        } else {
                amdgpu_dm_connector->num_modes = 0;
        }
@@ -8058,8 +8099,26 @@ static bool is_content_protection_different(struct drm_connector_state *state,
            state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
                state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 
-       /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
-        * hot-plug, headless s3, dpms
+       /* Stream removed and re-enabled
+        *
+        * Can sometimes overlap with the HPD case,
+        * thus set update_hdcp to false to avoid
+        * setting HDCP multiple times.
+        *
+        * Handles:     DESIRED -> DESIRED (Special case)
+        */
+       if (!(old_state->crtc && old_state->crtc->enabled) &&
+               state->crtc && state->crtc->enabled &&
+               connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+               dm_con_state->update_hdcp = false;
+               return true;
+       }
+
+       /* Hot-plug, headless s3, dpms
+        *
+        * Only start HDCP if the display is connected/enabled.
+        * update_hdcp flag will be set to false until the next
+        * HPD comes in.
         *
         * Handles:     DESIRED -> DESIRED (Special case)
         */
@@ -8648,7 +8707,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                 * If PSR or idle optimizations are enabled then flush out
                 * any pending work before hardware programming.
                 */
-               flush_workqueue(dm->vblank_control_workqueue);
+               if (dm->vblank_control_workqueue)
+                       flush_workqueue(dm->vblank_control_workqueue);
 #endif
 
                bundle->stream_update.stream = acrtc_state->stream;
@@ -8983,7 +9043,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                /* if there mode set or reset, disable eDP PSR */
                if (mode_set_reset_required) {
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-                       flush_workqueue(dm->vblank_control_workqueue);
+                       if (dm->vblank_control_workqueue)
+                               flush_workqueue(dm->vblank_control_workqueue);
 #endif
                        amdgpu_dm_psr_disable_all(dm);
                }
@@ -10243,6 +10304,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
        int ret, i;
        bool lock_and_validation_needed = false;
        struct dm_crtc_state *dm_old_crtc_state;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       struct dsc_mst_fairness_vars vars[MAX_PIPES];
+#endif
 
        trace_amdgpu_dm_atomic_check_begin(state);
 
@@ -10473,10 +10537,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-               if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
+               if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
                        goto fail;
 
-               ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
+               ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
                if (ret)
                        goto fail;
 #endif
@@ -10492,7 +10556,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
                status = dc_validate_global_state(dc, dm_state->context, false);
                if (status != DC_OK) {
-                       DC_LOG_WARNING("DC global validation failure: %s (%d)",
+                       drm_dbg_atomic(dev,
+                                      "DC global validation failure: %s (%d)",
                                       dc_status_to_str(status), status);
                        ret = -EINVAL;
                        goto fail;
index 1bcba6943fd7ffb2196daf59068b8f9e71e497da..7af0d58c231b6cb0b4acdfe2e9d639b1b448d2c3 100644 (file)
@@ -518,12 +518,7 @@ struct dsc_mst_fairness_params {
        uint32_t num_slices_h;
        uint32_t num_slices_v;
        uint32_t bpp_overwrite;
-};
-
-struct dsc_mst_fairness_vars {
-       int pbn;
-       bool dsc_enabled;
-       int bpp_x16;
+       struct amdgpu_dm_connector *aconnector;
 };
 
 static int kbps_to_peak_pbn(int kbps)
@@ -750,12 +745,12 @@ static void try_disable_dsc(struct drm_atomic_state *state,
 
 static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                                             struct dc_state *dc_state,
-                                            struct dc_link *dc_link)
+                                            struct dc_link *dc_link,
+                                            struct dsc_mst_fairness_vars *vars)
 {
        int i;
        struct dc_stream_state *stream;
        struct dsc_mst_fairness_params params[MAX_PIPES];
-       struct dsc_mst_fairness_vars vars[MAX_PIPES];
        struct amdgpu_dm_connector *aconnector;
        int count = 0;
        bool debugfs_overwrite = false;
@@ -776,6 +771,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                params[count].timing = &stream->timing;
                params[count].sink = stream->sink;
                aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+               params[count].aconnector = aconnector;
                params[count].port = aconnector->port;
                params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
                if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
@@ -798,6 +794,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
        }
        /* Try no compression */
        for (i = 0; i < count; i++) {
+               vars[i].aconnector = params[i].aconnector;
                vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
                vars[i].dsc_enabled = false;
                vars[i].bpp_x16 = 0;
@@ -851,7 +848,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 }
 
 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-                                      struct dc_state *dc_state)
+                                      struct dc_state *dc_state,
+                                      struct dsc_mst_fairness_vars *vars)
 {
        int i, j;
        struct dc_stream_state *stream;
@@ -882,7 +880,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
                        return false;
 
                mutex_lock(&aconnector->mst_mgr.lock);
-               if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
+               if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) {
                        mutex_unlock(&aconnector->mst_mgr.lock);
                        return false;
                }
index b38bd68121ceb56cae8fbb847ff7852d08bff03e..900d3f7a84989e3b22f855a36c82191482bc7963 100644 (file)
@@ -39,8 +39,17 @@ void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+
+struct dsc_mst_fairness_vars {
+       int pbn;
+       bool dsc_enabled;
+       int bpp_x16;
+       struct amdgpu_dm_connector *aconnector;
+};
+
 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-                                      struct dc_state *dc_state);
+                                      struct dc_state *dc_state,
+                                      struct dsc_mst_fairness_vars *vars);
 #endif
 
 #endif
index c9f47d16747235faaf85134bd386050e4b75a48f..b1bf80da3a55fd6f59f9153312043a0a8463564f 100644 (file)
@@ -62,7 +62,7 @@ inline void dc_assert_fp_enabled(void)
        depth = *pcpu;
        put_cpu_ptr(&fpu_recursion_depth);
 
-       ASSERT(depth > 1);
+       ASSERT(depth >= 1);
 }
 
 /**
index 8bd7f42a8053fc21067c4f116e2e702feab2874c..1e44b13c1c7def50df2a317bd85aaa25705c2d19 100644 (file)
@@ -2586,13 +2586,21 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
 
 int dc_link_get_backlight_level(const struct dc_link *link)
 {
-
        struct abm *abm = get_abm_from_stream_res(link);
+       struct panel_cntl *panel_cntl = link->panel_cntl;
+       struct dc  *dc = link->ctx->dc;
+       struct dmcu *dmcu = dc->res_pool->dmcu;
+       bool fw_set_brightness = true;
 
-       if (abm == NULL || abm->funcs->get_current_backlight == NULL)
-               return DC_ERROR_UNEXPECTED;
+       if (dmcu)
+               fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
 
-       return (int) abm->funcs->get_current_backlight(abm);
+       if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
+               return panel_cntl->funcs->get_current_backlight(panel_cntl);
+       else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
+               return (int) abm->funcs->get_current_backlight(abm);
+       else
+               return DC_ERROR_UNEXPECTED;
 }
 
 int dc_link_get_target_backlight_pwm(const struct dc_link *link)
index 330edd666b7d8277bf68c05f4e29ec78d20cb7b1..6d655e158267a837d2085baa0898a137611a35e2 100644 (file)
@@ -1,4 +1,26 @@
-/* Copyright 2015 Advanced Micro Devices, Inc. */
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
 #include "dm_services.h"
 #include "dc.h"
 #include "dc_link_dp.h"
@@ -1284,12 +1306,6 @@ static void override_training_settings(
 {
        uint32_t lane;
 
-       /* Override link settings */
-       if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
-               lt_settings->link_settings.link_rate = link->preferred_link_setting.link_rate;
-       if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN)
-               lt_settings->link_settings.lane_count = link->preferred_link_setting.lane_count;
-
        /* Override link spread */
        if (!link->dp_ss_off && overrides->downspread != NULL)
                lt_settings->link_settings.link_spread = *overrides->downspread ?
@@ -1804,14 +1820,13 @@ bool perform_link_training_with_retries(
                if (panel_mode == DP_PANEL_MODE_EDP) {
                        struct cp_psp *cp_psp = &stream->ctx->cp_psp;
 
-                       if (cp_psp && cp_psp->funcs.enable_assr) {
-                               if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
-                                       /* since eDP implies ASSR on, change panel
-                                        * mode to disable ASSR
-                                        */
-                                       panel_mode = DP_PANEL_MODE_DEFAULT;
-                               }
-                       }
+                       if (cp_psp && cp_psp->funcs.enable_assr)
+                               /* ASSR is bound to fail with unsigned PSP
+                                * verstage used during the development phase.
+                                * Report and continue with eDP panel mode to
+                                * perform eDP link training with the right settings.
+                                */
+                               cp_psp->funcs.enable_assr(cp_psp->handle, link);
                }
 #endif
 
@@ -1840,9 +1855,13 @@ bool perform_link_training_with_retries(
                dp_disable_link_phy(link, signal);
 
                /* Abort link training if failure due to sink being unplugged. */
-               if (status == LINK_TRAINING_ABORT)
-                       break;
-               else if (do_fallback) {
+               if (status == LINK_TRAINING_ABORT) {
+                       enum dc_connection_type type = dc_connection_none;
+
+                       dc_link_detect_sink(link, &type);
+                       if (type == dc_connection_none)
+                               break;
+               } else if (do_fallback) {
                        decide_fallback_link_setting(*link_setting, &current_setting, status);
                        /* Fail link training if reduced link bandwidth no longer meets
                         * stream requirements.
index e14f99b4b0c3f626ecb13dfd799fe25acdf44f93..3c334734110352590b6cad835ff1cca269275bdb 100644 (file)
@@ -42,7 +42,7 @@
 #define DC_LOGGER \
        engine->ctx->logger
 
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)
 #define IS_DC_I2CAUX_LOGGING_ENABLED() (false)
 #define LOG_FLAG_Error_I2cAux LOG_ERROR
 #define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX
@@ -76,7 +76,7 @@ enum {
 #define DEFAULT_AUX_ENGINE_MULT   0
 #define DEFAULT_AUX_ENGINE_LENGTH 69
 
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)
 
 static void release_engine(
        struct dce_aux *engine)
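The two hunks above swap an empty-comment expansion for the conventional no-op statement macro. As a hedged illustration (not driver code, macro names hypothetical): the do { } while (0) form behaves as exactly one C statement and demands a trailing semicolon, whereas a comment-only expansion compiles silently with or without one.

#define TRACE_AS_COMMENT(...)   /* expands to nothing at all */
#define TRACE_AS_STMT(...)      do { } while (0)

static int trace_example(int enabled)
{
        if (enabled)
                TRACE_AS_STMT("event");   /* one well-formed statement */
        /* Writing TRACE_AS_STMT("event") without the ';' fails to compile,
         * catching the mistake early; the comment-style macro would have
         * compiled either way and could silently change control flow. */
        return enabled;
}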
index e9233923586316349952ad5dc9914be364044a74..e8570060d007ba5bab0db3b3395aca2b9c487573 100644 (file)
@@ -49,7 +49,6 @@
 static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
 {
        uint64_t current_backlight;
-       uint32_t round_result;
        uint32_t bl_period, bl_int_count;
        uint32_t bl_pwm, fractional_duty_cycle_en;
        uint32_t bl_period_mask, bl_pwm_mask;
@@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
        current_backlight = div_u64(current_backlight, bl_period);
        current_backlight = (current_backlight + 1) >> 1;
 
-       current_backlight = (uint64_t)(current_backlight) * bl_period;
-
-       round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
-
-       round_result = (round_result >> (bl_int_count-1)) & 1;
-
-       current_backlight >>= bl_int_count;
-       current_backlight += round_result;
-
        return (uint32_t)(current_backlight);
 }
 
index d8b22618b79e8bfce533f2fe523e80f5f7b227ec..c337588231ff0a485ded6951fed6862653b073d6 100644 (file)
@@ -118,6 +118,7 @@ struct dcn10_link_enc_registers {
        uint32_t RDPCSTX_PHY_CNTL4;
        uint32_t RDPCSTX_PHY_CNTL5;
        uint32_t RDPCSTX_PHY_CNTL6;
+       uint32_t RDPCSPIPE_PHY_CNTL6;
        uint32_t RDPCSTX_PHY_CNTL7;
        uint32_t RDPCSTX_PHY_CNTL8;
        uint32_t RDPCSTX_PHY_CNTL9;
index 90127c1f9e35da969d0e2897d3c46bfa18171165..b0892443fbd57d9cd76de5d7ddd3a56a060d007c 100644 (file)
@@ -37,6 +37,7 @@
 
 #include "link_enc_cfg.h"
 #include "dc_dmub_srv.h"
+#include "dal_asic_id.h"
 
 #define CTX \
        enc10->base.ctx
 #define AUX_REG_WRITE(reg_name, val) \
                        dm_write_reg(CTX, AUX_REG(reg_name), val)
 
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
 void dcn31_link_encoder_set_dio_phy_mux(
        struct link_encoder *enc,
        enum encoder_type_select sel,
@@ -215,8 +220,8 @@ static const struct link_encoder_funcs dcn31_link_enc_funcs = {
        .fec_is_active = enc2_fec_is_active,
        .get_dig_frontend = dcn10_get_dig_frontend,
        .get_dig_mode = dcn10_get_dig_mode,
-       .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
-       .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
+       .is_in_alt_mode = dcn31_link_encoder_is_in_alt_mode,
+       .get_max_link_cap = dcn31_link_encoder_get_max_link_cap,
        .set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
 };
 
@@ -404,3 +409,60 @@ void dcn31_link_encoder_disable_output(
        }
 }
 
+bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+       uint32_t dp_alt_mode_disable;
+       bool is_usb_c_alt_mode = false;
+
+       if (enc->features.flags.bits.DP_IS_USB_C) {
+               if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+                       // [Note] no need to check hw_internal_rev once phy mux selection is ready
+                       REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+               } else {
+                       /*
+                        * B0 phys use a new set of registers to check whether alt mode is disabled.
+                        * If the value == 1, alt mode is disabled; otherwise it is enabled.
+                        */
+                       if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
+                                       || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
+                                       || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+                               REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+                       } else {
+                               // [Note] Need to change TRANSMITTER_UNIPHY_C/D to F/G once phy mux selection is ready
+                               REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+                       }
+               }
+
+               is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
+       }
+
+       return is_usb_c_alt_mode;
+}
+
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
+                                                                                struct dc_link_settings *link_settings)
+{
+       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+       uint32_t is_in_usb_c_dp4_mode = 0;
+
+       dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+
+       /* in usb c dp2 mode, max lane count is 2 */
+       if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
+               if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+                       // [Note] no need to check hw_internal_rev once phy mux selection is ready
+                       REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+               } else {
+                       if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
+                                       || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
+                                       || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+                               REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+                       } else {
+                               REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+                       }
+               }
+               if (!is_in_usb_c_dp4_mode)
+                       link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+       }
+}
index 32d146312838bb2e40d49cd256aba24cbff0e1c6..3454f1e7c1f176db3f00482198bbce5e25a5117c 100644 (file)
@@ -69,6 +69,7 @@
        SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
        SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
        SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \
+       SRI(RDPCSPIPE_PHY_CNTL6, RDPCSPIPE, id), \
        SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
        SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
        SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
-       LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+       LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+       LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+       LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\
@@ -243,4 +246,13 @@ void dcn31_link_encoder_disable_output(
        struct link_encoder *enc,
        enum signal_type signal);
 
+/*
+ * Check whether USB-C DP Alt mode is disabled
+ */
+bool dcn31_link_encoder_is_in_alt_mode(
+       struct link_encoder *enc);
+
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings);
+
 #endif /* __DC_LINK_ENCODER__DCN31_H__ */
index a7702d3c75cdd8d0856160d4984ab39194426815..0006bbac466c8092ce554322ceab9758471f487a 100644 (file)
@@ -928,7 +928,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .disable_dcc = DCC_ENABLE,
        .vsr_support = true,
        .performance_trace = false,
-       .max_downscale_src_width = 7680,/*upto 8K*/
+       .max_downscale_src_width = 3840, /* up to 4K */
        .disable_pplib_wm_range = false,
        .scl_reset_length10 = true,
        .sanity_checks = false,
@@ -1284,6 +1284,12 @@ static struct stream_encoder *dcn31_stream_encoder_create(
        if (!enc1 || !vpg || !afmt)
                return NULL;
 
+       if (ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+                       ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+               if ((eng_id == ENGINE_ID_DIGC) || (eng_id == ENGINE_ID_DIGD))
+                       eng_id = eng_id + 3; // For B0 only. C->F, D->G.
+       }
+
        dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
                                        eng_id, vpg, afmt,
                                        &stream_enc_regs[eng_id],
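As a side note on the B0 remap above: the +3 offset assumes the DIG engine IDs are contiguous, so C maps to F and D maps to G. A minimal sketch of that assumption (enum values here are illustrative, not the driver's definitions):

enum dig_engine { DIGA, DIGB, DIGC, DIGD, DIGE, DIGF, DIGG };

static enum dig_engine remap_dig_for_b0(enum dig_engine eng_id)
{
        /* Yellow Carp B0 routes DIGC/DIGD traffic to DIGF/DIGG. */
        if (eng_id == DIGC || eng_id == DIGD)
                return eng_id + 3;      /* C -> F, D -> G */
        return eng_id;
}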
index 381c17caace189fa06b0c416d86ab74ae9b64bd8..5adc471bef57f8a89dc76a390f5b6ea2e2f76cfd 100644 (file)
@@ -227,7 +227,7 @@ enum {
 #define FAMILY_YELLOW_CARP                     146
 
 #define YELLOW_CARP_A0 0x01
-#define YELLOW_CARP_B0 0x02            // TODO: DCN31 - update with correct B0 ID
+#define YELLOW_CARP_B0 0x1A
 #define YELLOW_CARP_UNKNOWN 0xFF
 
 #ifndef ASICREV_IS_YELLOW_CARP
index 92caf8441d1e08506a78317651711c60a3e2aaf6..01a56556cde13004567c3dcb4e4fe2fa7ed355f4 100644 (file)
 #define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_RX_OVRD_OUT_2                                                0xe0c7
 #define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_TX_OVRD_IN_2                                                 0xe0c8
 
+//RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT                                            0x10
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT                                        0x11
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT                                    0x12
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK                                              0x00010000L
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK                                          0x00020000L
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK                                      0x00040000L
+
+//RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT                                            0x10
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT                                        0x11
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT                                    0x12
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK                                              0x00010000L
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK                                          0x00020000L
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK                                      0x00040000L
+
+//[Note] Hack. RDPCSPIPE only has 2 instances.
+#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6                                                              0x2d73
+#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
+#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6                                                              0x2e4b
+#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
+#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6                                                              0x2d73
+#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
+#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6                                                              0x2e4b
+#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
+#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6                                                              0x2d73
+#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
 
 #endif
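The SHIFT/MASK pairs above follow the usual pattern where a field value is recovered as (reg & MASK) >> SHIFT. A small sketch using the DPALT_DISABLE field as an example (a generic helper under that assumption, not the driver's REG_GET macro):

#include <stdint.h>

#define RDPCS_PHY_DPALT_DISABLE__SHIFT  0x11
#define RDPCS_PHY_DPALT_DISABLE_MASK    0x00020000UL

static inline uint32_t rdpcspipe_dpalt_disable(uint32_t phy_cntl6)
{
        /* 1 = DP alt mode disabled, 0 = enabled (see the dcn31 code above). */
        return (phy_cntl6 & RDPCS_PHY_DPALT_DISABLE_MASK) >>
                RDPCS_PHY_DPALT_DISABLE__SHIFT;
}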
index 8a08ecc34c6992a64ead985bf1f2047075a35800..4884a4e1f261c88fb6c09647c884e2fbd4da9ec9 100644 (file)
 #define TABLE_PMSTATUSLOG        3 // Called by Tools for Agm logging
 #define TABLE_DPMCLOCKS          4 // Called by Driver; defined here, but not used, for backward compatible
 #define TABLE_MOMENTARY_PM       5 // Called by Tools; defined here, but not used, for backward compatible
-#define TABLE_COUNT              6
+#define TABLE_SMU_METRICS        6 // Called by Driver
+#define TABLE_COUNT              7
 
-#define NUM_DSPCLK_LEVELS              8
-#define NUM_SOCCLK_DPM_LEVELS  8
-#define NUM_DCEFCLK_DPM_LEVELS 4
-#define NUM_FCLK_DPM_LEVELS            4
-#define NUM_MEMCLK_DPM_LEVELS  4
+typedef struct SmuMetricsTable_t {
+       //CPU status
+       uint16_t CoreFrequency[6];              //[MHz]
+       uint32_t CorePower[6];                  //[mW]
+       uint16_t CoreTemperature[6];            //[centi-Celsius]
+       uint16_t L3Frequency[2];                //[MHz]
+       uint16_t L3Temperature[2];              //[centi-Celsius]
+       uint16_t C0Residency[6];                //Percentage
 
-#define NUMBER_OF_PSTATES              8
-#define NUMBER_OF_CORES                        8
+       // GFX status
+       uint16_t GfxclkFrequency;               //[MHz]
+       uint16_t GfxTemperature;                //[centi-Celsius]
 
-typedef enum {
-       S3_TYPE_ENTRY,
-       S5_TYPE_ENTRY,
-} Sleep_Type_e;
+       // SOC IP info
+       uint16_t SocclkFrequency;               //[MHz]
+       uint16_t VclkFrequency;                 //[MHz]
+       uint16_t DclkFrequency;                 //[MHz]
+       uint16_t MemclkFrequency;               //[MHz]
 
-typedef enum {
-       GFX_OFF = 0,
-       GFX_ON  = 1,
-} GFX_Mode_e;
+       // power, VF info for CPU/GFX telemetry rails, and then socket power total
+       uint32_t Voltage[2];                    //[mV] indices: VDDCR_VDD, VDDCR_GFX
+       uint32_t Current[2];                    //[mA] indices: VDDCR_VDD, VDDCR_GFX
+       uint32_t Power[2];                      //[mW] indices: VDDCR_VDD, VDDCR_GFX
+       uint32_t CurrentSocketPower;            //[mW]
 
-typedef enum {
-       CPU_P0 = 0,
-       CPU_P1,
-       CPU_P2,
-       CPU_P3,
-       CPU_P4,
-       CPU_P5,
-       CPU_P6,
-       CPU_P7
-} CPU_PState_e;
+       uint16_t SocTemperature;                //[centi-Celsius]
+       uint16_t EdgeTemperature;
+       uint16_t ThrottlerStatus;
+       uint16_t Spare;
 
-typedef enum {
-       CPU_CORE0 = 0,
-       CPU_CORE1,
-       CPU_CORE2,
-       CPU_CORE3,
-       CPU_CORE4,
-       CPU_CORE5,
-       CPU_CORE6,
-       CPU_CORE7
-} CORE_ID_e;
+} SmuMetricsTable_t;
 
-typedef enum {
-       DF_DPM0 = 0,
-       DF_DPM1,
-       DF_DPM2,
-       DF_DPM3,
-       DF_PState_Count
-} DF_PState_e;
-
-typedef enum {
-       GFX_DPM0 = 0,
-       GFX_DPM1,
-       GFX_DPM2,
-       GFX_DPM3,
-       GFX_PState_Count
-} GFX_PState_e;
+typedef struct SmuMetrics_t {
+       SmuMetricsTable_t Current;
+       SmuMetricsTable_t Average;
+       uint32_t SampleStartTime;
+       uint32_t SampleStopTime;
+       uint32_t Accnt;
+} SmuMetrics_t;
 
 #endif
index 6f1b1b50d527c6c0f43b9edd8e3d551d0b7dc6e1..18b862a90fbe5c8857686b35acc0616f8c27aa87 100644 (file)
        __SMU_DUMMY_MAP(SetUclkDpmMode),                \
        __SMU_DUMMY_MAP(LightSBR),                      \
        __SMU_DUMMY_MAP(GfxDriverResetRecovery),        \
-       __SMU_DUMMY_MAP(BoardPowerCalibration),
+       __SMU_DUMMY_MAP(BoardPowerCalibration),   \
+       __SMU_DUMMY_MAP(RequestGfxclk),           \
+       __SMU_DUMMY_MAP(ForceGfxVid),             \
+       __SMU_DUMMY_MAP(UnforceGfxVid),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)  SMU_MSG_##type
index 6e6088760b180a3bc06f69c8e6fb3ac29bfe19c4..909a86aa60f328e3c33d0e4d3def613f05b93039 100644 (file)
 #define PPSMC_MSG_SetDriverTableVMID                    0x34
 #define PPSMC_MSG_SetSoftMinCclk                        0x35
 #define PPSMC_MSG_SetSoftMaxCclk                        0x36
-#define PPSMC_Message_Count                             0x37
+#define PPSMC_MSG_GetGfxFrequency                       0x37
+#define PPSMC_MSG_GetGfxVid                             0x38
+#define PPSMC_MSG_ForceGfxFreq                          0x39
+#define PPSMC_MSG_UnForceGfxFreq                        0x3A
+#define PPSMC_MSG_ForceGfxVid                           0x3B
+#define PPSMC_MSG_UnforceGfxVid                         0x3C
+#define PPSMC_MSG_GetEnabledSmuFeatures                 0x3D
+#define PPSMC_Message_Count                             0x3E
 
 #endif
index bdbbeb959c681453ae5447c83161bbd01adee98d..81f82aa05ec287234094c02d6e4f5fde5b28dc0e 100644 (file)
@@ -6867,6 +6867,8 @@ static int si_dpm_enable(struct amdgpu_device *adev)
        si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
        si_thermal_start_thermal_controller(adev);
 
+       ni_update_current_ps(adev, boot_ps);
+
        return 0;
 }
 
index 3ab1ce4d3419b0da81a68d25bd2c839af839a65d..04863a7971155ab1177f37b9f8bdc343d0c1c0c3 100644 (file)
@@ -1404,7 +1404,7 @@ static int smu_disable_dpms(struct smu_context *smu)
         */
        if (smu->uploading_custom_pp_table &&
            (adev->asic_type >= CHIP_NAVI10) &&
-           (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
+           (adev->asic_type <= CHIP_BEIGE_GOBY))
                return smu_disable_all_features_with_exception(smu,
                                                               true,
                                                               SMU_FEATURE_COUNT);
index e343cc218990ba4ccebc6727e059d86cbb125a1f..082f01893f3d130ca660a937926e7e2b24376ea4 100644 (file)
@@ -771,8 +771,12 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
        struct smu_11_0_dpm_context *dpm_context = NULL;
        uint32_t gen_speed, lane_width;
 
-       if (amdgpu_ras_intr_triggered())
-               return sysfs_emit(buf, "unavailable\n");
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
+       if (amdgpu_ras_intr_triggered()) {
+               size += sysfs_emit_at(buf, size, "unavailable\n");
+               return size;
+       }
 
        dpm_context = smu_dpm->dpm_context;
 
index b05f9541accc3222855962abfebd479aa3e9291b..3d4c65bc29dc3daa29c53753421764c79d710bcc 100644 (file)
 #undef pr_info
 #undef pr_debug
 
+/* unit: MHz */
+#define CYAN_SKILLFISH_SCLK_MIN                        1000
+#define CYAN_SKILLFISH_SCLK_MAX                        2000
+#define CYAN_SKILLFISH_SCLK_DEFAULT                    1800
+
+/* unit: mV */
+#define CYAN_SKILLFISH_VDDC_MIN                        700
+#define CYAN_SKILLFISH_VDDC_MAX                        1129
+#define CYAN_SKILLFISH_VDDC_MAGIC                      5118 // 0x13fe
+
+static struct gfx_user_settings {
+       uint32_t sclk;
+       uint32_t vddc;
+} cyan_skillfish_user_settings;
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE ( \
+       FEATURE_MASK(FEATURE_FCLK_DPM_BIT)      |       \
+       FEATURE_MASK(FEATURE_SOC_DPM_BIT)       |       \
+       FEATURE_MASK(FEATURE_GFX_DPM_BIT))
+
 static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,                  0),
        MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,                0),
@@ -52,14 +73,473 @@ static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT]
        MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverTableDramAddrLow,    0),
        MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,        0),
        MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,        0),
+       MSG_MAP(GetEnabledSmuFeatures,          PPSMC_MSG_GetEnabledSmuFeatures,        0),
+       MSG_MAP(RequestGfxclk,                  PPSMC_MSG_RequestGfxclk,                0),
+       MSG_MAP(ForceGfxVid,                    PPSMC_MSG_ForceGfxVid,                  0),
+       MSG_MAP(UnforceGfxVid,                  PPSMC_MSG_UnforceGfxVid,                0),
+};
+
+static struct cmn2asic_mapping cyan_skillfish_table_map[SMU_TABLE_COUNT] = {
+       TAB_MAP_VALID(SMU_METRICS),
 };
 
+static int cyan_skillfish_tables_init(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *tables = smu_table->tables;
+
+       SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+                               sizeof(SmuMetrics_t),
+                               PAGE_SIZE,
+                               AMDGPU_GEM_DOMAIN_VRAM);
+
+       smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+       if (!smu_table->metrics_table)
+               goto err0_out;
+
+       smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+       smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+       if (!smu_table->gpu_metrics_table)
+               goto err1_out;
+
+       smu_table->metrics_time = 0;
+
+       return 0;
+
+err1_out:
+       smu_table->gpu_metrics_table_size = 0;
+       kfree(smu_table->metrics_table);
+err0_out:
+       return -ENOMEM;
+}
+
+static int cyan_skillfish_init_smc_tables(struct smu_context *smu)
+{
+       int ret = 0;
+
+       ret = cyan_skillfish_tables_init(smu);
+       if (ret)
+               return ret;
+
+       return smu_v11_0_init_smc_tables(smu);
+}
+
+static int cyan_skillfish_finit_smc_tables(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+
+       kfree(smu_table->metrics_table);
+       smu_table->metrics_table = NULL;
+
+       kfree(smu_table->gpu_metrics_table);
+       smu_table->gpu_metrics_table = NULL;
+       smu_table->gpu_metrics_table_size = 0;
+
+       smu_table->metrics_time = 0;
+
+       return 0;
+}
+
+static int
+cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
+                                       MetricsMember_t member,
+                                       uint32_t *value)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+       int ret = 0;
+
+       mutex_lock(&smu->metrics_lock);
+
+       ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
+       if (ret) {
+               mutex_unlock(&smu->metrics_lock);
+               return ret;
+       }
+
+       switch (member) {
+       case METRICS_CURR_GFXCLK:
+               *value = metrics->Current.GfxclkFrequency;
+               break;
+       case METRICS_CURR_SOCCLK:
+               *value = metrics->Current.SocclkFrequency;
+               break;
+       case METRICS_CURR_VCLK:
+               *value = metrics->Current.VclkFrequency;
+               break;
+       case METRICS_CURR_DCLK:
+               *value = metrics->Current.DclkFrequency;
+               break;
+       case METRICS_CURR_UCLK:
+               *value = metrics->Current.MemclkFrequency;
+               break;
+       case METRICS_AVERAGE_SOCKETPOWER:
+               *value = (metrics->Current.CurrentSocketPower << 8) /
+                               1000;
+               break;
+       case METRICS_TEMPERATURE_EDGE:
+               *value = metrics->Current.GfxTemperature / 100 *
+                               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+               break;
+       case METRICS_TEMPERATURE_HOTSPOT:
+               *value = metrics->Current.SocTemperature / 100 *
+                               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+               break;
+       case METRICS_VOLTAGE_VDDSOC:
+               *value = metrics->Current.Voltage[0];
+               break;
+       case METRICS_VOLTAGE_VDDGFX:
+               *value = metrics->Current.Voltage[1];
+               break;
+       case METRICS_THROTTLER_STATUS:
+               *value = metrics->Current.ThrottlerStatus;
+               break;
+       default:
+               *value = UINT_MAX;
+               break;
+       }
+
+       mutex_unlock(&smu->metrics_lock);
+
+       return ret;
+}
+
+static int cyan_skillfish_read_sensor(struct smu_context *smu,
+                                       enum amd_pp_sensors sensor,
+                                       void *data,
+                                       uint32_t *size)
+{
+       int ret = 0;
+
+       if (!data || !size)
+               return -EINVAL;
+
+       mutex_lock(&smu->sensor_lock);
+
+       switch (sensor) {
+       case AMDGPU_PP_SENSOR_GFX_SCLK:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_CURR_GFXCLK,
+                                                  (uint32_t *)data);
+               *(uint32_t *)data *= 100;
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_GFX_MCLK:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_CURR_UCLK,
+                                                  (uint32_t *)data);
+               *(uint32_t *)data *= 100;
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_GPU_POWER:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_AVERAGE_SOCKETPOWER,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_TEMPERATURE_HOTSPOT,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_EDGE_TEMP:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_TEMPERATURE_EDGE,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_VDDNB:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_VOLTAGE_VDDSOC,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_VDDGFX:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_VOLTAGE_VDDGFX,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       mutex_unlock(&smu->sensor_lock);
+
+       return ret;
+}
+
+static int cyan_skillfish_get_current_clk_freq(struct smu_context *smu,
+                                               enum smu_clk_type clk_type,
+                                               uint32_t *value)
+{
+       MetricsMember_t member_type;
+
+       switch (clk_type) {
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               member_type = METRICS_CURR_GFXCLK;
+               break;
+       case SMU_FCLK:
+       case SMU_MCLK:
+               member_type = METRICS_CURR_UCLK;
+               break;
+       case SMU_SOCCLK:
+               member_type = METRICS_CURR_SOCCLK;
+               break;
+       case SMU_VCLK:
+               member_type = METRICS_CURR_VCLK;
+               break;
+       case SMU_DCLK:
+               member_type = METRICS_CURR_DCLK;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return cyan_skillfish_get_smu_metrics_data(smu, member_type, value);
+}
+
+static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
+                                       enum smu_clk_type clk_type,
+                                       char *buf)
+{
+       int ret = 0, size = 0;
+       uint32_t cur_value = 0;
+
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
+       switch (clk_type) {
+       case SMU_OD_SCLK:
+               ret  = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cur_value);
+               if (ret)
+                       return ret;
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+               size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
+               break;
+       case SMU_OD_VDDC_CURVE:
+               ret  = cyan_skillfish_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, &cur_value);
+               if (ret)
+                       return ret;
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC");
+               size += sysfs_emit_at(buf, size, "0: %umV *\n", cur_value);
+               break;
+       case SMU_OD_RANGE:
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+                                               CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+               size += sysfs_emit_at(buf, size, "VDDC: %7umV  %10umV\n",
+                                               CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+               break;
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+       case SMU_FCLK:
+       case SMU_MCLK:
+       case SMU_SOCCLK:
+       case SMU_VCLK:
+       case SMU_DCLK:
+               ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value);
+               if (ret)
+                       return ret;
+               size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
+               break;
+       default:
+               dev_warn(smu->adev->dev, "Unsupported clock type\n");
+               return ret;
+       }
+
+       return size;
+}
+
+static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+       int ret = 0;
+       uint32_t feature_mask[2];
+       uint64_t feature_enabled;
+
+       /* we need to re-init after suspend so return false */
+       if (adev->in_suspend)
+               return false;
+
+       ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+
+       if (ret)
+               return false;
+
+       feature_enabled = (uint64_t)feature_mask[0] |
+                               ((uint64_t)feature_mask[1] << 32);
+
+       return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu,
+                                               void **table)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct gpu_metrics_v2_2 *gpu_metrics =
+               (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
+       SmuMetrics_t metrics;
+       int i, ret = 0;
+
+       ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+       if (ret)
+               return ret;
+
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
+
+       gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+       gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+
+       gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+       gpu_metrics->average_soc_power = metrics.Current.Power[0];
+       gpu_metrics->average_gfx_power = metrics.Current.Power[1];
+
+       gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+       gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+       gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+       gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+       gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+       gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+       gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+       gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+       gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+       gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+       gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+       gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+       for (i = 0; i < 6; i++) {
+               gpu_metrics->temperature_core[i] = metrics.Current.CoreTemperature[i];
+               gpu_metrics->average_core_power[i] = metrics.Average.CorePower[i];
+               gpu_metrics->current_coreclk[i] = metrics.Current.CoreFrequency[i];
+       }
+
+       for (i = 0; i < 2; i++) {
+               gpu_metrics->temperature_l3[i] = metrics.Current.L3Temperature[i];
+               gpu_metrics->current_l3clk[i] = metrics.Current.L3Frequency[i];
+       }
+
+       gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+       gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+       *table = (void *)gpu_metrics;
+
+       return sizeof(struct gpu_metrics_v2_2);
+}
+
+static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
+                                       enum PP_OD_DPM_TABLE_COMMAND type,
+                                       long input[], uint32_t size)
+{
+       int ret = 0;
+       uint32_t vid;
+
+       switch (type) {
+       case PP_OD_EDIT_VDDC_CURVE:
+               if (size != 3 || input[0] != 0) {
+                       dev_err(smu->adev->dev, "Invalid parameter!\n");
+                       return -EINVAL;
+               }
+
+               if (input[1] <= CYAN_SKILLFISH_SCLK_MIN ||
+                       input[1] > CYAN_SKILLFISH_SCLK_MAX) {
+                       dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n",
+                                       CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+                       return -EINVAL;
+               }
+
+               if (input[2] <= CYAN_SKILLFISH_VDDC_MIN ||
+                       input[2] > CYAN_SKILLFISH_VDDC_MAX) {
+                       dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+                                       CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+                       return -EINVAL;
+               }
+
+               cyan_skillfish_user_settings.sclk = input[1];
+               cyan_skillfish_user_settings.vddc = input[2];
+
+               break;
+       case PP_OD_RESTORE_DEFAULT_TABLE:
+               if (size != 0) {
+                       dev_err(smu->adev->dev, "Invalid parameter!\n");
+                       return -EINVAL;
+               }
+
+               cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT;
+               cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC;
+
+               break;
+       case PP_OD_COMMIT_DPM_TABLE:
+               if (size != 0) {
+                       dev_err(smu->adev->dev, "Invalid parameter!\n");
+                       return -EINVAL;
+               }
+
+               if (cyan_skillfish_user_settings.sclk < CYAN_SKILLFISH_SCLK_MIN ||
+                   cyan_skillfish_user_settings.sclk > CYAN_SKILLFISH_SCLK_MAX) {
+                       dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n",
+                                       CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+                       return -EINVAL;
+               }
+
+               if ((cyan_skillfish_user_settings.vddc != CYAN_SKILLFISH_VDDC_MAGIC) &&
+                       (cyan_skillfish_user_settings.vddc < CYAN_SKILLFISH_VDDC_MIN ||
+                       cyan_skillfish_user_settings.vddc > CYAN_SKILLFISH_VDDC_MAX)) {
+                       dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+                                       CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+                       return -EINVAL;
+               }
+
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestGfxclk,
+                                       cyan_skillfish_user_settings.sclk, NULL);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Set sclk failed!\n");
+                       return ret;
+               }
+
+               if (cyan_skillfish_user_settings.vddc == CYAN_SKILLFISH_VDDC_MAGIC) {
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_UnforceGfxVid, NULL);
+                       if (ret) {
+                               dev_err(smu->adev->dev, "Unforce vddc failed!\n");
+                               return ret;
+                       }
+               } else {
+                       /*
+                        * PMFW accepts SVI2 VID code, convert voltage to VID:
+                        * PMFW accepts an SVI2 VID code; convert the voltage to a VID:
+                        */
+                       vid = (1550 - cyan_skillfish_user_settings.vddc) * 160 / 1000;
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ForceGfxVid, vid, NULL);
+                       if (ret) {
+                               dev_err(smu->adev->dev, "Force vddc failed!\n");
+                               return ret;
+                       }
+               }
+
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return ret;
+}
+
 static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
 
        .check_fw_status = smu_v11_0_check_fw_status,
        .check_fw_version = smu_v11_0_check_fw_version,
        .init_power = smu_v11_0_init_power,
        .fini_power = smu_v11_0_fini_power,
+       .init_smc_tables = cyan_skillfish_init_smc_tables,
+       .fini_smc_tables = cyan_skillfish_finit_smc_tables,
+       .read_sensor = cyan_skillfish_read_sensor,
+       .print_clk_levels = cyan_skillfish_print_clk_levels,
+       .is_dpm_running = cyan_skillfish_is_dpm_running,
+       .get_gpu_metrics = cyan_skillfish_get_gpu_metrics,
+       .od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table,
        .register_irq_handler = smu_v11_0_register_irq_handler,
        .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
        .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
@@ -72,5 +552,6 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu)
 {
        smu->ppt_funcs = &cyan_skillfish_ppt_funcs;
        smu->message_map = cyan_skillfish_message_map;
+       smu->table_map = cyan_skillfish_table_map;
        smu->is_apu = true;
 }
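For the PP_OD_COMMIT_DPM_TABLE path above, the driver converts a requested VDDC in millivolts into an SVI2 VID code before SMU_MSG_ForceGfxVid is sent. A worked integer example of that conversion (the helper name is hypothetical; the arithmetic mirrors the diff):

#include <stdint.h>
#include <stdio.h>

/* vid = (1.55 V - voltage) * 160, computed in millivolts as in the diff. */
static uint32_t mv_to_svi2_vid(uint32_t vddc_mv)
{
        return (1550 - vddc_mv) * 160 / 1000;
}

int main(void)
{
        printf("vid(800 mV)  = %u\n", mv_to_svi2_vid(800));   /* 750*160/1000 = 120 */
        printf("vid(1129 mV) = %u\n", mv_to_svi2_vid(1129));  /* 421*160/1000 = 67  */
        return 0;
}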
index a5fc5d7cb6c79226072a416a885865fce0749798..b1ad451af06bd02a764fc9b2b5136889206692bc 100644 (file)
@@ -1279,6 +1279,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
        struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
        uint32_t min_value, max_value;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_GFXCLK:
        case SMU_SCLK:
@@ -1392,7 +1394,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
        case SMU_OD_RANGE:
                if (!smu->od_enabled || !od_table || !od_settings)
                        break;
-               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 
                if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
                        navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
@@ -2272,7 +2274,27 @@ static int navi10_baco_enter(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm)
+       /*
+        * This targets the case below:
+        *   amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded
+        *
+        * For NAVI10 and later ASICs, we rely on PMFW to handle runpm. To
+        * make that possible, PMFW needs to acknowledge the D-state transition
+        * of both the gfx (function 0) and audio (function 1) functions of
+        * the ASIC.
+        *
+        * The PCI device's initial runpm status is RUNPM_SUSPENDED, and so is
+        * that of the device representing the audio function of the ASIC. That
+        * means even if the sound driver (snd_hda_intel) is not loaded yet, a
+        * runpm suspend may already have been kicked off on the ASIC. Without
+        * the D-state transition notification from the audio function, PMFW
+        * cannot handle BACO entry/exit correctly, which causes a driver hang
+        * on runpm resume.
+        *
+        * To address this, we fall back to the legacy message path (the driver
+        * masters the timing for BACO entry/exit) when the sound driver is
+        * missing.
+        */
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
        else
                return smu_v11_0_baco_enter(smu);
@@ -2282,7 +2304,7 @@ static int navi10_baco_exit(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm) {
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
                /* Wait for PMFW handling for the Dstate change */
                msleep(10);
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
index 5e292c3f5050e3a806b530236ac6f490be515828..ca57221e39629c68c9c62b836131edbd149058b6 100644 (file)
@@ -1058,6 +1058,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
        uint32_t min_value, max_value;
        uint32_t smu_version;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_GFXCLK:
        case SMU_SCLK:
@@ -1180,7 +1182,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
                if (!smu->od_enabled || !od_table || !od_settings)
                        break;
 
-               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 
                if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
                        sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
@@ -2187,7 +2189,7 @@ static int sienna_cichlid_baco_enter(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm)
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
        else
                return smu_v11_0_baco_enter(smu);
@@ -2197,7 +2199,7 @@ static int sienna_cichlid_baco_exit(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm) {
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
                /* Wait for PMFW handling for the Dstate change */
                msleep(10);
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
index 3a3421452e57a1f52173081ab304994b6fe845e4..f6ef0ce6e9e2c30e509df6c6e077b02fa3af7f29 100644 (file)
@@ -589,10 +589,12 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
        if (ret)
                return ret;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_SCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -601,7 +603,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_CCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
+                       size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -610,7 +612,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                        size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                        size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
@@ -688,10 +690,12 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
        if (ret)
                return ret;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_SCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -700,7 +704,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_CCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
+                       size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -709,7 +713,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                        size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                        size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
index 5aa175e12a7837ad7a6b3dfcd427df9eb7a0de57..145f13b8c977d6406ea30b60efef69be53ba1042 100644 (file)
@@ -497,6 +497,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
        if (ret)
                return ret;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
index ab652028e0034d12a2575c523e85e6c356e6f55a..5019903db492afc5197cfa396eaf3e5eebb7315b 100644 (file)
@@ -733,15 +733,19 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
        uint32_t freq_values[3] = {0};
        uint32_t min_clk, max_clk;
 
-       if (amdgpu_ras_intr_triggered())
-               return sysfs_emit(buf, "unavailable\n");
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
+       if (amdgpu_ras_intr_triggered()) {
+               size += sysfs_emit_at(buf, size, "unavailable\n");
+               return size;
+       }
 
        dpm_context = smu_dpm->dpm_context;
 
        switch (type) {
 
        case SMU_OD_SCLK:
-               size = sysfs_emit(buf, "%s:\n", "GFXCLK");
+               size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
                fallthrough;
        case SMU_SCLK:
                ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
@@ -795,7 +799,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
                break;
 
        case SMU_OD_MCLK:
-               size = sysfs_emit(buf, "%s:\n", "MCLK");
+               size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
                fallthrough;
        case SMU_MCLK:
                ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
index 627ba2eec7fda2a2281dfb9dd5b9d7d366c5cf71..a403657151ba19f6826710540cde08a05fed4221 100644 (file)
@@ -1052,16 +1052,18 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
        int i, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_SCLK:
-               size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
                break;
        case SMU_OD_RANGE:
-               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                break;
index 66711ab24c15a13e197a6150b117437afbb7c83c..843d2cbfc71d4caea9570d6408ffd76bf9828bd9 100644 (file)
@@ -1053,3 +1053,24 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
 
        return ret;
 }
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
+{
+       struct pci_dev *p = NULL;
+       bool snd_driver_loaded;
+
+       /*
+        * If the ASIC comes with no audio function, we always assume
+        * it is "enabled".
+        */
+       p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+                       adev->pdev->bus->number, 1);
+       if (!p)
+               return true;
+
+       snd_driver_loaded = pci_is_enabled(p) ? true : false;
+
+       pci_dev_put(p);
+
+       return snd_driver_loaded;
+}
index 16993daa2ae0425a17572b02bf0a231a87559c15..beea03810bcaabc476e682446c5fc35973ed9dba 100644 (file)
@@ -110,5 +110,20 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
 int smu_cmn_set_mp1_state(struct smu_context *smu,
                          enum pp_mp1_state mp1_state);
 
+/*
+ * Helper function to make sysfs_emit_at() happy. Align buf to
+ * the current page boundary and record the offset.
+ */
+static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
+{
+       if (!*buf || !offset)
+               return;
+
+       *offset = offset_in_page(*buf);
+       *buf -= *offset;
+}
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
+
 #endif
 #endif
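
For context on the helper added above: sysfs buffers are page-sized and sysfs_emit_at() expects a page-aligned buf plus an in-page offset, so smu_cmn_get_sysfs_buf() rewinds buf to the start of its page and hands back the old in-page position through *offset. A minimal kernel-context sketch of the resulting call pattern, mirroring the print_clk_levels hunks earlier in this diff (illustrative only, not part of the commit):

static int example_print_clk_levels(struct smu_context *smu, char *buf)
{
	int size = 0;

	/* buf now points at the page start; size holds the old in-page offset. */
	smu_cmn_get_sysfs_buf(&buf, &size);

	/* Append after anything already emitted into this page. */
	size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			      smu->gfx_default_hard_min_freq);
	size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			      smu->gfx_default_soft_max_freq);

	return size;	/* in-page offset plus the bytes emitted above */
}
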
index 6325877c5fd6e642169f350fa17d08593aaa95a0..ea9a79bc958391f6714249b438c96d65b94f080c 100644 (file)
@@ -1834,11 +1834,20 @@ static void connector_bad_edid(struct drm_connector *connector,
                               u8 *edid, int num_blocks)
 {
        int i;
-       u8 num_of_ext = edid[0x7e];
+       u8 last_block;
+
+       /*
+        * 0x7e in the EDID is the number of extension blocks. The EDID
+        * is 1 (base block) + num_ext_blocks big. That means we can think
+        * of 0x7e in the EDID as the _index_ of the last block in the
+        * combined chunk of memory.
+        */
+       last_block = edid[0x7e];
 
        /* Calculate real checksum for the last edid extension block data */
-       connector->real_edid_checksum =
-               drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH);
+       if (last_block < num_blocks)
+               connector->real_edid_checksum =
+                       drm_edid_block_checksum(edid + last_block * EDID_LENGTH);
 
        if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
                return;
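
To make the new bounds check concrete: byte 0x7e of the base block is the extension count, so it doubles as the index of the last block the EDID claims to contain, and a corrupt EDID can claim more blocks than were actually read. A small standalone sketch of that relationship (helper names here are hypothetical, not the DRM API):

#include <stdint.h>
#include <stddef.h>

#define EDID_BLOCK_LEN 128	/* every EDID block is 128 bytes */

/* For a valid block, the sum of all 128 bytes is 0 modulo 256. */
static uint8_t example_block_checksum(const uint8_t *block)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < EDID_BLOCK_LEN; i++)
		sum += block[i];
	return sum;
}

/*
 * edid points at num_blocks contiguous 128-byte blocks that were actually
 * read. Byte 0x7e of the base block advertises the extension count, so it
 * is also the index of the last block the EDID claims to have -- which may
 * exceed what was read when the EDID is corrupt, hence the bounds check.
 */
static int example_last_block_checksum(const uint8_t *edid, int num_blocks,
				       uint8_t *out)
{
	uint8_t last_block = edid[0x7e];

	if (last_block >= num_blocks)
		return -1;	/* advertised block was never read */

	*out = example_block_checksum(edid +
				      (size_t)last_block * EDID_BLOCK_LEN);
	return 0;
}
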
index 3ab07832104587d97b774381b9110a570b7d714c..8e7a124d6c5a3ae27f18c268af7d02e4fa952b77 100644 (file)
@@ -1506,6 +1506,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 {
        struct drm_client_dev *client = &fb_helper->client;
        struct drm_device *dev = fb_helper->dev;
+       struct drm_mode_config *config = &dev->mode_config;
        int ret = 0;
        int crtc_count = 0;
        struct drm_connector_list_iter conn_iter;
@@ -1663,6 +1664,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
        /* Handle our overallocation */
        sizes.surface_height *= drm_fbdev_overalloc;
        sizes.surface_height /= 100;
+       if (sizes.surface_height > config->max_height) {
+               drm_dbg_kms(dev, "Fbdev over-allocation too large; clamping height to %d\n",
+                           config->max_height);
+               sizes.surface_height = config->max_height;
+       }
 
        /* push down into drivers */
        ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
index 76d38561c91031c06bd4a39fd13782695813b417..cf741c5c82d2533747c28590e6591b5bc8d7bcf9 100644 (file)
@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
                if (switch_mmu_context) {
                        struct etnaviv_iommu_context *old_context = gpu->mmu_context;
 
-                       etnaviv_iommu_context_get(mmu_context);
-                       gpu->mmu_context = mmu_context;
+                       gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
                        etnaviv_iommu_context_put(old_context);
                }
 
index 8f1b5af47dd633bd3c6f6a9abcaf81daf765e587..f0b2540e60e46ee587b1039d51dd44ce5934d0db 100644 (file)
@@ -294,8 +294,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
                list_del(&mapping->obj_node);
        }
 
-       etnaviv_iommu_context_get(mmu_context);
-       mapping->context = mmu_context;
+       mapping->context = etnaviv_iommu_context_get(mmu_context);
        mapping->use = 1;
 
        ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
index 4dd7d9d541c0903c1e66fbe074af9d846e396c6b..486259e154aff63f7cdfed03395348cfd822084e 100644 (file)
@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
                goto err_submit_objects;
 
        submit->ctx = file->driver_priv;
-       etnaviv_iommu_context_get(submit->ctx->mmu);
-       submit->mmu_context = submit->ctx->mmu;
+       submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
        submit->exec_state = args->exec_state;
        submit->flags = args->flags;
 
index c297fffe06eb3021d1a0ac22fad55573dc5b206c..cc5b07f86346320c9380c3c32519e992cdf6dc7c 100644 (file)
@@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
        /* We rely on the GPU running, so program the clock */
        etnaviv_gpu_update_clock(gpu);
 
+       gpu->fe_running = false;
+       gpu->exec_state = -1;
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = NULL;
+
        return 0;
 }
 
@@ -637,19 +643,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
                          VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
                          VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
        }
+
+       gpu->fe_running = true;
 }
 
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+                                         struct etnaviv_iommu_context *context)
 {
-       u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
-                               &gpu->mmu_context->cmdbuf_mapping);
        u16 prefetch;
+       u32 address;
 
        /* setup the MMU */
-       etnaviv_iommu_restore(gpu, gpu->mmu_context);
+       etnaviv_iommu_restore(gpu, context);
 
        /* Start command processor */
        prefetch = etnaviv_buffer_init(gpu);
+       address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+                                       &gpu->mmu_context->cmdbuf_mapping);
 
        etnaviv_gpu_start_fe(gpu, address, prefetch);
 }
@@ -832,7 +842,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
        /* Now program the hardware */
        mutex_lock(&gpu->lock);
        etnaviv_gpu_hw_init(gpu);
-       gpu->exec_state = -1;
        mutex_unlock(&gpu->lock);
 
        pm_runtime_mark_last_busy(gpu->dev);
@@ -1057,8 +1066,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
        spin_unlock(&gpu->event_spinlock);
 
        etnaviv_gpu_hw_init(gpu);
-       gpu->exec_state = -1;
-       gpu->mmu_context = NULL;
 
        mutex_unlock(&gpu->lock);
        pm_runtime_mark_last_busy(gpu->dev);
@@ -1370,14 +1377,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
                goto out_unlock;
        }
 
-       if (!gpu->mmu_context) {
-               etnaviv_iommu_context_get(submit->mmu_context);
-               gpu->mmu_context = submit->mmu_context;
-               etnaviv_gpu_start_fe_idleloop(gpu);
-       } else {
-               etnaviv_iommu_context_get(gpu->mmu_context);
-               submit->prev_mmu_context = gpu->mmu_context;
-       }
+       if (!gpu->fe_running)
+               etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+       if (submit->prev_mmu_context)
+               etnaviv_iommu_context_put(submit->prev_mmu_context);
+       submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
 
        if (submit->nr_pmrs) {
                gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
@@ -1579,7 +1584,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
 
 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 {
-       if (gpu->initialized && gpu->mmu_context) {
+       if (gpu->initialized && gpu->fe_running) {
                /* Replace the last WAIT with END */
                mutex_lock(&gpu->lock);
                etnaviv_buffer_end(gpu);
@@ -1592,8 +1597,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
                 */
                etnaviv_gpu_wait_idle(gpu, 100);
 
-               etnaviv_iommu_context_put(gpu->mmu_context);
-               gpu->mmu_context = NULL;
+               gpu->fe_running = false;
        }
 
        gpu->exec_state = -1;
@@ -1741,6 +1745,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
        etnaviv_gpu_hw_suspend(gpu);
 #endif
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+
        if (gpu->initialized) {
                etnaviv_cmdbuf_free(&gpu->buffer);
                etnaviv_iommu_global_fini(gpu);
index 8ea48697d13218da8f2cfdc2e657932365f48391..1c75c8ed5bcea2c5bec1d923ed35fc9a78884c67 100644 (file)
@@ -101,6 +101,7 @@ struct etnaviv_gpu {
        struct workqueue_struct *wq;
        struct drm_gpu_scheduler sched;
        bool initialized;
+       bool fe_running;
 
        /* 'ring'-buffer: */
        struct etnaviv_cmdbuf buffer;
index 1a7c89a67bea3c4e2464c8017d7f4d69474d78b1..afe5dd6a9925bf2cbf0171afd579ccaaab153851 100644 (file)
@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
        u32 pgtable;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        /* set base addresses */
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
index f8bf488e9d717360aa21a9f7f3db45ed25dd5278..d664ae29ae2096996996bf2ed7c624a33396620a 100644 (file)
@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
        if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
                return;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        prefetch = etnaviv_buffer_config_mmuv2(gpu,
                                (u32)v2_context->mtlb_dma,
                                (u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
        if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
                return;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
                  lower_32_bits(context->global->v2.pta_dma));
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
index dab1b58006d836dea6b6c5ea88a5fb3a3d5b57d4..9fb1a2aadbcb0fa9c740cc33cb68123a0a111895 100644 (file)
@@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(context, m);
+                       etnaviv_iommu_context_put(m->context);
                        m->context = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
index d1d6902fd13beeaf1eda709cbc249a574115a5e6..e4a0b7d09c2eab6f39498d03123e575b60cd4198 100644 (file)
@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
 struct etnaviv_iommu_context *
 etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
                           struct etnaviv_cmdbuf_suballoc *suballoc);
-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+static inline struct etnaviv_iommu_context *
+etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
 {
        kref_get(&ctx->refcount);
+       return ctx;
 }
 void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
 void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
index 9870c4e6af36cdaaf37253ab1d69952a43001dd6..b5001db7a95c6eb85a8f9bffd12267ea5034866f 100644 (file)
@@ -793,7 +793,6 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct decon_context *ctx;
-       struct resource *res;
        int ret;
        int i;
 
@@ -818,8 +817,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
                ctx->clks[i] = clk;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ctx->addr = devm_ioremap_resource(dev, res);
+       ctx->addr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctx->addr))
                return PTR_ERR(ctx->addr);
 
index e39fac889edc339960ee280a81635c0fb6a21615..8d137857818cab55f2304bd7a18a63171f251b07 100644 (file)
@@ -1738,7 +1738,6 @@ static const struct component_ops exynos_dsi_component_ops = {
 static int exynos_dsi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct resource *res;
        struct exynos_dsi *dsi;
        int ret, i;
 
@@ -1789,8 +1788,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
                }
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       dsi->reg_base = devm_ioremap_resource(dev, res);
+       dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(dsi->reg_base))
                return PTR_ERR(dsi->reg_base);
 
index a3c718148c451122f6b622f81941d2f416dd92b0..ecfd82d0afb7e29552f19a496e7327cc1ab2d67f 100644 (file)
@@ -85,7 +85,6 @@ struct fimc_scaler {
 /*
  * A structure of fimc context.
  *
- * @regs_res: register resources.
  * @regs: memory mapped io registers.
  * @lock: locking of operations.
  * @clocks: fimc clocks.
@@ -103,7 +102,6 @@ struct fimc_context {
        struct exynos_drm_ipp_formats   *formats;
        unsigned int                    num_formats;
 
-       struct resource *regs_res;
        void __iomem    *regs;
        spinlock_t      lock;
        struct clk      *clocks[FIMC_CLKS_MAX];
@@ -1327,8 +1325,7 @@ static int fimc_probe(struct platform_device *pdev)
        ctx->num_formats = num_formats;
 
        /* resource memory */
-       ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+       ctx->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctx->regs))
                return PTR_ERR(ctx->regs);
 
index 700ca4fa6665c577f48623916cfc3c66418cda41..c735e53939d88c86799afb873afbb75f4dd54ea9 100644 (file)
@@ -1202,9 +1202,7 @@ static int fimd_probe(struct platform_device *pdev)
                return PTR_ERR(ctx->lcd_clk);
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-       ctx->regs = devm_ioremap_resource(dev, res);
+       ctx->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctx->regs))
                return PTR_ERR(ctx->regs);
 
index b00230626c6a008fcd46d9d6ae92861ac193c307..471fd6c8135f2c996acb77c7b43d1fbdba0b6570 100644 (file)
@@ -1449,7 +1449,6 @@ static const struct component_ops g2d_component_ops = {
 static int g2d_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct resource *res;
        struct g2d_data *g2d;
        int ret;
 
@@ -1491,9 +1490,7 @@ static int g2d_probe(struct platform_device *pdev)
        clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
        clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-       g2d->regs = devm_ioremap_resource(dev, res);
+       g2d->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(g2d->regs)) {
                ret = PTR_ERR(g2d->regs);
                goto err_put_clk;
index 90d7bf9068852cfad01b945047783cdf83045128..166a802628963226b6678d5668660a0761c3e29d 100644 (file)
@@ -86,7 +86,6 @@ struct gsc_scaler {
 /*
  * A structure of gsc context.
  *
- * @regs_res: register resources.
  * @regs: memory mapped io registers.
  * @gsc_clk: gsc gate clock.
  * @sc: scaler infomations.
@@ -103,7 +102,6 @@ struct gsc_context {
        struct exynos_drm_ipp_formats   *formats;
        unsigned int                    num_formats;
 
-       struct resource *regs_res;
        void __iomem    *regs;
        const char      **clk_names;
        struct clk      *clocks[GSC_MAX_CLOCKS];
@@ -1272,9 +1270,7 @@ static int gsc_probe(struct platform_device *pdev)
                }
        }
 
-       /* resource memory */
-       ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+       ctx->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctx->regs))
                return PTR_ERR(ctx->regs);
 
index ee61be4cf15216f0c5044a1e93e2e4d92965822b..dec7df35baa9dd063a5fb34f02bea41d1f05e059 100644 (file)
@@ -278,7 +278,6 @@ static const struct component_ops rotator_component_ops = {
 static int rotator_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct resource *regs_res;
        struct rot_context *rot;
        const struct rot_variant *variant;
        int irq;
@@ -292,8 +291,7 @@ static int rotator_probe(struct platform_device *pdev)
        rot->formats = variant->formats;
        rot->num_formats = variant->num_formats;
        rot->dev = dev;
-       regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       rot->regs = devm_ioremap_resource(dev, regs_res);
+       rot->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(rot->regs))
                return PTR_ERR(rot->regs);
 
index f9ae5b038d59301a24c456aaeb1e91803ad3260a..3a7851b7dc668fefbe91b1cd6ed577245a779e55 100644 (file)
@@ -485,7 +485,6 @@ static const struct component_ops scaler_component_ops = {
 static int scaler_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct resource *regs_res;
        struct scaler_context *scaler;
        int irq;
        int ret, i;
@@ -498,8 +497,7 @@ static int scaler_probe(struct platform_device *pdev)
                (struct scaler_data *)of_device_get_match_data(dev);
 
        scaler->dev = dev;
-       regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       scaler->regs = devm_ioremap_resource(dev, regs_res);
+       scaler->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(scaler->regs))
                return PTR_ERR(scaler->regs);
 
index c769dec576de54f13c8a7f37127579fc5b990e8c..7655142a4651cadc40d25e5fb6f719aa28dec516 100644 (file)
@@ -1957,7 +1957,6 @@ static int hdmi_probe(struct platform_device *pdev)
        struct hdmi_audio_infoframe *audio_infoframe;
        struct device *dev = &pdev->dev;
        struct hdmi_context *hdata;
-       struct resource *res;
        int ret;
 
        hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
@@ -1979,8 +1978,7 @@ static int hdmi_probe(struct platform_device *pdev)
                return ret;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       hdata->regs = devm_ioremap_resource(dev, res);
+       hdata->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hdata->regs)) {
                ret = PTR_ERR(hdata->regs);
                return ret;
index 886add4f9cd0fde76b784d8c6e04903819fdd757..d2d8582b36df9aebac0ebbd6e4ac5f30ddf1bef8 100644 (file)
@@ -46,6 +46,7 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv);
 int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp);
 int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
                            u32 w, u32 h, u32 pitch);
+int hyperv_hide_hw_ptr(struct hv_device *hdev);
 int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect);
 int hyperv_connect_vsp(struct hv_device *hdev);
 
index 6dd4717d3e1eacd74be7e66e574052a02388ee69..8c97a20dfe2310505110a4e125b3983490b41385 100644 (file)
@@ -101,6 +101,7 @@ static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
        struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
        struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
 
+       hyperv_hide_hw_ptr(hv->hdev);
        hyperv_update_situation(hv->hdev, 1,  hv->screen_depth,
                                crtc_state->mode.hdisplay,
                                crtc_state->mode.vdisplay,
index 6d4bdccfbd1adff363d04622d065187453173509..c0155c6271bf8935f9b1c967a9d2fd739fc4f074 100644 (file)
@@ -299,6 +299,55 @@ int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
        return 0;
 }
 
+/*
+ * Hyper-V supports a hardware cursor feature. It's not used by Linux VM,
+ * but the Hyper-V host still draws a point as an extra mouse pointer,
+ * which is unwanted, especially when Xorg is running.
+ *
+ * The hyperv_fb driver uses synthvid_send_ptr() to hide the unwanted
+ * pointer, by setting msg.ptr_pos.is_visible = 1 and setting the
+ * msg.ptr_shape.data. Note: setting msg.ptr_pos.is_visible to 0 doesn't
+ * work in tests.
+ *
+ * Copy synthvid_send_ptr() to hyperv_drm and rename it to
+ * hyperv_hide_hw_ptr(). Note: hyperv_hide_hw_ptr() is also called in the
+ * handler of the SYNTHVID_FEATURE_CHANGE event, otherwise the host still
+ * draws an extra unwanted mouse pointer after the VM Connection window is
+ * closed and reopened.
+ */
+int hyperv_hide_hw_ptr(struct hv_device *hdev)
+{
+       struct synthvid_msg msg;
+
+       memset(&msg, 0, sizeof(struct synthvid_msg));
+       msg.vid_hdr.type = SYNTHVID_POINTER_POSITION;
+       msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+               sizeof(struct synthvid_pointer_position);
+       msg.ptr_pos.is_visible = 1;
+       msg.ptr_pos.video_output = 0;
+       msg.ptr_pos.image_x = 0;
+       msg.ptr_pos.image_y = 0;
+       hyperv_sendpacket(hdev, &msg);
+
+       memset(&msg, 0, sizeof(struct synthvid_msg));
+       msg.vid_hdr.type = SYNTHVID_POINTER_SHAPE;
+       msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+               sizeof(struct synthvid_pointer_shape);
+       msg.ptr_shape.part_idx = SYNTHVID_CURSOR_COMPLETE;
+       msg.ptr_shape.is_argb = 1;
+       msg.ptr_shape.width = 1;
+       msg.ptr_shape.height = 1;
+       msg.ptr_shape.hot_x = 0;
+       msg.ptr_shape.hot_y = 0;
+       msg.ptr_shape.data[0] = 0;
+       msg.ptr_shape.data[1] = 1;
+       msg.ptr_shape.data[2] = 1;
+       msg.ptr_shape.data[3] = 1;
+       hyperv_sendpacket(hdev, &msg);
+
+       return 0;
+}
+
 int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect)
 {
        struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
@@ -392,8 +441,11 @@ static void hyperv_receive_sub(struct hv_device *hdev)
                return;
        }
 
-       if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE)
+       if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) {
                hv->dirt_needed = msg->feature_chg.is_dirt_needed;
+               if (hv->dirt_needed)
+                       hyperv_hide_hw_ptr(hv->hdev);
+       }
 }
 
 static void hyperv_receive(void *ctx)
index 642a5b5a1b81c4cf803c06e74eddd9ff9453a13d..335ba9f43d8f7c1d721970a977024e597bc78bf0 100644 (file)
@@ -19,7 +19,6 @@ subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
 subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
 # clang warnings
 subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
-subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
 subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
 subdir-ccflags-y += $(call cc-disable-warning, frame-address)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
index 43ec7fcd3f5d284c08d275640d110b69ae64a159..a3eae3f3eadcee308442f9cf2be36dacdfeeb0b5 100644 (file)
@@ -1577,8 +1577,14 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
-       enum pipe pipe = intel_crtc->pipe;
+       struct intel_crtc *intel_crtc;
+       enum pipe pipe;
+
+       if (!crtc_state)
+               return;
+
+       intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       pipe = intel_crtc->pipe;
 
        /* wa verify 1409054076:icl,jsl,ehl */
        if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B &&
index 7cfe91fc05f2470a21133cb2f7d7d3a1bc28e1c6..68abeaf2d7d4d818c3f6c5ddd3374838469b8bd5 100644 (file)
@@ -186,13 +186,16 @@ void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915)
 {
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
        acpi_handle dhandle;
+       union acpi_object *obj;
 
        dhandle = ACPI_HANDLE(&pdev->dev);
        if (!dhandle)
                return;
 
-       acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
-                         INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+       obj = acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
+                               INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+       if (obj)
+               ACPI_FREE(obj);
 }
 
 /*
index 532237588511822c336c998a7181d794ff787b70..4e0f96bf61585bba4a9dcb375326f3b1505294ee 100644 (file)
@@ -1308,8 +1308,9 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
                else
                        aud_freq = aud_freq_init;
 
-               /* use BIOS provided value for TGL unless it is a known bad value */
-               if (IS_TIGERLAKE(dev_priv) && aud_freq_init != AUD_FREQ_TGL_BROKEN)
+               /* use BIOS provided value for TGL and RKL unless it is a known bad value */
+               if ((IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) &&
+                   aud_freq_init != AUD_FREQ_TGL_BROKEN)
                        aud_freq = aud_freq_init;
 
                drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
index e86e6ed2d3bf20613e45f2e86882fd511d850863..fd71346aac7bcb77d8688a0cb0e5e63e4255298b 100644 (file)
@@ -451,13 +451,23 @@ parse_lfp_backlight(struct drm_i915_private *i915,
        }
 
        i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
-       if (bdb->version >= 191 &&
-           get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
-               const struct lfp_backlight_control_method *method;
+       if (bdb->version >= 191) {
+               size_t exp_size;
 
-               method = &backlight_data->backlight_control[panel_type];
-               i915->vbt.backlight.type = method->type;
-               i915->vbt.backlight.controller = method->controller;
+               if (bdb->version >= 236)
+                       exp_size = sizeof(struct bdb_lfp_backlight_data);
+               else if (bdb->version >= 234)
+                       exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
+               else
+                       exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
+
+               if (get_blocksize(backlight_data) >= exp_size) {
+                       const struct lfp_backlight_control_method *method;
+
+                       method = &backlight_data->backlight_control[panel_type];
+                       i915->vbt.backlight.type = method->type;
+                       i915->vbt.backlight.controller = method->controller;
+               }
        }
 
        i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
index e91e0e0191fb86c975b14228c73bef9e205a9b90..4b94256d73197970b504e67c0f65a710fb94fa64 100644 (file)
@@ -222,31 +222,42 @@ static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
 
 struct intel_sa_info {
        u16 displayrtids;
-       u8 deburst, deprogbwlimit;
+       u8 deburst, deprogbwlimit, derating;
 };
 
 static const struct intel_sa_info icl_sa_info = {
        .deburst = 8,
        .deprogbwlimit = 25, /* GB/s */
        .displayrtids = 128,
+       .derating = 10,
 };
 
 static const struct intel_sa_info tgl_sa_info = {
        .deburst = 16,
        .deprogbwlimit = 34, /* GB/s */
        .displayrtids = 256,
+       .derating = 10,
 };
 
 static const struct intel_sa_info rkl_sa_info = {
        .deburst = 16,
        .deprogbwlimit = 20, /* GB/s */
        .displayrtids = 128,
+       .derating = 10,
 };
 
 static const struct intel_sa_info adls_sa_info = {
        .deburst = 16,
        .deprogbwlimit = 38, /* GB/s */
        .displayrtids = 256,
+       .derating = 10,
+};
+
+static const struct intel_sa_info adlp_sa_info = {
+       .deburst = 16,
+       .deprogbwlimit = 38, /* GB/s */
+       .displayrtids = 256,
+       .derating = 20,
 };
 
 static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
@@ -302,7 +313,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
                        bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);
 
                        bi->deratedbw[j] = min(maxdebw,
-                                              bw * 9 / 10); /* 90% */
+                                              bw * (100 - sa->derating) / 100);
 
                        drm_dbg_kms(&dev_priv->drm,
                                    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
@@ -400,7 +411,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
 
        if (IS_DG2(dev_priv))
                dg2_get_bw_info(dev_priv);
-       else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
+       else if (IS_ALDERLAKE_P(dev_priv))
+               icl_get_bw_info(dev_priv, &adlp_sa_info);
+       else if (IS_ALDERLAKE_S(dev_priv))
                icl_get_bw_info(dev_priv, &adls_sa_info);
        else if (IS_ROCKETLAKE(dev_priv))
                icl_get_bw_info(dev_priv, &rkl_sa_info);
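
A quick worked example of the new derating field (the arithmetic is mine, not from the commit): with derating = 10, as used by the existing icl/tgl/rkl/adls tables, bi->deratedbw[j] = min(maxdebw, bw * 90 / 100), which matches the old hard-coded 90%; the new adlp_sa_info sets derating = 20, so Alder Lake P instead keeps min(maxdebw, bw * 80 / 100), i.e. 80% of each QGV point's raw bandwidth.
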
index 9903a78df896fd141d035810f75351d5c0c0ade7..bd184325d0c75832b77130fb0c70c1c84bedaf94 100644 (file)
@@ -3807,7 +3807,13 @@ void hsw_ddi_get_config(struct intel_encoder *encoder,
 static void intel_ddi_sync_state(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *crtc_state)
 {
-       if (intel_crtc_has_dp_encoder(crtc_state))
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+       enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+       if (intel_phy_is_tc(i915, phy))
+               intel_tc_port_sanitize(enc_to_dig_port(encoder));
+
+       if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
                intel_dp_sync_state(encoder, crtc_state);
 }
 
index 134a6acbd8fbe396584e2c7590c2c8796da1aaf2..17f44ffea5866f5ff22f4fa84d7c4aef730f3658 100644 (file)
@@ -13082,18 +13082,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
        readout_plane_state(dev_priv);
 
        for_each_intel_encoder(dev, encoder) {
+               struct intel_crtc_state *crtc_state = NULL;
+
                pipe = 0;
 
                if (encoder->get_hw_state(encoder, &pipe)) {
-                       struct intel_crtc_state *crtc_state;
-
                        crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                        crtc_state = to_intel_crtc_state(crtc->base.state);
 
                        encoder->base.crtc = &crtc->base;
                        intel_encoder_get_config(encoder, crtc_state);
-                       if (encoder->sync_state)
-                               encoder->sync_state(encoder, crtc_state);
 
                        /* read out to slave crtc as well for bigjoiner */
                        if (crtc_state->bigjoiner) {
@@ -13108,6 +13106,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                        encoder->base.crtc = NULL;
                }
 
+               if (encoder->sync_state)
+                       encoder->sync_state(encoder, crtc_state);
+
                drm_dbg_kms(&dev_priv->drm,
                            "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
                            encoder->base.base.id, encoder->base.name,
@@ -13390,17 +13391,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
        intel_modeset_readout_hw_state(dev);
 
        /* HW state is read out, now we need to sanitize this mess. */
-
-       /* Sanitize the TypeC port mode upfront, encoders depend on this */
-       for_each_intel_encoder(dev, encoder) {
-               enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
-               /* We need to sanitize only the MST primary port. */
-               if (encoder->type != INTEL_OUTPUT_DP_MST &&
-                   intel_phy_is_tc(dev_priv, phy))
-                       intel_tc_port_sanitize(enc_to_dig_port(encoder));
-       }
-
        get_encoder_power_domains(dev_priv);
 
        if (HAS_PCH_IBX(dev_priv))
index 3c3c6cb5c0dffa712aca459ab7685175b7f58c59..b3c8e1c450efb6dc8375adb022ebccc9a1c712ba 100644 (file)
@@ -805,11 +805,14 @@ void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
  */
 void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
 {
+       int id;
+
        if (!HAS_DMC(dev_priv))
                return;
 
        intel_dmc_ucode_suspend(dev_priv);
        drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
 
-       kfree(dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload);
+       for (id = 0; id < DMC_FW_MAX; id++)
+               kfree(dev_priv->dmc.dmc_info[id].payload);
 }
index 04175f359fd6857c4941095cf0e6489cd9bb74bc..abe3d61b624329c9a993aa3a763e5c39bce67588 100644 (file)
@@ -2445,11 +2445,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
         */
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
                             intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
-                            sizeof(intel_dp->edp_dpcd))
+                            sizeof(intel_dp->edp_dpcd)) {
                drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
                            (int)sizeof(intel_dp->edp_dpcd),
                            intel_dp->edp_dpcd);
 
+               intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
+       }
+
        /*
         * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
         * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
index 053a3c2f726776b5e50aaaf72c40b31630fe5674..508a514c5e37dc241d0b6e3cdd7aab44ab0eb617 100644 (file)
@@ -848,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
        }
 
        if (ret)
-               intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
+               ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
 
        if (intel_dp->set_idle_link_train)
                intel_dp->set_idle_link_train(intel_dp, crtc_state);
index 330077c2e5883df6810c5ceca805e2f9dad443da..a2108a8f544d8040db7186a23f043123061d25d4 100644 (file)
@@ -814,6 +814,11 @@ struct lfp_brightness_level {
        u16 reserved;
 } __packed;
 
+#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \
+       offsetof(struct bdb_lfp_backlight_data, brightness_level)
+#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \
+       offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits)
+
 struct bdb_lfp_backlight_data {
        u8 entry_size;
        struct lfp_backlight_data_entry data[16];
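
The two EXP_BDB_LFP_BL_DATA_SIZE_REV_* macros above rely on a common trick: the size a block had before a field was appended equals offsetof() of that first newer field, which is what the reworked parse_lfp_backlight() compares the advertised block size against per BDB version. A standalone illustration of the idiom with a made-up struct (all names hypothetical, nothing here is i915's):

#include <stddef.h>
#include <stdint.h>

/* A block that gained a trailing field in a newer spec revision. */
struct example_block {
	uint32_t base_field;		/* present in every revision */
	uint16_t field_added_in_rev_n;	/* appended by revision N */
} __attribute__((packed));

/*
 * The block as emitted by pre-N producers ends where the appended field
 * begins, so its expected size is simply that field's offset. A reader can
 * therefore pick the expected size from the advertised revision before it
 * dereferences any of the newer members.
 */
#define EXAMPLE_BLOCK_SIZE_PRE_REV_N \
	offsetof(struct example_block, field_added_in_rev_n)
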
index cff72679ad7c36f40f33900cd310b60efaff31a0..166bb46408a9bd655c044db8895e648cff79f765 100644 (file)
@@ -937,6 +937,10 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
        unsigned int n;
 
        e = alloc_engines(num_engines);
+       if (!e)
+               return ERR_PTR(-ENOMEM);
+       e->num_engines = num_engines;
+
        for (n = 0; n < num_engines; n++) {
                struct intel_context *ce;
                int ret;
@@ -970,7 +974,6 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
                        goto free_engines;
                }
        }
-       e->num_engines = num_engines;
 
        return e;
 
@@ -986,6 +989,9 @@ void i915_gem_context_release(struct kref *ref)
        trace_i915_context_free(ctx);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+       if (ctx->syncobj)
+               drm_syncobj_put(ctx->syncobj);
+
        mutex_destroy(&ctx->engines_mutex);
        mutex_destroy(&ctx->lut_mutex);
 
@@ -1205,9 +1211,6 @@ static void context_close(struct i915_gem_context *ctx)
        if (vm)
                i915_vm_close(vm);
 
-       if (ctx->syncobj)
-               drm_syncobj_put(ctx->syncobj);
-
        ctx->file_priv = ERR_PTR(-EBADF);
 
        /*
index e382b7f2353b89d775a269ac544751e2ce081341..5ab136ffdeb2dd4fa06cdcb052d9e77fd7398eb7 100644 (file)
@@ -118,7 +118,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
        intel_wakeref_t wakeref = 0;
        unsigned long count = 0;
        unsigned long scanned = 0;
-       int err;
+       int err = 0;
 
        /* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
        bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
@@ -242,12 +242,15 @@ skip:
                list_splice_tail(&still_in_list, phase->list);
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
                if (err)
-                       return err;
+                       break;
        }
 
        if (shrink & I915_SHRINK_BOUND)
                intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 
+       if (err)
+               return err;
+
        if (nr_scanned)
                *nr_scanned += scanned;
        return count;
index 35eedc14f5228d5d4ca1f6339f52d3be987b48b2..6ea13159bffcc212912c8ed5c4601063471b46c3 100644 (file)
@@ -356,11 +356,8 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
 {
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 
-       if (likely(obj)) {
-               /* This releases all gem object bindings to the backend. */
+       if (likely(obj))
                i915_ttm_free_cached_io_st(obj);
-               __i915_gem_free_object(obj);
-       }
 }
 
 static struct intel_memory_region *
@@ -875,8 +872,12 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
 {
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 
+       /* This releases all gem object bindings to the backend. */
+       __i915_gem_free_object(obj);
+
        i915_gem_object_release_memory_region(obj);
        mutex_destroy(&obj->ttm.get_io_page.lock);
+
        if (obj->ttm.created)
                call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
 }
index ffae7df5e4d7d76e737e6c2a7ee8c41545637037..4a6bb64c3a35410717ccd42c2104c1ba77181ffe 100644 (file)
@@ -59,13 +59,13 @@ static int igt_dmabuf_import_self(void *arg)
                err = PTR_ERR(import);
                goto out_dmabuf;
        }
+       import_obj = to_intel_bo(import);
 
        if (import != &obj->base) {
                pr_err("i915_gem_prime_import created a new object!\n");
                err = -EINVAL;
                goto out_import;
        }
-       import_obj = to_intel_bo(import);
 
        i915_gem_object_lock(import_obj, NULL);
        err = __i915_gem_object_get_pages(import_obj);
@@ -128,6 +128,8 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg)
                pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
                       PTR_ERR(import));
                err = PTR_ERR(import);
+       } else {
+               err = 0;
        }
 
        dma_buf_put(dmabuf);
@@ -176,6 +178,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
                err = PTR_ERR(import);
                goto out_dmabuf;
        }
+       import_obj = to_intel_bo(import);
 
        if (import == &obj->base) {
                pr_err("i915_gem_prime_import reused gem object!\n");
@@ -183,8 +186,6 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
                goto out_import;
        }
 
-       import_obj = to_intel_bo(import);
-
        i915_gem_object_lock(import_obj, NULL);
        err = __i915_gem_object_get_pages(import_obj);
        if (err) {
index b20f5621f62b127a21cd5d5b8c3b311c02405738..a2c34e5a1c540c944f47ac5db53b7b6ba33cd198 100644 (file)
@@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
        return I915_MMAP_TYPE_GTT;
 }
 
+static struct drm_i915_gem_object *
+create_sys_or_internal(struct drm_i915_private *i915,
+                      unsigned long size)
+{
+       if (HAS_LMEM(i915)) {
+               struct intel_memory_region *sys_region =
+                       i915->mm.regions[INTEL_REGION_SMEM];
+
+               return __i915_gem_object_create_user(i915, size, &sys_region, 1);
+       }
+
+       return i915_gem_object_create_internal(i915, size);
+}
+
 static bool assert_mmap_offset(struct drm_i915_private *i915,
                               unsigned long size,
                               int expected)
@@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
        u64 offset;
        int ret;
 
-       obj = i915_gem_object_create_internal(i915, size);
+       obj = create_sys_or_internal(i915, size);
        if (IS_ERR(obj))
                return expected && expected == PTR_ERR(obj);
 
@@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
        struct drm_mm_node *hole, *next;
        int loop, err = 0;
        u64 offset;
+       int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
 
        /* Disable background reaper */
        disable_retire_worker(i915);
@@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg)
        }
 
        /* Too large */
-       if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
+       if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
                pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
                err = -EINVAL;
                goto out;
        }
 
        /* Fill the hole, further allocation attempts should then fail */
-       obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+       obj = create_sys_or_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                pr_err("Unable to create object for reclaimed hole\n");
@@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
                goto err_obj;
        }
 
-       if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+       if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
                pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
                err = -EINVAL;
                goto err_obj;
@@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj)
 
 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 {
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        bool no_map;
 
-       if (HAS_LMEM(i915))
+       if (obj->ops->mmap_offset)
                return type == I915_MMAP_TYPE_FIXED;
        else if (type == I915_MMAP_TYPE_FIXED)
                return false;
index 745e84c72c908f8a741f32e5f01e28f6c41da205..17ca4dc4d0cb3a03169fc0efc3934a0851631a7e 100644 (file)
@@ -362,8 +362,9 @@ static int __intel_context_active(struct i915_active *active)
        return 0;
 }
 
-static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
-                                enum i915_sw_fence_notify state)
+static int __i915_sw_fence_call
+sw_fence_dummy_notify(struct i915_sw_fence *sf,
+                     enum i915_sw_fence_notify state)
 {
        return NOTIFY_DONE;
 }
@@ -420,6 +421,7 @@ void intel_context_fini(struct intel_context *ce)
 
        mutex_destroy(&ce->pin_mutex);
        i915_active_fini(&ce->active);
+       i915_sw_fence_fini(&ce->guc_blocked);
 }
 
 void i915_context_module_exit(void)
index d812b27835f8ce8f9388f7e0b018025684215106..0a03fbed9f9b7117a86620abde12597716a2adc2 100644 (file)
@@ -882,8 +882,6 @@ void intel_rps_park(struct intel_rps *rps)
        if (!intel_rps_is_enabled(rps))
                return;
 
-       GEM_BUG_ON(atomic_read(&rps->num_waiters));
-
        if (!intel_rps_clear_active(rps))
                return;
 
@@ -1973,8 +1971,14 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
 u32 intel_rps_read_punit_req(struct intel_rps *rps)
 {
        struct intel_uncore *uncore = rps_to_uncore(rps);
+       struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+       intel_wakeref_t wakeref;
+       u32 freq = 0;
 
-       return intel_uncore_read(uncore, GEN6_RPNSWREQ);
+       with_intel_runtime_pm_if_in_use(rpm, wakeref)
+               freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+
+       return freq;
 }
 
 static u32 intel_rps_get_req(u32 pureq)
index 99e1fad5ca206cd1308b641bf659513604fb3628..c9086a600bce57c485d2d6fd87c7c8a44278937f 100644 (file)
@@ -102,11 +102,11 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
  *  |   +-------+--------------------------------------------------------------+
  *  |   |   7:0 | NUM_DWORDS = length (in dwords) of the embedded HXG message  |
  *  +---+-------+--------------------------------------------------------------+
- *  | 1 |  31:0 |  +--------------------------------------------------------+  |
- *  +---+-------+  |                                                        |  |
- *  |...|       |  |  Embedded `HXG Message`_                               |  |
- *  +---+-------+  |                                                        |  |
- *  | n |  31:0 |  +--------------------------------------------------------+  |
+ *  | 1 |  31:0 |                                                              |
+ *  +---+-------+                                                              |
+ *  |...|       | [Embedded `HXG Message`_]                                    |
+ *  +---+-------+                                                              |
+ *  | n |  31:0 |                                                              |
  *  +---+-------+--------------------------------------------------------------+
  */
 
index bbf1ddb7743427e857f1b95f79a0cd044e80a7bd..9baa3cb07d1372b622b9d09452893758c0f2f48f 100644 (file)
  *  +---+-------+--------------------------------------------------------------+
  *  |   | Bits  | Description                                                  |
  *  +===+=======+==============================================================+
- *  | 0 |  31:0 |  +--------------------------------------------------------+  |
- *  +---+-------+  |                                                        |  |
- *  |...|       |  |  Embedded `HXG Message`_                               |  |
- *  +---+-------+  |                                                        |  |
- *  | n |  31:0 |  +--------------------------------------------------------+  |
+ *  | 0 |  31:0 |                                                              |
+ *  +---+-------+                                                              |
+ *  |...|       | [Embedded `HXG Message`_]                                    |
+ *  +---+-------+                                                              |
+ *  | n |  31:0 |                                                              |
  *  +---+-------+--------------------------------------------------------------+
  */
 
index b104fb7607eb25b8b2587d73fdcc743d45b11a95..86c318516e14f1fc5a92126597d147a92637cbb1 100644 (file)
@@ -172,11 +172,6 @@ void intel_uc_driver_remove(struct intel_uc *uc)
        __uc_free_load_err_log(uc);
 }
 
-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
-       return intel_guc_ct_enabled(&guc->ct);
-}
-
 /*
  * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
  * register using the same bits used in the CT message payload. Since our
@@ -210,7 +205,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
 static void guc_handle_mmio_msg(struct intel_guc *guc)
 {
        /* we need communication to be enabled to reply to GuC */
-       GEM_BUG_ON(!guc_communication_enabled(guc));
+       GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));
 
        spin_lock_irq(&guc->irq_lock);
        if (guc->mmio_msg) {
@@ -226,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc)
        struct drm_i915_private *i915 = gt->i915;
        int ret;
 
-       GEM_BUG_ON(guc_communication_enabled(guc));
+       GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
 
        ret = i915_inject_probe_error(i915, -ENXIO);
        if (ret)
@@ -662,7 +657,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
                return 0;
 
        /* Make sure we enable communication if and only if it's disabled */
-       GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
+       GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
 
        if (enable_communication)
                guc_enable_communication(guc);
index b56a8e37a3cd6acc794f8e36bfb89337153b101a..1bb1be5c48c847e1ef0c9a681400c5c664b8e781 100644 (file)
@@ -576,7 +576,7 @@ retry:
 
                        /* No one is going to touch shadow bb from now on. */
                        i915_gem_object_flush_map(bb->obj);
-                       i915_gem_object_unlock(bb->obj);
+                       i915_gem_ww_ctx_fini(&ww);
                }
        }
        return 0;
@@ -630,7 +630,7 @@ retry:
                return ret;
        }
 
-       i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
+       i915_gem_ww_ctx_fini(&ww);
 
        /* FIXME: we are not tracking our pinned VMA leaving it
         * up to the core to fix up the stray pin_count upon
index 664970f2bc62a76cf78956aa990b4d0186396fe7..4037030f0984740911ba5c683785f46f8afaa1b3 100644 (file)
@@ -8193,6 +8193,11 @@ enum {
 #define  HSW_SPR_STRETCH_MAX_X1                REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3)
 #define  HSW_FBCQ_DIS                  (1 << 22)
 #define  BDW_DPRS_MASK_VBLANK_SRD      (1 << 0)
+#define  SKL_PLANE1_STRETCH_MAX_MASK   REG_GENMASK(1, 0)
+#define  SKL_PLANE1_STRETCH_MAX_X8     REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0)
+#define  SKL_PLANE1_STRETCH_MAX_X4     REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1)
+#define  SKL_PLANE1_STRETCH_MAX_X2     REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2)
+#define  SKL_PLANE1_STRETCH_MAX_X1     REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
 #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
 #define _CHICKEN_TRANS_A       0x420c0
index ce446716d0929f75632415901122d435bbe270e6..79da5eca60af54b97a7ecea3d18b8dfa2a34377a 100644 (file)
@@ -829,8 +829,6 @@ static void __i915_request_ctor(void *arg)
        i915_sw_fence_init(&rq->submit, submit_notify);
        i915_sw_fence_init(&rq->semaphore, semaphore_notify);
 
-       dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
-
        rq->capture_list = NULL;
 
        init_llist_head(&rq->execute_cb);
@@ -905,17 +903,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
        rq->ring = ce->ring;
        rq->execution_mask = ce->engine->mask;
 
-       kref_init(&rq->fence.refcount);
-       rq->fence.flags = 0;
-       rq->fence.error = 0;
-       INIT_LIST_HEAD(&rq->fence.cb_list);
-
        ret = intel_timeline_get_seqno(tl, rq, &seqno);
        if (ret)
                goto err_free;
 
-       rq->fence.context = tl->fence_context;
-       rq->fence.seqno = seqno;
+       dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
+                      tl->fence_context, seqno);
 
        RCU_INIT_POINTER(rq->timeline, tl);
        rq->hwsp_seqno = tl->hwsp_seqno;
index 65bc3709f54c5aa4b1a9b9b981c7da6d62287df2..a725792d5248bb49799737d69ea54b99ea45dc10 100644 (file)
@@ -76,6 +76,8 @@ struct intel_wm_config {
 
 static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
 {
+       enum pipe pipe;
+
        if (HAS_LLC(dev_priv)) {
                /*
                 * WaCompressedResourceDisplayNewHashMode:skl,kbl
@@ -89,6 +91,16 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
                           SKL_DE_COMPRESSED_HASH_MODE);
        }
 
+       for_each_pipe(dev_priv, pipe) {
+               /*
+                * "Plane N strech max must be programmed to 11b (x1)
+                *  when Async flips are enabled on that plane."
+                */
+               if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active())
+                       intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
+                                        SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1);
+       }
+
        /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
        intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
                   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
index 1c2f4799f421ddcd520917dd95d2aa2d6830c819..12ce669650cc0715ad804d55aa036ebafbea18a0 100644 (file)
@@ -172,10 +172,10 @@ static int kmb_setup_mode_config(struct drm_device *drm)
        ret = drmm_mode_config_init(drm);
        if (ret)
                return ret;
-       drm->mode_config.min_width = KMB_MIN_WIDTH;
-       drm->mode_config.min_height = KMB_MIN_HEIGHT;
-       drm->mode_config.max_width = KMB_MAX_WIDTH;
-       drm->mode_config.max_height = KMB_MAX_HEIGHT;
+       drm->mode_config.min_width = KMB_FB_MIN_WIDTH;
+       drm->mode_config.min_height = KMB_FB_MIN_HEIGHT;
+       drm->mode_config.max_width = KMB_FB_MAX_WIDTH;
+       drm->mode_config.max_height = KMB_FB_MAX_HEIGHT;
        drm->mode_config.funcs = &kmb_mode_config_funcs;
 
        ret = kmb_setup_crtc(drm);
index ebbaa5f422d591ea285544898eec1a84ccf38de0..69a62e2d03ffb814639010f4bfd9ffc6f9e7909f 100644 (file)
 #define DRIVER_MAJOR                   1
 #define DRIVER_MINOR                   1
 
+#define KMB_FB_MAX_WIDTH               1920
+#define KMB_FB_MAX_HEIGHT              1080
+#define KMB_FB_MIN_WIDTH               1
+#define KMB_FB_MIN_HEIGHT              1
+
 #define KMB_LCD_DEFAULT_CLK            200000000
 #define KMB_SYS_CLK_MHZ                        500
 
index ecee6782612d87342d2800265633d114572fd72b..06b0c42c9e9173a8435e14f03a1cce5b2d1c29b9 100644 (file)
@@ -94,9 +94,10 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
        if (ret)
                return ret;
 
-       if (new_plane_state->crtc_w > KMB_MAX_WIDTH || new_plane_state->crtc_h > KMB_MAX_HEIGHT)
-               return -EINVAL;
-       if (new_plane_state->crtc_w < KMB_MIN_WIDTH || new_plane_state->crtc_h < KMB_MIN_HEIGHT)
+       if (new_plane_state->crtc_w > KMB_FB_MAX_WIDTH ||
+           new_plane_state->crtc_h > KMB_FB_MAX_HEIGHT ||
+           new_plane_state->crtc_w < KMB_FB_MIN_WIDTH ||
+           new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT)
                return -EINVAL;
        can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
        crtc_state =
@@ -277,6 +278,44 @@ static void config_csc(struct kmb_drm_private *kmb, int plane_id)
        kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF3(plane_id), csc_coef_lcd[11]);
 }
 
+static void kmb_plane_set_alpha(struct kmb_drm_private *kmb,
+                               const struct drm_plane_state *state,
+                               unsigned char plane_id,
+                               unsigned int *val)
+{
+       u16 plane_alpha = state->alpha;
+       u16 pixel_blend_mode = state->pixel_blend_mode;
+       int has_alpha = state->fb->format->has_alpha;
+
+       if (plane_alpha != DRM_BLEND_ALPHA_OPAQUE)
+               *val |= LCD_LAYER_ALPHA_STATIC;
+
+       if (has_alpha) {
+               switch (pixel_blend_mode) {
+               case DRM_MODE_BLEND_PIXEL_NONE:
+                       break;
+               case DRM_MODE_BLEND_PREMULTI:
+                       *val |= LCD_LAYER_ALPHA_EMBED | LCD_LAYER_ALPHA_PREMULT;
+                       break;
+               case DRM_MODE_BLEND_COVERAGE:
+                       *val |= LCD_LAYER_ALPHA_EMBED;
+                       break;
+               default:
+                       DRM_DEBUG("Missing pixel blend mode case (%s == %ld)\n",
+                                 __stringify(pixel_blend_mode),
+                                 (long)pixel_blend_mode);
+                       break;
+               }
+       }
+
+       if (plane_alpha == DRM_BLEND_ALPHA_OPAQUE && !has_alpha) {
+               *val &= LCD_LAYER_ALPHA_DISABLED;
+               return;
+       }
+
+       kmb_write_lcd(kmb, LCD_LAYERn_ALPHA(plane_id), plane_alpha);
+}
+
 static void kmb_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_atomic_state *state)
 {
@@ -303,11 +342,12 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
        fb = new_plane_state->fb;
        if (!fb)
                return;
+
        num_planes = fb->format->num_planes;
        kmb_plane = to_kmb_plane(plane);
-       plane_id = kmb_plane->id;
 
        kmb = to_kmb(plane->dev);
+       plane_id = kmb_plane->id;
 
        spin_lock_irq(&kmb->irq_lock);
        if (kmb->kmb_under_flow || kmb->kmb_flush_done) {
@@ -400,20 +440,32 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
                config_csc(kmb, plane_id);
        }
 
+       kmb_plane_set_alpha(kmb, plane->state, plane_id, &val);
+
        kmb_write_lcd(kmb, LCD_LAYERn_CFG(plane_id), val);
 
+       /* Configure LCD_CONTROL */
+       ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
+
+       /* Set layer blending config */
+       ctrl &= ~LCD_CTRL_ALPHA_ALL;
+       ctrl |= LCD_CTRL_ALPHA_BOTTOM_VL1 |
+               LCD_CTRL_ALPHA_BLEND_VL2;
+
+       ctrl &= ~LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE;
+
        switch (plane_id) {
        case LAYER_0:
-               ctrl = LCD_CTRL_VL1_ENABLE;
+               ctrl |= LCD_CTRL_VL1_ENABLE;
                break;
        case LAYER_1:
-               ctrl = LCD_CTRL_VL2_ENABLE;
+               ctrl |= LCD_CTRL_VL2_ENABLE;
                break;
        case LAYER_2:
-               ctrl = LCD_CTRL_GL1_ENABLE;
+               ctrl |= LCD_CTRL_GL1_ENABLE;
                break;
        case LAYER_3:
-               ctrl = LCD_CTRL_GL2_ENABLE;
+               ctrl |= LCD_CTRL_GL2_ENABLE;
                break;
        }
 
@@ -425,7 +477,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
         */
        ctrl |= LCD_CTRL_VHSYNC_IDLE_LVL;
 
-       kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl);
+       kmb_write_lcd(kmb, LCD_CONTROL, ctrl);
 
        /* Enable pipeline AXI read transactions for the DMA
         * after setting graphics layers. This must be done
@@ -490,6 +542,9 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
        enum drm_plane_type plane_type;
        const u32 *plane_formats;
        int num_plane_formats;
+       unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+                                 BIT(DRM_MODE_BLEND_PREMULTI)   |
+                                 BIT(DRM_MODE_BLEND_COVERAGE);
 
        for (i = 0; i < KMB_MAX_PLANES; i++) {
                plane = drmm_kzalloc(drm, sizeof(*plane), GFP_KERNEL);
@@ -521,8 +576,16 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
                drm_dbg(drm, "%s : %d i=%d type=%d",
                        __func__, __LINE__,
                          i, plane_type);
+               drm_plane_create_alpha_property(&plane->base_plane);
+
+               drm_plane_create_blend_mode_property(&plane->base_plane,
+                                                    blend_caps);
+
+               drm_plane_create_zpos_immutable_property(&plane->base_plane, i);
+
                drm_plane_helper_add(&plane->base_plane,
                                     &kmb_plane_helper_funcs);
+
                if (plane_type == DRM_PLANE_TYPE_PRIMARY) {
                        primary = plane;
                        kmb->plane = plane;
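
The LCD_CONTROL handling above switches from assigning the per-layer enable bit to OR-ing it into a value read back from the register, so enabling one layer no longer clobbers another layer's enable bit or the freshly programmed blend configuration. A small standalone sketch of the difference, with made-up bit positions:

#include <stdint.h>
#include <assert.h>

#define VL1_ENABLE (1u << 0)
#define VL2_ENABLE (1u << 1)
#define ALPHA_CFG  (1u << 6)

int main(void)
{
        uint32_t lcd_control = 0;

        /* layer 1 update */
        lcd_control |= ALPHA_CFG | VL1_ENABLE;

        /* layer 2 update: read-modify-write keeps layer 1 enabled;
         * a plain `ctrl = VL2_ENABLE` would have dropped it */
        uint32_t ctrl = lcd_control;
        ctrl |= VL2_ENABLE;
        lcd_control = ctrl;

        assert(lcd_control & VL1_ENABLE);
        assert(lcd_control & VL2_ENABLE);
        assert(lcd_control & ALPHA_CFG);
        return 0;
}
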
index 486490f7a3ec55b3dc63b680ae57fa4cfd141901..6e8d22cf8819b17913e61fee289ad2d3a2f53fc8 100644 (file)
@@ -35,6 +35,9 @@
 #define POSSIBLE_CRTCS 1
 #define to_kmb_plane(x) container_of(x, struct kmb_plane, base_plane)
 
+#define POSSIBLE_CRTCS         1
+#define KMB_MAX_PLANES         2
+
 enum layer_id {
        LAYER_0,
        LAYER_1,
@@ -43,8 +46,6 @@ enum layer_id {
        /* KMB_MAX_PLANES */
 };
 
-#define KMB_MAX_PLANES 1
-
 enum sub_plane_id {
        Y_PLANE,
        U_PLANE,
index 48150569f7025f64955f3e7690a383279e3738d0..9756101b0d32f8609191ed626197e5aec6982f17 100644 (file)
 #define LCD_CTRL_OUTPUT_ENABLED                          BIT(19)
 #define LCD_CTRL_BPORCH_ENABLE                   BIT(21)
 #define LCD_CTRL_FPORCH_ENABLE                   BIT(22)
+#define LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE       BIT(23)
 #define LCD_CTRL_PIPELINE_DMA                    BIT(28)
 #define LCD_CTRL_VHSYNC_IDLE_LVL                 BIT(31)
+#define LCD_CTRL_ALPHA_ALL                       (0xff << 6)
 
 /* interrupts */
 #define LCD_INT_STATUS                         (0x4 * 0x001)
 #define LCD_LAYER_ALPHA_EMBED                  BIT(5)
 #define LCD_LAYER_ALPHA_COMBI                  (LCD_LAYER_ALPHA_STATIC | \
                                                      LCD_LAYER_ALPHA_EMBED)
+#define LCD_LAYER_ALPHA_DISABLED               ~(LCD_LAYER_ALPHA_COMBI)
 /* RGB multiplied with alpha */
 #define LCD_LAYER_ALPHA_PREMULT                        BIT(6)
 #define LCD_LAYER_INVERT_COL                   BIT(7)
index 5f81489fc60c7959a003d17094a22224d6f40360..a4e80e4996748d84bb1e8e170718491c696ad35e 100644 (file)
@@ -4,8 +4,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/mailbox_controller.h>
 #include <linux/pm_runtime.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 #include <linux/soc/mediatek/mtk-mmsys.h>
@@ -52,11 +50,8 @@ struct mtk_drm_crtc {
        bool                            pending_async_planes;
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       struct mbox_client              cmdq_cl;
-       struct mbox_chan                *cmdq_chan;
-       struct cmdq_pkt                 cmdq_handle;
+       struct cmdq_client              *cmdq_client;
        u32                             cmdq_event;
-       u32                             cmdq_vblank_cnt;
 #endif
 
        struct device                   *mmsys_dev;
@@ -227,79 +222,9 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
 }
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static int mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, struct cmdq_pkt *pkt,
-                                   size_t size)
+static void ddp_cmdq_cb(struct cmdq_cb_data data)
 {
-       struct device *dev;
-       dma_addr_t dma_addr;
-
-       pkt->va_base = kzalloc(size, GFP_KERNEL);
-       if (!pkt->va_base) {
-               kfree(pkt);
-               return -ENOMEM;
-       }
-       pkt->buf_size = size;
-
-       dev = chan->mbox->dev;
-       dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
-                                 DMA_TO_DEVICE);
-       if (dma_mapping_error(dev, dma_addr)) {
-               dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
-               kfree(pkt->va_base);
-               kfree(pkt);
-               return -ENOMEM;
-       }
-
-       pkt->pa_base = dma_addr;
-
-       return 0;
-}
-
-static void mtk_drm_cmdq_pkt_destroy(struct mbox_chan *chan, struct cmdq_pkt *pkt)
-{
-       dma_unmap_single(chan->mbox->dev, pkt->pa_base, pkt->buf_size,
-                        DMA_TO_DEVICE);
-       kfree(pkt->va_base);
-       kfree(pkt);
-}
-
-static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
-{
-       struct mtk_drm_crtc *mtk_crtc = container_of(cl, struct mtk_drm_crtc, cmdq_cl);
-       struct cmdq_cb_data *data = mssg;
-       struct mtk_crtc_state *state;
-       unsigned int i;
-
-       state = to_mtk_crtc_state(mtk_crtc->base.state);
-
-       state->pending_config = false;
-
-       if (mtk_crtc->pending_planes) {
-               for (i = 0; i < mtk_crtc->layer_nr; i++) {
-                       struct drm_plane *plane = &mtk_crtc->planes[i];
-                       struct mtk_plane_state *plane_state;
-
-                       plane_state = to_mtk_plane_state(plane->state);
-
-                       plane_state->pending.config = false;
-               }
-               mtk_crtc->pending_planes = false;
-       }
-
-       if (mtk_crtc->pending_async_planes) {
-               for (i = 0; i < mtk_crtc->layer_nr; i++) {
-                       struct drm_plane *plane = &mtk_crtc->planes[i];
-                       struct mtk_plane_state *plane_state;
-
-                       plane_state = to_mtk_plane_state(plane->state);
-
-                       plane_state->pending.async_config = false;
-               }
-               mtk_crtc->pending_async_planes = false;
-       }
-
-       mtk_crtc->cmdq_vblank_cnt = 0;
-       mtk_drm_cmdq_pkt_destroy(mtk_crtc->cmdq_chan, data->pkt);
+       cmdq_pkt_destroy(data.data);
 }
 #endif
 
@@ -453,8 +378,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
                                    state->pending_vrefresh, 0,
                                    cmdq_handle);
 
-               if (!cmdq_handle)
-                       state->pending_config = false;
+               state->pending_config = false;
        }
 
        if (mtk_crtc->pending_planes) {
@@ -474,12 +398,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
                                mtk_ddp_comp_layer_config(comp, local_layer,
                                                          plane_state,
                                                          cmdq_handle);
-                       if (!cmdq_handle)
-                               plane_state->pending.config = false;
+                       plane_state->pending.config = false;
                }
-
-               if (!cmdq_handle)
-                       mtk_crtc->pending_planes = false;
+               mtk_crtc->pending_planes = false;
        }
 
        if (mtk_crtc->pending_async_planes) {
@@ -499,12 +420,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
                                mtk_ddp_comp_layer_config(comp, local_layer,
                                                          plane_state,
                                                          cmdq_handle);
-                       if (!cmdq_handle)
-                               plane_state->pending.async_config = false;
+                       plane_state->pending.async_config = false;
                }
-
-               if (!cmdq_handle)
-                       mtk_crtc->pending_async_planes = false;
+               mtk_crtc->pending_async_planes = false;
        }
 }
 
@@ -512,7 +430,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
                                       bool needs_vblank)
 {
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
+       struct cmdq_pkt *cmdq_handle;
 #endif
        struct drm_crtc *crtc = &mtk_crtc->base;
        struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -550,24 +468,14 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
                mtk_mutex_release(mtk_crtc->mutex);
        }
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       if (mtk_crtc->cmdq_chan) {
-               mbox_flush(mtk_crtc->cmdq_chan, 2000);
-               cmdq_handle->cmd_buf_size = 0;
+       if (mtk_crtc->cmdq_client) {
+               mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
+               cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
                cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
                cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
                mtk_crtc_ddp_config(crtc, cmdq_handle);
                cmdq_pkt_finalize(cmdq_handle);
-               dma_sync_single_for_device(mtk_crtc->cmdq_chan->mbox->dev,
-                                           cmdq_handle->pa_base,
-                                           cmdq_handle->cmd_buf_size,
-                                           DMA_TO_DEVICE);
-               /*
-                * CMDQ command should execute in next vblank,
-                * If it fail to execute in next 2 vblank, timeout happen.
-                */
-               mtk_crtc->cmdq_vblank_cnt = 2;
-               mbox_send_message(mtk_crtc->cmdq_chan, cmdq_handle);
-               mbox_client_txdone(mtk_crtc->cmdq_chan, 0);
+               cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
        }
 #endif
        mtk_crtc->config_updating = false;
@@ -581,15 +489,12 @@ static void mtk_crtc_ddp_irq(void *data)
        struct mtk_drm_private *priv = crtc->dev->dev_private;
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       if (!priv->data->shadow_register && !mtk_crtc->cmdq_chan)
-               mtk_crtc_ddp_config(crtc, NULL);
-       else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
-               DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
-                         drm_crtc_index(&mtk_crtc->base));
+       if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
 #else
        if (!priv->data->shadow_register)
-               mtk_crtc_ddp_config(crtc, NULL);
 #endif
+               mtk_crtc_ddp_config(crtc, NULL);
+
        mtk_drm_finish_page_flip(mtk_crtc);
 }
 
@@ -924,20 +829,16 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
        mutex_init(&mtk_crtc->hw_lock);
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       mtk_crtc->cmdq_cl.dev = mtk_crtc->mmsys_dev;
-       mtk_crtc->cmdq_cl.tx_block = false;
-       mtk_crtc->cmdq_cl.knows_txdone = true;
-       mtk_crtc->cmdq_cl.rx_callback = ddp_cmdq_cb;
-       mtk_crtc->cmdq_chan =
-                       mbox_request_channel(&mtk_crtc->cmdq_cl,
-                                             drm_crtc_index(&mtk_crtc->base));
-       if (IS_ERR(mtk_crtc->cmdq_chan)) {
+       mtk_crtc->cmdq_client =
+                       cmdq_mbox_create(mtk_crtc->mmsys_dev,
+                                        drm_crtc_index(&mtk_crtc->base));
+       if (IS_ERR(mtk_crtc->cmdq_client)) {
                dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
                        drm_crtc_index(&mtk_crtc->base));
-               mtk_crtc->cmdq_chan = NULL;
+               mtk_crtc->cmdq_client = NULL;
        }
 
-       if (mtk_crtc->cmdq_chan) {
+       if (mtk_crtc->cmdq_client) {
                ret = of_property_read_u32_index(priv->mutex_node,
                                                 "mediatek,gce-events",
                                                 drm_crtc_index(&mtk_crtc->base),
@@ -945,18 +846,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
                if (ret) {
                        dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
                                drm_crtc_index(&mtk_crtc->base));
-                       mbox_free_channel(mtk_crtc->cmdq_chan);
-                       mtk_crtc->cmdq_chan = NULL;
-               } else {
-                       ret = mtk_drm_cmdq_pkt_create(mtk_crtc->cmdq_chan,
-                                                      &mtk_crtc->cmdq_handle,
-                                                      PAGE_SIZE);
-                       if (ret) {
-                               dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
-                                       drm_crtc_index(&mtk_crtc->base));
-                               mbox_free_channel(mtk_crtc->cmdq_chan);
-                               mtk_crtc->cmdq_chan = NULL;
-                       }
+                       cmdq_mbox_destroy(mtk_crtc->cmdq_client);
+                       mtk_crtc->cmdq_client = NULL;
                }
        }
 #endif
index e9c6af78b1d7c340d996f85644a3fbb39d8aa9f9..3ddf739a6f9b8b10dd5f49977cfbd7d9ac1f5c48 100644 (file)
@@ -17,7 +17,7 @@ config DRM_MSM
        select DRM_SCHED
        select SHMEM
        select TMPFS
-       select QCOM_SCM if ARCH_QCOM
+       select QCOM_SCM
        select WANT_DEV_COREDUMP
        select SND_SOC_HDMI_CODEC if SND_SOC
        select SYNC_FILE
@@ -55,7 +55,7 @@ config DRM_MSM_GPU_SUDO
 
 config DRM_MSM_HDMI_HDCP
        bool "Enable HDMI HDCP support in MSM DRM driver"
-       depends on DRM_MSM && QCOM_SCM
+       depends on DRM_MSM
        default y
        help
          Choose this option to enable HDCP state machine
index 4534633fe7cdb267718cbf58efef5ab5554203a4..8fb847c174ff840a26d778df02f1532221456304 100644 (file)
@@ -571,13 +571,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
        }
 
        icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
-       ret = IS_ERR(icc_path);
-       if (ret)
+       if (IS_ERR(icc_path)) {
+               ret = PTR_ERR(icc_path);
                goto fail;
+       }
 
        ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
-       ret = IS_ERR(ocmem_icc_path);
-       if (ret) {
+       if (IS_ERR(ocmem_icc_path)) {
+               ret = PTR_ERR(ocmem_icc_path);
                /* allow -ENODATA, ocmem icc is optional */
                if (ret != -ENODATA)
                        goto fail;
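
Both interconnect-path fixes above correct the same mistake: IS_ERR() only says whether a pointer encodes an error, while PTR_ERR() recovers the actual errno, so the old `ret = IS_ERR(path)` made the probe fail with 1 instead of the real error code. A standalone sketch of the corrected pattern; IS_ERR()/PTR_ERR() are re-implemented here only so the example compiles outside the kernel, and fake_icc_get() is a stand-in rather than a driver function.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095
static inline int IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}
static inline long PTR_ERR(const void *p)
{
        return (long)(intptr_t)p;
}

static void *fake_icc_get(void)
{
        return (void *)(intptr_t)-ENODATA;      /* pretend the path is absent */
}

int main(void)
{
        void *path = fake_icc_get();
        int ret = 0;

        if (IS_ERR(path)) {
                ret = PTR_ERR(path);            /* the real errno, not just "1" */
                if (ret != -ENODATA)            /* -ENODATA allowed: path is optional */
                        return -ret;
                ret = 0;
        }
        printf("ret = %d\n", ret);
        return ret;
}
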
index 82bebb40234de14016e4e25768690706d8120283..a96ee79cc5e0886029b2ef94216562ace0d83c3e 100644 (file)
@@ -699,13 +699,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
        }
 
        icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
-       ret = IS_ERR(icc_path);
-       if (ret)
+       if (IS_ERR(icc_path)) {
+               ret = PTR_ERR(icc_path);
                goto fail;
+       }
 
        ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
-       ret = IS_ERR(ocmem_icc_path);
-       if (ret) {
+       if (IS_ERR(ocmem_icc_path)) {
+               ret = PTR_ERR(ocmem_icc_path);
                /* allow -ENODATA, ocmem icc is optional */
                if (ret != -ENODATA)
                        goto fail;
index a7c58018959fbf28266e0f406a1e47a8b45e0be2..8b73f70766a4771771f2dd04cb98822554c3e6e8 100644 (file)
@@ -296,6 +296,8 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
        u32 val;
        int request, ack;
 
+       WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
        if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
                return -EINVAL;
 
@@ -337,6 +339,8 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 {
        int bit;
 
+       WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
        if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
                return;
 
@@ -1482,6 +1486,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
        if (!pdev)
                return -ENODEV;
 
+       mutex_init(&gmu->lock);
+
        gmu->dev = &pdev->dev;
 
        of_dma_configure(gmu->dev, node, true);
index 3c74f64e31262cc9a13e4bd9b944542cbf8b1269..84bd516f01e895b27a54756463427779f54eaa43 100644 (file)
@@ -44,6 +44,9 @@ struct a6xx_gmu_bo {
 struct a6xx_gmu {
        struct device *dev;
 
+       /* For serializing communication with the GMU: */
+       struct mutex lock;
+
        struct msm_gem_address_space *aspace;
 
        void * __iomem mmio;
index 40c9fef457a49122b2bfa24570f1a477d13748a4..33da25b81615a7dcbd9b9ea17ce04e066109d35e 100644 (file)
@@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
        u32 asid;
        u64 memptr = rbmemptr(ring, ttbr0);
 
-       if (ctx == a6xx_gpu->cur_ctx)
+       if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
                return;
 
        if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -139,7 +139,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
        OUT_PKT7(ring, CP_EVENT_WRITE, 1);
        OUT_RING(ring, 0x31);
 
-       a6xx_gpu->cur_ctx = ctx;
+       a6xx_gpu->cur_ctx_seqno = ctx->seqno;
 }
 
 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
@@ -881,7 +881,7 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu)
          A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
          A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
 
-static int a6xx_hw_init(struct msm_gpu *gpu)
+static int hw_init(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -1081,7 +1081,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
        /* Always come up on rb 0 */
        a6xx_gpu->cur_ring = gpu->rb[0];
 
-       a6xx_gpu->cur_ctx = NULL;
+       a6xx_gpu->cur_ctx_seqno = 0;
 
        /* Enable the SQE to start the CP engine */
        gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
@@ -1135,6 +1135,19 @@ out:
        return ret;
 }
 
+static int a6xx_hw_init(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+       int ret;
+
+       mutex_lock(&a6xx_gpu->gmu.lock);
+       ret = hw_init(gpu);
+       mutex_unlock(&a6xx_gpu->gmu.lock);
+
+       return ret;
+}
+
 static void a6xx_dump(struct msm_gpu *gpu)
 {
        DRM_DEV_INFO(&gpu->pdev->dev, "status:   %08x\n",
@@ -1509,7 +1522,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
 
        trace_msm_gpu_resume(0);
 
+       mutex_lock(&a6xx_gpu->gmu.lock);
        ret = a6xx_gmu_resume(a6xx_gpu);
+       mutex_unlock(&a6xx_gpu->gmu.lock);
        if (ret)
                return ret;
 
@@ -1532,7 +1547,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 
        msm_devfreq_suspend(gpu);
 
+       mutex_lock(&a6xx_gpu->gmu.lock);
        ret = a6xx_gmu_stop(a6xx_gpu);
+       mutex_unlock(&a6xx_gpu->gmu.lock);
        if (ret)
                return ret;
 
@@ -1547,18 +1564,19 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-       static DEFINE_MUTEX(perfcounter_oob);
 
-       mutex_lock(&perfcounter_oob);
+       mutex_lock(&a6xx_gpu->gmu.lock);
 
        /* Force the GPU power on so we can read this register */
        a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
 
        *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
-               REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
+                           REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
 
        a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
-       mutex_unlock(&perfcounter_oob);
+
+       mutex_unlock(&a6xx_gpu->gmu.lock);
+
        return 0;
 }
 
@@ -1622,6 +1640,16 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
        return (unsigned long)busy_time;
 }
 
+void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+       mutex_lock(&a6xx_gpu->gmu.lock);
+       a6xx_gmu_set_freq(gpu, opp);
+       mutex_unlock(&a6xx_gpu->gmu.lock);
+}
+
 static struct msm_gem_address_space *
 a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
 {
@@ -1766,7 +1794,7 @@ static const struct adreno_gpu_funcs funcs = {
 #endif
                .gpu_busy = a6xx_gpu_busy,
                .gpu_get_freq = a6xx_gmu_get_freq,
-               .gpu_set_freq = a6xx_gmu_set_freq,
+               .gpu_set_freq = a6xx_gpu_set_freq,
 #if defined(CONFIG_DRM_MSM_GPU_STATE)
                .gpu_state_get = a6xx_gpu_state_get,
                .gpu_state_put = a6xx_gpu_state_put,
index 0bc2d062f54ab15a52e925a30ef0de130af92e83..8e5527c881b1e83e820b6919f6e4ea18916510b6 100644 (file)
@@ -19,7 +19,16 @@ struct a6xx_gpu {
        uint64_t sqe_iova;
 
        struct msm_ringbuffer *cur_ring;
-       struct msm_file_private *cur_ctx;
+
+       /**
+        * cur_ctx_seqno:
+        *
+        * The ctx->seqno value of the context with current pgtables
+        * installed.  Tracked by seqno rather than pointer value to
+        * avoid dangling pointers, and cases where a ctx can be freed
+        * and a new one created with the same address.
+        */
+       int cur_ctx_seqno;
 
        struct a6xx_gmu gmu;
 
index b131fd376192b6a9aede1cb4cdea0af7efe65910..700d65e39feb02471d7520b6ae9b2c1f82901a0e 100644 (file)
@@ -794,7 +794,7 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
                        -1),
        PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk,
-                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
                        -1),
 };
 
index f482e0911d039d3dce49b263e934a0ee201d2ce5..bb7d066618e6490bbffbb2cc16aedc0438be79fe 100644 (file)
@@ -1125,6 +1125,20 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
        __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
 }
 
+static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
+       .set_config = drm_atomic_helper_set_config,
+       .destroy = mdp5_crtc_destroy,
+       .page_flip = drm_atomic_helper_page_flip,
+       .reset = mdp5_crtc_reset,
+       .atomic_duplicate_state = mdp5_crtc_duplicate_state,
+       .atomic_destroy_state = mdp5_crtc_destroy_state,
+       .atomic_print_state = mdp5_crtc_atomic_print_state,
+       .get_vblank_counter = mdp5_crtc_get_vblank_counter,
+       .enable_vblank  = msm_crtc_enable_vblank,
+       .disable_vblank = msm_crtc_disable_vblank,
+       .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = mdp5_crtc_destroy,
@@ -1313,6 +1327,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
        mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
 
        drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
+                                 cursor_plane ?
+                                 &mdp5_crtc_no_lm_cursor_funcs :
                                  &mdp5_crtc_funcs, NULL);
 
        drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
index fbe4c2cd52a3d9e2cc947801b206c05b1297268f..a0392e4d8134c3ad98f5b35aae65aec846236d16 100644 (file)
@@ -1309,14 +1309,14 @@ static int dp_pm_resume(struct device *dev)
         * cannot declare the display connected unless the
         * HDMI cable is plugged in and the dongle's sink_count
         * becomes 1
+        * also, only signal the audio plugged-change when disconnected
         */
-       if (dp->link->sink_count)
+       if (dp->link->sink_count) {
                dp->dp_display.is_connected = true;
-       else
+       } else {
                dp->dp_display.is_connected = false;
-
-       dp_display_handle_plugged_change(g_dp_display,
-                               dp->dp_display.is_connected);
+               dp_display_handle_plugged_change(g_dp_display, false);
+       }
 
        DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
                        dp->link->sink_count, dp->dp_display.is_connected,
index 614dc7f26f2c8189e7510367fe9c4137da3823ab..75ae3008b68f4a2bb81f2cb4a439a42174c8d199 100644 (file)
@@ -215,8 +215,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
                goto fail;
        }
 
-       if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
+       if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) {
+               ret = -EINVAL;
                goto fail;
+       }
 
        msm_dsi->encoder = encoder;
 
index e269df285136c2c9d56fd0e171e336f0f46b209b..c86b5090fae60db7bc854496fffbdb368eacadaa 100644 (file)
@@ -451,7 +451,7 @@ static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
 
        return 0;
 err:
-       for (; i > 0; i--)
+       while (--i >= 0)
                clk_disable_unprepare(msm_host->bus_clks[i]);
 
        return ret;
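
The one-line change above fixes the error-unwind bounds: on failure at index i, clocks 0..i-1 are the ones that were enabled, and `while (--i >= 0)` disables exactly those, whereas the old loop touched index i (never enabled) and skipped index 0. A standalone sketch with counters standing in for the clocks:

#include <stdio.h>

#define NUM_CLKS 4
static int enabled[NUM_CLKS];

int main(void)
{
        int i;

        for (i = 0; i < NUM_CLKS; i++) {
                if (i == 2)             /* pretend clock 2 fails to enable */
                        goto err;
                enabled[i] = 1;
        }
        return 0;
err:
        while (--i >= 0)
                enabled[i] = 0;         /* disables 1, then 0, nothing else */

        for (i = 0; i < NUM_CLKS; i++)
                printf("clk%d enabled=%d\n", i, enabled[i]);
        return 1;
}
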
index d13552b2213b695ae9e970a310f2f487134375ec..5b4e991f220d684f3bcc3abb23d9c1df210dbb24 100644 (file)
@@ -110,14 +110,13 @@ static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
 static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
                                    u32 nb_tries, u32 timeout_us)
 {
-       bool pll_locked = false;
+       bool pll_locked = false, pll_ready = false;
        void __iomem *base = pll_14nm->phy->pll_base;
        u32 tries, val;
 
        tries = nb_tries;
        while (tries--) {
-               val = dsi_phy_read(base +
-                              REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+               val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
                pll_locked = !!(val & BIT(5));
 
                if (pll_locked)
@@ -126,23 +125,24 @@ static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
                udelay(timeout_us);
        }
 
-       if (!pll_locked) {
-               tries = nb_tries;
-               while (tries--) {
-                       val = dsi_phy_read(base +
-                               REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
-                       pll_locked = !!(val & BIT(0));
+       if (!pll_locked)
+               goto out;
 
-                       if (pll_locked)
-                               break;
+       tries = nb_tries;
+       while (tries--) {
+               val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+               pll_ready = !!(val & BIT(0));
 
-                       udelay(timeout_us);
-               }
+               if (pll_ready)
+                       break;
+
+               udelay(timeout_us);
        }
 
-       DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+out:
+       DBG("DSI PLL is %slocked, %sready", pll_locked ? "" : "*not* ", pll_ready ? "" : "*not* ");
 
-       return pll_locked;
+       return pll_locked && pll_ready;
 }
 
 static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf)
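
The rework above keeps the same two-stage poll, first for the lock bit and then for the ready bit, but flattens the nesting and reports success only when both were observed. A minimal sketch of that polling structure; the fake status register and retry counts are illustrative.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

static uint32_t read_status(void)
{
        static int reads;

        /* fake PLL: comes up locked and ready after a few reads */
        return (++reads > 3) ? (BIT(5) | BIT(0)) : 0;
}

static bool poll_bit(uint32_t bit, int tries)
{
        while (tries--) {
                if (read_status() & bit)
                        return true;
                /* the driver delays (udelay) between reads here */
        }
        return false;
}

int main(void)
{
        bool locked = poll_bit(BIT(5), 10);
        bool ready  = locked && poll_bit(BIT(0), 10);

        printf("PLL is %slocked, %sready\n",
               locked ? "" : "*not* ", ready ? "" : "*not* ");
        return (locked && ready) ? 0 : 1;
}
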
index aaa37456f4ee3cf55831cac498823454b1e2f1aa..71ed4aa0dc67e6c7ce1707a3094f8be221166395 100644 (file)
@@ -428,7 +428,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
        bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
 
        snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
-       snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
+       snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1);
 
        bytediv_init.name = clk_name;
        bytediv_init.ops = &clk_bytediv_ops;
@@ -442,7 +442,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
                return ret;
        provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;
 
-       snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
+       snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1);
        /* DIV3 */
        hw = devm_clk_hw_register_divider(dev, clk_name,
                                parent_name, 0, pll_28nm->phy->pll_base +
index 4fb397ee7c8425c7809cc12e2f4d502faee4b5d1..fe1366b4c49f58083b554de380812b4dacbd762e 100644 (file)
@@ -1116,7 +1116,7 @@ void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on)
 int msm_edp_ctrl_init(struct msm_edp *edp)
 {
        struct edp_ctrl *ctrl = NULL;
-       struct device *dev = &edp->pdev->dev;
+       struct device *dev;
        int ret;
 
        if (!edp) {
@@ -1124,6 +1124,7 @@ int msm_edp_ctrl_init(struct msm_edp *edp)
                return -EINVAL;
        }
 
+       dev = &edp->pdev->dev;
        ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                return -ENOMEM;
index 2e6fc185e54da4b3e8a79a8cb5fcfd873bf76ce6..d4e09703a87dbb0db3333636eb0019f03016347f 100644 (file)
@@ -630,10 +630,11 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
        if (ret)
                goto err_msm_uninit;
 
-       ret = msm_disp_snapshot_init(ddev);
-       if (ret)
-               DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
-
+       if (kms) {
+               ret = msm_disp_snapshot_init(ddev);
+               if (ret)
+                       DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+       }
        drm_mode_config_reset(ddev);
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -682,6 +683,7 @@ static void load_gpu(struct drm_device *dev)
 
 static int context_init(struct drm_device *dev, struct drm_file *file)
 {
+       static atomic_t ident = ATOMIC_INIT(0);
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_file_private *ctx;
 
@@ -689,12 +691,17 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
        if (!ctx)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&ctx->submitqueues);
+       rwlock_init(&ctx->queuelock);
+
        kref_init(&ctx->ref);
        msm_submitqueue_init(dev, ctx);
 
        ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
        file->driver_priv = ctx;
 
+       ctx->seqno = atomic_inc_return(&ident);
+
        return 0;
 }
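
context_init() above hands every new per-file context a unique seqno from a single shared atomic counter, so concurrently created contexts can never collide. A standalone sketch using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ident;

struct file_ctx { int seqno; };

static void context_init(struct file_ctx *ctx)
{
        /* like atomic_inc_return(): increment, then use the new value */
        ctx->seqno = atomic_fetch_add(&ident, 1) + 1;
}

int main(void)
{
        struct file_ctx a, b;

        context_init(&a);
        context_init(&b);
        printf("a=%d b=%d\n", a.seqno, b.seqno);        /* always distinct */
        return 0;
}
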
 
index 8b005d1ac89974aa61b7ee960c68750af2d07b26..c552f0c3890c1c6107bbb9b70c2de50cbaeae3d9 100644 (file)
@@ -53,14 +53,6 @@ struct msm_disp_state;
 
 #define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
 
-struct msm_file_private {
-       rwlock_t queuelock;
-       struct list_head submitqueues;
-       int queueid;
-       struct msm_gem_address_space *aspace;
-       struct kref ref;
-};
-
 enum msm_mdp_plane_property {
        PLANE_PROP_ZPOS,
        PLANE_PROP_ALPHA,
@@ -488,41 +480,6 @@ void msm_writel(u32 data, void __iomem *addr);
 u32 msm_readl(const void __iomem *addr);
 void msm_rmw(void __iomem *addr, u32 mask, u32 or);
 
-struct msm_gpu_submitqueue;
-int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
-struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
-               u32 id);
-int msm_submitqueue_create(struct drm_device *drm,
-               struct msm_file_private *ctx,
-               u32 prio, u32 flags, u32 *id);
-int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
-               struct drm_msm_submitqueue_query *args);
-int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
-void msm_submitqueue_close(struct msm_file_private *ctx);
-
-void msm_submitqueue_destroy(struct kref *kref);
-
-static inline void __msm_file_private_destroy(struct kref *kref)
-{
-       struct msm_file_private *ctx = container_of(kref,
-               struct msm_file_private, ref);
-
-       msm_gem_address_space_put(ctx->aspace);
-       kfree(ctx);
-}
-
-static inline void msm_file_private_put(struct msm_file_private *ctx)
-{
-       kref_put(&ctx->ref, __msm_file_private_destroy);
-}
-
-static inline struct msm_file_private *msm_file_private_get(
-       struct msm_file_private *ctx)
-{
-       kref_get(&ctx->ref);
-       return ctx;
-}
-
 #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
 
@@ -547,7 +504,7 @@ static inline int align_pitch(int width, int bpp)
 static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
 {
        ktime_t now = ktime_get();
-       unsigned long remaining_jiffies;
+       s64 remaining_jiffies;
 
        if (ktime_compare(*timeout, now) < 0) {
                remaining_jiffies = 0;
@@ -556,7 +513,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
                remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
        }
 
-       return remaining_jiffies;
+       return clamp(remaining_jiffies, 0LL, (s64)INT_MAX);
 }
 
 #endif /* __MSM_DRV_H__ */
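
The timeout_to_jiffies() change above keeps the remaining time in a signed 64-bit value and clamps the result to INT_MAX, so a very long timeout can no longer wrap into a bogus jiffy count. A standalone sketch of the same arithmetic; clamp() is re-implemented here and the nanoseconds-per-jiffy figure is illustrative.

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

static int64_t clamp_s64(int64_t v, int64_t lo, int64_t hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned long timeout_to_jiffies(int64_t remaining_ns, int64_t ns_per_jiffy)
{
        int64_t remaining_jiffies =
                remaining_ns <= 0 ? 0 : remaining_ns / ns_per_jiffy;

        /* signed math throughout, then a bounded conversion */
        return (unsigned long)clamp_s64(remaining_jiffies, 0, INT_MAX);
}

int main(void)
{
        printf("%lu\n", timeout_to_jiffies(INT64_MAX, 10000000));  /* clamped */
        printf("%lu\n", timeout_to_jiffies(-5, 10000000));         /* expired */
        return 0;
}
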
index fdc5367aecaa35190aa1d1afc0172b55ceb18368..151d19e4453cd477233d00cd4c102a26e3c33925 100644 (file)
@@ -46,7 +46,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
        if (!submit)
                return ERR_PTR(-ENOMEM);
 
-       ret = drm_sched_job_init(&submit->base, &queue->entity, queue);
+       ret = drm_sched_job_init(&submit->base, queue->entity, queue);
        if (ret) {
                kfree(submit);
                return ERR_PTR(ret);
@@ -171,7 +171,8 @@ out:
 static int submit_lookup_cmds(struct msm_gem_submit *submit,
                struct drm_msm_gem_submit *args, struct drm_file *file)
 {
-       unsigned i, sz;
+       unsigned i;
+       size_t sz;
        int ret = 0;
 
        for (i = 0; i < args->nr_cmds; i++) {
@@ -907,7 +908,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        /* The scheduler owns a ref now: */
        msm_gem_submit_get(submit);
 
-       drm_sched_entity_push_job(&submit->base, &queue->entity);
+       drm_sched_entity_push_job(&submit->base, queue->entity);
 
        args->fence = submit->fence_id;
 
index 0e4b45bff2e6e1b1d960e7cbbaed71c46ff4a8d9..030f82f149c2c7a4ad8b29b9c266a5b991d3e0be 100644 (file)
@@ -257,6 +257,39 @@ struct msm_gpu_perfcntr {
  */
 #define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
 
+/**
+ * struct msm_file_private - per-drm_file context
+ *
+ * @queuelock:    synchronizes access to submitqueues list
+ * @submitqueues: list of &msm_gpu_submitqueue created by userspace
+ * @queueid:      counter incremented each time a submitqueue is created,
+ *                used to assign &msm_gpu_submitqueue.id
+ * @aspace:       the per-process GPU address-space
+ * @ref:          reference count
+ * @seqno:        unique per process seqno
+ */
+struct msm_file_private {
+       rwlock_t queuelock;
+       struct list_head submitqueues;
+       int queueid;
+       struct msm_gem_address_space *aspace;
+       struct kref ref;
+       int seqno;
+
+       /**
+        * entities:
+        *
+        * Table of per-priority-level sched entities used by submitqueues
+        * associated with this &drm_file.  Because some userspace apps
+        * make assumptions about rendering from multiple gl contexts
+        * (of the same priority) within the process happening in FIFO
+        * order without requiring any fencing beyond MakeCurrent(), we
+        * create at most one &drm_sched_entity per-process per-priority-
+        * level.
+        */
+       struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
+};
+
 /**
  * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
  *
@@ -304,6 +337,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
 }
 
 /**
+ * struct msm_gpu_submitqueue - Userspace created context.
+ *
  * A submitqueue is associated with a gl context or vk queue (or equiv)
  * in userspace.
  *
@@ -321,7 +356,7 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
  *             seqno, protected by submitqueue lock
  * @lock:      submitqueue lock
  * @ref:       reference count
- * @entity: the submit job-queue
+ * @entity:    the submit job-queue
  */
 struct msm_gpu_submitqueue {
        int id;
@@ -333,7 +368,7 @@ struct msm_gpu_submitqueue {
        struct idr fence_idr;
        struct mutex lock;
        struct kref ref;
-       struct drm_sched_entity entity;
+       struct drm_sched_entity *entity;
 };
 
 struct msm_gpu_state_bo {
@@ -421,6 +456,33 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
 int msm_gpu_pm_suspend(struct msm_gpu *gpu);
 int msm_gpu_pm_resume(struct msm_gpu *gpu);
 
+int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+               u32 id);
+int msm_submitqueue_create(struct drm_device *drm,
+               struct msm_file_private *ctx,
+               u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+               struct drm_msm_submitqueue_query *args);
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
+void msm_submitqueue_close(struct msm_file_private *ctx);
+
+void msm_submitqueue_destroy(struct kref *kref);
+
+void __msm_file_private_destroy(struct kref *kref);
+
+static inline void msm_file_private_put(struct msm_file_private *ctx)
+{
+       kref_put(&ctx->ref, __msm_file_private_destroy);
+}
+
+static inline struct msm_file_private *msm_file_private_get(
+       struct msm_file_private *ctx)
+{
+       kref_get(&ctx->ref);
+       return ctx;
+}
+
 void msm_devfreq_init(struct msm_gpu *gpu);
 void msm_devfreq_cleanup(struct msm_gpu *gpu);
 void msm_devfreq_resume(struct msm_gpu *gpu);
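
The get/put helpers declared above are the usual kref pattern: callers take references, and the destructor runs only when the last reference is dropped. A standalone sketch of that lifetime model, with C11 atomics standing in for struct kref and the context reduced to just its refcount:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct file_private {
        atomic_int ref;
};

static struct file_private *file_private_new(void)
{
        struct file_private *ctx = calloc(1, sizeof(*ctx));

        if (ctx)
                atomic_store(&ctx->ref, 1);     /* like kref_init() */
        return ctx;
}

static struct file_private *file_private_get(struct file_private *ctx)
{
        atomic_fetch_add(&ctx->ref, 1);         /* like kref_get() */
        return ctx;
}

static void file_private_put(struct file_private *ctx)
{
        /* like kref_put(): destructor only on the final reference */
        if (atomic_fetch_sub(&ctx->ref, 1) == 1) {
                printf("last reference dropped, destroying\n");
                free(ctx);
        }
}

int main(void)
{
        struct file_private *ctx = file_private_new();

        if (!ctx)
                return 1;
        file_private_get(ctx);          /* e.g. a submitqueue takes a ref */
        file_private_put(ctx);
        file_private_put(ctx);          /* destructor runs here */
        return 0;
}
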
index 0a1ee20296a2cddc4b2d1c099d225d395a89ba3c..84e98c07c9001aed6e5ae299885c8a5a6dd047f2 100644 (file)
@@ -151,6 +151,9 @@ void msm_devfreq_active(struct msm_gpu *gpu)
        unsigned int idle_time;
        unsigned long target_freq = df->idle_freq;
 
+       if (!df->devfreq)
+               return;
+
        /*
         * Hold devfreq lock to synchronize with get_dev_status()/
         * target() callbacks
@@ -186,6 +189,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
        struct msm_gpu_devfreq *df = &gpu->devfreq;
        unsigned long idle_freq, target_freq = 0;
 
+       if (!df->devfreq)
+               return;
+
        /*
         * Hold devfreq lock to synchronize with get_dev_status()/
         * target() callbacks
index 32a55d81b58b6d3d28e815a06256ad0931ca287b..b8621c6e055460caaf1bc2779945702670af9da1 100644 (file)
@@ -7,6 +7,24 @@
 
 #include "msm_gpu.h"
 
+void __msm_file_private_destroy(struct kref *kref)
+{
+       struct msm_file_private *ctx = container_of(kref,
+               struct msm_file_private, ref);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
+               if (!ctx->entities[i])
+                       continue;
+
+               drm_sched_entity_destroy(ctx->entities[i]);
+               kfree(ctx->entities[i]);
+       }
+
+       msm_gem_address_space_put(ctx->aspace);
+       kfree(ctx);
+}
+
 void msm_submitqueue_destroy(struct kref *kref)
 {
        struct msm_gpu_submitqueue *queue = container_of(kref,
@@ -14,8 +32,6 @@ void msm_submitqueue_destroy(struct kref *kref)
 
        idr_destroy(&queue->fence_idr);
 
-       drm_sched_entity_destroy(&queue->entity);
-
        msm_file_private_put(queue->ctx);
 
        kfree(queue);
@@ -61,13 +77,47 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
        }
 }
 
+static struct drm_sched_entity *
+get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
+                unsigned ring_nr, enum drm_sched_priority sched_prio)
+{
+       static DEFINE_MUTEX(entity_lock);
+       unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;
+
+       /* We should have already validated that the requested priority is
+        * valid by the time we get here.
+        */
+       if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
+               return ERR_PTR(-EINVAL);
+
+       mutex_lock(&entity_lock);
+
+       if (!ctx->entities[idx]) {
+               struct drm_sched_entity *entity;
+               struct drm_gpu_scheduler *sched = &ring->sched;
+               int ret;
+
+               entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
+               if (!entity) {
+                       mutex_unlock(&entity_lock);
+                       return ERR_PTR(-ENOMEM);
+               }
+
+               ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
+               if (ret) {
+                       mutex_unlock(&entity_lock);
+                       kfree(entity);
+                       return ERR_PTR(ret);
+               }
+
+               ctx->entities[idx] = entity;
+       }
+
+       mutex_unlock(&entity_lock);
+
+       return ctx->entities[idx];
+}
+
 int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
                u32 prio, u32 flags, u32 *id)
 {
        struct msm_drm_private *priv = drm->dev_private;
        struct msm_gpu_submitqueue *queue;
-       struct msm_ringbuffer *ring;
-       struct drm_gpu_scheduler *sched;
        enum drm_sched_priority sched_prio;
        unsigned ring_nr;
        int ret;
@@ -91,12 +141,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
        queue->flags = flags;
        queue->ring_nr = ring_nr;
 
-       ring = priv->gpu->rb[ring_nr];
-       sched = &ring->sched;
-
-       ret = drm_sched_entity_init(&queue->entity,
-                       sched_prio, &sched, 1, NULL);
-       if (ret) {
+       queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+                                        ring_nr, sched_prio);
+       if (IS_ERR(queue->entity)) {
+               ret = PTR_ERR(queue->entity);
                kfree(queue);
                return ret;
        }
@@ -140,10 +188,6 @@ int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
         */
        default_prio = DIV_ROUND_UP(max_priority, 2);
 
-       INIT_LIST_HEAD(&ctx->submitqueues);
-
-       rwlock_init(&ctx->queuelock);
-
        return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
 }
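
get_sched_entity() above lazily creates one scheduler entity per ring/priority slot under a single lock and returns the cached one on later calls, which is how all submitqueues of a given priority end up sharing an entity. A standalone sketch of that lazily populated, lock-protected table, with pthreads in place of the kernel mutex and a bare counter in place of drm_sched_entity:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 4

struct entity { int id; };

static struct entity *entities[NR_SLOTS];
static pthread_mutex_t entity_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entity *get_entity(unsigned idx)
{
        struct entity *e;

        if (idx >= NR_SLOTS)
                return NULL;

        pthread_mutex_lock(&entity_lock);
        if (!entities[idx]) {
                e = calloc(1, sizeof(*e));
                if (!e) {
                        pthread_mutex_unlock(&entity_lock);
                        return NULL;
                }
                e->id = (int)idx;
                entities[idx] = e;      /* first caller creates the slot */
        }
        e = entities[idx];              /* everyone else shares it */
        pthread_mutex_unlock(&entity_lock);

        return e;
}

int main(void)
{
        /* two "submitqueues" at the same priority share one entity */
        printf("%s\n", get_entity(1) == get_entity(1) ? "shared" : "distinct");
        return 0;
}
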
 
index b8c31b697797ee294d9804621314a4e61ffa5593..66f32d965c7239496f38f783ddebf2ca0f5b0e33 100644 (file)
@@ -704,6 +704,7 @@ static const struct file_operations nv50_crc_flip_threshold_fops = {
        .open = nv50_crc_debugfs_flip_threshold_open,
        .read = seq_read,
        .write = nv50_crc_debugfs_flip_threshold_set,
+       .release = single_release,
 };
 
 int nv50_head_crc_late_register(struct nv50_head *head)
index d66f97280282a3a2f2a85299280f01a5a174c06f..72099d1e4816902f50a623107af7c64edc6f29aa 100644 (file)
@@ -52,6 +52,7 @@ nv50_head_flush_clr(struct nv50_head *head,
 void
 nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
+       if (asyh->set.curs   ) head->func->curs_set(head, asyh);
        if (asyh->set.olut   ) {
                asyh->olut.offset = nv50_lut_load(&head->olut,
                                                  asyh->olut.buffer,
@@ -67,7 +68,6 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
        if (asyh->set.view   ) head->func->view    (head, asyh);
        if (asyh->set.mode   ) head->func->mode    (head, asyh);
        if (asyh->set.core   ) head->func->core_set(head, asyh);
-       if (asyh->set.curs   ) head->func->curs_set(head, asyh);
        if (asyh->set.base   ) head->func->base    (head, asyh);
        if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
        if (asyh->set.dither ) head->func->dither  (head, asyh);
index c68cc957248e2b268f7fcdc2b0f77b2cf4d4a948..a582c0cb0cb0d2c5919a0b6a2cbbaf685df1651e 100644 (file)
@@ -71,6 +71,7 @@
 #define PASCAL_CHANNEL_GPFIFO_A                       /* cla06f.h */ 0x0000c06f
 #define VOLTA_CHANNEL_GPFIFO_A                        /* clc36f.h */ 0x0000c36f
 #define TURING_CHANNEL_GPFIFO_A                       /* clc36f.h */ 0x0000c46f
+#define AMPERE_CHANNEL_GPFIFO_B                       /* clc36f.h */ 0x0000c76f
 
 #define NV50_DISP                                     /* cl5070.h */ 0x00005070
 #define G82_DISP                                      /* cl5070.h */ 0x00008270
 #define PASCAL_DMA_COPY_B                                            0x0000c1b5
 #define VOLTA_DMA_COPY_A                                             0x0000c3b5
 #define TURING_DMA_COPY_A                                            0x0000c5b5
+#define AMPERE_DMA_COPY_B                                            0x0000c7b5
 
 #define FERMI_DECOMPRESS                                             0x000090b8
 
index 54fab7cc36c1b84ba925ffa78a846fe0918a02bc..64ee82c7c1be5936de87c8ab155b47920fbe91ff 100644 (file)
@@ -77,4 +77,5 @@ int gp100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
 int gp10b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
 int gv100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
 int tu102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int ga102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
 #endif
index 6d07e653f82d5b74a330eadea83ea0a4074da7a8..c58bcdba2c7aa30d2423390b10020f750dd5df96 100644 (file)
@@ -844,6 +844,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
                            struct ttm_resource *, struct ttm_resource *);
                int (*init)(struct nouveau_channel *, u32 handle);
        } _methods[] = {
+               {  "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
index 80099ef7570226b65ce3fa98bc4b2e19999dedcb..ea7769135b0dcf0cd23aa43e4fb2f3fde31e41e7 100644 (file)
@@ -250,7 +250,8 @@ static int
 nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
                    u64 runlist, bool priv, struct nouveau_channel **pchan)
 {
-       static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A,
+       static const u16 oclasses[] = { AMPERE_CHANNEL_GPFIFO_B,
+                                       TURING_CHANNEL_GPFIFO_A,
                                        VOLTA_CHANNEL_GPFIFO_A,
                                        PASCAL_CHANNEL_GPFIFO_A,
                                        MAXWELL_CHANNEL_GPFIFO_A,
@@ -386,7 +387,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 
        nvif_object_map(&chan->user, NULL, 0);
 
-       if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
+       if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO &&
+           chan->user.oclass < AMPERE_CHANNEL_GPFIFO_B) {
                ret = nvif_notify_ctor(&chan->user, "abi16ChanKilled",
                                       nouveau_channel_killed,
                                       true, NV906F_V0_NTFY_KILLED,
index c2bc05eb2e54a2e2dbcbdd4eb13d8cae56f7a770..1cbe01048b9304fe838252fbd70c2217b60dda8d 100644 (file)
@@ -207,6 +207,7 @@ static const struct file_operations nouveau_pstate_fops = {
        .open = nouveau_debugfs_pstate_open,
        .read = seq_read,
        .write = nouveau_debugfs_pstate_set,
+       .release = single_release,
 };
 
 static struct drm_info_list nouveau_debugfs_list[] = {
index 1f828c9f691cd5e1e375087ff2a1a90dcc31c731..6109cd9e339918f5798787aad76cd3f1e69961f6 100644 (file)
@@ -345,6 +345,9 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
        u32 arg0, arg1;
        int ret;
 
+       if (device->info.family >= NV_DEVICE_INFO_V0_AMPERE)
+               return;
+
        /* Allocate channel that has access to the graphics engine. */
        if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
                arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
@@ -469,6 +472,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
                case PASCAL_CHANNEL_GPFIFO_A:
                case VOLTA_CHANNEL_GPFIFO_A:
                case TURING_CHANNEL_GPFIFO_A:
+               case AMPERE_CHANNEL_GPFIFO_B:
                        ret = nvc0_fence_create(drm);
                        break;
                default:
index 5b27845075a1c37bca5ab637aa4ff7b14c5e5c9e..8c2ecc282723222a64880e5c782c5dc4224aefb8 100644 (file)
@@ -247,10 +247,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
        }
 
        ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
-       if (ret) {
-               nouveau_bo_ref(NULL, &nvbo);
+       if (ret)
                return ret;
-       }
 
        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possibly on
index 7c9c928c319668d48b11d0081f0c60153b36c3c6..c3526a8622e3e204adc453f6226cc2abeea2755f 100644 (file)
@@ -204,7 +204,7 @@ nv84_fence_create(struct nouveau_drm *drm)
        priv->base.context_new = nv84_fence_context_new;
        priv->base.context_del = nv84_fence_context_del;
 
-       priv->base.uevent = true;
+       priv->base.uevent = drm->client.device.info.family < NV_DEVICE_INFO_V0_AMPERE;
 
        mutex_init(&priv->mutex);
 
index 93ddf63d111408ecc354b54eb3e682278899b84e..ca75c5f6ecaf80a96429b5a6a81432183ccd02f5 100644 (file)
@@ -2602,6 +2602,7 @@ nv172_chipset = {
        .top      = { 0x00000001, ga100_top_new },
        .disp     = { 0x00000001, ga102_disp_new },
        .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
 };
 
 static const struct nvkm_device_chip
@@ -2622,6 +2623,7 @@ nv174_chipset = {
        .top      = { 0x00000001, ga100_top_new },
        .disp     = { 0x00000001, ga102_disp_new },
        .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
 };
 
 static const struct nvkm_device_chip
@@ -2642,6 +2644,7 @@ nv177_chipset = {
        .top      = { 0x00000001, ga100_top_new },
        .disp     = { 0x00000001, ga102_disp_new },
        .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
 };
 
 static int
index b0ece71aefdee1b0d8259920532820e1c8adabcc..ce774579c89d1fa285916e729adc64ea5bd8e8e4 100644 (file)
@@ -57,7 +57,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
                args->v0.count = 0;
                args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
                args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
-               args->v0.pwrsrc = -ENOSYS;
+               args->v0.pwrsrc = -ENODEV;
                args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
        }
 
index 3209eb7af65fb35e4b4dd70b33f77cd592c726e4..5e831d347a95795f3b22217b532afd4ceeb81714 100644 (file)
@@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/fifo/gp100.o
 nvkm-y += nvkm/engine/fifo/gp10b.o
 nvkm-y += nvkm/engine/fifo/gv100.o
 nvkm-y += nvkm/engine/fifo/tu102.o
+nvkm-y += nvkm/engine/fifo/ga102.o
 
 nvkm-y += nvkm/engine/fifo/chan.o
 nvkm-y += nvkm/engine/fifo/channv50.o
index 353b77d9b3dcffe35f12172e36e24b75cd454950..3492c561f2cfc858c4f776e1fd3d754d0136d544 100644 (file)
@@ -82,7 +82,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
        if (offset < 0)
                return 0;
 
-       engn = fifo->base.func->engine_id(&fifo->base, engine);
+       engn = fifo->base.func->engine_id(&fifo->base, engine) - 1;
        save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
        nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
        done = nvkm_msec(device, 2000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
new file mode 100644 (file)
index 0000000..c630dbd
--- /dev/null
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define ga102_fifo(p) container_of((p), struct ga102_fifo, base.engine)
+#define ga102_chan(p) container_of((p), struct ga102_chan, object)
+#include <engine/fifo.h>
+#include "user.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+#include <subdev/top.h>
+
+#include <nvif/cl0080.h>
+#include <nvif/clc36f.h>
+#include <nvif/class.h>
+
+struct ga102_fifo {
+       struct nvkm_fifo base;
+};
+
+struct ga102_chan {
+       struct nvkm_object object;
+
+       struct {
+               u32 runl;
+               u32 chan;
+       } ctrl;
+
+       struct nvkm_memory *mthd;
+       struct nvkm_memory *inst;
+       struct nvkm_memory *user;
+       struct nvkm_memory *runl;
+
+       struct nvkm_vmm *vmm;
+};
+
+static int
+ga102_chan_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
+{
+       if (index == 0) {
+               oclass->ctor = nvkm_object_new;
+               oclass->base = (struct nvkm_sclass) { -1, -1, AMPERE_DMA_COPY_B };
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static int
+ga102_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+              enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+       struct ga102_chan *chan = ga102_chan(object);
+       struct nvkm_device *device = chan->object.engine->subdev.device;
+       u64 bar2 = nvkm_memory_bar2(chan->user);
+
+       if (bar2 == ~0ULL)
+               return -EFAULT;
+
+       *type = NVKM_OBJECT_MAP_IO;
+       *addr = device->func->resource_addr(device, 3) + bar2;
+       *size = 0x1000;
+       return 0;
+}
+
+static int
+ga102_chan_fini(struct nvkm_object *object, bool suspend)
+{
+       struct ga102_chan *chan = ga102_chan(object);
+       struct nvkm_device *device = chan->object.engine->subdev.device;
+
+       nvkm_wr32(device, chan->ctrl.chan, 0x00000003);
+
+       nvkm_wr32(device, chan->ctrl.runl + 0x098, 0x01000000);
+       nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, chan->ctrl.runl + 0x098) & 0x00100000))
+                       break;
+       );
+
+       nvkm_wr32(device, chan->ctrl.runl + 0x088, 0);
+
+       nvkm_wr32(device, chan->ctrl.chan, 0xffffffff);
+       return 0;
+}
+
+static int
+ga102_chan_init(struct nvkm_object *object)
+{
+       struct ga102_chan *chan = ga102_chan(object);
+       struct nvkm_device *device = chan->object.engine->subdev.device;
+
+       nvkm_mask(device, chan->ctrl.runl + 0x300, 0x80000000, 0x80000000);
+
+       nvkm_wr32(device, chan->ctrl.runl + 0x080, lower_32_bits(nvkm_memory_addr(chan->runl)));
+       nvkm_wr32(device, chan->ctrl.runl + 0x084, upper_32_bits(nvkm_memory_addr(chan->runl)));
+       nvkm_wr32(device, chan->ctrl.runl + 0x088, 2);
+
+       nvkm_wr32(device, chan->ctrl.chan, 0x00000002);
+       nvkm_wr32(device, chan->ctrl.runl + 0x0090, 0);
+       return 0;
+}
+
+static void *
+ga102_chan_dtor(struct nvkm_object *object)
+{
+       struct ga102_chan *chan = ga102_chan(object);
+
+       if (chan->vmm) {
+               nvkm_vmm_part(chan->vmm, chan->inst);
+               nvkm_vmm_unref(&chan->vmm);
+       }
+
+       nvkm_memory_unref(&chan->runl);
+       nvkm_memory_unref(&chan->user);
+       nvkm_memory_unref(&chan->inst);
+       nvkm_memory_unref(&chan->mthd);
+       return chan;
+}
+
+static const struct nvkm_object_func
+ga102_chan = {
+       .dtor = ga102_chan_dtor,
+       .init = ga102_chan_init,
+       .fini = ga102_chan_fini,
+       .map = ga102_chan_map,
+       .sclass = ga102_chan_sclass,
+};
+
+static int
+ga102_chan_new(struct nvkm_device *device,
+              const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
+{
+       struct volta_channel_gpfifo_a_v0 *args = argv;
+       struct nvkm_top_device *tdev;
+       struct nvkm_vmm *vmm;
+       struct ga102_chan *chan;
+       int ret;
+
+       if (argc != sizeof(*args))
+               return -ENOSYS;
+
+       vmm = nvkm_uvmm_search(oclass->client, args->vmm);
+       if (IS_ERR(vmm))
+               return PTR_ERR(vmm);
+
+       if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&ga102_chan, oclass, &chan->object);
+       *pobject = &chan->object;
+
+       list_for_each_entry(tdev, &device->top->device, head) {
+               if (tdev->type == NVKM_ENGINE_CE) {
+                       chan->ctrl.runl = tdev->runlist;
+                       break;
+               }
+       }
+
+       if (!chan->ctrl.runl)
+               return -ENODEV;
+
+       chan->ctrl.chan = nvkm_rd32(device, chan->ctrl.runl + 0x004) & 0xfffffff0;
+
+       args->chid = 0;
+       args->inst = 0;
+       args->token = nvkm_rd32(device, chan->ctrl.runl + 0x008) & 0xffff0000;
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->mthd);
+       if (ret)
+               return ret;
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->inst);
+       if (ret)
+               return ret;
+
+       nvkm_kmap(chan->inst);
+       nvkm_wo32(chan->inst, 0x010, 0x0000face);
+       nvkm_wo32(chan->inst, 0x030, 0x7ffff902);
+       nvkm_wo32(chan->inst, 0x048, lower_32_bits(args->ioffset));
+       nvkm_wo32(chan->inst, 0x04c, upper_32_bits(args->ioffset) |
+                                    (order_base_2(args->ilength / 8) << 16));
+       nvkm_wo32(chan->inst, 0x084, 0x20400000);
+       nvkm_wo32(chan->inst, 0x094, 0x30000001);
+       nvkm_wo32(chan->inst, 0x0ac, 0x00020000);
+       nvkm_wo32(chan->inst, 0x0e4, 0x00000000);
+       nvkm_wo32(chan->inst, 0x0e8, 0);
+       nvkm_wo32(chan->inst, 0x0f4, 0x00001000);
+       nvkm_wo32(chan->inst, 0x0f8, 0x10003080);
+       nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
+       nvkm_wo32(chan->inst, 0x220, lower_32_bits(nvkm_memory_bar2(chan->mthd)));
+       nvkm_wo32(chan->inst, 0x224, upper_32_bits(nvkm_memory_bar2(chan->mthd)));
+       nvkm_done(chan->inst);
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->user);
+       if (ret)
+               return ret;
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->runl);
+       if (ret)
+               return ret;
+
+       nvkm_kmap(chan->runl);
+       nvkm_wo32(chan->runl, 0x00, 0x80030001);
+       nvkm_wo32(chan->runl, 0x04, 1);
+       nvkm_wo32(chan->runl, 0x08, 0);
+       nvkm_wo32(chan->runl, 0x0c, 0x00000000);
+       nvkm_wo32(chan->runl, 0x10, lower_32_bits(nvkm_memory_addr(chan->user)));
+       nvkm_wo32(chan->runl, 0x14, upper_32_bits(nvkm_memory_addr(chan->user)));
+       nvkm_wo32(chan->runl, 0x18, lower_32_bits(nvkm_memory_addr(chan->inst)));
+       nvkm_wo32(chan->runl, 0x1c, upper_32_bits(nvkm_memory_addr(chan->inst)));
+       nvkm_done(chan->runl);
+
+       ret = nvkm_vmm_join(vmm, chan->inst);
+       if (ret)
+               return ret;
+
+       chan->vmm = nvkm_vmm_ref(vmm);
+       return 0;
+}
+
+static const struct nvkm_device_oclass
+ga102_chan_oclass = {
+       .ctor = ga102_chan_new,
+};
+
+static int
+ga102_user_new(struct nvkm_device *device,
+              const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
+{
+       return tu102_fifo_user_new(oclass, argv, argc, pobject);
+}
+
+static const struct nvkm_device_oclass
+ga102_user_oclass = {
+       .ctor = ga102_user_new,
+};
+
+static int
+ga102_fifo_sclass(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class)
+{
+       if (index == 0) {
+               oclass->base = (struct nvkm_sclass) { -1, -1, VOLTA_USERMODE_A };
+               *class = &ga102_user_oclass;
+               return 0;
+       } else
+       if (index == 1) {
+               oclass->base = (struct nvkm_sclass) { 0, 0, AMPERE_CHANNEL_GPFIFO_B };
+               *class = &ga102_chan_oclass;
+               return 0;
+       }
+
+       return 2;
+}
+
+static int
+ga102_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
+{
+       switch (mthd) {
+       case NV_DEVICE_HOST_CHANNELS: *data = 1; return 0;
+       default:
+               break;
+       }
+
+       return -ENOSYS;
+}
+
+static void *
+ga102_fifo_dtor(struct nvkm_engine *engine)
+{
+       return ga102_fifo(engine);
+}
+
+static const struct nvkm_engine_func
+ga102_fifo = {
+       .dtor = ga102_fifo_dtor,
+       .info = ga102_fifo_info,
+       .base.sclass = ga102_fifo_sclass,
+};
+
+int
+ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+              struct nvkm_fifo **pfifo)
+{
+       struct ga102_fifo *fifo;
+
+       if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_engine_ctor(&ga102_fifo, device, type, inst, true, &fifo->base.engine);
+       *pfifo = &fifo->base;
+       return 0;
+}
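
The new file leans on the container_of() downcast pattern declared by the ga102_fifo()/ga102_chan() macros at its top: each chip-specific struct embeds a generic nvkm base object, and every callback recovers the wrapper from the base pointer it is handed. Below is a minimal, self-contained userspace sketch of that pattern; the struct and function names are invented for illustration and nothing here is nvkm API.

#include <stddef.h>
#include <stdio.h>

/* Standalone container_of(), equivalent to the kernel helper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic "base" object, standing in for struct nvkm_object. */
struct base_object {
	const char *name;
};

/* Chip-specific wrapper embedding the base object, like ga102_chan. */
struct chan {
	struct base_object object;
	unsigned int runl;
};

/* Downcast helper in the style of the ga102_chan() macro. */
#define to_chan(p) container_of((p), struct chan, object)

static void use_base(struct base_object *obj)
{
	/* Callbacks receive the base pointer and recover the wrapper. */
	struct chan *chan = to_chan(obj);

	printf("%s: runl=%u\n", obj->name, chan->runl);
}

int main(void)
{
	struct chan chan = { .object = { .name = "chan0" }, .runl = 3 };

	use_base(&chan.object);
	return 0;
}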
index 31933f3e5a076b36e14fadf75cbef8f39a7815fc..c982d834c8d98579dd436eb632a6086c84f594d0 100644 (file)
@@ -54,7 +54,7 @@ ga100_top_oneinit(struct nvkm_top *top)
                        info->reset   = (data & 0x0000001f);
                        break;
                case 2:
-                       info->runlist = (data & 0x0000fc00) >> 10;
+                       info->runlist = (data & 0x00fffc00);
                        info->engine  = (data & 0x00000003);
                        break;
                default:
@@ -85,9 +85,10 @@ ga100_top_oneinit(struct nvkm_top *top)
                }
 
                nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
-                                  "runlist %2d engine %2d reset %2d\n", type, inst,
+                                  "runlist %6x engine %2d reset %2d\n", type, inst,
                           info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type],
-                          info->addr, info->fault, info->runlist, info->engine, info->reset);
+                          info->addr, info->fault, info->runlist < 0 ? 0 : info->runlist,
+                          info->engine, info->reset);
                info = NULL;
        }
 
index beb581b96ecdc6f9dbdbc64676137de854428cd4..418638e6e3b0a06e1302ccdf682b7ea99232760d 100644 (file)
@@ -295,6 +295,7 @@ config DRM_PANEL_OLIMEX_LCD_OLINUXINO
        depends on OF
        depends on I2C
        depends on BACKLIGHT_CLASS_DEVICE
+       select CRC32
        help
          The panel is used with LCDs of different sizes, from 480x272 to
          1280x800, with 24 bits per pixel.
index 2d8794d495d08e62d749909f0e87ba040843e01e..3d8a9ab47cae2face971d4deb8ed6863afc9ee71 100644 (file)
@@ -146,8 +146,8 @@ static const struct reg_sequence y030xx067a_init_sequence[] = {
        { 0x09, REG09_SUB_BRIGHT_R(0x20) },
        { 0x0a, REG0A_SUB_BRIGHT_B(0x20) },
        { 0x0b, REG0B_HD_FREERUN | REG0B_VD_FREERUN },
-       { 0x0c, REG0C_CONTRAST_R(0x10) },
-       { 0x0d, REG0D_CONTRAST_G(0x10) },
+       { 0x0c, REG0C_CONTRAST_R(0x00) },
+       { 0x0d, REG0D_CONTRAST_G(0x00) },
        { 0x0e, REG0E_CONTRAST_B(0x10) },
        { 0x0f, 0 },
        { 0x10, REG10_BRIGHT(0x7f) },
index 0ecccf25a3c760337773d2d0e843ec865423df32..d2a0f5394fef69e4b5f4a103139620c04a1c5cf1 100644 (file)
@@ -214,7 +214,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
        }
        ret = 0;
 
-#if defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_X86
        wbinvd();
 #else
        mb();
index 0473583dcdac241ab6272005bd41aa23e88ff93d..482fb0ae6cb5db1f6a6723798171924bdf7c15bf 100644 (file)
@@ -119,7 +119,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 #endif
 
        if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
-               rdev->agp = radeon_agp_head_init(rdev->ddev);
+               rdev->agp = radeon_agp_head_init(dev);
        if (rdev->agp) {
                rdev->agp->agp_mtrr = arch_phys_wc_add(
                        rdev->agp->agp_info.aper_base,
index 0daa8bba50f5a9097e861773ee725baf9bcb991d..4bf4e25d7f011fdacb1eabb1662f2e9fedf9d448 100644 (file)
@@ -86,12 +86,20 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
        }
 
        /*
-        * Create and initialize the encoder. On Gen3 skip the LVDS1 output if
+        * Create and initialize the encoder. On Gen3, skip the LVDS1 output if
         * the LVDS1 encoder is used as a companion for LVDS0 in dual-link
-        * mode.
+        * mode, or any LVDS output if it isn't connected. The latter may happen
+        * on D3 or E3 as the LVDS encoders are needed to provide the pixel
+        * clock to the DU, even when the LVDS outputs are not used.
         */
-       if (rcdu->info->gen >= 3 && output == RCAR_DU_OUTPUT_LVDS1) {
-               if (rcar_lvds_dual_link(bridge))
+       if (rcdu->info->gen >= 3) {
+               if (output == RCAR_DU_OUTPUT_LVDS1 &&
+                   rcar_lvds_dual_link(bridge))
+                       return -ENOLINK;
+
+               if ((output == RCAR_DU_OUTPUT_LVDS0 ||
+                    output == RCAR_DU_OUTPUT_LVDS1) &&
+                   !rcar_lvds_is_connected(bridge))
                        return -ENOLINK;
        }
 
index d061b8de748fdd44fe71b3628ade5e8d1b8e1a13..b672c5bd72ee888764ba58177729b2b3b8ef2378 100644 (file)
@@ -576,6 +576,9 @@ static int rcar_lvds_attach(struct drm_bridge *bridge,
 {
        struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
 
+       if (!lvds->next_bridge)
+               return 0;
+
        return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge,
                                 flags);
 }
@@ -598,6 +601,14 @@ bool rcar_lvds_dual_link(struct drm_bridge *bridge)
 }
 EXPORT_SYMBOL_GPL(rcar_lvds_dual_link);
 
+bool rcar_lvds_is_connected(struct drm_bridge *bridge)
+{
+       struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+       return lvds->next_bridge != NULL;
+}
+EXPORT_SYMBOL_GPL(rcar_lvds_is_connected);
+
 /* -----------------------------------------------------------------------------
  * Probe & Remove
  */
index 222ec0e60785ca31901d0a14cf8ce9ca89860cb3..eb7c6ef03b00afa402f15b9aac6cb4db30624cb3 100644 (file)
@@ -16,6 +16,7 @@ struct drm_bridge;
 int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq);
 void rcar_lvds_clk_disable(struct drm_bridge *bridge);
 bool rcar_lvds_dual_link(struct drm_bridge *bridge);
+bool rcar_lvds_is_connected(struct drm_bridge *bridge);
 #else
 static inline int rcar_lvds_clk_enable(struct drm_bridge *bridge,
                                       unsigned long freq)
@@ -27,6 +28,10 @@ static inline bool rcar_lvds_dual_link(struct drm_bridge *bridge)
 {
        return false;
 }
+static inline bool rcar_lvds_is_connected(struct drm_bridge *bridge)
+{
+       return false;
+}
 #endif /* CONFIG_DRM_RCAR_LVDS */
 
 #endif /* __RCAR_LVDS_H__ */
index 8ab3247dbc4aab72f20c44f5227612ad20413b09..13c6b857158fcf63b2106ad62d496e4997af7534 100644 (file)
@@ -1123,7 +1123,7 @@ static int cdn_dp_suspend(struct device *dev)
        return ret;
 }
 
-static int cdn_dp_resume(struct device *dev)
+static __maybe_unused int cdn_dp_resume(struct device *dev)
 {
        struct cdn_dp_device *dp = dev_get_drvdata(dev);
 
index ba9e14da41b4811a581c76e211a5c5b99f9e32d4..a25b98b7f5bd7875c8be067b5b58e95f8d9a820f 100644 (file)
@@ -1174,26 +1174,24 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
         *
         * Action plan:
         *
-        * 1. When DRM gives us a mode, we should add 999 Hz to it.  That way
-        *    if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to
-        *    make 60000 kHz then the clock framework will actually give us
-        *    the right clock.
+        * 1. Try to set the exact rate first, and confirm the clock framework
+        *    can provide it.
         *
-        *    NOTE: if the PLL (maybe through a divider) could actually make
-        *    a clock rate 999 Hz higher instead of the one we want then this
-        *    could be a problem.  Unfortunately there's not much we can do
-        *    since it's baked into DRM to use kHz.  It shouldn't matter in
-        *    practice since Rockchip PLLs are controlled by tables and
-        *    even if there is a divider in the middle I wouldn't expect PLL
-        *    rates in the table that are just a few kHz different.
+        * 2. If the clock framework cannot provide the exact rate, we should
+        *    add 999 Hz to the requested rate.  That way if the clock we need
+        *    is 60000001 Hz (~60 MHz) and DRM tells us to make 60000 kHz then
+        *    the clock framework will actually give us the right clock.
         *
-        * 2. Get the clock framework to round the rate for us to tell us
+        * 3. Get the clock framework to round the rate for us to tell us
         *    what it will actually make.
         *
-        * 3. Store the rounded up rate so that we don't need to worry about
+        * 4. Store the rounded up rate so that we don't need to worry about
         *    this in the actual clk_set_rate().
         */
-       rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999);
+       rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000);
+       if (rate / 1000 != adjusted_mode->clock)
+               rate = clk_round_rate(vop->dclk,
+                                     adjusted_mode->clock * 1000 + 999);
        adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);
 
        return true;
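
The rewritten comment describes a two-step fallback: request the exact pixel clock first, apply the historical 999 Hz fudge only when the clock framework cannot provide the exact rate, and store the rounded-up result in the mode. A self-contained sketch of that decision flow is below; clk_round_rate() is replaced with a toy model whose 13.5 kHz granularity is purely illustrative.

#include <stdio.h>

/*
 * Toy stand-in for clk_round_rate(): this "PLL" can only generate
 * multiples of 13500 Hz and returns the closest one not above the
 * request.  The granularity is arbitrary and only for illustration.
 */
static long mock_clk_round_rate(long rate_hz)
{
	return (rate_hz / 13500) * 13500;
}

/* Mirrors the reworked vop_crtc_mode_fixup() steps for a mode clock in kHz. */
static int fixup_mode_clock_khz(int clock_khz)
{
	long rate;

	/* 1. Try the exact rate first. */
	rate = mock_clk_round_rate((long)clock_khz * 1000);

	/* 2. Only if the exact rate is unreachable, add the 999 Hz fudge. */
	if (rate / 1000 != clock_khz)
		rate = mock_clk_round_rate((long)clock_khz * 1000 + 999);

	/* 3./4. Round up to kHz (DIV_ROUND_UP) and store that as the clock. */
	return (int)((rate + 999) / 1000);
}

int main(void)
{
	/* 148500 kHz is 11000 * 13.5 kHz, so the exact path is taken. */
	printf("148500 -> %d\n", fixup_mode_clock_khz(148500));
	/* 60000 kHz is not a multiple of 13.5 kHz, so the fallback runs. */
	printf("60000  -> %d\n", fixup_mode_clock_khz(60000));
	return 0;
}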
index f75fb157f2ff757da981031f438f80afda33d355..016b877051dabfc3b1b9eddc7189cb815896c7f1 100644 (file)
@@ -216,11 +216,13 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
                goto err_disable_clk_tmds;
        }
 
+       ret = sun8i_hdmi_phy_init(hdmi->phy);
+       if (ret)
+               goto err_disable_clk_tmds;
+
        drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs);
        drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
-       sun8i_hdmi_phy_init(hdmi->phy);
-
        plat_data->mode_valid = hdmi->quirks->mode_valid;
        plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe;
        sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
@@ -262,6 +264,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
        struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
 
        dw_hdmi_unbind(hdmi->hdmi);
+       sun8i_hdmi_phy_deinit(hdmi->phy);
        clk_disable_unprepare(hdmi->clk_tmds);
        reset_control_assert(hdmi->rst_ctrl);
        gpiod_set_value(hdmi->ddc_en, 0);
index 74f6ed0e25709f6ae3bbfebb430ae89bf75534cb..bffe1b9cd3dcb435f019d25668cd3e9c02beaf2c 100644 (file)
@@ -169,6 +169,7 @@ struct sun8i_hdmi_phy {
        struct clk                      *clk_phy;
        struct clk                      *clk_pll0;
        struct clk                      *clk_pll1;
+       struct device                   *dev;
        unsigned int                    rcal;
        struct regmap                   *regs;
        struct reset_control            *rst_phy;
@@ -205,7 +206,8 @@ encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
 
 int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
 
-void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy);
 void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
                            struct dw_hdmi_plat_data *plat_data);
 
index c9239708d398cdaa85ebffba7ca4a41ecba5a760..b64d93da651d22d03e74057018cea28d37004a02 100644 (file)
@@ -506,9 +506,60 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
        phy->rcal = (val & SUN8I_HDMI_PHY_ANA_STS_RCAL_MASK) >> 2;
 }
 
-void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
+int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
 {
+       int ret;
+
+       ret = reset_control_deassert(phy->rst_phy);
+       if (ret) {
+               dev_err(phy->dev, "Cannot deassert phy reset control: %d\n", ret);
+               return ret;
+       }
+
+       ret = clk_prepare_enable(phy->clk_bus);
+       if (ret) {
+               dev_err(phy->dev, "Cannot enable bus clock: %d\n", ret);
+               goto err_assert_rst_phy;
+       }
+
+       ret = clk_prepare_enable(phy->clk_mod);
+       if (ret) {
+               dev_err(phy->dev, "Cannot enable mod clock: %d\n", ret);
+               goto err_disable_clk_bus;
+       }
+
+       if (phy->variant->has_phy_clk) {
+               ret = sun8i_phy_clk_create(phy, phy->dev,
+                                          phy->variant->has_second_pll);
+               if (ret) {
+                       dev_err(phy->dev, "Couldn't create the PHY clock\n");
+                       goto err_disable_clk_mod;
+               }
+
+               clk_prepare_enable(phy->clk_phy);
+       }
+
        phy->variant->phy_init(phy);
+
+       return 0;
+
+err_disable_clk_mod:
+       clk_disable_unprepare(phy->clk_mod);
+err_disable_clk_bus:
+       clk_disable_unprepare(phy->clk_bus);
+err_assert_rst_phy:
+       reset_control_assert(phy->rst_phy);
+
+       return ret;
+}
+
+void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy)
+{
+       clk_disable_unprepare(phy->clk_mod);
+       clk_disable_unprepare(phy->clk_bus);
+       clk_disable_unprepare(phy->clk_phy);
+
+       reset_control_assert(phy->rst_phy);
 }
 
 void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
@@ -638,6 +689,7 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        phy->variant = (struct sun8i_hdmi_phy_variant *)match->data;
+       phy->dev = dev;
 
        ret = of_address_to_resource(node, 0, &res);
        if (ret) {
@@ -696,47 +748,10 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
                goto err_put_clk_pll1;
        }
 
-       ret = reset_control_deassert(phy->rst_phy);
-       if (ret) {
-               dev_err(dev, "Cannot deassert phy reset control: %d\n", ret);
-               goto err_put_rst_phy;
-       }
-
-       ret = clk_prepare_enable(phy->clk_bus);
-       if (ret) {
-               dev_err(dev, "Cannot enable bus clock: %d\n", ret);
-               goto err_deassert_rst_phy;
-       }
-
-       ret = clk_prepare_enable(phy->clk_mod);
-       if (ret) {
-               dev_err(dev, "Cannot enable mod clock: %d\n", ret);
-               goto err_disable_clk_bus;
-       }
-
-       if (phy->variant->has_phy_clk) {
-               ret = sun8i_phy_clk_create(phy, dev,
-                                          phy->variant->has_second_pll);
-               if (ret) {
-                       dev_err(dev, "Couldn't create the PHY clock\n");
-                       goto err_disable_clk_mod;
-               }
-
-               clk_prepare_enable(phy->clk_phy);
-       }
-
        platform_set_drvdata(pdev, phy);
 
        return 0;
 
-err_disable_clk_mod:
-       clk_disable_unprepare(phy->clk_mod);
-err_disable_clk_bus:
-       clk_disable_unprepare(phy->clk_bus);
-err_deassert_rst_phy:
-       reset_control_assert(phy->rst_phy);
-err_put_rst_phy:
-       reset_control_put(phy->rst_phy);
 err_put_clk_pll1:
        clk_put(phy->clk_pll1);
 err_put_clk_pll0:
@@ -753,12 +768,6 @@ static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
 {
        struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
 
-       clk_disable_unprepare(phy->clk_mod);
-       clk_disable_unprepare(phy->clk_bus);
-       clk_disable_unprepare(phy->clk_phy);
-
-       reset_control_assert(phy->rst_phy);
-
        reset_control_put(phy->rst_phy);
 
        clk_put(phy->clk_pll0);
index 16c7aabb94d3722d14dffa1971153a26703e47c6..a29d64f8756354fcc84db6f812a1a24906928b37 100644 (file)
@@ -1845,7 +1845,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
                                   bool prepare_bandwidth_transition)
 {
        const struct tegra_plane_state *old_tegra_state, *new_tegra_state;
-       const struct tegra_dc_state *old_dc_state, *new_dc_state;
        u32 i, new_avg_bw, old_avg_bw, new_peak_bw, old_peak_bw;
        const struct drm_plane_state *old_plane_state;
        const struct drm_crtc_state *old_crtc_state;
@@ -1858,8 +1857,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
                return;
 
        old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
-       old_dc_state = to_const_dc_state(old_crtc_state);
-       new_dc_state = to_const_dc_state(crtc->state);
 
        if (!crtc->state->active) {
                if (!old_crtc_state->active)
index f0cb691852a1c91335466d3e1e834c45ace668cd..40378308d527a1369ed0f19e644e506a8b45e145 100644 (file)
@@ -35,12 +35,6 @@ static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state)
        return NULL;
 }
 
-static inline const struct tegra_dc_state *
-to_const_dc_state(const struct drm_crtc_state *state)
-{
-       return to_dc_state((struct drm_crtc_state *)state);
-}
-
 struct tegra_dc_stats {
        unsigned long frames;
        unsigned long vblank;
index dc16a24f4dbe2df287fc296d5381686b0dc41f14..690a339c52ec6efebd6cbd5f5eff9529e26d5c10 100644 (file)
@@ -222,7 +222,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
                mapping->iova = sg_dma_address(mapping->sgt->sgl);
        }
 
-       mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->size;
+       mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;
 
        err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
                       GFP_KERNEL);
index cb38b1a17b09852d1a509964b5608eebb47129fc..82cbb29a05aa3904d7adc4d22933db989d05b04d 100644 (file)
@@ -383,7 +383,8 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
        else
                gfp_flags |= GFP_HIGHUSER;
 
-       for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
+       for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
+            num_pages;
             order = min_t(unsigned int, order, __fls(num_pages))) {
                bool apply_caching = false;
                struct ttm_pool_type *pt;
index 4a11150431140a10a15cb9cb93a43c69423a2548..ed8a4b7f8b6e28b903d3fc0a7543aad9035e2d92 100644 (file)
@@ -167,8 +167,6 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
        struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
        bool connected = false;
 
-       WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
-
        if (vc4_hdmi->hpd_gpio &&
            gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
                connected = true;
@@ -189,12 +187,10 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
                        }
                }
 
-               pm_runtime_put(&vc4_hdmi->pdev->dev);
                return connector_status_connected;
        }
 
        cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
-       pm_runtime_put(&vc4_hdmi->pdev->dev);
        return connector_status_disconnected;
 }
 
@@ -436,7 +432,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
        struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
        struct drm_connector *connector = &vc4_hdmi->connector;
        struct drm_connector_state *cstate = connector->state;
-       struct drm_crtc *crtc = cstate->crtc;
+       struct drm_crtc *crtc = encoder->crtc;
        const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
        union hdmi_infoframe frame;
        int ret;
@@ -541,11 +537,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
 
 static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
 {
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
-       struct drm_connector *connector = &vc4_hdmi->connector;
-       struct drm_connector_state *cstate = connector->state;
-       struct drm_crtc *crtc = cstate->crtc;
-       struct drm_display_mode *mode = &crtc->state->adjusted_mode;
 
        if (!vc4_hdmi_supports_scrambling(encoder, mode))
                return;
@@ -566,18 +559,17 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
 static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
 {
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
-       struct drm_connector *connector = &vc4_hdmi->connector;
-       struct drm_connector_state *cstate = connector->state;
+       struct drm_crtc *crtc = encoder->crtc;
 
        /*
-        * At boot, connector->state will be NULL. Since we don't know the
+        * At boot, encoder->crtc will be NULL. Since we don't know the
         * state of the scrambler and in order to avoid any
         * inconsistency, let's disable it all the time.
         */
-       if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode))
+       if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode))
                return;
 
-       if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode))
+       if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode))
                return;
 
        if (delayed_work_pending(&vc4_hdmi->scrambling_work))
@@ -635,6 +627,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
                vc4_hdmi->variant->phy_disable(vc4_hdmi);
 
        clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
+       clk_disable_unprepare(vc4_hdmi->hsm_clock);
        clk_disable_unprepare(vc4_hdmi->pixel_clock);
 
        ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@@ -898,9 +891,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
                vc4_hdmi_encoder_get_connector_state(encoder, state);
        struct vc4_hdmi_connector_state *vc4_conn_state =
                conn_state_to_vc4_hdmi_conn_state(conn_state);
-       struct drm_crtc_state *crtc_state =
-               drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
        unsigned long bvb_rate, pixel_rate, hsm_rate;
        int ret;
@@ -947,6 +938,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
                return;
        }
 
+       ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+       if (ret) {
+               DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
+               clk_disable_unprepare(vc4_hdmi->pixel_clock);
+               return;
+       }
+
        vc4_hdmi_cec_update_clk_div(vc4_hdmi);
 
        if (pixel_rate > 297000000)
@@ -959,6 +957,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
        ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
        if (ret) {
                DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
+               clk_disable_unprepare(vc4_hdmi->hsm_clock);
                clk_disable_unprepare(vc4_hdmi->pixel_clock);
                return;
        }
@@ -966,6 +965,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
        ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
        if (ret) {
                DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
+               clk_disable_unprepare(vc4_hdmi->hsm_clock);
                clk_disable_unprepare(vc4_hdmi->pixel_clock);
                return;
        }
@@ -985,11 +985,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
 static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
                                             struct drm_atomic_state *state)
 {
-       struct drm_connector_state *conn_state =
-               vc4_hdmi_encoder_get_connector_state(encoder, state);
-       struct drm_crtc_state *crtc_state =
-               drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 
@@ -1012,11 +1008,7 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
 static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
                                              struct drm_atomic_state *state)
 {
-       struct drm_connector_state *conn_state =
-               vc4_hdmi_encoder_get_connector_state(encoder, state);
-       struct drm_crtc_state *crtc_state =
-               drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
        struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
        bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -1204,8 +1196,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
 
 static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
 {
-       struct drm_connector *connector = &vc4_hdmi->connector;
-       struct drm_crtc *crtc = connector->state->crtc;
+       struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
+       struct drm_crtc *crtc = encoder->crtc;
        const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
        u32 n, cts;
        u64 tmp;
@@ -1238,13 +1230,13 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
 static int vc4_hdmi_audio_startup(struct device *dev, void *data)
 {
        struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-       struct drm_connector *connector = &vc4_hdmi->connector;
+       struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
 
        /*
         * If the HDMI encoder hasn't probed, or the encoder is
         * currently in DVI mode, treat the codec dai as missing.
         */
-       if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
+       if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
                                VC4_HDMI_RAM_PACKET_ENABLE))
                return -ENODEV;
 
@@ -1403,14 +1395,6 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
        return 0;
 }
 
-static const struct snd_soc_dapm_widget vc4_hdmi_audio_widgets[] = {
-       SND_SOC_DAPM_OUTPUT("TX"),
-};
-
-static const struct snd_soc_dapm_route vc4_hdmi_audio_routes[] = {
-       { "TX", NULL, "Playback" },
-};
-
 static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
        .name = "vc4-hdmi-cpu-dai-component",
 };
@@ -2114,29 +2098,6 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int vc4_hdmi_runtime_suspend(struct device *dev)
-{
-       struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-
-       clk_disable_unprepare(vc4_hdmi->hsm_clock);
-
-       return 0;
-}
-
-static int vc4_hdmi_runtime_resume(struct device *dev)
-{
-       struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-       int ret;
-
-       ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-#endif
-
 static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2391,18 +2352,11 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
        {}
 };
 
-static const struct dev_pm_ops vc4_hdmi_pm_ops = {
-       SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
-                          vc4_hdmi_runtime_resume,
-                          NULL)
-};
-
 struct platform_driver vc4_hdmi_driver = {
        .probe = vc4_hdmi_dev_probe,
        .remove = vc4_hdmi_dev_remove,
        .driver = {
                .name = "vc4_hdmi",
                .of_match_table = vc4_hdmi_dt_match,
-               .pm = &vc4_hdmi_pm_ops,
        },
 };
index 6941add95d0faddcf3b9412b6d5091d2b73e4dfa..ecab72882192903ef400da94cc0d95f8b7a48d31 100644 (file)
@@ -15,7 +15,7 @@
 #include "intr.h"
 #include "syncpt.h"
 
-DEFINE_SPINLOCK(lock);
+static DEFINE_SPINLOCK(lock);
 
 struct host1x_syncpt_fence {
        struct dma_fence base;
@@ -152,8 +152,10 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
                return ERR_PTR(-ENOMEM);
 
        fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
-       if (!fence->waiter)
+       if (!fence->waiter) {
+               kfree(fence);
                return ERR_PTR(-ENOMEM);
+       }
 
        fence->sp = sp;
        fence->threshold = threshold;
index 79b138fd426197a2ba76f29e585c303b49537644..05c007b213f245ac202ab4399cd3e7c16dd4aab5 100644 (file)
@@ -255,13 +255,13 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
        if (!privdata->cl_data)
                return -ENOMEM;
 
-       rc = devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
+       mp2_select_ops(privdata);
+
+       rc = amd_sfh_hid_client_init(privdata);
        if (rc)
                return rc;
 
-       mp2_select_ops(privdata);
-
-       return amd_sfh_hid_client_init(privdata);
+       return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
 }
 
 static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
index 833fcf07ff35ad5bce4310def979e45e83983bc6..6ccfa0cb997aba82a1b3728c26ed34516b0cc828 100644 (file)
@@ -336,12 +336,19 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
 
 /*
  * MacBook JIS keyboard has wrong logical maximum
+ * Magic Keyboard JIS has wrong logical maximum
  */
 static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
        struct apple_sc *asc = hid_get_drvdata(hdev);
 
+       if (*rsize >= 71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {
+               hid_info(hdev,
+                        "fixing up Magic Keyboard JIS report descriptor\n");
+               rdesc[64] = rdesc[70] = 0xe7;
+       }
+
        if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
                        rdesc[53] == 0x65 && rdesc[59] == 0x65) {
                hid_info(hdev,
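
The Magic Keyboard JIS fixup follows the usual report-descriptor patching pattern: check that the descriptor is long enough, check that the bytes at the expected offsets hold the value being corrected, and only then rewrite them in place. A minimal standalone sketch of that check-then-patch pattern is below; the descriptor bytes and offsets are invented for the demo and do not correspond to the real Apple descriptor.

#include <stdio.h>

typedef unsigned char u8;

/*
 * Verify length and expected contents before patching.  Offsets (4, 7)
 * and values are made up; the real driver checks rdesc[64]/rdesc[70]
 * for 0x65 and writes 0xe7.
 */
static void fixup(u8 *rdesc, unsigned int rsize)
{
	if (rsize >= 8 && rdesc[4] == 0x65 && rdesc[7] == 0x65) {
		printf("fixing up descriptor\n");
		rdesc[4] = rdesc[7] = 0xe7;
	}
}

int main(void)
{
	u8 rdesc[8] = { 0x05, 0x01, 0x09, 0x06, 0x65, 0x00, 0x00, 0x65 };

	fixup(rdesc, sizeof(rdesc));
	printf("rdesc[4]=0x%02x rdesc[7]=0x%02x\n", rdesc[4], rdesc[7]);
	return 0;
}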
index 0790fbd3fc9a26c8a4e7c518f541aac7bec5a10a..467d789f9bc2d3a0d846c661b618d1b640a63188 100644 (file)
@@ -56,15 +56,22 @@ static int betopff_init(struct hid_device *hid)
 {
        struct betopff_device *betopff;
        struct hid_report *report;
-       struct hid_input *hidinput =
-                       list_first_entry(&hid->inputs, struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int field_count = 0;
        int error;
        int i, j;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+
+       hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 95e0807878c7ea9137e4b29159bca278c761152b..d70cd3d7f583b25f5736e464f7e8bac09deb302c 100644 (file)
@@ -198,7 +198,9 @@ static int u2fzero_rng_read(struct hwrng *rng, void *data,
        }
 
        ret = u2fzero_recv(dev, &req, &resp);
-       if (ret < 0)
+
+       /* ignore errors or packets without data */
+       if (ret < offsetof(struct u2f_hid_msg, init.data))
                return 0;
 
        /* only take the minimum amount of data it is safe to take */
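
The new check compares the number of bytes received against offsetof(struct u2f_hid_msg, init.data), so negative error codes and packets shorter than the fixed header both fall through to "no data". A self-contained sketch of that offsetof-based minimum-length test is below; the struct layout is a hypothetical stand-in that only mimics the shape of the real message.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct u2f_hid_msg (layout for illustration only). */
struct hid_msg {
	uint32_t cid;
	struct {
		uint8_t cmd;
		uint8_t bcnth;
		uint8_t bcntl;
		uint8_t data[57];
	} init;
};

/* Returns how many payload bytes are usable, or 0 for errors/short packets. */
static size_t usable_payload(int recv_ret)
{
	/* Anything below the header size means an error (<0) or no data. */
	if (recv_ret < (int)offsetof(struct hid_msg, init.data))
		return 0;

	return (size_t)recv_ret - offsetof(struct hid_msg, init.data);
}

int main(void)
{
	printf("header size: %zu\n", offsetof(struct hid_msg, init.data));
	printf("recv=-5 -> %zu payload bytes\n", usable_payload(-5));
	printf("recv=7  -> %zu payload bytes\n", usable_payload(7));
	printf("recv=15 -> %zu payload bytes\n", usable_payload(15));
	return 0;
}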
index fd51769d0994ba5e189870fe782819412b6a77ef..33a6908995b1be7818d9e9ee0a4869573da67627 100644 (file)
@@ -4746,6 +4746,12 @@ static const struct wacom_features wacom_features_0x393 =
        { "Wacom Intuos Pro S", 31920, 19950, 8191, 63,
          INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7,
          .touch_max = 10 };
+static const struct wacom_features wacom_features_0x3c6 =
+       { "Wacom Intuos BT S", 15200, 9500, 4095, 63,
+         INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+static const struct wacom_features wacom_features_0x3c8 =
+       { "Wacom Intuos BT M", 21600, 13500, 4095, 63,
+         INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
 
 static const struct wacom_features wacom_features_HID_ANY_ID =
        { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -4919,6 +4925,8 @@ const struct hid_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x37A) },
        { USB_DEVICE_WACOM(0x37B) },
        { BT_DEVICE_WACOM(0x393) },
+       { BT_DEVICE_WACOM(0x3c6) },
+       { BT_DEVICE_WACOM(0x3c8) },
        { USB_DEVICE_WACOM(0x4001) },
        { USB_DEVICE_WACOM(0x4004) },
        { USB_DEVICE_WACOM(0x5000) },
index 2aee356840a2b480cb88ef569ef12b2d9da44a59..314015d9e912d2e8693e54e5de4dcd650aec1dcc 100644 (file)
@@ -245,6 +245,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
        mutex_unlock(&ring_info->ring_buffer_mutex);
 
        kfree(ring_info->pkt_buffer);
+       ring_info->pkt_buffer = NULL;
        ring_info->pkt_buffer_size = 0;
 }
 
index 38bc35ac8135eee0effae59e0512c9f78d47ba5a..3618a924e78e4a47dfb882c0c824e9a3e9c37b46 100644 (file)
@@ -362,12 +362,6 @@ static const struct hwmon_channel_info *k10temp_info[] = {
                           HWMON_T_INPUT | HWMON_T_LABEL,
                           HWMON_T_INPUT | HWMON_T_LABEL,
                           HWMON_T_INPUT | HWMON_T_LABEL),
-       HWMON_CHANNEL_INFO(in,
-                          HWMON_I_INPUT | HWMON_I_LABEL,
-                          HWMON_I_INPUT | HWMON_I_LABEL),
-       HWMON_CHANNEL_INFO(curr,
-                          HWMON_C_INPUT | HWMON_C_LABEL,
-                          HWMON_C_INPUT | HWMON_C_LABEL),
        NULL
 };
 
index bb3f7749a0b001cb21a668f62de984363671575b..5423466de697af5bf704217f26eaccea058f404d 100644 (file)
@@ -989,8 +989,12 @@ static int ltc2947_setup(struct ltc2947_data *st)
                return ret;
 
        /* check external clock presence */
-       extclk = devm_clk_get(st->dev, NULL);
-       if (!IS_ERR(extclk)) {
+       extclk = devm_clk_get_optional(st->dev, NULL);
+       if (IS_ERR(extclk))
+               return dev_err_probe(st->dev, PTR_ERR(extclk),
+                                    "Failed to get external clock\n");
+
+       if (extclk) {
                unsigned long rate_hz;
                u8 pre = 0, div, tbctl;
                u64 aux;
index 116681fde33d2de2aeea021f3edc10e921e76eb5..89fe7b9fe26be3e837f879b2dad1a97fba6de9ea 100644 (file)
@@ -315,8 +315,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
 {
        struct mlxreg_fan *fan = cdev->devdata;
        unsigned long cur_state;
+       int i, config = 0;
        u32 regval;
-       int i;
        int err;
 
        /*
@@ -329,6 +329,12 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
         * overwritten.
         */
        if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
+               /*
+                * This is a configuration change, which is only supported through sysfs.
+                * For a configuration change, a non-zero value is returned to avoid a
+                * thermal statistics update.
+                */
+               config = 1;
                state -= MLXREG_FAN_MAX_STATE;
                for (i = 0; i < state; i++)
                        fan->cooling_levels[i] = state;
@@ -343,7 +349,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
 
                cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
                if (state < cur_state)
-                       return 0;
+                       return config;
 
                state = cur_state;
        }
@@ -359,7 +365,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
                dev_err(fan->dev, "Failed to write PWM duty\n");
                return err;
        }
-       return 0;
+       return config;
 }
 
 static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
index 0d68a78be980db1488de7140b9889807a4a5626e..ae664613289c45c81b3c6a40e374075fc51a3032 100644 (file)
@@ -340,18 +340,11 @@ static ssize_t occ_show_temp_10(struct device *dev,
                if (val == OCC_TEMP_SENSOR_FAULT)
                        return -EREMOTEIO;
 
-               /*
-                * VRM doesn't return temperature, only alarm bit. This
-                * attribute maps to tempX_alarm instead of tempX_input for
-                * VRM
-                */
-               if (temp->fru_type != OCC_FRU_TYPE_VRM) {
-                       /* sensor not ready */
-                       if (val == 0)
-                               return -EAGAIN;
+               /* sensor not ready */
+               if (val == 0)
+                       return -EAGAIN;
 
-                       val *= 1000;
-               }
+               val *= 1000;
                break;
        case 2:
                val = temp->fru_type;
@@ -886,7 +879,7 @@ static int occ_setup_sensor_attrs(struct occ *occ)
                                             0, i);
                attr++;
 
-               if (sensors->temp.version > 1 &&
+               if (sensors->temp.version == 2 &&
                    temp->fru_type == OCC_FRU_TYPE_VRM) {
                        snprintf(attr->name, sizeof(attr->name),
                                 "temp%d_alarm", s);
index df712ce4b164dbe4c5a841616804c2a47d9a0df5..53f7d1418bc9077d05f624c1475611e31bcbb87e 100644 (file)
@@ -171,8 +171,14 @@ static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
                cmd = CFFPS_SN_CMD;
                break;
        case CFFPS_DEBUGFS_MAX_POWER_OUT:
-               rc = i2c_smbus_read_word_swapped(psu->client,
-                                                CFFPS_MAX_POWER_OUT_CMD);
+               if (psu->version == cffps1) {
+                       rc = i2c_smbus_read_word_swapped(psu->client,
+                                       CFFPS_MAX_POWER_OUT_CMD);
+               } else {
+                       rc = i2c_smbus_read_word_data(psu->client,
+                                       CFFPS_MAX_POWER_OUT_CMD);
+               }
+
                if (rc < 0)
                        return rc;
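
The version check matters because i2c_smbus_read_word_data() returns the 16-bit value with the low byte first (SMBus order), while the _swapped variant byte-swaps the result for devices that transmit the high byte first; per the diff, only the version 1 supplies need the swap here. A tiny sketch of what that swap amounts to is below; the 0xe803 sample value is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* The swapped read differs from the plain one only by this byte swap. */
static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	/* Bytes as they arrive on the wire: 0x03 then 0xe8. */
	uint16_t as_word_data = 0xe803;                  /* read_word_data result */
	uint16_t as_word_swapped = swab16(as_word_data); /* swapped variant       */

	printf("word_data:    0x%04x (%u)\n", as_word_data, as_word_data);
	printf("word_swapped: 0x%04x (%u)\n", as_word_swapped, as_word_swapped);
	return 0;
}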
 
index eb94bd5f4e2a883925c55e47615ac543b00e07f6..51986adfbf47c56f653576f027ccbc4913e214be 100644 (file)
@@ -54,7 +54,7 @@
 
 #define MP2975_RAIL2_FUNC      (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | \
                                 PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | \
-                                PMBUS_PHASE_VIRTUAL)
+                                PMBUS_HAVE_POUT | PMBUS_PHASE_VIRTUAL)
 
 struct mp2975_data {
        struct pmbus_driver_info info;
index ede66ea6a730dd82787c05cec909c920d4ba5d2c..b963a369c5ab3069aa55568f706d4dc1af4aeeeb 100644 (file)
@@ -100,71 +100,81 @@ struct tmp421_data {
        s16 temp[4];
 };
 
-static int temp_from_s16(s16 reg)
+static int temp_from_raw(u16 reg, bool extended)
 {
        /* Mask out status bits */
        int temp = reg & ~0xf;
 
-       return (temp * 1000 + 128) / 256;
-}
-
-static int temp_from_u16(u16 reg)
-{
-       /* Mask out status bits */
-       int temp = reg & ~0xf;
-
-       /* Add offset for extended temperature range. */
-       temp -= 64 * 256;
+       if (extended)
+               temp = temp - 64 * 256;
+       else
+               temp = (s16)temp;
 
-       return (temp * 1000 + 128) / 256;
+       return DIV_ROUND_CLOSEST(temp * 1000, 256);
 }
 
-static struct tmp421_data *tmp421_update_device(struct device *dev)
+static int tmp421_update_device(struct tmp421_data *data)
 {
-       struct tmp421_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
+       int ret = 0;
        int i;
 
        mutex_lock(&data->update_lock);
 
        if (time_after(jiffies, data->last_updated + (HZ / 2)) ||
            !data->valid) {
-               data->config = i2c_smbus_read_byte_data(client,
-                       TMP421_CONFIG_REG_1);
+               ret = i2c_smbus_read_byte_data(client, TMP421_CONFIG_REG_1);
+               if (ret < 0)
+                       goto exit;
+               data->config = ret;
 
                for (i = 0; i < data->channels; i++) {
-                       data->temp[i] = i2c_smbus_read_byte_data(client,
-                               TMP421_TEMP_MSB[i]) << 8;
-                       data->temp[i] |= i2c_smbus_read_byte_data(client,
-                               TMP421_TEMP_LSB[i]);
+                       ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_MSB[i]);
+                       if (ret < 0)
+                               goto exit;
+                       data->temp[i] = ret << 8;
+
+                       ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_LSB[i]);
+                       if (ret < 0)
+                               goto exit;
+                       data->temp[i] |= ret;
                }
                data->last_updated = jiffies;
                data->valid = 1;
        }
 
+exit:
        mutex_unlock(&data->update_lock);
 
-       return data;
+       if (ret < 0) {
+               data->valid = 0;
+               return ret;
+       }
+
+       return 0;
 }
 
 static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
                       u32 attr, int channel, long *val)
 {
-       struct tmp421_data *tmp421 = tmp421_update_device(dev);
+       struct tmp421_data *tmp421 = dev_get_drvdata(dev);
+       int ret = 0;
+
+       ret = tmp421_update_device(tmp421);
+       if (ret)
+               return ret;
 
        switch (attr) {
        case hwmon_temp_input:
-               if (tmp421->config & TMP421_CONFIG_RANGE)
-                       *val = temp_from_u16(tmp421->temp[channel]);
-               else
-                       *val = temp_from_s16(tmp421->temp[channel]);
+               *val = temp_from_raw(tmp421->temp[channel],
+                                    tmp421->config & TMP421_CONFIG_RANGE);
                return 0;
        case hwmon_temp_fault:
                /*
-                * The OPEN bit signals a fault. This is bit 0 of the temperature
-                * register (low byte).
+                * Any of the OPEN or /PVLD bits indicates a hardware malfunction
+                * and the conversion result may be incorrect.
                 */
-               *val = tmp421->temp[channel] & 0x01;
+               *val = !!(tmp421->temp[channel] & 0x03);
                return 0;
        default:
                return -EOPNOTSUPP;
@@ -177,9 +187,6 @@ static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
 {
        switch (attr) {
        case hwmon_temp_fault:
-               if (channel == 0)
-                       return 0;
-               return 0444;
        case hwmon_temp_input:
                return 0444;
        default:
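
The consolidated temp_from_raw() masks the four status bits, subtracts the 64 °C offset in extended range or sign-extends in standard range, and scales the 1/256 °C steps to millidegrees with round-to-nearest division. A standalone re-implementation with a few worked values is below; the DIV_ROUND_CLOSEST stand-in is simplified to signed operands with a positive divisor.

#include <stdint.h>
#include <stdio.h>

/* Round-to-nearest signed division (divisor > 0), like DIV_ROUND_CLOSEST. */
static int div_round_closest(int x, int d)
{
	return x >= 0 ? (x + d / 2) / d : (x - d / 2) / d;
}

/*
 * Mirror of the new temp_from_raw(): mask the four status bits, apply the
 * 64 degC extended-range offset or sign-extend, then convert 1/256 degC
 * steps to millidegrees Celsius.
 */
static int temp_from_raw(uint16_t reg, int extended)
{
	int temp = reg & ~0xf;

	if (extended)
		temp = temp - 64 * 256;
	else
		temp = (int16_t)temp;

	return div_round_closest(temp * 1000, 256);
}

int main(void)
{
	printf("std 0x1910 -> %d mC\n", temp_from_raw(0x1910, 0)); /* 25063 */
	printf("ext 0x5910 -> %d mC\n", temp_from_raw(0x5910, 1)); /* 25063 */
	printf("std 0xff00 -> %d mC\n", temp_from_raw(0xff00, 0)); /* -1000 */
	return 0;
}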
index 37b25a1474c4633ba40bf4a5fd1d435983357519..3c1be2c11fdf0252daf6bc66fc751874227229b9 100644 (file)
@@ -273,9 +273,6 @@ struct w83791d_data {
        char valid;                     /* !=0 if following fields are valid */
        unsigned long last_updated;     /* In jiffies */
 
-       /* array of 2 pointers to subclients */
-       struct i2c_client *lm75[2];
-
        /* volts */
        u8 in[NUMBER_OF_VIN];           /* Register value */
        u8 in_max[NUMBER_OF_VIN];       /* Register value */
@@ -1257,7 +1254,6 @@ static const struct attribute_group w83791d_group_fanpwm45 = {
 static int w83791d_detect_subclients(struct i2c_client *client)
 {
        struct i2c_adapter *adapter = client->adapter;
-       struct w83791d_data *data = i2c_get_clientdata(client);
        int address = client->addr;
        int i, id;
        u8 val;
@@ -1280,22 +1276,19 @@ static int w83791d_detect_subclients(struct i2c_client *client)
        }
 
        val = w83791d_read(client, W83791D_REG_I2C_SUBADDR);
-       if (!(val & 0x08))
-               data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
-                                                         0x48 + (val & 0x7));
-       if (!(val & 0x80)) {
-               if (!IS_ERR(data->lm75[0]) &&
-                               ((val & 0x7) == ((val >> 4) & 0x7))) {
-                       dev_err(&client->dev,
-                               "duplicate addresses 0x%x, "
-                               "use force_subclient\n",
-                               data->lm75[0]->addr);
-                       return -ENODEV;
-               }
-               data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
-                                                         0x48 + ((val >> 4) & 0x7));
+
+       if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
+               dev_err(&client->dev,
+                       "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
+               return -ENODEV;
        }
 
+       if (!(val & 0x08))
+               devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (val & 0x7));
+
+       if (!(val & 0x80))
+               devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
+
        return 0;
 }
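
The refactor makes the duplicate check independent of dummy-device creation: the low nibble of the subaddress register selects 0x48 + (val & 7), the high nibble selects 0x48 + ((val >> 4) & 7), bits 3 and 7 disable the respective slot, and the two decoded addresses must differ when both slots are enabled. A small standalone decode of that register, including the duplicate case the new code rejects up front, is sketched below; the sample register values are arbitrary.

#include <stdio.h>

/* Decode the two optional sub-client addresses from the subaddr register. */
static void decode_subclients(unsigned char val)
{
	/* Same up-front rejection as the refactored driver code. */
	if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
		printf("0x%02x: duplicate address 0x%02x, rejected\n",
		       val, 0x48 + (val & 0x7));
		return;
	}

	if (!(val & 0x08))
		printf("0x%02x: subclient 0 at 0x%02x\n", val, 0x48 + (val & 0x7));
	else
		printf("0x%02x: subclient 0 disabled\n", val);

	if (!(val & 0x80))
		printf("0x%02x: subclient 1 at 0x%02x\n", val, 0x48 + ((val >> 4) & 0x7));
	else
		printf("0x%02x: subclient 1 disabled\n", val);
}

int main(void)
{
	decode_subclients(0x21);	/* two distinct addresses: 0x49 and 0x4a */
	decode_subclients(0x11);	/* duplicate: both decode to 0x49 */
	decode_subclients(0x8f);	/* both slots disabled */
	return 0;
}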
 
index abd5c3a722b91a4aa9c6a1ebb2d24a376d00b801..1f175f3813506ddd074650a09eb5970b32ddfbc7 100644 (file)
@@ -264,9 +264,6 @@ struct w83792d_data {
        char valid;             /* !=0 if following fields are valid */
        unsigned long last_updated;     /* In jiffies */
 
-       /* array of 2 pointers to subclients */
-       struct i2c_client *lm75[2];
-
        u8 in[9];               /* Register value */
        u8 in_max[9];           /* Register value */
        u8 in_min[9];           /* Register value */
@@ -927,7 +924,6 @@ w83792d_detect_subclients(struct i2c_client *new_client)
        int address = new_client->addr;
        u8 val;
        struct i2c_adapter *adapter = new_client->adapter;
-       struct w83792d_data *data = i2c_get_clientdata(new_client);
 
        id = i2c_adapter_id(adapter);
        if (force_subclients[0] == id && force_subclients[1] == address) {
@@ -946,21 +942,19 @@ w83792d_detect_subclients(struct i2c_client *new_client)
        }
 
        val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
-       if (!(val & 0x08))
-               data->lm75[0] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
-                                                         0x48 + (val & 0x7));
-       if (!(val & 0x80)) {
-               if (!IS_ERR(data->lm75[0]) &&
-                       ((val & 0x7) == ((val >> 4) & 0x7))) {
-                       dev_err(&new_client->dev,
-                               "duplicate addresses 0x%x, use force_subclient\n",
-                               data->lm75[0]->addr);
-                       return -ENODEV;
-               }
-               data->lm75[1] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
-                                                         0x48 + ((val >> 4) & 0x7));
+
+       if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
+               dev_err(&new_client->dev,
+                       "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
+               return -ENODEV;
        }
 
+       if (!(val & 0x08))
+               devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + (val & 0x7));
+
+       if (!(val & 0x80))
+               devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
+
        return 0;
 }
 
index e7d0484eabe4c2a8b59470c69b8aac4def755c5a..1d2854de1cfc961b705516fc3f50b90865eac64e 100644 (file)
@@ -202,7 +202,6 @@ static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
 }
 
 struct w83793_data {
-       struct i2c_client *lm75[2];
        struct device *hwmon_dev;
        struct mutex update_lock;
        char valid;                     /* !=0 if following fields are valid */
@@ -1566,7 +1565,6 @@ w83793_detect_subclients(struct i2c_client *client)
        int address = client->addr;
        u8 tmp;
        struct i2c_adapter *adapter = client->adapter;
-       struct w83793_data *data = i2c_get_clientdata(client);
 
        id = i2c_adapter_id(adapter);
        if (force_subclients[0] == id && force_subclients[1] == address) {
@@ -1586,21 +1584,19 @@ w83793_detect_subclients(struct i2c_client *client)
        }
 
        tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
-       if (!(tmp & 0x08))
-               data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
-                                                         0x48 + (tmp & 0x7));
-       if (!(tmp & 0x80)) {
-               if (!IS_ERR(data->lm75[0])
-                   && ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
-                       dev_err(&client->dev,
-                               "duplicate addresses 0x%x, "
-                               "use force_subclients\n", data->lm75[0]->addr);
-                       return -ENODEV;
-               }
-               data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
-                                                         0x48 + ((tmp >> 4) & 0x7));
+
+       if (!(tmp & 0x88) && (tmp & 0x7) == ((tmp >> 4) & 0x7)) {
+               dev_err(&client->dev,
+                       "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (tmp & 0x7));
+               return -ENODEV;
        }
 
+       if (!(tmp & 0x08))
+               devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (tmp & 0x7));
+
+       if (!(tmp & 0x80))
+               devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((tmp >> 4) & 0x7));
+
        return 0;
 }
 
index fc0760f55c53fab7a77d8b94d86dcae9bb653797..43054568430f2ee9d09ec428c6ae3282ae455357 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 
 #include "coresight-config.h"
 #include "coresight-etm-perf.h"
index 4e0b7c2882cede7b7d2a30c3a77b1bcba18f378c..015e11c4663f3b2df0cb05764f1fd3c887b4ca46 100644 (file)
@@ -49,7 +49,7 @@
 #define MLXCPLD_LPCI2C_NACK_IND                2
 
 #define MLXCPLD_I2C_FREQ_1000KHZ_SET   0x04
-#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0f
+#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0c
 #define MLXCPLD_I2C_FREQ_100KHZ_SET    0x42
 
 enum mlxcpld_i2c_frequency {
@@ -495,7 +495,7 @@ mlxcpld_i2c_set_frequency(struct mlxcpld_i2c_priv *priv,
                return err;
 
        /* Set frequency only if it is not 100KHz, which is default. */
-       switch ((data->reg & data->mask) >> data->bit) {
+       switch ((regval & data->mask) >> data->bit) {
        case MLXCPLD_I2C_FREQ_1000KHZ:
                freq = MLXCPLD_I2C_FREQ_1000KHZ_SET;
                break;
index 477480d1de6bd10054009114381429949465af1f..7d4b3eb7077ad62e13db6ca2eda1b60521723232 100644 (file)
@@ -41,6 +41,8 @@
 #define I2C_HANDSHAKE_RST              0x0020
 #define I2C_FIFO_ADDR_CLR              0x0001
 #define I2C_DELAY_LEN                  0x0002
+#define I2C_ST_START_CON               0x8001
+#define I2C_FS_START_CON               0x1800
 #define I2C_TIME_CLR_VALUE             0x0000
 #define I2C_TIME_DEFAULT_VALUE         0x0003
 #define I2C_WRRD_TRANAC_VALUE          0x0002
@@ -480,6 +482,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
 {
        u16 control_reg;
        u16 intr_stat_reg;
+       u16 ext_conf_val;
 
        mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
        intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
@@ -518,8 +521,13 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
        if (i2c->dev_comp->ltiming_adjust)
                mtk_i2c_writew(i2c, i2c->ltiming_reg, OFFSET_LTIMING);
 
+       if (i2c->speed_hz <= I2C_MAX_STANDARD_MODE_FREQ)
+               ext_conf_val = I2C_ST_START_CON;
+       else
+               ext_conf_val = I2C_FS_START_CON;
+
        if (i2c->dev_comp->timing_adjust) {
-               mtk_i2c_writew(i2c, i2c->ac_timing.ext, OFFSET_EXT_CONF);
+               ext_conf_val = i2c->ac_timing.ext;
                mtk_i2c_writew(i2c, i2c->ac_timing.inter_clk_div,
                               OFFSET_CLOCK_DIV);
                mtk_i2c_writew(i2c, I2C_SCL_MIS_COMP_VALUE,
@@ -544,6 +552,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
                                       OFFSET_HS_STA_STO_AC_TIMING);
                }
        }
+       mtk_i2c_writew(i2c, ext_conf_val, OFFSET_EXT_CONF);
 
        /* If use i2c pin from PMIC mt6397 side, need set PATH_DIR first */
        if (i2c->have_pmic)
index aaeeacc121215004c5d41d71e8f703035a4a1e34..546cc935e035a57dcb65c2896216a408f3a4a705 100644 (file)
@@ -454,6 +454,7 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
                        break;
 
                i2c_acpi_register_device(adapter, adev, &info);
+               put_device(&adapter->dev);
                break;
        case ACPI_RECONFIG_DEVICE_REMOVE:
                if (!acpi_device_enumerated(adev))
index 0019f1ea7df27248766bc0ee699a34a60a038af4..f41db9e0249a71be2ea4f9a9fd29cc33d6a474ae 100644 (file)
@@ -738,7 +738,7 @@ static irqreturn_t fxls8962af_interrupt(int irq, void *p)
 
        if (reg & FXLS8962AF_INT_STATUS_SRC_BUF) {
                ret = fxls8962af_fifo_flush(indio_dev);
-               if (ret)
+               if (ret < 0)
                        return IRQ_NONE;
 
                return IRQ_HANDLED;
index ee8ed9481025d2d26f192a5fd0450e3350799a29..2121a812b0c3148054b241da5e8c0da4c7fc4db0 100644 (file)
@@ -293,6 +293,7 @@ static const struct ad_sigma_delta_info ad7192_sigma_delta_info = {
        .has_registers = true,
        .addr_shift = 3,
        .read_mask = BIT(6),
+       .irq_flags = IRQF_TRIGGER_FALLING,
 };
 
 static const struct ad_sd_calib_data ad7192_calib_arr[8] = {
index 42bb952f47388abc9ab34fe15db200b3258ba914..b6e8c8abf6f4cdac9d357c78630a9186b68f8cb8 100644 (file)
@@ -203,7 +203,7 @@ static const struct ad_sigma_delta_info ad7780_sigma_delta_info = {
        .set_mode = ad7780_set_mode,
        .postprocess_sample = ad7780_postprocess_sample,
        .has_registers = false,
-       .irq_flags = IRQF_TRIGGER_LOW,
+       .irq_flags = IRQF_TRIGGER_FALLING,
 };
 
 #define _AD7780_CHANNEL(_bits, _wordsize, _mask_all)           \
index ef3e2d3ecb0c6d015a7c3456a3e6e5a6947f3551..0e7ab3fb072a961ecb6a0aaa60f36fa6110f4045 100644 (file)
@@ -206,7 +206,7 @@ static const struct ad_sigma_delta_info ad7793_sigma_delta_info = {
        .has_registers = true,
        .addr_shift = 3,
        .read_mask = BIT(6),
-       .irq_flags = IRQF_TRIGGER_LOW,
+       .irq_flags = IRQF_TRIGGER_FALLING,
 };
 
 static const struct ad_sd_calib_data ad7793_calib_arr[6] = {
index 19efaa41bc344b6def9d03bc59efbe62c6e64d82..34ec0c28b2dff6b145445e1aabf5fc89c20ac363 100644 (file)
@@ -183,6 +183,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
 
        data = iio_priv(indio_dev);
        data->dev = &pdev->dev;
+       platform_set_drvdata(pdev, indio_dev);
 
        data->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(data->base))
index 655ab02d03d8496bf73cabb24898ba7a1b8c37db..b753658bb41ec0f6d5ecb58f9955607cfaae4b58 100644 (file)
@@ -103,7 +103,7 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
                        .sign = 'u',                                    \
                        .realbits = depth,                              \
                        .storagebits = 16,                              \
-                       .shift = 2,                                     \
+                       .shift = (depth == 10) ? 2 : 0,                 \
                        .endianness = IIO_BE,                           \
                },                                                      \
        }
@@ -142,7 +142,6 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
        MAX1027_V_CHAN(11, depth)
 
 #define MAX1X31_CHANNELS(depth)                        \
-       MAX1X27_CHANNELS(depth),                \
        MAX1X29_CHANNELS(depth),                \
        MAX1027_V_CHAN(12, depth),              \
        MAX1027_V_CHAN(13, depth),              \
index 79c1dd68b9092c2449967f12837e0a3314114f70..d4fccd52ef08ba147a3fe3844ea2e31fa4f6fb0a 100644 (file)
@@ -82,6 +82,10 @@ static const struct iio_chan_spec mt6577_auxadc_iio_channels[] = {
        MT6577_AUXADC_CHANNEL(15),
 };
 
+/* For Voltage calculation */
+#define VOLTAGE_FULL_RANGE  1500       /* VA voltage */
+#define AUXADC_PRECISE      4096       /* 12 bits */
+
 static int mt_auxadc_get_cali_data(int rawdata, bool enable_cali)
 {
        return rawdata;
@@ -191,6 +195,10 @@ static int mt6577_auxadc_read_raw(struct iio_dev *indio_dev,
                }
                if (adc_dev->dev_comp->sample_data_cali)
                        *val = mt_auxadc_get_cali_data(*val, true);
+
+               /* Convert adc raw data to voltage: 0 - 1500 mV */
+               *val = *val * VOLTAGE_FULL_RANGE / AUXADC_PRECISE;
+
                return IIO_VAL_INT;
 
        default:
index 9996d5eef2893a4ad3152738064c5531db9b56be..32fbf57c362fa58733df8a576ac6e3481dffe6d0 100644 (file)
@@ -401,7 +401,7 @@ static int rzg2l_adc_hw_init(struct rzg2l_adc *adc)
 exit_hw_init:
        clk_disable_unprepare(adc->pclk);
 
-       return 0;
+       return ret;
 }
 
 static void rzg2l_adc_pm_runtime_disable(void *data)
@@ -570,8 +570,10 @@ static int __maybe_unused rzg2l_adc_pm_runtime_resume(struct device *dev)
                return ret;
 
        ret = clk_prepare_enable(adc->adclk);
-       if (ret)
+       if (ret) {
+               clk_disable_unprepare(adc->pclk);
                return ret;
+       }
 
        rzg2l_adc_pwr(adc, true);
 
index 3143f35a6509aa5b92b6aece0fa1de3ab71118d7..83c1ae07b3e9a1c7db56921f14c91c0f2c40c9d8 100644 (file)
@@ -171,7 +171,13 @@ static int adc128_probe(struct spi_device *spi)
        mutex_init(&adc->lock);
 
        ret = iio_device_register(indio_dev);
+       if (ret)
+               goto err_disable_regulator;
 
+       return 0;
+
+err_disable_regulator:
+       regulator_disable(adc->reg);
        return ret;
 }
 
index 4864c38b8d1c2d6f66c89fbe7a1407ebf9f4073f..769bd9280524a0d016eac827b200253844453c80 100644 (file)
@@ -137,7 +137,7 @@ static int ssp_print_mcu_debug(char *data_frame, int *data_index,
        if (length > received_len - *data_index || length <= 0) {
                ssp_dbg("[SSP]: MSG From MCU-invalid debug length(%d/%d)\n",
                        length, received_len);
-               return length ? length : -EPROTO;
+               return -EPROTO;
        }
 
        ssp_dbg("[SSP]: MSG From MCU - %s\n", &data_frame[*data_index]);
@@ -273,6 +273,8 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
        for (idx = 0; idx < len;) {
                switch (dataframe[idx++]) {
                case SSP_MSG2AP_INST_BYPASS_DATA:
+                       if (idx >= len)
+                               return -EPROTO;
                        sd = dataframe[idx++];
                        if (sd < 0 || sd >= SSP_SENSOR_MAX) {
                                dev_err(SSP_DEV,
@@ -282,10 +284,13 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
 
                        if (indio_devs[sd]) {
                                spd = iio_priv(indio_devs[sd]);
-                               if (spd->process_data)
+                               if (spd->process_data) {
+                                       if (idx >= len)
+                                               return -EPROTO;
                                        spd->process_data(indio_devs[sd],
                                                          &dataframe[idx],
                                                          data->timestamp);
+                               }
                        } else {
                                dev_err(SSP_DEV, "no client for frame\n");
                        }
@@ -293,6 +298,8 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
                        idx += ssp_offset_map[sd];
                        break;
                case SSP_MSG2AP_INST_DEBUG_DATA:
+                       if (idx >= len)
+                               return -EPROTO;
                        sd = ssp_print_mcu_debug(dataframe, &idx, len);
                        if (sd) {
                                dev_err(SSP_DEV,
index 2a5ba1b08a1d0af6c6aea10ea016a93a95381781..546a4cf6c5ef8325937b248b883f1663f6ae8a0d 100644 (file)
@@ -350,6 +350,7 @@ static int dac5571_probe(struct i2c_client *client,
                data->dac5571_pwrdwn = dac5571_pwrdwn_quad;
                break;
        default:
+               ret = -EINVAL;
                goto err;
        }
 
index eb48102f94243e41c49b4f4e66864d027fa0dfc3..287fff39a927aacf6a043627c4e0cb78f0ba850a 100644 (file)
@@ -353,10 +353,11 @@ static int adis16475_set_freq(struct adis16475 *st, const u32 freq)
        if (dec > st->info->max_dec)
                dec = st->info->max_dec;
 
-       ret = adis_write_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, dec);
+       ret = __adis_write_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, dec);
        if (ret)
                goto error;
 
+       adis_dev_unlock(&st->adis);
        /*
         * If decimation is used, then gyro and accel data will have meaningful
         * bits on the LSB registers. This info is used on the trigger handler.
index a869a6e52a16bec9eca5e196bd6c2ccef85d8ed7..ed129321a14da1175bb29cfae062a90879056e2c 100644 (file)
@@ -144,6 +144,7 @@ struct adis16480_chip_info {
        unsigned int max_dec_rate;
        const unsigned int *filter_freqs;
        bool has_pps_clk_mode;
+       bool has_sleep_cnt;
        const struct adis_data adis_data;
 };
 
@@ -939,6 +940,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
                .temp_scale = 5650, /* 5.65 milli degree Celsius */
                .int_clk = 2460000,
                .max_dec_rate = 2048,
+               .has_sleep_cnt = true,
                .filter_freqs = adis16480_def_filter_freqs,
                .adis_data = ADIS16480_DATA(16375, &adis16485_timeouts, 0),
        },
@@ -952,6 +954,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
                .temp_scale = 5650, /* 5.65 milli degree Celsius */
                .int_clk = 2460000,
                .max_dec_rate = 2048,
+               .has_sleep_cnt = true,
                .filter_freqs = adis16480_def_filter_freqs,
                .adis_data = ADIS16480_DATA(16480, &adis16480_timeouts, 0),
        },
@@ -965,6 +968,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
                .temp_scale = 5650, /* 5.65 milli degree Celsius */
                .int_clk = 2460000,
                .max_dec_rate = 2048,
+               .has_sleep_cnt = true,
                .filter_freqs = adis16480_def_filter_freqs,
                .adis_data = ADIS16480_DATA(16485, &adis16485_timeouts, 0),
        },
@@ -978,6 +982,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
                .temp_scale = 5650, /* 5.65 milli degree Celsius */
                .int_clk = 2460000,
                .max_dec_rate = 2048,
+               .has_sleep_cnt = true,
                .filter_freqs = adis16480_def_filter_freqs,
                .adis_data = ADIS16480_DATA(16488, &adis16485_timeouts, 0),
        },
@@ -1425,9 +1430,12 @@ static int adis16480_probe(struct spi_device *spi)
        if (ret)
                return ret;
 
-       ret = devm_add_action_or_reset(&spi->dev, adis16480_stop, indio_dev);
-       if (ret)
-               return ret;
+       if (st->chip_info->has_sleep_cnt) {
+               ret = devm_add_action_or_reset(&spi->dev, adis16480_stop,
+                                              indio_dev);
+               if (ret)
+                       return ret;
+       }
 
        ret = adis16480_config_irq_pin(spi->dev.of_node, st);
        if (ret)
index 52963da401a78e9bdbba5bc186455312a1c9c882..1880bd5bb2586c5b9f05b61fa1dda532bc7156d0 100644 (file)
@@ -276,6 +276,8 @@ static int opt3001_get_lux(struct opt3001 *opt, int *val, int *val2)
                ret = wait_event_timeout(opt->result_ready_queue,
                                opt->result_ready,
                                msecs_to_jiffies(OPT3001_RESULT_READY_LONG));
+               if (ret == 0)
+                       return -ETIMEDOUT;
        } else {
                /* Sleep for result ready time */
                timeout = (opt->int_time == OPT3001_INT_TIME_SHORT) ?
@@ -312,9 +314,7 @@ err:
                /* Disallow IRQ to access the device while lock is active */
                opt->ok_to_ignore_lock = false;
 
-       if (ret == 0)
-               return -ETIMEDOUT;
-       else if (ret < 0)
+       if (ret < 0)
                return ret;
 
        if (opt->use_irq) {
index f1099b4953014c628002341426eb949f441fb332..467519a2027e5519ead92730ab91c95473254f9b 100644 (file)
@@ -5,3 +5,4 @@
 
 # Keep in alphabetical order
 obj-$(CONFIG_IIO_TEST_FORMAT) += iio-test-format.o
+CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)
index c40791baced58836ba6bd5711e565c1e7ffedca1..704ce595542c5735e2390ebedc2cf2697333b8e5 100644 (file)
@@ -1746,15 +1746,16 @@ static void cma_cancel_route(struct rdma_id_private *id_priv)
        }
 }
 
-static void cma_cancel_listens(struct rdma_id_private *id_priv)
+static void _cma_cancel_listens(struct rdma_id_private *id_priv)
 {
        struct rdma_id_private *dev_id_priv;
 
+       lockdep_assert_held(&lock);
+
        /*
         * Remove from listen_any_list to prevent added devices from spawning
         * additional listen requests.
         */
-       mutex_lock(&lock);
        list_del(&id_priv->list);
 
        while (!list_empty(&id_priv->listen_list)) {
@@ -1768,6 +1769,12 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
                rdma_destroy_id(&dev_id_priv->id);
                mutex_lock(&lock);
        }
+}
+
+static void cma_cancel_listens(struct rdma_id_private *id_priv)
+{
+       mutex_lock(&lock);
+       _cma_cancel_listens(id_priv);
        mutex_unlock(&lock);
 }
 
@@ -1776,6 +1783,14 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
 {
        switch (state) {
        case RDMA_CM_ADDR_QUERY:
+               /*
+                * We can avoid doing the rdma_addr_cancel() based on state,
+                * only RDMA_CM_ADDR_QUERY has a work that could still execute.
+                * Notice that the addr_handler work could still be exiting
+                * outside this state, however due to the interaction with the
+                * handler_mutex the work is guaranteed not to touch id_priv
+                * during exit.
+                */
                rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
                break;
        case RDMA_CM_ROUTE_QUERY:
@@ -1810,6 +1825,8 @@ static void cma_release_port(struct rdma_id_private *id_priv)
 static void destroy_mc(struct rdma_id_private *id_priv,
                       struct cma_multicast *mc)
 {
+       bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
+
        if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
                ib_sa_free_multicast(mc->sa_mc);
 
@@ -1826,7 +1843,10 @@ static void destroy_mc(struct rdma_id_private *id_priv,
 
                        cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
                                     &mgid);
-                       cma_igmp_send(ndev, &mgid, false);
+
+                       if (!send_only)
+                               cma_igmp_send(ndev, &mgid, false);
+
                        dev_put(ndev);
                }
 
@@ -2574,7 +2594,7 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv)
        return 0;
 
 err_listen:
-       list_del(&id_priv->list);
+       _cma_cancel_listens(id_priv);
        mutex_unlock(&lock);
        if (to_destroy)
                rdma_destroy_id(&to_destroy->id);
@@ -3413,6 +3433,21 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
                if (dst_addr->sa_family == AF_IB) {
                        ret = cma_resolve_ib_addr(id_priv);
                } else {
+                       /*
+                        * The FSM can return back to RDMA_CM_ADDR_BOUND after
+                        * rdma_resolve_ip() is called, eg through the error
+                        * path in addr_handler(). If this happens the existing
+                        * request must be canceled before issuing a new one.
+                        * Since canceling a request is a bit slow and this
+                        * oddball path is rare, keep track once a request has
+                        * been issued. The track turns out to be a permanent
+                        * state since this is the only cancel as it is
+                        * immediately before rdma_resolve_ip().
+                        */
+                       if (id_priv->used_resolve_ip)
+                               rdma_addr_cancel(&id->route.addr.dev_addr);
+                       else
+                               id_priv->used_resolve_ip = 1;
                        ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
                                              &id->route.addr.dev_addr,
                                              timeout_ms, addr_handler,
@@ -3771,9 +3806,13 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
        int ret;
 
        if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
+               struct sockaddr_in any_in = {
+                       .sin_family = AF_INET,
+                       .sin_addr.s_addr = htonl(INADDR_ANY),
+               };
+
                /* For a well behaved ULP state will be RDMA_CM_IDLE */
-               id->route.addr.src_addr.ss_family = AF_INET;
-               ret = rdma_bind_addr(id, cma_src_addr(id_priv));
+               ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
                if (ret)
                        return ret;
                if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
index 5c463da9984536c6795ee7905fb08848235b4cb9..f92f101ea9818f8c1ecf11375353103dce234858 100644 (file)
@@ -91,6 +91,7 @@ struct rdma_id_private {
        u8                      afonly;
        u8                      timeout;
        u8                      min_rnr_timer;
+       u8 used_resolve_ip;
        enum ib_gid_type        gid_type;
 
        /*
index e74ddbe465891df1ccf42ba0db1d39ff829e27d7..15b0cb0f363f4244acc7b47be79794445187e55f 100644 (file)
@@ -876,14 +876,14 @@ void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
        struct hfi1_ipoib_txq *txq = &priv->txqs[q];
        u64 completed = atomic64_read(&txq->complete_txreqs);
 
-       dd_dev_info(priv->dd, "timeout txq %llx q %u stopped %u stops %d no_desc %d ring_full %d\n",
-                   (unsigned long long)txq, q,
+       dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n",
+                   txq, q,
                    __netif_subqueue_stopped(dev, txq->q_idx),
                    atomic_read(&txq->stops),
                    atomic_read(&txq->no_desc),
                    atomic_read(&txq->ring_full));
-       dd_dev_info(priv->dd, "sde %llx engine %u\n",
-                   (unsigned long long)txq->sde,
+       dd_dev_info(priv->dd, "sde %p engine %u\n",
+                   txq->sde,
                    txq->sde ? txq->sde->this_idx : 0);
        dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
        dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
index 1e9c3c5bee684485526fd16bd077a9676029d0e7..d763f097599ff8f18a2750fc15464582f475886a 100644 (file)
@@ -326,19 +326,30 @@ static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
        INIT_LIST_HEAD(&hr_cq->rq_list);
 }
 
-static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
-                        struct hns_roce_ib_create_cq *ucmd)
+static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
+                       struct hns_roce_ib_create_cq *ucmd)
 {
        struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
 
-       if (udata) {
-               if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size))
-                       hr_cq->cqe_size = ucmd->cqe_size;
-               else
-                       hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
-       } else {
+       if (!udata) {
                hr_cq->cqe_size = hr_dev->caps.cqe_sz;
+               return 0;
+       }
+
+       if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
+               if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
+                   ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
+                       ibdev_err(&hr_dev->ib_dev,
+                                 "invalid cqe size %u.\n", ucmd->cqe_size);
+                       return -EINVAL;
+               }
+
+               hr_cq->cqe_size = ucmd->cqe_size;
+       } else {
+               hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
        }
+
+       return 0;
 }
 
 int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
@@ -366,7 +377,9 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 
        set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);
 
-       set_cqe_size(hr_cq, udata, &ucmd);
+       ret = set_cqe_size(hr_cq, udata, &ucmd);
+       if (ret)
+               return ret;
 
        ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
        if (ret) {
index 5b9953105752c397ac2cacdce4e7eaaf8a993387..d5f3faa1627a41c08224e0fb6fe9c31d8a1e9c13 100644 (file)
@@ -3299,7 +3299,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
                        dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
                                          hr_cq->ib_cq.cqe);
                        owner_bit = hr_reg_read(dest, CQE_OWNER);
-                       memcpy(dest, cqe, sizeof(*cqe));
+                       memcpy(dest, cqe, hr_cq->cqe_size);
                        hr_reg_write(dest, CQE_OWNER, owner_bit);
                }
        }
@@ -4397,7 +4397,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
        hr_qp->path_mtu = ib_mtu;
 
        mtu = ib_mtu_enum_to_int(ib_mtu);
-       if (WARN_ON(mtu < 0))
+       if (WARN_ON(mtu <= 0))
+               return -EINVAL;
+#define MAX_LP_MSG_LEN 65536
+       /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
+       lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
+       if (WARN_ON(lp_pktn_ini >= 0xF))
                return -EINVAL;
 
        if (attr_mask & IB_QP_PATH_MTU) {
@@ -4405,10 +4410,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
                hr_reg_clear(qpc_mask, QPC_MTU);
        }
 
-#define MAX_LP_MSG_LEN 65536
-       /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
-       lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
-
        hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
        hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
 
index 6b62299abfbbbbb49d9539ec872f104b59e51adb..6dea0a49d1718384d662df8071c4fec98791a475 100644 (file)
@@ -3496,7 +3496,7 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
             original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
             last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
             last_ae == IRDMA_AE_BAD_CLOSE ||
-            last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->reset)) {
+            last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
                issue_close = 1;
                iwqp->cm_id = NULL;
                qp->term_flags = 0;
@@ -4250,7 +4250,7 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
                                       teardown_entry);
                attr.qp_state = IB_QPS_ERR;
                irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
-               if (iwdev->reset)
+               if (iwdev->rf->reset)
                        irdma_cm_disconn(cm_node->iwqp);
                irdma_rem_ref_cm_node(cm_node);
        }
index 00de5ee9a260950a987625df112165fbf98b70cb..7de525a5ccf8c9a6d72f5c92d6fe05098325bb9b 100644 (file)
@@ -176,6 +176,14 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
        case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
                qp->flush_code = FLUSH_GENERAL_ERR;
                break;
+       case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+               qp->flush_code = FLUSH_RETRY_EXC_ERR;
+               break;
+       case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+       case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+       case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+               qp->flush_code = FLUSH_MW_BIND_ERR;
+               break;
        default:
                qp->flush_code = FLUSH_FATAL_ERR;
                break;
@@ -1489,7 +1497,7 @@ void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
 
        irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
        if (irdma_initialize_ieq(iwdev)) {
-               iwdev->reset = true;
+               iwdev->rf->reset = true;
                rf->gen_ops.request_reset(rf);
        }
 }
@@ -1632,13 +1640,13 @@ void irdma_rt_deinit_hw(struct irdma_device *iwdev)
        case IEQ_CREATED:
                if (!iwdev->roce_mode)
                        irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
-                                            iwdev->reset);
+                                            iwdev->rf->reset);
                fallthrough;
        case ILQ_CREATED:
                if (!iwdev->roce_mode)
                        irdma_puda_dele_rsrc(&iwdev->vsi,
                                             IRDMA_PUDA_RSRC_TYPE_ILQ,
-                                            iwdev->reset);
+                                            iwdev->rf->reset);
                break;
        default:
                ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
index bddf88194d095ea377dbecfcbf7fa4701fb512d5..d219f64b2c3d5fbd8b98f8c40ce267d773ef6a9e 100644 (file)
@@ -55,7 +55,7 @@ static void i40iw_close(struct i40e_info *cdev_info, struct i40e_client *client,
 
        iwdev = to_iwdev(ibdev);
        if (reset)
-               iwdev->reset = true;
+               iwdev->rf->reset = true;
 
        iwdev->iw_status = 0;
        irdma_port_ibevent(iwdev);
index 743d9e143a999f253a48b1e6f210d8ee0ccbebba..b678fe712447e1bf0126574f12eacee6716c2d34 100644 (file)
@@ -346,7 +346,6 @@ struct irdma_device {
        bool roce_mode:1;
        bool roce_dcqcn_en:1;
        bool dcb:1;
-       bool reset:1;
        bool iw_ooo:1;
        enum init_completion_state init_state;
 
index ff705f323233375a5ea150adb8daa2797e1f224f..3dcbb1fbf2c665585b060bb27af6f7d1099ff292 100644 (file)
@@ -102,6 +102,8 @@ enum irdma_flush_opcode {
        FLUSH_REM_OP_ERR,
        FLUSH_LOC_LEN_ERR,
        FLUSH_FATAL_ERR,
+       FLUSH_RETRY_EXC_ERR,
+       FLUSH_MW_BIND_ERR,
 };
 
 enum irdma_cmpl_status {
index e94470991fe0eb25a0f338ea9a18080c2671f6bb..ac91ea5296db99af4c3e10f3de81cf902ae7ff03 100644 (file)
@@ -2507,7 +2507,7 @@ void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
        struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
        struct ib_qp_attr attr;
 
-       if (qp->iwdev->reset)
+       if (qp->iwdev->rf->reset)
                return;
        attr.qp_state = IB_QPS_ERR;
 
index 4fc32340207361c0de003fe0c1f59dad01ff2ca2..7110ebf834f968228f0af163f4037a6927ba5180 100644 (file)
@@ -535,8 +535,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
        irdma_qp_rem_ref(&iwqp->ibqp);
        wait_for_completion(&iwqp->free_qp);
        irdma_free_lsmm_rsrc(iwqp);
-       if (!iwdev->reset)
-               irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
+       irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
 
        if (!iwqp->user_mode) {
                if (iwqp->iwscq) {
@@ -2035,7 +2034,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
                /* Kmode allocations */
                int rsize;
 
-               if (entries > rf->max_cqe) {
+               if (entries < 1 || entries > rf->max_cqe) {
                        err_code = -EINVAL;
                        goto cq_free_rsrc;
                }
@@ -3353,6 +3352,10 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
                return IB_WC_LOC_LEN_ERR;
        case FLUSH_GENERAL_ERR:
                return IB_WC_WR_FLUSH_ERR;
+       case FLUSH_RETRY_EXC_ERR:
+               return IB_WC_RETRY_EXC_ERR;
+       case FLUSH_MW_BIND_ERR:
+               return IB_WC_MW_BIND_ERR;
        case FLUSH_FATAL_ERR:
        default:
                return IB_WC_FATAL_ERR;
index 452e2355d24eeb28ca1628ebb3bd4bda9d00ed74..0a3b28142c05b6b5d76910f310e2cdf79159ac5a 100644 (file)
@@ -403,7 +403,7 @@ static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num,
 }
 
 #define QIB_DIAGC_ATTR(N)                                                      \
-       static_assert(&((struct qib_ibport *)0)->rvp.n_##N != (u64 *)NULL);    \
+       static_assert(__same_type(((struct qib_ibport *)0)->rvp.n_##N, u64));  \
        static struct qib_diagc_attr qib_diagc_attr_##N = {                    \
                .attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store),    \
                .counter =                                                     \
index 84dd682d23341beb46c89211cfa3e4385ada5488..b350081aeb5a323d06a64221194ca5ab903248c9 100644 (file)
@@ -90,7 +90,7 @@ struct usnic_ib_dev {
 
 struct usnic_ib_vf {
        struct usnic_ib_dev             *pf;
-       spinlock_t                      lock;
+       struct mutex                    lock;
        struct usnic_vnic               *vnic;
        unsigned int                    qp_grp_ref_cnt;
        struct usnic_ib_pd              *pd;
index 228e9a36dad0d8e17465a141b43e8bb1019bdd3a..d346dd48e731b130d135d9218aa18295aebd52df 100644 (file)
@@ -572,7 +572,7 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
        }
 
        vf->pf = pf;
-       spin_lock_init(&vf->lock);
+       mutex_init(&vf->lock);
        mutex_lock(&pf->usdev_lock);
        list_add_tail(&vf->link, &pf->vf_dev_list);
        /*
index 06a4e9d4545da0cd38e1ec146352da0ef8803f08..756a83bcff58afbec6bc43f878a4a9a96b1aab98 100644 (file)
@@ -196,7 +196,7 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
                for (i = 0; dev_list[i]; i++) {
                        dev = dev_list[i];
                        vf = dev_get_drvdata(dev);
-                       spin_lock(&vf->lock);
+                       mutex_lock(&vf->lock);
                        vnic = vf->vnic;
                        if (!usnic_vnic_check_room(vnic, res_spec)) {
                                usnic_dbg("Found used vnic %s from %s\n",
@@ -208,10 +208,10 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
                                                             vf, pd, res_spec,
                                                             trans_spec);
 
-                               spin_unlock(&vf->lock);
+                               mutex_unlock(&vf->lock);
                                goto qp_grp_check;
                        }
-                       spin_unlock(&vf->lock);
+                       mutex_unlock(&vf->lock);
 
                }
                usnic_uiom_free_dev_list(dev_list);
@@ -220,7 +220,7 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
 
        /* Try to find resources on an unused vf */
        list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
-               spin_lock(&vf->lock);
+               mutex_lock(&vf->lock);
                vnic = vf->vnic;
                if (vf->qp_grp_ref_cnt == 0 &&
                    usnic_vnic_check_room(vnic, res_spec) == 0) {
@@ -228,10 +228,10 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
                                                     vf, pd, res_spec,
                                                     trans_spec);
 
-                       spin_unlock(&vf->lock);
+                       mutex_unlock(&vf->lock);
                        goto qp_grp_check;
                }
-               spin_unlock(&vf->lock);
+               mutex_unlock(&vf->lock);
        }
 
        usnic_info("No free qp grp found on %s\n",
@@ -253,9 +253,9 @@ static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
 
        WARN_ON(qp_grp->state != IB_QPS_RESET);
 
-       spin_lock(&vf->lock);
+       mutex_lock(&vf->lock);
        usnic_ib_qp_grp_destroy(qp_grp);
-       spin_unlock(&vf->lock);
+       mutex_unlock(&vf->lock);
 }
 
 static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
index 29de8412e41655f7fae6e32884180eda090f4a27..4c914f75a9027db503db69d9a04443059c8b5712 100644 (file)
@@ -334,6 +334,7 @@ static const struct xpad_device {
        { 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
        { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
        { 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+       { 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
        { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
        { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
        { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
@@ -451,6 +452,7 @@ static const struct usb_device_id xpad_table[] = {
        XPAD_XBOXONE_VENDOR(0x24c6),            /* PowerA Controllers */
        XPAD_XBOXONE_VENDOR(0x2e24),            /* Hyperkin Duke X-Box One pad */
        XPAD_XBOX360_VENDOR(0x2f24),            /* GameSir Controllers */
+       XPAD_XBOX360_VENDOR(0x3285),            /* Nacon GC-100 */
        { }
 };
 
index 2f5e3ab5ed638c9f7d0691f40528b766cf794134..65286762b02ab918403d91de894e849820010ec1 100644 (file)
@@ -3,6 +3,7 @@
 // Driver for the IMX SNVS ON/OFF Power Key
 // Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
 
+#include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -99,6 +100,11 @@ static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static void imx_snvs_pwrkey_disable_clk(void *data)
+{
+       clk_disable_unprepare(data);
+}
+
 static void imx_snvs_pwrkey_act(void *pdata)
 {
        struct pwrkey_drv_data *pd = pdata;
@@ -111,6 +117,7 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
        struct pwrkey_drv_data *pdata;
        struct input_dev *input;
        struct device_node *np;
+       struct clk *clk;
        int error;
        u32 vid;
 
@@ -134,6 +141,28 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
                dev_warn(&pdev->dev, "KEY_POWER without setting in dts\n");
        }
 
+       clk = devm_clk_get_optional(&pdev->dev, NULL);
+       if (IS_ERR(clk)) {
+               dev_err(&pdev->dev, "Failed to get snvs clock (%pe)\n", clk);
+               return PTR_ERR(clk);
+       }
+
+       error = clk_prepare_enable(clk);
+       if (error) {
+               dev_err(&pdev->dev, "Failed to enable snvs clock (%pe)\n",
+                       ERR_PTR(error));
+               return error;
+       }
+
+       error = devm_add_action_or_reset(&pdev->dev,
+                                        imx_snvs_pwrkey_disable_clk, clk);
+       if (error) {
+               dev_err(&pdev->dev,
+                       "Failed to register clock cleanup handler (%pe)\n",
+                       ERR_PTR(error));
+               return error;
+       }
+
        pdata->wakeup = of_property_read_bool(np, "wakeup-source");
 
        pdata->irq = platform_get_irq(pdev, 0);
index dd18cb917c4df7f1241f32e1f546cbd3113ed239..4620e20d0190f7bb0e5ab197cca0765dcd8ad821 100644 (file)
@@ -80,27 +80,27 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
 
        data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-x",
                                                input_abs_get_min(input, axis_x),
-                                               &minimum) |
-                      touchscreen_get_prop_u32(dev, "touchscreen-size-x",
-                                               input_abs_get_max(input,
-                                                                 axis_x) + 1,
-                                               &maximum) |
-                      touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
-                                               input_abs_get_fuzz(input, axis_x),
-                                               &fuzz);
+                                               &minimum);
+       data_present |= touchscreen_get_prop_u32(dev, "touchscreen-size-x",
+                                                input_abs_get_max(input,
+                                                                  axis_x) + 1,
+                                                &maximum);
+       data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
+                                                input_abs_get_fuzz(input, axis_x),
+                                                &fuzz);
        if (data_present)
                touchscreen_set_params(input, axis_x, minimum, maximum - 1, fuzz);
 
        data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-y",
                                                input_abs_get_min(input, axis_y),
-                                               &minimum) |
-                      touchscreen_get_prop_u32(dev, "touchscreen-size-y",
-                                               input_abs_get_max(input,
-                                                                 axis_y) + 1,
-                                               &maximum) |
-                      touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
-                                               input_abs_get_fuzz(input, axis_y),
-                                               &fuzz);
+                                               &minimum);
+       data_present |= touchscreen_get_prop_u32(dev, "touchscreen-size-y",
+                                                input_abs_get_max(input,
+                                                                  axis_y) + 1,
+                                                &maximum);
+       data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
+                                                input_abs_get_fuzz(input, axis_y),
+                                                &fuzz);
        if (data_present)
                touchscreen_set_params(input, axis_y, minimum, maximum - 1, fuzz);
 
@@ -108,11 +108,11 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
        data_present = touchscreen_get_prop_u32(dev,
                                                "touchscreen-max-pressure",
                                                input_abs_get_max(input, axis),
-                                               &maximum) |
-                      touchscreen_get_prop_u32(dev,
-                                               "touchscreen-fuzz-pressure",
-                                               input_abs_get_fuzz(input, axis),
-                                               &fuzz);
+                                               &maximum);
+       data_present |= touchscreen_get_prop_u32(dev,
+                                                "touchscreen-fuzz-pressure",
+                                                input_abs_get_fuzz(input, axis),
+                                                &fuzz);
        if (data_present)
                touchscreen_set_params(input, axis, 0, maximum, fuzz);
 
index 744544a723b7748510605329e24910bf42ecc275..6f754a8d30b11776381b3fe78427c09226b98385 100644 (file)
@@ -71,19 +71,22 @@ static int grts_cb(const void *data, void *private)
                unsigned int z2 = touch_info[st->ch_map[GRTS_CH_Z2]];
                unsigned int Rt;
 
-               Rt = z2;
-               Rt -= z1;
-               Rt *= st->x_plate_ohms;
-               Rt = DIV_ROUND_CLOSEST(Rt, 16);
-               Rt *= x;
-               Rt /= z1;
-               Rt = DIV_ROUND_CLOSEST(Rt, 256);
-               /*
-                * On increased pressure the resistance (Rt) is decreasing
-                * so, convert values to make it looks as real pressure.
-                */
-               if (Rt < GRTS_DEFAULT_PRESSURE_MAX)
-                       press = GRTS_DEFAULT_PRESSURE_MAX - Rt;
+               if (likely(x && z1)) {
+                       Rt = z2;
+                       Rt -= z1;
+                       Rt *= st->x_plate_ohms;
+                       Rt = DIV_ROUND_CLOSEST(Rt, 16);
+                       Rt *= x;
+                       Rt /= z1;
+                       Rt = DIV_ROUND_CLOSEST(Rt, 256);
+                       /*
+                        * On increased pressure the resistance (Rt) is
+                        * decreasing so, convert values to make it looks as
+                        * real pressure.
+                        */
+                       if (Rt < GRTS_DEFAULT_PRESSURE_MAX)
+                               press = GRTS_DEFAULT_PRESSURE_MAX - Rt;
+               }
        }
 
        if ((!x && !y) || (st->pressure && (press < st->pressure_min))) {
index 632dbdd2191500e62efc0ff500e0ac488f07d953..fb23a5b780a46e2f5c999b19585472dc12589b8f 100644 (file)
@@ -44,9 +44,9 @@
 #define NOC_PERM_MODE_BYPASS           (1 << NOC_QOS_MODE_BYPASS)
 
 #define NOC_QOS_PRIORITYn_ADDR(n)      (0x8 + (n * 0x1000))
-#define NOC_QOS_PRIORITY_MASK          0xf
+#define NOC_QOS_PRIORITY_P1_MASK       0xc
+#define NOC_QOS_PRIORITY_P0_MASK       0x3
 #define NOC_QOS_PRIORITY_P1_SHIFT      0x2
-#define NOC_QOS_PRIORITY_P0_SHIFT      0x3
 
 #define NOC_QOS_MODEn_ADDR(n)          (0xc + (n * 0x1000))
 #define NOC_QOS_MODEn_MASK             0x3
@@ -173,6 +173,16 @@ static const struct clk_bulk_data bus_mm_clocks[] = {
        { .id = "iface" },
 };
 
+static const struct clk_bulk_data bus_a2noc_clocks[] = {
+       { .id = "bus" },
+       { .id = "bus_a" },
+       { .id = "ipa" },
+       { .id = "ufs_axi" },
+       { .id = "aggre2_ufs_axi" },
+       { .id = "aggre2_usb3_axi" },
+       { .id = "cfg_noc_usb2_axi" },
+};
+
 /**
  * struct qcom_icc_provider - Qualcomm specific interconnect provider
  * @provider: generic interconnect provider
@@ -307,7 +317,7 @@ DEFINE_QNODE(slv_bimc_cfg, SDM660_SLAVE_BIMC_CFG, 4, -1, 56, true, -1, 0, -1, 0)
 DEFINE_QNODE(slv_prng, SDM660_SLAVE_PRNG, 4, -1, 44, true, -1, 0, -1, 0);
 DEFINE_QNODE(slv_spdm, SDM660_SLAVE_SPDM, 4, -1, 60, true, -1, 0, -1, 0);
 DEFINE_QNODE(slv_qdss_cfg, SDM660_SLAVE_QDSS_CFG, 4, -1, 63, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_BLSP_1, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_CNOC_MNOC_CFG, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
 DEFINE_QNODE(slv_snoc_cfg, SDM660_SLAVE_SNOC_CFG, 4, -1, 70, true, -1, 0, -1, 0);
 DEFINE_QNODE(slv_qm_cfg, SDM660_SLAVE_QM_CFG, 4, -1, 212, true, -1, 0, -1, 0);
 DEFINE_QNODE(slv_clk_ctl, SDM660_SLAVE_CLK_CTL, 4, -1, 47, true, -1, 0, -1, 0);
@@ -624,13 +634,12 @@ static int qcom_icc_noc_set_qos_priority(struct regmap *rmap,
        /* Must be updated one at a time, P1 first, P0 last */
        val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
        rc = regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
-                               NOC_QOS_PRIORITY_MASK, val);
+                               NOC_QOS_PRIORITY_P1_MASK, val);
        if (rc)
                return rc;
 
-       val = qos->prio_level << NOC_QOS_PRIORITY_P0_SHIFT;
        return regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
-                                 NOC_QOS_PRIORITY_MASK, val);
+                                 NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
 }
 
 static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
@@ -810,6 +819,10 @@ static int qnoc_probe(struct platform_device *pdev)
                qp->bus_clks = devm_kmemdup(dev, bus_mm_clocks,
                                            sizeof(bus_mm_clocks), GFP_KERNEL);
                qp->num_clks = ARRAY_SIZE(bus_mm_clocks);
+       } else if (of_device_is_compatible(dev->of_node, "qcom,sdm660-a2noc")) {
+               qp->bus_clks = devm_kmemdup(dev, bus_a2noc_clocks,
+                                           sizeof(bus_a2noc_clocks), GFP_KERNEL);
+               qp->num_clks = ARRAY_SIZE(bus_a2noc_clocks);
        } else {
                if (of_device_is_compatible(dev->of_node, "qcom,sdm660-bimc"))
                        qp->is_bimc_node = true;
index 124c41adeca1875438cffdb9588c9473d86487f6..3eb68fa1b8cc02949fc29468316df560ceaefc31 100644 (file)
@@ -308,7 +308,6 @@ config APPLE_DART
 config ARM_SMMU
        tristate "ARM Ltd. System MMU (SMMU) Support"
        depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)
-       depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
        select ARM_DMA_USE_IOMMU if ARM
@@ -356,6 +355,14 @@ config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
          'arm-smmu.disable_bypass' will continue to override this
          config.
 
+config ARM_SMMU_QCOM
+       def_tristate y
+       depends on ARM_SMMU && ARCH_QCOM
+       select QCOM_SCM
+       help
+         When running on a Qualcomm platform that has the custom variant
+         of the ARM SMMU, this needs to be built into the SMMU driver.
+
 config ARM_SMMU_V3
        tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
        depends on ARM64
@@ -438,7 +445,7 @@ config QCOM_IOMMU
        # Note: iommu drivers cannot (yet?) be built as modules
        bool "Qualcomm IOMMU Support"
        depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
-       depends on QCOM_SCM=y
+       select QCOM_SCM
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
        select ARM_DMA_USE_IOMMU
index 559db9259e65c76ebf1eef2e02dd2c61be2de929..fdfa39ec2a4d4a50d87d5fa9502efc7b1170bc61 100644 (file)
@@ -183,7 +183,6 @@ struct apple_dart_master_cfg {
 
 static struct platform_driver apple_dart_driver;
 static const struct iommu_ops apple_dart_iommu_ops;
-static const struct iommu_flush_ops apple_dart_tlb_ops;
 
 static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
 {
@@ -338,22 +337,6 @@ static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
        apple_dart_domain_flush_tlb(to_dart_domain(domain));
 }
 
-static void apple_dart_tlb_flush_all(void *cookie)
-{
-       apple_dart_domain_flush_tlb(cookie);
-}
-
-static void apple_dart_tlb_flush_walk(unsigned long iova, size_t size,
-                                     size_t granule, void *cookie)
-{
-       apple_dart_domain_flush_tlb(cookie);
-}
-
-static const struct iommu_flush_ops apple_dart_tlb_ops = {
-       .tlb_flush_all = apple_dart_tlb_flush_all,
-       .tlb_flush_walk = apple_dart_tlb_flush_walk,
-};
-
 static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
 {
@@ -435,7 +418,6 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
                .ias = 32,
                .oas = 36,
                .coherent_walk = 1,
-               .tlb = &apple_dart_tlb_ops,
                .iommu_dev = dart->dev,
        };
 
@@ -661,16 +643,34 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
        return -EINVAL;
 }
 
+static DEFINE_MUTEX(apple_dart_groups_lock);
+
+static void apple_dart_release_group(void *iommu_data)
+{
+       int i, sid;
+       struct apple_dart_stream_map *stream_map;
+       struct apple_dart_master_cfg *group_master_cfg = iommu_data;
+
+       mutex_lock(&apple_dart_groups_lock);
+
+       for_each_stream_map(i, group_master_cfg, stream_map)
+               for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+                       stream_map->dart->sid2group[sid] = NULL;
+
+       kfree(iommu_data);
+       mutex_unlock(&apple_dart_groups_lock);
+}
+
 static struct iommu_group *apple_dart_device_group(struct device *dev)
 {
-       static DEFINE_MUTEX(lock);
        int i, sid;
        struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
        struct apple_dart_stream_map *stream_map;
+       struct apple_dart_master_cfg *group_master_cfg;
        struct iommu_group *group = NULL;
        struct iommu_group *res = ERR_PTR(-EINVAL);
 
-       mutex_lock(&lock);
+       mutex_lock(&apple_dart_groups_lock);
 
        for_each_stream_map(i, cfg, stream_map) {
                for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) {
@@ -698,6 +698,20 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
 #endif
                group = generic_device_group(dev);
 
+       res = ERR_PTR(-ENOMEM);
+       if (!group)
+               goto out;
+
+       group_master_cfg = kzalloc(sizeof(*group_master_cfg), GFP_KERNEL);
+       if (!group_master_cfg) {
+               iommu_group_put(group);
+               goto out;
+       }
+
+       memcpy(group_master_cfg, cfg, sizeof(*group_master_cfg));
+       iommu_group_set_iommudata(group, group_master_cfg,
+               apple_dart_release_group);
+
        for_each_stream_map(i, cfg, stream_map)
                for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
                        stream_map->dart->sid2group[sid] = group;
@@ -705,7 +719,7 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
        res = group;
 
 out:
-       mutex_unlock(&lock);
+       mutex_unlock(&apple_dart_groups_lock);
        return res;
 }
 
index e240a7bcf3107903689a57833c45c0c9e1e38738..b0cc01aa20c98ec2511ce981403f1ddddb2c77ce 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
 obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
-arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o arm-smmu-qcom.o
+arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o
+arm_smmu-$(CONFIG_ARM_SMMU_QCOM) += arm-smmu-qcom.o
index 9f465e146799faa653eef2e26b764465ea4265c0..2c25cce3806032486c4957c2bdca6e03839fe870 100644 (file)
@@ -215,7 +215,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
            of_device_is_compatible(np, "nvidia,tegra186-smmu"))
                return nvidia_smmu_impl_init(smmu);
 
-       smmu = qcom_smmu_impl_init(smmu);
+       if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM))
+               smmu = qcom_smmu_impl_init(smmu);
 
        if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
                smmu->impl = &mrvl_mmu500_impl;
index 0ec5514c99807f5f46d6c94b8af5ba30fb0e75f3..b7708b93f3fa18819c956d81cf625aeaad1824b5 100644 (file)
@@ -1942,18 +1942,18 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
        reason = dmar_get_fault_reason(fault_reason, &fault_type);
 
        if (fault_type == INTR_REMAP)
-               pr_err("[INTR-REMAP] Request device [0x%02x:0x%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
+               pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
                       source_id >> 8, PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr >> 48,
                       fault_reason, reason);
        else if (pasid == INVALID_IOASID)
-               pr_err("[%s NO_PASID] Request device [0x%02x:0x%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
+               pr_err("[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
                       type ? "DMA Read" : "DMA Write",
                       source_id >> 8, PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr,
                       fault_reason, reason);
        else
-               pr_err("[%s PASID 0x%x] Request device [0x%02x:0x%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
+               pr_err("[%s PASID 0x%x] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
                       type ? "DMA Read" : "DMA Write", pasid,
                       source_id >> 8, PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr,
index c14e65a5d38f013d3e129a17304883800cc307a2..c709861198e593ecb5f359772eeb1bb44add21ea 100644 (file)
@@ -33,6 +33,7 @@ struct ipoctal_channel {
        unsigned int                    pointer_read;
        unsigned int                    pointer_write;
        struct tty_port                 tty_port;
+       bool                            tty_registered;
        union scc2698_channel __iomem   *regs;
        union scc2698_block __iomem     *block_regs;
        unsigned int                    board_id;
@@ -81,22 +82,34 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
        return 0;
 }
 
-static int ipoctal_open(struct tty_struct *tty, struct file *file)
+static int ipoctal_install(struct tty_driver *driver, struct tty_struct *tty)
 {
        struct ipoctal_channel *channel = dev_get_drvdata(tty->dev);
        struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index);
-       int err;
-
-       tty->driver_data = channel;
+       int res;
 
        if (!ipack_get_carrier(ipoctal->dev))
                return -EBUSY;
 
-       err = tty_port_open(&channel->tty_port, tty, file);
-       if (err)
-               ipack_put_carrier(ipoctal->dev);
+       res = tty_standard_install(driver, tty);
+       if (res)
+               goto err_put_carrier;
+
+       tty->driver_data = channel;
+
+       return 0;
+
+err_put_carrier:
+       ipack_put_carrier(ipoctal->dev);
+
+       return res;
+}
+
+static int ipoctal_open(struct tty_struct *tty, struct file *file)
+{
+       struct ipoctal_channel *channel = tty->driver_data;
 
-       return err;
+       return tty_port_open(&channel->tty_port, tty, file);
 }
 
 static void ipoctal_reset_stats(struct ipoctal_stats *stats)
@@ -264,7 +277,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
        int res;
        int i;
        struct tty_driver *tty;
-       char name[20];
        struct ipoctal_channel *channel;
        struct ipack_region *region;
        void __iomem *addr;
@@ -355,8 +367,11 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
        /* Fill struct tty_driver with ipoctal data */
        tty->owner = THIS_MODULE;
        tty->driver_name = KBUILD_MODNAME;
-       sprintf(name, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
-       tty->name = name;
+       tty->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
+       if (!tty->name) {
+               res = -ENOMEM;
+               goto err_put_driver;
+       }
        tty->major = 0;
 
        tty->minor_start = 0;
@@ -371,8 +386,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
        res = tty_register_driver(tty);
        if (res) {
                dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n");
-               tty_driver_kref_put(tty);
-               return res;
+               goto err_free_name;
        }
 
        /* Save the struct tty_driver for use when uninstalling the device */
@@ -383,7 +397,9 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 
                channel = &ipoctal->channel[i];
                tty_port_init(&channel->tty_port);
-               tty_port_alloc_xmit_buf(&channel->tty_port);
+               res = tty_port_alloc_xmit_buf(&channel->tty_port);
+               if (res)
+                       continue;
                channel->tty_port.ops = &ipoctal_tty_port_ops;
 
                ipoctal_reset_stats(&channel->stats);
@@ -391,13 +407,15 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
                spin_lock_init(&channel->lock);
                channel->pointer_read = 0;
                channel->pointer_write = 0;
-               tty_dev = tty_port_register_device(&channel->tty_port, tty, i, NULL);
+               tty_dev = tty_port_register_device_attr(&channel->tty_port, tty,
+                                                       i, NULL, channel, NULL);
                if (IS_ERR(tty_dev)) {
                        dev_err(&ipoctal->dev->dev, "Failed to register tty device.\n");
+                       tty_port_free_xmit_buf(&channel->tty_port);
                        tty_port_destroy(&channel->tty_port);
                        continue;
                }
-               dev_set_drvdata(tty_dev, channel);
+               channel->tty_registered = true;
        }
 
        /*
@@ -409,6 +427,13 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
                                       ipoctal_irq_handler, ipoctal);
 
        return 0;
+
+err_free_name:
+       kfree(tty->name);
+err_put_driver:
+       tty_driver_kref_put(tty);
+
+       return res;
 }
 
 static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel,
@@ -648,6 +673,7 @@ static void ipoctal_cleanup(struct tty_struct *tty)
 
 static const struct tty_operations ipoctal_fops = {
        .ioctl =                NULL,
+       .install =              ipoctal_install,
        .open =                 ipoctal_open,
        .close =                ipoctal_close,
        .write =                ipoctal_write_tty,
@@ -690,12 +716,17 @@ static void __ipoctal_remove(struct ipoctal *ipoctal)
 
        for (i = 0; i < NR_CHANNELS; i++) {
                struct ipoctal_channel *channel = &ipoctal->channel[i];
+
+               if (!channel->tty_registered)
+                       continue;
+
                tty_unregister_device(ipoctal->tty_drv, i);
                tty_port_free_xmit_buf(&channel->tty_port);
                tty_port_destroy(&channel->tty_port);
        }
 
        tty_unregister_driver(ipoctal->tty_drv);
+       kfree(ipoctal->tty_drv->name);
        tty_driver_kref_put(ipoctal->tty_drv);
        kfree(ipoctal);
 }
index 4d5924e9f766624eb48b3d610216cd52589a7b51..aca7b595c4c78cbef4c7749c67c8005e0bd28c9e 100644 (file)
@@ -409,6 +409,7 @@ config MESON_IRQ_GPIO
 config GOLDFISH_PIC
        bool "Goldfish programmable interrupt controller"
        depends on MIPS && (GOLDFISH || COMPILE_TEST)
+       select GENERIC_IRQ_CHIP
        select IRQ_DOMAIN
        help
          Say yes here to enable Goldfish interrupt controller driver used
index 7557ab55129536bfd629ad6b1ff3bb3f6ed88a65..53e0fb0562c11552f20dad9e5889a80b20001cad 100644 (file)
@@ -359,16 +359,16 @@ static void armada_370_xp_ipi_send_mask(struct irq_data *d,
                ARMADA_370_XP_SW_TRIG_INT_OFFS);
 }
 
-static void armada_370_xp_ipi_eoi(struct irq_data *d)
+static void armada_370_xp_ipi_ack(struct irq_data *d)
 {
        writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
 }
 
 static struct irq_chip ipi_irqchip = {
        .name           = "IPI",
+       .irq_ack        = armada_370_xp_ipi_ack,
        .irq_mask       = armada_370_xp_ipi_mask,
        .irq_unmask     = armada_370_xp_ipi_unmask,
-       .irq_eoi        = armada_370_xp_ipi_eoi,
        .ipi_send_mask  = armada_370_xp_ipi_send_mask,
 };
 
index 7f40dca8cda5396f7a7a069b720379d022884774..eb0882d1536661475086a132bb0db4efee9ae4e6 100644 (file)
@@ -4501,7 +4501,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
 
        if (err) {
                if (i > 0)
-                       its_vpe_irq_domain_free(domain, virq, i - 1);
+                       its_vpe_irq_domain_free(domain, virq, i);
 
                its_lpi_free(bitmap, base, nr_ids);
                its_free_prop_table(vprop_page);
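
The one-line change above is an off-by-one fix in the unwind path: when setup of entry i fails, entries 0..i-1 are already live, so the cleanup call must be asked to free i entries, not i - 1. A minimal sketch of that pattern, assuming hypothetical alloc_one()/free_range() helpers (not ITS driver APIs):

/* Sketch only: alloc_one() and free_range() are hypothetical stand-ins
 * illustrating the partial-allocation unwind, not ITS driver APIs. */
static int alloc_many(int virq, int nr)
{
	int i, err = 0;

	for (i = 0; i < nr; i++) {
		err = alloc_one(virq + i);
		if (err)
			break;
	}

	/* On failure, i entries (indices 0..i-1) were set up and must go. */
	if (err && i > 0)
		free_range(virq, i);

	return err;
}
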
index d329ec3d64d817ba658e9eb6cb1512f1a0152d29..5f22c9d65e57899375f6e0efe258ed7c07182ef0 100644 (file)
@@ -107,6 +107,8 @@ static DEFINE_RAW_SPINLOCK(cpu_map_lock);
 
 #endif
 
+static DEFINE_STATIC_KEY_FALSE(needs_rmw_access);
+
 /*
  * The GIC mapping of CPU interfaces does not necessarily match
  * the logical CPU numbering.  Let's use a mapping as returned
@@ -774,6 +776,25 @@ static int gic_pm_init(struct gic_chip_data *gic)
 #endif
 
 #ifdef CONFIG_SMP
+static void rmw_writeb(u8 bval, void __iomem *addr)
+{
+       static DEFINE_RAW_SPINLOCK(rmw_lock);
+       unsigned long offset = (unsigned long)addr & 3UL;
+       unsigned long shift = offset * 8;
+       unsigned long flags;
+       u32 val;
+
+       raw_spin_lock_irqsave(&rmw_lock, flags);
+
+       addr -= offset;
+       val = readl_relaxed(addr);
+       val &= ~GENMASK(shift + 7, shift);
+       val |= bval << shift;
+       writel_relaxed(val, addr);
+
+       raw_spin_unlock_irqrestore(&rmw_lock, flags);
+}
+
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
 {
@@ -788,7 +809,10 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;
 
-       writeb_relaxed(gic_cpu_map[cpu], reg);
+       if (static_branch_unlikely(&needs_rmw_access))
+               rmw_writeb(gic_cpu_map[cpu], reg);
+       else
+               writeb_relaxed(gic_cpu_map[cpu], reg);
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
        return IRQ_SET_MASK_OK_DONE;
@@ -1375,6 +1399,30 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
        return true;
 }
 
+static bool gic_enable_rmw_access(void *data)
+{
+       /*
+        * The EMEV2 class of machines has a broken interconnect and
+        * locks up on accesses narrower than 32 bits. So far, only
+        * the affinity setting requires such a byte access.
+        */
+       if (of_machine_is_compatible("renesas,emev2")) {
+               static_branch_enable(&needs_rmw_access);
+               return true;
+       }
+
+       return false;
+}
+
+static const struct gic_quirk gic_quirks[] = {
+       {
+               .desc           = "broken byte access",
+               .compatible     = "arm,pl390",
+               .init           = gic_enable_rmw_access,
+       },
+       { },
+};
+
 static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
 {
        if (!gic || !node)
@@ -1391,6 +1439,8 @@ static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
        if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
                gic->percpu_offset = 0;
 
+       gic_enable_of_quirks(node, gic_quirks, gic);
+
        return 0;
 
 error:
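
The rmw_writeb() helper above emulates a byte store on interconnects that only tolerate 32-bit accesses: it reads the containing word, clears the target byte lane with GENMASK(), ORs in the new byte, and writes the word back under a spinlock. A small standalone sketch of the same byte-lane arithmetic (plain C, no MMIO or locking, purely illustrative):

/* Illustrative only: replace byte 'offset' (0..3) within a 32-bit word,
 * mirroring the mask-and-shift used by rmw_writeb() above. */
static unsigned int replace_byte(unsigned int word, unsigned int offset,
				 unsigned char bval)
{
	unsigned int shift = offset * 8;
	unsigned int mask = 0xffu << shift;

	return (word & ~mask) | ((unsigned int)bval << shift);
}

/* e.g. replace_byte(0x11223344, 2, 0x5a) == 0x115a3344 */
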
index f565317a3da3c6cd4bfeb9dc67511d6a02de0666..12df2162108ebd22dcd0c868a2a89c7d58af50b8 100644 (file)
@@ -25,7 +25,7 @@
 /* The maximum IRQ pin number of the mbigen chip (starting from 0) */
 #define MAXIMUM_IRQ_PIN_NUM            1407
 
-/**
+/*
  * In mbigen vector register
  * bit[21:12]: event id value
  * bit[11:0]:  device id
 /* offset of vector register in mbigen node */
 #define REG_MBIGEN_VEC_OFFSET          0x200
 
-/**
+/*
  * offset of clear register in mbigen node
  * This register is used to clear the status
  * of interrupt
  */
 #define REG_MBIGEN_CLEAR_OFFSET                0xa000
 
-/**
+/*
  * offset of interrupt type register
  * This register is used to configure interrupt
  * trigger type
index b0d46ac42b892340ce649ee14acbd42dc1f0ef8c..72c06e883d1c5fd553117aea4ea731a82a01236e 100644 (file)
@@ -223,12 +223,12 @@ static int rza1_irqc_probe(struct platform_device *pdev)
                goto out_put_node;
        }
 
-       priv->chip.name = "rza1-irqc",
-       priv->chip.irq_mask = irq_chip_mask_parent,
-       priv->chip.irq_unmask = irq_chip_unmask_parent,
-       priv->chip.irq_eoi = rza1_irqc_eoi,
-       priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy,
-       priv->chip.irq_set_type = rza1_irqc_set_type,
+       priv->chip.name = "rza1-irqc";
+       priv->chip.irq_mask = irq_chip_mask_parent;
+       priv->chip.irq_unmask = irq_chip_unmask_parent;
+       priv->chip.irq_eoi = rza1_irqc_eoi;
+       priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy;
+       priv->chip.irq_set_type = rza1_irqc_set_type;
        priv->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
 
        priv->irq_domain = irq_domain_add_hierarchy(parent, 0, IRQC_NUM_IRQ,
index cb0afe8971623668b232173e38dc7aad63d887c4..7313454e403a630d64196a07bfc805891753f00f 100644 (file)
@@ -480,6 +480,11 @@ int detach_capi_ctr(struct capi_ctr *ctr)
 
        ctr_down(ctr, CAPI_CTR_DETACHED);
 
+       if (ctr->cnr < 1 || ctr->cnr - 1 >= CAPI_MAXCONTR) {
+               err = -EINVAL;
+               goto unlock_out;
+       }
+
        if (capi_controller[ctr->cnr - 1] != ctr) {
                err = -EINVAL;
                goto unlock_out;
index 2a1ddd47a0968da8db09c9800b5219547ca5e43f..a52f275f826348475b0f49a241ee73b2c6553fca 100644 (file)
@@ -949,8 +949,8 @@ nj_release(struct tiger_hw *card)
                nj_disable_hwirq(card);
                mode_tiger(&card->bc[0], ISDN_P_NONE);
                mode_tiger(&card->bc[1], ISDN_P_NONE);
-               card->isac.release(&card->isac);
                spin_unlock_irqrestore(&card->lock, flags);
+               card->isac.release(&card->isac);
                release_region(card->base, card->base_s);
                card->base_s = 0;
        }
index 94fb63a7b357372a890c95da371964739610f03d..fe63d5ee201b866c0e0681b9605e60ba4aaf2720 100644 (file)
@@ -570,7 +570,7 @@ fail_msg_node:
 fail_db_node:
        of_node_put(smu->db_node);
 fail_bootmem:
-       memblock_free(__pa(smu), sizeof(struct smu_device));
+       memblock_free_ptr(smu, sizeof(struct smu_device));
        smu = NULL;
 fail_np:
        of_node_put(np);
index edf4ee6eff25d58728b00036ccc67166266af3a6..cf128b3471d7845aa991bd9d7cca66186146d1fb 100644 (file)
@@ -275,8 +275,8 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
 
        bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
        if (bus_nr < 0) {
-               rc = bus_nr;
-               goto err_free;
+               kfree(bus);
+               return ERR_PTR(bus_nr);
        }
 
        bus->bus_nr = bus_nr;
@@ -291,12 +291,12 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
        dev_set_name(&bus->dev, "mcb:%d", bus_nr);
        rc = device_add(&bus->dev);
        if (rc)
-               goto err_free;
+               goto err_put;
 
        return bus;
-err_free:
-       put_device(carrier);
-       kfree(bus);
+
+err_put:
+       put_device(&bus->dev);
        return ERR_PTR(rc);
 }
 EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB);
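
The error-path change above follows the driver-core ownership rule: once the embedded struct device has been initialized (device_add() above implies a prior device_initialize()), it owns the allocation, so a failed device_add() must be unwound with put_device(), which drops the refcount and ends up in the release callback, rather than a direct kfree(). A hedged sketch of the pattern; struct foo_bus, foo_bus_release() and foo_alloc_bus() are illustrative names, not MCB APIs:

struct foo_bus {
	struct device dev;
};

static void foo_bus_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_bus, dev));
}

static struct foo_bus *foo_alloc_bus(struct device *parent)
{
	struct foo_bus *bus;
	int rc;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	bus->dev.parent = parent;
	bus->dev.release = foo_bus_release;
	device_initialize(&bus->dev);

	rc = device_add(&bus->dev);
	if (rc) {
		/* Not kfree(): the refcount drop calls foo_bus_release(). */
		put_device(&bus->dev);
		return ERR_PTR(rc);
	}

	return bus;
}
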
index 84dbe08ad20536b264566ba8ddb133beb441897c..edd22e4d65dffd41d097e8f96d3659303c13425a 100644 (file)
@@ -161,7 +161,7 @@ static const char *clone_device_name(struct clone *clone)
 
 static void __set_clone_mode(struct clone *clone, enum clone_metadata_mode new_mode)
 {
-       const char *descs[] = {
+       static const char * const descs[] = {
                "read-write",
                "read-only",
                "fail"
index 5b95eea517d1b95ed543df0afcd83792c088b086..a896dea9750e434655b9ebef4363da84b59be91e 100644 (file)
@@ -490,6 +490,14 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct mapped_device *md = tio->md;
        struct dm_target *ti = md->immutable_target;
 
+       /*
+        * blk-mq's unquiesce may come from outside events, such as an
+        * elevator switch or an nr_requests update, and a request may
+        * arrive during suspend, so simply ask blk-mq to requeue it.
+        */
+       if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
+               return BLK_STS_RESOURCE;
+
        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);
index 22a5ac82446a6c3ce16ccd360e3ab42ceb9a2bed..88288c8d6bc8c0dddcda66d52688382219bd8f78 100644 (file)
@@ -475,6 +475,7 @@ static int verity_verify_io(struct dm_verity_io *io)
        struct bvec_iter start;
        unsigned b;
        struct crypto_wait wait;
+       struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 
        for (b = 0; b < io->n_blocks; b++) {
                int r;
@@ -529,9 +530,17 @@ static int verity_verify_io(struct dm_verity_io *io)
                else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
                                           cur_block, NULL, &start) == 0)
                        continue;
-               else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
-                                          cur_block))
-                       return -EIO;
+               else {
+                       if (bio->bi_status) {
+                               /*
+                                * Error correction failed; Just return error
+                                * Error correction failed; just return the error
+                               return -EIO;
+                       }
+                       if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
+                                             cur_block))
+                               return -EIO;
+               }
        }
 
        return 0;
index a011d09cb0faca9f2acd6b1c3208169724ae3f73..76d9da49fda7563bf45a834243c46f4c691104af 100644 (file)
@@ -496,18 +496,17 @@ static void start_io_acct(struct dm_io *io)
                                    false, 0, &io->stats_aux);
 }
 
-static void end_io_acct(struct dm_io *io)
+static void end_io_acct(struct mapped_device *md, struct bio *bio,
+                       unsigned long start_time, struct dm_stats_aux *stats_aux)
 {
-       struct mapped_device *md = io->md;
-       struct bio *bio = io->orig_bio;
-       unsigned long duration = jiffies - io->start_time;
+       unsigned long duration = jiffies - start_time;
 
-       bio_end_io_acct(bio, io->start_time);
+       bio_end_io_acct(bio, start_time);
 
        if (unlikely(dm_stats_used(&md->stats)))
                dm_stats_account_io(&md->stats, bio_data_dir(bio),
                                    bio->bi_iter.bi_sector, bio_sectors(bio),
-                                   true, duration, &io->stats_aux);
+                                   true, duration, stats_aux);
 
        /* nudge anyone waiting on suspend queue */
        if (unlikely(wq_has_sleeper(&md->wait)))
@@ -790,6 +789,8 @@ void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
        blk_status_t io_error;
        struct bio *bio;
        struct mapped_device *md = io->md;
+       unsigned long start_time = 0;
+       struct dm_stats_aux stats_aux;
 
        /* Push-back supersedes any I/O errors */
        if (unlikely(error)) {
@@ -821,8 +822,10 @@ void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
                }
 
                io_error = io->status;
-               end_io_acct(io);
+               start_time = io->start_time;
+               stats_aux = io->stats_aux;
                free_io(md, io);
+               end_io_acct(md, bio, start_time, &stats_aux);
 
                if (io_error == BLK_STS_DM_REQUEUE)
                        return;
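
The reordering above closes a use-after-free window: end_io_acct() used to read start_time and stats_aux out of the struct dm_io after free_io() could already have recycled it, so the fix copies those fields to locals, frees the io, and only then accounts using the copies. A hedged kernel-style sketch of the copy-before-free pattern (struct item, free_item() and account_completion() are hypothetical):

/* Sketch: copy what the accounting still needs out of the object,
 * free the object, then use only the local copies. */
static void finish_item(struct item *it)
{
	unsigned long start_time = it->start_time;	/* copy out first */
	struct stats_aux stats = it->stats;

	free_item(it);			/* 'it' must not be touched below */
	account_completion(start_time, &stats);
}
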
index ae8fe54ea358194382d5951cd14d83338145dd43..6c0c3d0d905aaf79e6a9e9842a96da59a3e2819c 100644 (file)
@@ -5700,10 +5700,6 @@ static int md_alloc(dev_t dev, char *name)
        disk->flags |= GENHD_FL_EXT_DEVT;
        disk->events |= DISK_EVENT_MEDIA_CHANGE;
        mddev->gendisk = disk;
-       /* As soon as we call add_disk(), another thread could get
-        * through to md_open, so make sure it doesn't get too far
-        */
-       mutex_lock(&mddev->open_mutex);
        add_disk(disk);
 
        error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
@@ -5718,7 +5714,6 @@ static int md_alloc(dev_t dev, char *name)
        if (mddev->kobj.sd &&
            sysfs_create_group(&mddev->kobj, &md_bitmap_group))
                pr_debug("pointless warning\n");
-       mutex_unlock(&mddev->open_mutex);
  abort:
        mutex_unlock(&disks_mutex);
        if (!error && mddev->kobj.sd) {
index 157c924686e4b61b4869aa57e95ee88a1563cc70..80321e03809aca4995a8db0428e54e6f1a2c4009 100644 (file)
@@ -565,7 +565,7 @@ config VIDEO_QCOM_VENUS
        depends on VIDEO_DEV && VIDEO_V4L2 && QCOM_SMEM
        depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
        select QCOM_MDT_LOADER if ARCH_QCOM
-       select QCOM_SCM if ARCH_QCOM
+       select QCOM_SCM
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
        help
index d402e456f27df676b767281ecd4cf7254d978a1d..7d0ab19c38bb91458ddcc430c686a0b239b03465 100644 (file)
@@ -1140,8 +1140,8 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
                        continue;
                length = 0;
                switch (c) {
-               /* SOF0: baseline JPEG */
-               case SOF0:
+               /* JPEG_MARKER_SOF0: baseline JPEG */
+               case JPEG_MARKER_SOF0:
                        if (get_word_be(&jpeg_buffer, &word))
                                break;
                        length = (long)word - 2;
@@ -1172,7 +1172,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
                        notfound = 0;
                        break;
 
-               case DQT:
+               case JPEG_MARKER_DQT:
                        if (get_word_be(&jpeg_buffer, &word))
                                break;
                        length = (long)word - 2;
@@ -1185,7 +1185,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
                        skip(&jpeg_buffer, length);
                        break;
 
-               case DHT:
+               case JPEG_MARKER_DHT:
                        if (get_word_be(&jpeg_buffer, &word))
                                break;
                        length = (long)word - 2;
@@ -1198,15 +1198,15 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
                        skip(&jpeg_buffer, length);
                        break;
 
-               case SOS:
+               case JPEG_MARKER_SOS:
                        sos = jpeg_buffer.curr - 2; /* 0xffda */
                        break;
 
                /* skip payload-less markers */
-               case RST ... RST + 7:
-               case SOI:
-               case EOI:
-               case TEM:
+               case JPEG_MARKER_RST ... JPEG_MARKER_RST + 7:
+               case JPEG_MARKER_SOI:
+               case JPEG_MARKER_EOI:
+               case JPEG_MARKER_TEM:
                        break;
 
                /* skip uninteresting payload markers */
index a77d93c098ce74d08cb5f2254b83fb3c845a01d7..8473a019bb5f276624af2540129d5a2016b2f900 100644 (file)
 #define EXYNOS3250_IRQ_TIMEOUT         0x10000000
 
 /* a selection of JPEG markers */
-#define TEM                            0x01
-#define SOF0                           0xc0
-#define DHT                            0xc4
-#define RST                            0xd0
-#define SOI                            0xd8
-#define EOI                            0xd9
-#define        SOS                             0xda
-#define DQT                            0xdb
-#define DHP                            0xde
+#define JPEG_MARKER_TEM                                0x01
+#define JPEG_MARKER_SOF0                               0xc0
+#define JPEG_MARKER_DHT                                0xc4
+#define JPEG_MARKER_RST                                0xd0
+#define JPEG_MARKER_SOI                                0xd8
+#define JPEG_MARKER_EOI                                0xd9
+#define        JPEG_MARKER_SOS                         0xda
+#define JPEG_MARKER_DQT                                0xdb
+#define JPEG_MARKER_DHP                                0xde
 
 /* Flags that indicate a format can be used for capture/output */
 #define SJPEG_FMT_FLAG_ENC_CAPTURE     (1 << 0)
@@ -187,11 +187,11 @@ struct s5p_jpeg_marker {
  * @fmt:       driver-specific format of this queue
  * @w:         image width
  * @h:         image height
- * @sos:       SOS marker's position relative to the buffer beginning
- * @dht:       DHT markers' positions relative to the buffer beginning
- * @dqt:       DQT markers' positions relative to the buffer beginning
- * @sof:       SOF0 marker's position relative to the buffer beginning
- * @sof_len:   SOF0 marker's payload length (without length field itself)
+ * @sos:       JPEG_MARKER_SOS's position relative to the buffer beginning
+ * @dht:       JPEG_MARKER_DHT markers' positions relative to the buffer beginning
+ * @dqt:       JPEG_MARKER_DQT markers' positions relative to the buffer beginning
+ * @sof:       JPEG_MARKER_SOF0's position relative to the buffer beginning
+ * @sof_len:   JPEG_MARKER_SOF0's payload length (without length field itself)
  * @size:      image buffer size in bytes
  */
 struct s5p_jpeg_q_data {
index 3e729a17b35ff9dab6651c150d2230d35f4c62df..48d52baec1a1c981f2cd0122d8fe03911c7373cd 100644 (file)
@@ -24,6 +24,7 @@ static const u8 COMMAND_VERSION[] = { 'v' };
 // End transmit and repeat reset command so we exit sump mode
 static const u8 COMMAND_RESET[] = { 0xff, 0xff, 0, 0, 0, 0, 0 };
 static const u8 COMMAND_SMODE_ENTER[] = { 's' };
+static const u8 COMMAND_SMODE_EXIT[] = { 0 };
 static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };
 
 #define REPLY_XMITCOUNT 't'
@@ -309,12 +310,30 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
                buf[i] = cpu_to_be16(v);
        }
 
-       buf[count] = cpu_to_be16(0xffff);
+       buf[count] = 0xffff;
 
        irtoy->tx_buf = buf;
        irtoy->tx_len = size;
        irtoy->emitted = 0;
 
+       // There is an issue where, if the unit is receiving IR while the
+       // first TXSTART command is sent, the device might end up hanging
+       // with its LED on. It does not respond to any command when this
+       // happens. To work around this, re-enter sample mode.
+       err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,
+                           sizeof(COMMAND_SMODE_EXIT), STATE_RESET);
+       if (err) {
+               dev_err(irtoy->dev, "exit sample mode: %d\n", err);
+               return err;
+       }
+
+       err = irtoy_command(irtoy, COMMAND_SMODE_ENTER,
+                           sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
+       if (err) {
+               dev_err(irtoy->dev, "enter sample mode: %d\n", err);
+               return err;
+       }
+
        err = irtoy_command(irtoy, COMMAND_TXSTART, sizeof(COMMAND_TXSTART),
                            STATE_TX);
        kfree(buf);
index 85ba901bc11b0ad0f57217e756c1ab0f3f2b4985..0f5a49fc7c9e0e1029086883415fcb8dd3007d56 100644 (file)
@@ -224,6 +224,7 @@ config HI6421V600_IRQ
        tristate "HiSilicon Hi6421v600 IRQ and powerkey"
        depends on OF
        depends on SPMI
+       depends on HAS_IOMEM
        select MFD_CORE
        select REGMAP_SPMI
        help
index 1b6076a89ca6ae4d0bc69bd5b51a7bf159c3f61a..6669625ba4c8d58eb8584559a495990aa695f93f 100644 (file)
@@ -267,13 +267,13 @@ int bcm_vk_tty_init(struct bcm_vk *vk, char *name)
                struct device *tty_dev;
 
                tty_port_init(&vk->tty[i].port);
-               tty_dev = tty_port_register_device(&vk->tty[i].port, tty_drv,
-                                                  i, dev);
+               tty_dev = tty_port_register_device_attr(&vk->tty[i].port,
+                                                       tty_drv, i, dev, vk,
+                                                       NULL);
                if (IS_ERR(tty_dev)) {
                        err = PTR_ERR(tty_dev);
                        goto unwind;
                }
-               dev_set_drvdata(tty_dev, vk);
                vk->tty[i].is_opened = false;
        }
 
index e5a4ed3701eb8e3d858eee4026ea5a299068542a..a798fad5f03c28f707814cc56c8a8042f0f3079b 100644 (file)
@@ -47,7 +47,7 @@ static inline bool needs_unaligned_copy(const void *ptr)
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        return false;
 #else
-       return ((ptr - NULL) & 3) != 0;
+       return ((uintptr_t)ptr & 3) != 0;
 #endif
 }
 
index 4d09b672ac3c8a6e70b34f2802ff7c0de7bb8da6..632325474233a11f0916047ed42338a9eb342d76 100644 (file)
@@ -366,6 +366,13 @@ static const struct of_device_id at25_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, at25_of_match);
 
+static const struct spi_device_id at25_spi_ids[] = {
+       { .name = "at25",},
+       { .name = "fm25",},
+       { }
+};
+MODULE_DEVICE_TABLE(spi, at25_spi_ids);
+
 static int at25_probe(struct spi_device *spi)
 {
        struct at25_data        *at25 = NULL;
@@ -491,6 +498,7 @@ static struct spi_driver at25_driver = {
                .dev_groups     = sernum_groups,
        },
        .probe          = at25_probe,
+       .id_table       = at25_spi_ids,
 };
 
 module_spi_driver(at25_driver);
index 29d8971ec558bff437e4bea69af3ca7d58ae3f83..1f15399e5cb49199f5f4b467f8ff8ef8fb6771f8 100644 (file)
@@ -406,6 +406,23 @@ static const struct of_device_id eeprom_93xx46_of_table[] = {
 };
 MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
 
+static const struct spi_device_id eeprom_93xx46_spi_ids[] = {
+       { .name = "eeprom-93xx46",
+         .driver_data = (kernel_ulong_t)&at93c46_data, },
+       { .name = "at93c46",
+         .driver_data = (kernel_ulong_t)&at93c46_data, },
+       { .name = "at93c46d",
+         .driver_data = (kernel_ulong_t)&atmel_at93c46d_data, },
+       { .name = "at93c56",
+         .driver_data = (kernel_ulong_t)&at93c56_data, },
+       { .name = "at93c66",
+         .driver_data = (kernel_ulong_t)&at93c66_data, },
+       { .name = "93lc46b",
+         .driver_data = (kernel_ulong_t)&microchip_93lc46b_data, },
+       {}
+};
+MODULE_DEVICE_TABLE(spi, eeprom_93xx46_spi_ids);
+
 static int eeprom_93xx46_probe_dt(struct spi_device *spi)
 {
        const struct of_device_id *of_id =
@@ -555,6 +572,7 @@ static struct spi_driver eeprom_93xx46_driver = {
        },
        .probe          = eeprom_93xx46_probe,
        .remove         = eeprom_93xx46_remove,
+       .id_table       = eeprom_93xx46_spi_ids,
 };
 
 module_spi_driver(eeprom_93xx46_driver);
index beda610e6b30d139d2ccf53d4fed655ee3952f4b..ad6ced4546556a6a2db25152e89c8eec45b05cd6 100644 (file)
@@ -814,10 +814,12 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
                        rpra[i].pv = (u64) ctx->args[i].ptr;
                        pages[i].addr = ctx->maps[i]->phys;
 
+                       mmap_read_lock(current->mm);
                        vma = find_vma(current->mm, ctx->args[i].ptr);
                        if (vma)
                                pages[i].addr += ctx->args[i].ptr -
                                                 vma->vm_start;
+                       mmap_read_unlock(current->mm);
 
                        pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
                        pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
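
The added mmap_read_lock()/mmap_read_unlock() pair above protects the VMA walk: find_vma() must only be called, and the returned vma only dereferenced, while the mm's mmap lock is held, otherwise a concurrent unmap can free the VMA underneath the lookup. A minimal sketch of the locking rule (the helper name and semantics are illustrative, not fastrpc code):

/* Sketch: offset of 'addr' within its VMA, or 0 if no VMA covers it.
 * The vma pointer is only valid while mmap_read_lock() is held. */
static unsigned long offset_in_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long off = 0;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (vma && addr >= vma->vm_start)
		off = addr - vma->vm_start;
	mmap_read_unlock(mm);

	return off;
}
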
index 02f33bc60c56e4692291387985d9b913ad2e9250..4c9c5394da6fa62bb3d0fb29f6e54d695314ba32 100644 (file)
@@ -539,6 +539,7 @@ static int gehc_achc_probe(struct spi_device *spi)
 
 static const struct spi_device_id gehc_achc_id[] = {
        { "ge,achc", 0 },
+       { "achc", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(spi, gehc_achc_id);
index 2e1befbd1ad991035dce1b608a71326cd395cf9f..693981891870c4c5920aa66fd4e377d1fa9aa8e6 100644 (file)
@@ -1090,7 +1090,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
 
        /* check for 64-bit DMA address supported (DAC) */
        /* check for 32-bit DMA address supported (SAC) */
-       if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) ||
+       if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
                dev_err(&pci_dev->dev,
                        "err: neither DMA32 nor DMA64 supported\n");
index 7b0516cf808b0666ae5644e8273f099a871de1ad..6dafff375f1c6e614010552e79f5b52292cfb1af 100644 (file)
@@ -405,7 +405,7 @@ static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
 {
        bool next_entry_found = false;
-       struct hl_cs *next;
+       struct hl_cs *next, *first_cs;
 
        if (!cs_needs_timeout(cs))
                return;
@@ -415,9 +415,16 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
        /* We need to handle tdr only once for the complete staged submission.
         * Hence, we choose the CS that reaches this function first which is
         * the CS marked as 'staged_last'.
+        * In case a single staged CS was submitted which has both first and
+        * last indications, hl_staged_cs_find_first() below will return NULL,
+        * since we removed the CS node from the list before getting here; in
+        * such cases just continue with this CS and cancel its TDR work.
         */
-       if (cs->staged_cs && cs->staged_last)
-               cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+       if (cs->staged_cs && cs->staged_last) {
+               first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+               if (first_cs)
+                       cs = first_cs;
+       }
 
        spin_unlock(&hdev->cs_mirror_lock);
 
@@ -1288,6 +1295,12 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
        if (rc)
                goto free_cs_object;
 
+       /* If this is a staged submission we must return the staged sequence
+        * rather than the internal CS sequence
+        */
+       if (cs->staged_cs)
+               *cs_seq = cs->staged_sequence;
+
        /* Validate ALL the CS chunks before submitting the CS */
        for (i = 0 ; i < num_chunks ; i++) {
                struct hl_cs_chunk *chunk = &cs_chunk_array[i];
@@ -1988,6 +2001,15 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
                        goto free_cs_chunk_array;
                }
 
+               if (!hdev->nic_ports_mask) {
+                       atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+                       atomic64_inc(&cntr->validation_drop_cnt);
+                       dev_err(hdev->dev,
+                               "Collective operations not supported when NIC ports are disabled");
+                       rc = -EINVAL;
+                       goto free_cs_chunk_array;
+               }
+
                collective_engine_id = chunk->collective_engine_id;
        }
 
@@ -2026,9 +2048,10 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
                        spin_unlock(&ctx->sig_mgr.lock);
 
                        if (!handle_found) {
-                               dev_err(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
+                               /* treat as signal CS already finished */
+                               dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
                                                signal_seq);
-                               rc = -EINVAL;
+                               rc = 0;
                                goto free_cs_chunk_array;
                        }
 
@@ -2613,7 +2636,8 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
                 * completed after the poll function.
                 */
                if (!mcs_data.completion_bitmap) {
-                       dev_err(hdev->dev, "Multi-CS got completion on wait but no CS completed\n");
+                       dev_warn_ratelimited(hdev->dev,
+                               "Multi-CS got completion on wait but no CS completed\n");
                        rc = -EFAULT;
                }
        }
@@ -2625,11 +2649,18 @@ put_ctx:
 free_seq_arr:
        kfree(cs_seq_arr);
 
-       /* update output args */
-       memset(args, 0, sizeof(*args));
        if (rc)
                return rc;
 
+       if (mcs_data.wait_status == -ERESTARTSYS) {
+               dev_err_ratelimited(hdev->dev,
+                               "user process got signal while waiting for Multi-CS\n");
+               return -EINTR;
+       }
+
+       /* update output args */
+       memset(args, 0, sizeof(*args));
+
        if (mcs_data.completion_bitmap) {
                args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
                args->out.cs_completion_map = mcs_data.completion_bitmap;
@@ -2643,8 +2674,6 @@ free_seq_arr:
                /* update if some CS was gone */
                if (mcs_data.timestamp)
                        args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
-       } else if (mcs_data.wait_status == -ERESTARTSYS) {
-               args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
        } else {
                args->out.status = HL_WAIT_CS_STATUS_BUSY;
        }
@@ -2664,16 +2693,17 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
        rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
                                &status, &timestamp);
 
+       if (rc == -ERESTARTSYS) {
+               dev_err_ratelimited(hdev->dev,
+                       "user process got signal while waiting for CS handle %llu\n",
+                       seq);
+               return -EINTR;
+       }
+
        memset(args, 0, sizeof(*args));
 
        if (rc) {
-               if (rc == -ERESTARTSYS) {
-                       dev_err_ratelimited(hdev->dev,
-                               "user process got signal while waiting for CS handle %llu\n",
-                               seq);
-                       args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
-                       rc = -EINTR;
-               } else if (rc == -ETIMEDOUT) {
+               if (rc == -ETIMEDOUT) {
                        dev_err_ratelimited(hdev->dev,
                                "CS %llu has timed-out while user process is waiting for it\n",
                                seq);
@@ -2740,10 +2770,20 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
        else
                interrupt = &hdev->user_interrupt[interrupt_offset];
 
+       /* Add pending user interrupt to relevant list for the interrupt
+        * handler to monitor
+        */
+       spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+       list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
+       spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
+       /* We check the completion value here because the interrupt could have
+        * been received before we added the node to the wait list
+        */
        if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
                dev_err(hdev->dev, "Failed to copy completion value from user\n");
                rc = -EFAULT;
-               goto free_fence;
+               goto remove_pending_user_interrupt;
        }
 
        if (completion_value >= target_value)
@@ -2752,14 +2792,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
                *status = CS_WAIT_STATUS_BUSY;
 
        if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
-               goto free_fence;
-
-       /* Add pending user interrupt to relevant list for the interrupt
-        * handler to monitor
-        */
-       spin_lock_irqsave(&interrupt->wait_list_lock, flags);
-       list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
-       spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+               goto remove_pending_user_interrupt;
 
 wait_again:
        /* Wait for interrupt handler to signal completion */
@@ -2770,6 +2803,15 @@ wait_again:
         * If comparison fails, keep waiting until timeout expires
         */
        if (completion_rc > 0) {
+               spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+               /* reinit_completion must be called before we check the user
+                * completion value; otherwise, if an interrupt is received after
+                * the comparison and before the next wait_for_completion,
+                * we will hit the timeout and fail
+                */
+               reinit_completion(&pend->fence.completion);
+               spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
                if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
                        dev_err(hdev->dev, "Failed to copy completion value from user\n");
                        rc = -EFAULT;
@@ -2780,18 +2822,13 @@ wait_again:
                if (completion_value >= target_value) {
                        *status = CS_WAIT_STATUS_COMPLETED;
                } else {
-                       spin_lock_irqsave(&interrupt->wait_list_lock, flags);
-                       reinit_completion(&pend->fence.completion);
                        timeout = completion_rc;
-
-                       spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
                        goto wait_again;
                }
        } else if (completion_rc == -ERESTARTSYS) {
                dev_err_ratelimited(hdev->dev,
                        "user process got signal while waiting for interrupt ID %d\n",
                        interrupt->interrupt_id);
-               *status = HL_WAIT_CS_STATUS_INTERRUPTED;
                rc = -EINTR;
        } else {
                *status = CS_WAIT_STATUS_BUSY;
@@ -2802,7 +2839,6 @@ remove_pending_user_interrupt:
        list_del(&pend->wait_list_node);
        spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
 
-free_fence:
        kfree(pend);
        hl_ctx_put(ctx);
 
@@ -2847,8 +2883,6 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
                                args->in.interrupt_timeout_us, args->in.addr,
                                args->in.target, interrupt_offset, &status);
 
-       memset(args, 0, sizeof(*args));
-
        if (rc) {
                if (rc != -EINTR)
                        dev_err_ratelimited(hdev->dev,
@@ -2857,6 +2891,8 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
                return rc;
        }
 
+       memset(args, 0, sizeof(*args));
+
        switch (status) {
        case CS_WAIT_STATUS_COMPLETED:
                args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
index 76b7de8f1406e2ac10d274b52adff04fa906cf43..0743319b10c782dfa1ae5d3f8024caa4b8c043d2 100644 (file)
@@ -437,6 +437,7 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
                        struct hl_cs_compl *cs_cmpl)
 {
        struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
+       u32 offset = 0;
 
        cs_cmpl->hw_sob = handle->hw_sob;
 
@@ -446,9 +447,13 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
         * set offset 1 for example he means to wait only for the first
         * signal, which will be pre_sob_val, and if he sets offset 2
         * then the value required is (pre_sob_val + 1) and so on...
+        * if the user sets the wait offset to 0, then treat it as a legacy
+        * wait CS and wait for the next signal.
         */
-       cs_cmpl->sob_val = handle->pre_sob_val +
-                       (job->encaps_sig_wait_offset - 1);
+       if (job->encaps_sig_wait_offset)
+               offset = job->encaps_sig_wait_offset - 1;
+
+       cs_cmpl->sob_val = handle->pre_sob_val + offset;
 }
 
 static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
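
The change above makes a wait offset of 0 behave like a legacy wait CS: for offset N >= 1 the value to wait for is pre_sob_val + (N - 1), and for offset 0 it is pre_sob_val itself. The computation in one hedged helper (illustrative, not a driver API):

/* Sketch: target sync-object value as derived in the hunk above. */
static u64 encaps_wait_sob_val(u64 pre_sob_val, u32 wait_offset)
{
	return pre_sob_val + (wait_offset ? wait_offset - 1 : 0);
}
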
index 383865be3c2ce552f90975754b2457126488820c..14da87b38e83522f7fceec0ef9b1eaf52f0848f0 100644 (file)
@@ -395,7 +395,7 @@ static struct hl_hw_obj_name_entry gaudi_so_id_to_str[] = {
 
 static struct hl_hw_obj_name_entry gaudi_monitor_id_to_str[] = {
        { .id = 200, .name = "MON_OBJ_DMA_DOWN_FEEDBACK_RESET" },
-       { .id = 201, .name = "MON_OBJ_DMA_UP_FEADBACK_RESET" },
+       { .id = 201, .name = "MON_OBJ_DMA_UP_FEEDBACK_RESET" },
        { .id = 203, .name = "MON_OBJ_DRAM_TO_SRAM_QUEUE_FENCE" },
        { .id = 204, .name = "MON_OBJ_TPC_0_CLK_GATE" },
        { .id = 205, .name = "MON_OBJ_TPC_1_CLK_GATE" },
@@ -5802,6 +5802,7 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
 {
        struct gaudi_device *gaudi = hdev->asic_specific;
        struct packet_msg_prot *cq_pkt;
+       u64 msi_addr;
        u32 tmp;
 
        cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
@@ -5823,10 +5824,12 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
        cq_pkt->ctl = cpu_to_le32(tmp);
        cq_pkt->value = cpu_to_le32(1);
 
-       if (!gaudi->multi_msi_mode)
-               msi_vec = 0;
+       if (gaudi->multi_msi_mode)
+               msi_addr = mmPCIE_MSI_INTR_0 + msi_vec * 4;
+       else
+               msi_addr = mmPCIE_CORE_MSI_REQ;
 
-       cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_MSI_INTR_0 + msi_vec * 4);
+       cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr);
 }
 
 static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val)
index cb265c00cf73adecd90efc1fb39ecf087fdc1dc7..25ac87cebd454f646eeb2300b30fce058c8c9b86 100644 (file)
@@ -8,16 +8,21 @@
 #include "gaudiP.h"
 #include "../include/gaudi/asic_reg/gaudi_regs.h"
 
-#define GAUDI_NUMBER_OF_RR_REGS                24
-#define GAUDI_NUMBER_OF_LBW_RANGES     12
+#define GAUDI_NUMBER_OF_LBW_RR_REGS    28
+#define GAUDI_NUMBER_OF_HBW_RR_REGS    24
+#define GAUDI_NUMBER_OF_LBW_RANGES     10
 
-static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+       mmDMA_IF_W_S_SOB_HIT_WPROT,
        mmDMA_IF_W_S_DMA0_HIT_WPROT,
        mmDMA_IF_W_S_DMA1_HIT_WPROT,
+       mmDMA_IF_E_S_SOB_HIT_WPROT,
        mmDMA_IF_E_S_DMA0_HIT_WPROT,
        mmDMA_IF_E_S_DMA1_HIT_WPROT,
+       mmDMA_IF_W_N_SOB_HIT_WPROT,
        mmDMA_IF_W_N_DMA0_HIT_WPROT,
        mmDMA_IF_W_N_DMA1_HIT_WPROT,
+       mmDMA_IF_E_N_SOB_HIT_WPROT,
        mmDMA_IF_E_N_DMA0_HIT_WPROT,
        mmDMA_IF_E_N_DMA1_HIT_WPROT,
        mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AW,
@@ -38,13 +43,17 @@ static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AW,
 };
 
-static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+       mmDMA_IF_W_S_SOB_HIT_RPROT,
        mmDMA_IF_W_S_DMA0_HIT_RPROT,
        mmDMA_IF_W_S_DMA1_HIT_RPROT,
+       mmDMA_IF_E_S_SOB_HIT_RPROT,
        mmDMA_IF_E_S_DMA0_HIT_RPROT,
        mmDMA_IF_E_S_DMA1_HIT_RPROT,
+       mmDMA_IF_W_N_SOB_HIT_RPROT,
        mmDMA_IF_W_N_DMA0_HIT_RPROT,
        mmDMA_IF_W_N_DMA1_HIT_RPROT,
+       mmDMA_IF_E_N_SOB_HIT_RPROT,
        mmDMA_IF_E_N_DMA0_HIT_RPROT,
        mmDMA_IF_E_N_DMA1_HIT_RPROT,
        mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AR,
@@ -65,13 +74,17 @@ static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AR,
 };
 
-static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+       mmDMA_IF_W_S_SOB_MIN_WPROT_0,
        mmDMA_IF_W_S_DMA0_MIN_WPROT_0,
        mmDMA_IF_W_S_DMA1_MIN_WPROT_0,
+       mmDMA_IF_E_S_SOB_MIN_WPROT_0,
        mmDMA_IF_E_S_DMA0_MIN_WPROT_0,
        mmDMA_IF_E_S_DMA1_MIN_WPROT_0,
+       mmDMA_IF_W_N_SOB_MIN_WPROT_0,
        mmDMA_IF_W_N_DMA0_MIN_WPROT_0,
        mmDMA_IF_W_N_DMA1_MIN_WPROT_0,
+       mmDMA_IF_E_N_SOB_MIN_WPROT_0,
        mmDMA_IF_E_N_DMA0_MIN_WPROT_0,
        mmDMA_IF_E_N_DMA1_MIN_WPROT_0,
        mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0,
@@ -92,13 +105,17 @@ static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0,
 };
 
-static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+       mmDMA_IF_W_S_SOB_MAX_WPROT_0,
        mmDMA_IF_W_S_DMA0_MAX_WPROT_0,
        mmDMA_IF_W_S_DMA1_MAX_WPROT_0,
+       mmDMA_IF_E_S_SOB_MAX_WPROT_0,
        mmDMA_IF_E_S_DMA0_MAX_WPROT_0,
        mmDMA_IF_E_S_DMA1_MAX_WPROT_0,
+       mmDMA_IF_W_N_SOB_MAX_WPROT_0,
        mmDMA_IF_W_N_DMA0_MAX_WPROT_0,
        mmDMA_IF_W_N_DMA1_MAX_WPROT_0,
+       mmDMA_IF_E_N_SOB_MAX_WPROT_0,
        mmDMA_IF_E_N_DMA0_MAX_WPROT_0,
        mmDMA_IF_E_N_DMA1_MAX_WPROT_0,
        mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0,
@@ -119,13 +136,17 @@ static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0,
 };
 
-static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+       mmDMA_IF_W_S_SOB_MIN_RPROT_0,
        mmDMA_IF_W_S_DMA0_MIN_RPROT_0,
        mmDMA_IF_W_S_DMA1_MIN_RPROT_0,
+       mmDMA_IF_E_S_SOB_MIN_RPROT_0,
        mmDMA_IF_E_S_DMA0_MIN_RPROT_0,
        mmDMA_IF_E_S_DMA1_MIN_RPROT_0,
+       mmDMA_IF_W_N_SOB_MIN_RPROT_0,
        mmDMA_IF_W_N_DMA0_MIN_RPROT_0,
        mmDMA_IF_W_N_DMA1_MIN_RPROT_0,
+       mmDMA_IF_E_N_SOB_MIN_RPROT_0,
        mmDMA_IF_E_N_DMA0_MIN_RPROT_0,
        mmDMA_IF_E_N_DMA1_MIN_RPROT_0,
        mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0,
@@ -146,13 +167,17 @@ static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0,
 };
 
-static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+       mmDMA_IF_W_S_SOB_MAX_RPROT_0,
        mmDMA_IF_W_S_DMA0_MAX_RPROT_0,
        mmDMA_IF_W_S_DMA1_MAX_RPROT_0,
+       mmDMA_IF_E_S_SOB_MAX_RPROT_0,
        mmDMA_IF_E_S_DMA0_MAX_RPROT_0,
        mmDMA_IF_E_S_DMA1_MAX_RPROT_0,
+       mmDMA_IF_W_N_SOB_MAX_RPROT_0,
        mmDMA_IF_W_N_DMA0_MAX_RPROT_0,
        mmDMA_IF_W_N_DMA1_MAX_RPROT_0,
+       mmDMA_IF_E_N_SOB_MAX_RPROT_0,
        mmDMA_IF_E_N_DMA0_MAX_RPROT_0,
        mmDMA_IF_E_N_DMA1_MAX_RPROT_0,
        mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0,
@@ -173,7 +198,7 @@ static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0,
 };
 
-static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AW,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AW,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AW,
@@ -200,7 +225,7 @@ static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AW
 };
 
-static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AR,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AR,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AR,
@@ -227,7 +252,7 @@ static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AR
 };
 
-static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
@@ -254,7 +279,7 @@ static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0
 };
 
-static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
@@ -281,7 +306,7 @@ static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0
 };
 
-static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
@@ -308,7 +333,7 @@ static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0
 };
 
-static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
@@ -335,7 +360,7 @@ static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0
 };
 
-static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
@@ -362,7 +387,7 @@ static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0
 };
 
-static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
@@ -389,7 +414,7 @@ static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0
 };
 
-static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
@@ -416,7 +441,7 @@ static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0
 };
 
-static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
@@ -12849,50 +12874,44 @@ static void gaudi_init_range_registers_lbw(struct hl_device *hdev)
        u32 lbw_rng_end[GAUDI_NUMBER_OF_LBW_RANGES];
        int i, j;
 
-       lbw_rng_start[0]  = (0xFBFE0000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[0]    = (0xFBFFF000 & 0x3FFFFFF) + 1;
+       lbw_rng_start[0]  = (0xFC0E8000 & 0x3FFFFFF) - 1; /* 0x000E7FFF */
+       lbw_rng_end[0]    = (0xFC11FFFF & 0x3FFFFFF) + 1; /* 0x00120000 */
 
-       lbw_rng_start[1]  = (0xFC0E8000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[1]    = (0xFC120000 & 0x3FFFFFF) + 1;
+       lbw_rng_start[1]  = (0xFC1E8000 & 0x3FFFFFF) - 1; /* 0x001E7FFF */
+       lbw_rng_end[1]    = (0xFC48FFFF & 0x3FFFFFF) + 1; /* 0x00490000 */
 
-       lbw_rng_start[2]  = (0xFC1E8000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[2]    = (0xFC48FFFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[2]  = (0xFC600000 & 0x3FFFFFF) - 1; /* 0x005FFFFF */
+       lbw_rng_end[2]    = (0xFCC48FFF & 0x3FFFFFF) + 1; /* 0x00C49000 */
 
-       lbw_rng_start[3]  = (0xFC600000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[3]    = (0xFCC48FFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[3]  = (0xFCC4A000 & 0x3FFFFFF) - 1; /* 0x00C49FFF */
+       lbw_rng_end[3]    = (0xFCCDFFFF & 0x3FFFFFF) + 1; /* 0x00CE0000 */
 
-       lbw_rng_start[4]  = (0xFCC4A000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[4]    = (0xFCCDFFFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[4]  = (0xFCCE4000 & 0x3FFFFFF) - 1; /* 0x00CE3FFF */
+       lbw_rng_end[4]    = (0xFCD1FFFF & 0x3FFFFFF) + 1; /* 0x00D20000 */
 
-       lbw_rng_start[5]  = (0xFCCE4000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[5]    = (0xFCD1FFFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[5]  = (0xFCD24000 & 0x3FFFFFF) - 1; /* 0x00D23FFF */
+       lbw_rng_end[5]    = (0xFCD5FFFF & 0x3FFFFFF) + 1; /* 0x00D60000 */
 
-       lbw_rng_start[6]  = (0xFCD24000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[6]    = (0xFCD5FFFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[6]  = (0xFCD64000 & 0x3FFFFFF) - 1; /* 0x00D63FFF */
+       lbw_rng_end[6]    = (0xFCD9FFFF & 0x3FFFFFF) + 1; /* 0x00DA0000 */
 
-       lbw_rng_start[7]  = (0xFCD64000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[7]    = (0xFCD9FFFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[7]  = (0xFCDA4000 & 0x3FFFFFF) - 1; /* 0x00DA3FFF */
+       lbw_rng_end[7]    = (0xFCDDFFFF & 0x3FFFFFF) + 1; /* 0x00DE0000 */
 
-       lbw_rng_start[8]  = (0xFCDA4000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[8]    = (0xFCDDFFFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[8]  = (0xFCDE4000 & 0x3FFFFFF) - 1; /* 0x00DE3FFF */
+       lbw_rng_end[8]    = (0xFCE05FFF & 0x3FFFFFF) + 1; /* 0x00E06000 */
 
-       lbw_rng_start[9]  = (0xFCDE4000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[9]    = (0xFCE05FFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[9]  = (0xFCFC9000 & 0x3FFFFFF) - 1; /* 0x00FC8FFF */
+       lbw_rng_end[9]    = (0xFFFFFFFE & 0x3FFFFFF) + 1; /* 0x03FFFFFF */
 
-       lbw_rng_start[10]  = (0xFEC43000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[10]    = (0xFEC43FFF & 0x3FFFFFF) + 1;
-
-       lbw_rng_start[11] = (0xFE484000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[11]   = (0xFE484FFF & 0x3FFFFFF) + 1;
-
-       for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+       for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++) {
                WREG32(gaudi_rr_lbw_hit_aw_regs[i],
                                (1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
                WREG32(gaudi_rr_lbw_hit_ar_regs[i],
                                (1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
        }
 
-       for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++)
+       for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++)
                for (j = 0 ; j < GAUDI_NUMBER_OF_LBW_RANGES ; j++) {
                        WREG32(gaudi_rr_lbw_min_aw_regs[i] + (j << 2),
                                                        lbw_rng_start[j]);
@@ -12939,12 +12958,12 @@ static void gaudi_init_range_registers_hbw(struct hl_device *hdev)
         * 6th range is the host
         */
 
-       for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+       for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) {
                WREG32(gaudi_rr_hbw_hit_aw_regs[i], 0x1F);
                WREG32(gaudi_rr_hbw_hit_ar_regs[i], 0x1D);
        }
 
-       for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+       for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) {
                WREG32(gaudi_rr_hbw_base_low_aw_regs[i], dram_addr_lo);
                WREG32(gaudi_rr_hbw_base_low_ar_regs[i], dram_addr_lo);
 
index ffdfbd9b32201f38854407bb24e2640179afb00a..1a6576666794538e2e750c2643418d15c4bdb851 100644 (file)
 #define mmPCIE_AUX_FLR_CTRL                                          0xC07394
 #define mmPCIE_AUX_DBI                                               0xC07490
 
+#define mmPCIE_CORE_MSI_REQ                                          0xC04100
+
 #define mmPSOC_PCI_PLL_NR                                            0xC72100
 #define mmSRAM_W_PLL_NR                                              0x4C8100
 #define mmPSOC_HBM_PLL_NR                                            0xC74100
index 99b5c1ecc444194e378c1666f242aabb0430ba5d..be41843df75bcb029dc34af8c13c0d3e0c905443 100644 (file)
@@ -1298,7 +1298,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 
                if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
                    dev->hbm_state != MEI_HBM_STARTING) {
-                       if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+                       if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+                           dev->dev_state == MEI_DEV_POWERING_DOWN) {
                                dev_dbg(dev->dev, "hbm: start: on shutdown, ignoring\n");
                                return 0;
                        }
@@ -1381,7 +1382,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 
                if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
                    dev->hbm_state != MEI_HBM_DR_SETUP) {
-                       if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+                       if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+                           dev->dev_state == MEI_DEV_POWERING_DOWN) {
                                dev_dbg(dev->dev, "hbm: dma setup response: on shutdown, ignoring\n");
                                return 0;
                        }
@@ -1448,7 +1450,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 
                if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
                    dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) {
-                       if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+                       if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+                           dev->dev_state == MEI_DEV_POWERING_DOWN) {
                                dev_dbg(dev->dev, "hbm: properties response: on shutdown, ignoring\n");
                                return 0;
                        }
@@ -1490,7 +1493,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 
                if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
                    dev->hbm_state != MEI_HBM_ENUM_CLIENTS) {
-                       if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+                       if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+                           dev->dev_state == MEI_DEV_POWERING_DOWN) {
                                dev_dbg(dev->dev, "hbm: enumeration response: on shutdown, ignoring\n");
                                return 0;
                        }
index cb34925e10f15d6cd65b79b48c4870257b9e6189..67bb6a25fd0a020c55c310fcd171b1fc566982f9 100644 (file)
@@ -92,6 +92,7 @@
 #define MEI_DEV_ID_CDF        0x18D3  /* Cedar Fork */
 
 #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
+#define MEI_DEV_ID_ICP_N      0x38E0  /* Ice Lake Point N */
 
 #define MEI_DEV_ID_JSP_N      0x4DE0  /* Jasper Lake Point N */
 
index c3393b383e5989bf6ed3be487ba219a8b7fcaeb2..3a45aaf002ac8523e3955e23e21603acfffeb280 100644 (file)
@@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
index 71313961cc54db98ba2ed90ecd939070ae625bd6..95b3511b056052c07dfc11cabc79ce8afcee716d 100644 (file)
@@ -547,7 +547,7 @@ config MMC_SDHCI_MSM
        depends on MMC_SDHCI_PLTFM
        select MMC_SDHCI_IO_ACCESSORS
        select MMC_CQHCI
-       select QCOM_SCM if MMC_CRYPTO && ARCH_QCOM
+       select QCOM_SCM if MMC_CRYPTO
        help
          This selects the Secure Digital Host Controller Interface (SDHCI)
          support present in Qualcomm SOCs. The controller supports
index 6578cc64ae9e80caadf8d44ef18ba2ac7d33e891..380f9aa56eb26a76877526c8f61cbd16eb785f2a 100644 (file)
@@ -1802,10 +1802,15 @@ static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
 
        spin_lock_irqsave(&host->irq_lock, flags);
 
-       if (!host->data_status)
+       /*
+        * Only inject an error if we haven't already got an error or data over
+        * interrupt.
+        */
+       if (!host->data_status) {
                host->data_status = SDMMC_INT_DCRC;
-       set_bit(EVENT_DATA_ERROR, &host->pending_events);
-       tasklet_schedule(&host->tasklet);
+               set_bit(EVENT_DATA_ERROR, &host->pending_events);
+               tasklet_schedule(&host->tasklet);
+       }
 
        spin_unlock_irqrestore(&host->irq_lock, flags);
 
@@ -2721,12 +2726,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
                }
 
                if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+                       spin_lock(&host->irq_lock);
+
                        /* if there is an error report DATA_ERROR */
                        mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
                        host->data_status = pending;
                        smp_wmb(); /* drain writebuffer */
                        set_bit(EVENT_DATA_ERROR, &host->pending_events);
                        tasklet_schedule(&host->tasklet);
+
+                       spin_unlock(&host->irq_lock);
                }
 
                if (pending & SDMMC_INT_DATA_OVER) {
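The dw_mmc change makes the fault-injection timer and the real interrupt handler update host->data_status under the same host->irq_lock, and the timer now only injects when no status has been recorded yet, so exactly one of the two paths claims the error and schedules the tasklet. A minimal userspace sketch of that check-then-set-under-a-lock shape; the pthread mutex and the 0x80/0x100 values are made up and only stand in for the spinlock, SDMMC_INT_DCRC and a real error bit:

/* Build with: gcc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int data_status;		/* 0 == no error recorded yet */

static void report_error(unsigned int status, const char *who)
{
	pthread_mutex_lock(&irq_lock);
	if (!data_status) {			/* mirrors "if (!host->data_status)" */
		data_status = status;
		printf("%s recorded status 0x%x\n", who, status);
	}
	pthread_mutex_unlock(&irq_lock);
}

static void *fault_injector(void *arg)
{
	(void)arg;
	report_error(0x80, "injected fault");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, fault_injector, NULL);
	report_error(0x100, "real interrupt");
	pthread_join(t, NULL);
	printf("final status 0x%x (only one path won)\n", data_status);
	return 0;
}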
index 3f28eb4d17fe79a9c13f37ff0d5cf5afb11a4db0..8f36536cb1b6d0d33242e9287104851e95b92bf0 100644 (file)
@@ -746,7 +746,7 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
        writel(start, host->regs + SD_EMMC_START);
 }
 
-/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
+/* local sg copy for dram_access_quirk */
 static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
                                  size_t buflen, bool to_buffer)
 {
@@ -764,21 +764,27 @@ static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data
        sg_miter_start(&miter, sgl, nents, sg_flags);
 
        while ((offset < buflen) && sg_miter_next(&miter)) {
-               unsigned int len;
+               unsigned int buf_offset = 0;
+               unsigned int len, left;
+               u32 *buf = miter.addr;
 
                len = min(miter.length, buflen - offset);
+               left = len;
 
-               /* When dram_access_quirk, the bounce buffer is a iomem mapping */
-               if (host->dram_access_quirk) {
-                       if (to_buffer)
-                               memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
-                       else
-                               memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
+               if (to_buffer) {
+                       do {
+                               writel(*buf++, host->bounce_iomem_buf + offset + buf_offset);
+
+                               buf_offset += 4;
+                               left -= 4;
+                       } while (left);
                } else {
-                       if (to_buffer)
-                               memcpy(host->bounce_buf + offset, miter.addr, len);
-                       else
-                               memcpy(miter.addr, host->bounce_buf + offset, len);
+                       do {
+                               *buf++ = readl(host->bounce_iomem_buf + offset + buf_offset);
+
+                               buf_offset += 4;
+                               left -= 4;
+                       } while (left);
                }
 
                offset += len;
@@ -830,7 +836,11 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
                if (data->flags & MMC_DATA_WRITE) {
                        cmd_cfg |= CMD_CFG_DATA_WR;
                        WARN_ON(xfer_bytes > host->bounce_buf_size);
-                       meson_mmc_copy_buffer(host, data, xfer_bytes, true);
+                       if (host->dram_access_quirk)
+                               meson_mmc_copy_buffer(host, data, xfer_bytes, true);
+                       else
+                               sg_copy_to_buffer(data->sg, data->sg_len,
+                                                 host->bounce_buf, xfer_bytes);
                        dma_wmb();
                }
 
@@ -849,12 +859,43 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
        writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
 }
 
+static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data *data)
+{
+       struct scatterlist *sg;
+       int i;
+
+       /* Reject request if any element offset or size is not 32bit aligned */
+       for_each_sg(data->sg, sg, data->sg_len, i) {
+               if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+                   !IS_ALIGNED(sg->length, sizeof(u32))) {
+                       dev_err(mmc_dev(mmc), "unaligned sg offset %u len %u\n",
+                               sg->offset, sg->length);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
 static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
        struct meson_host *host = mmc_priv(mmc);
        bool needs_pre_post_req = mrq->data &&
                        !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);
 
+       /*
+        * The memory at the end of the controller used as bounce buffer for
+        * the dram_access_quirk only accepts 32bit read/write access,
+        * check the alignment and length of the data before starting the request.
+        */
+       if (host->dram_access_quirk && mrq->data) {
+               mrq->cmd->error = meson_mmc_validate_dram_access(mmc, mrq->data);
+               if (mrq->cmd->error) {
+                       mmc_request_done(mmc, mrq);
+                       return;
+               }
+       }
+
        if (needs_pre_post_req) {
                meson_mmc_get_transfer_mode(mmc, mrq);
                if (!meson_mmc_desc_chain_mode(mrq->data))
@@ -999,7 +1040,11 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
        if (meson_mmc_bounce_buf_read(data)) {
                xfer_bytes = data->blksz * data->blocks;
                WARN_ON(xfer_bytes > host->bounce_buf_size);
-               meson_mmc_copy_buffer(host, data, xfer_bytes, false);
+               if (host->dram_access_quirk)
+                       meson_mmc_copy_buffer(host, data, xfer_bytes, false);
+               else
+                       sg_copy_from_buffer(data->sg, data->sg_len,
+                                           host->bounce_buf, xfer_bytes);
        }
 
        next_cmd = meson_mmc_get_next_command(cmd);
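The meson-gx changes above stop using memcpy_toio()/memcpy_fromio() on the bounce buffer and instead move data strictly in naturally aligned 32-bit words, rejecting up front any request whose scatterlist cannot be expressed that way. A standalone sketch of the same word-granular copy plus the alignment check, in plain C with ordinary buffers instead of the driver's iomem accessors:

/* Sketch: copy only in whole, naturally aligned 32-bit words, and refuse
 * anything that is not a multiple of 4 bytes.  Ordinary memory stands in
 * for the driver's bounce_iomem_buf.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int copy_words(uint32_t *dst, const void *src, size_t len)
{
	size_t i;

	if (len % sizeof(uint32_t))	/* mirrors the IS_ALIGNED(length) check */
		return -1;

	for (i = 0; i < len / sizeof(uint32_t); i++) {
		uint32_t word;

		memcpy(&word, (const uint8_t *)src + i * 4, 4);
		dst[i] = word;		/* the driver uses writel()/readl() here */
	}
	return 0;
}

int main(void)
{
	uint8_t src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint32_t dst[2];

	printf("aligned copy:   %d\n", copy_words(dst, src, sizeof(src)));	/* 0 */
	printf("unaligned len:  %d\n", copy_words(dst, src, 6));		/* -1 */
	return 0;
}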
index 6fc4cf3c9dce101cd97b9a1718ae19bdb000ccec..a4407f391f66a6f2f05bebad1334748895c9fe77 100644 (file)
@@ -561,6 +561,8 @@ static void renesas_sdhi_reset(struct tmio_mmc_host *host)
                /* Unknown why but without polling reset status, it will hang */
                read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
                                  false, priv->rstc);
+               /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
+               sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
                priv->needs_adjust_hs400 = false;
                renesas_sdhi_set_clock(host, host->clk_cache);
        } else if (priv->scc_ctl) {
index 5564d7b23e7cd9861fc90ab0c720ddb06f6a65ef..d1a1c548c515f07a4cebce87dd0637177f39197f 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/slot-gpio.h>
@@ -61,7 +62,6 @@ static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
 static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
 {
        u16 clk;
-       unsigned long timeout;
 
        host->mmc->actual_clock = 0;
 
@@ -86,16 +86,11 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 
        /* Wait max 20 ms */
-       timeout = 20;
-       while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
-               & SDHCI_CLOCK_INT_STABLE)) {
-               if (timeout == 0) {
-                       pr_err("%s: Internal clock never stabilised.\n",
-                              mmc_hostname(host->mmc));
-                       return;
-               }
-               timeout--;
-               mdelay(1);
+       if (read_poll_timeout(sdhci_readw, clk, (clk & SDHCI_CLOCK_INT_STABLE),
+                             1000, 20000, false, host, SDHCI_CLOCK_CONTROL)) {
+               pr_err("%s: Internal clock never stabilised.\n",
+                      mmc_hostname(host->mmc));
+               return;
        }
 
        clk |= SDHCI_CLOCK_CARD_EN;
@@ -114,6 +109,7 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
+       unsigned int tmp;
 
        sdhci_reset(host, mask);
 
@@ -126,6 +122,10 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
 
                sdhci_writel(host, calcr | SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
                             SDMMC_CALCR);
+
+               if (read_poll_timeout(sdhci_readl, tmp, !(tmp & SDMMC_CALCR_EN),
+                                     10, 20000, false, host, SDMMC_CALCR))
+                       dev_err(mmc_dev(host->mmc), "Failed to calibrate\n");
        }
 }
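Both sdhci-of-at91 changes above replace or avoid an open-coded mdelay() loop by using the kernel's read_poll_timeout() helper: read a register repeatedly, stop when a condition bit appears, give up after a timeout. A reduced, self-contained sketch of that shape, with a fake register read and hypothetical names rather than the kernel macro itself:

/* Sketch: poll a "register" until a condition bit appears or a timeout
 * expires, the same shape as the read_poll_timeout() calls above.
 * Everything here is made up for illustration; it is not the kernel macro.
 */
#include <stdio.h>
#include <unistd.h>

static unsigned int fake_reg;			/* stands in for SDHCI_CLOCK_CONTROL */

static unsigned int read_reg(void)
{
	static int reads;

	if (++reads > 3)			/* pretend the clock stabilises */
		fake_reg |= 0x2;		/* stands in for SDHCI_CLOCK_INT_STABLE */
	return fake_reg;
}

static int poll_timeout(unsigned int (*read_fn)(void), unsigned int mask,
			unsigned int sleep_us, unsigned int timeout_us)
{
	unsigned int waited = 0;

	for (;;) {
		if (read_fn() & mask)
			return 0;		/* condition met */
		if (waited >= timeout_us)
			return -1;		/* the kernel returns -ETIMEDOUT */
		usleep(sleep_us);
		waited += sleep_us;
	}
}

int main(void)
{
	/* sleep 1000us between reads, give up after 20000us, as in the driver */
	printf("clock stable: %s\n",
	       poll_timeout(read_reg, 0x2, 1000, 20000) ? "timed out" : "yes");
	return 0;
}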
 
index ef0badea4f4158e9b136fa2274a87f7006ff20a1..04e6f7b26706482883fb3fe681200febc23fc92e 100644 (file)
@@ -1676,13 +1676,17 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
        struct nand_ecc_ctrl *ecc = &chip->ecc;
        int data_size1, data_size2, oob_size1, oob_size2;
        int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
+       int raw_cw = cw;
 
        nand_read_page_op(chip, page, 0, NULL, 0);
        host->use_ecc = false;
 
+       if (nandc->props->qpic_v2)
+               raw_cw = ecc->steps - 1;
+
        clear_bam_transaction(nandc);
        set_address(host, host->cw_size * cw, page);
-       update_rw_regs(host, 1, true, cw);
+       update_rw_regs(host, 1, true, raw_cw);
        config_nand_page_read(chip);
 
        data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
@@ -1711,7 +1715,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
                nandc_set_read_loc(chip, cw, 3, read_loc, oob_size2, 1);
        }
 
-       config_nand_cw_read(chip, false, cw);
+       config_nand_cw_read(chip, false, raw_cw);
 
        read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
        reg_off += data_size1;
index a533a90e39048dbac2b4cbb56af4e25ddfc36b31..a7aeb3c132c987297239c178fa0f7e69ea701df2 100644 (file)
@@ -351,9 +351,25 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
 static void b53_mdio_remove(struct mdio_device *mdiodev)
 {
        struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
-       struct dsa_switch *ds = dev->ds;
 
-       dsa_unregister_switch(ds);
+       if (!dev)
+               return;
+
+       b53_switch_remove(dev);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void b53_mdio_shutdown(struct mdio_device *mdiodev)
+{
+       struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+       if (!dev)
+               return;
+
+       b53_switch_shutdown(dev);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static const struct of_device_id b53_of_match[] = {
@@ -373,6 +389,7 @@ MODULE_DEVICE_TABLE(of, b53_of_match);
 static struct mdio_driver b53_mdio_driver = {
        .probe  = b53_mdio_probe,
        .remove = b53_mdio_remove,
+       .shutdown = b53_mdio_shutdown,
        .mdiodrv.driver = {
                .name = "bcm53xx",
                .of_match_table = b53_of_match,
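The b53_mdio change above is the same recipe repeated across the DSA drivers below: add a .shutdown hook that calls dsa_switch_shutdown(), clear the driver data, and teach remove() to return early when the driver data is already gone, so whichever of shutdown/remove runs second becomes a no-op. A condensed standalone sketch of that ordering, with illustrative my_* names and a plain pointer standing in for dev_get_drvdata()/dev_set_drvdata():

/* Standalone sketch of the shutdown/remove hardening pattern added to the
 * DSA drivers in this series.  Names are illustrative, not a real driver.
 */
#include <stdio.h>

struct my_switch { int id; };

static struct my_switch the_switch = { 1 };
static struct my_switch *drvdata = &the_switch;	/* stands in for drvdata */

static void my_remove(void)
{
	struct my_switch *priv = drvdata;

	if (!priv)			/* shutdown already ran: nothing to do */
		return;
	printf("remove: full teardown (dsa_unregister_switch)\n");
	drvdata = NULL;			/* make a later shutdown a no-op */
}

static void my_shutdown(void)
{
	struct my_switch *priv = drvdata;

	if (!priv)			/* remove already ran: nothing to do */
		return;
	printf("shutdown: quiesce only (dsa_switch_shutdown)\n");
	drvdata = NULL;			/* make a later remove a no-op */
}

int main(void)
{
	my_shutdown();	/* e.g. the reboot/kexec path runs first */
	my_remove();	/* a later unbind now returns early */
	return 0;
}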
index 82680e083cc29c6ca26c7a94a9010b70d847089a..ae4c79d39bc047e8274cc849078a960df1545527 100644 (file)
@@ -316,9 +316,21 @@ static int b53_mmap_remove(struct platform_device *pdev)
        if (dev)
                b53_switch_remove(dev);
 
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
+static void b53_mmap_shutdown(struct platform_device *pdev)
+{
+       struct b53_device *dev = platform_get_drvdata(pdev);
+
+       if (dev)
+               b53_switch_shutdown(dev);
+
+       platform_set_drvdata(pdev, NULL);
+}
+
 static const struct of_device_id b53_mmap_of_table[] = {
        { .compatible = "brcm,bcm3384-switch" },
        { .compatible = "brcm,bcm6328-switch" },
@@ -331,6 +343,7 @@ MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
 static struct platform_driver b53_mmap_driver = {
        .probe = b53_mmap_probe,
        .remove = b53_mmap_remove,
+       .shutdown = b53_mmap_shutdown,
        .driver = {
                .name = "b53-switch",
                .of_match_table = b53_mmap_of_table,
index 5d068acf7cf817abe293e56d6d958d700fa892fd..959a52d41f0a17cdb966bf03777745bded30a2e0 100644 (file)
@@ -228,6 +228,11 @@ static inline void b53_switch_remove(struct b53_device *dev)
        dsa_unregister_switch(dev->ds);
 }
 
+static inline void b53_switch_shutdown(struct b53_device *dev)
+{
+       dsa_switch_shutdown(dev->ds);
+}
+
 #define b53_build_op(type_op_size, val_type)                           \
 static inline int b53_##type_op_size(struct b53_device *dev, u8 page,  \
                                     u8 reg, val_type val)              \
index ecb9f7f6b3358d7a304152958910f9f1a226faef..01e37b75471e111430fcd30c1a159afc7d5540bc 100644 (file)
@@ -321,9 +321,21 @@ static int b53_spi_remove(struct spi_device *spi)
        if (dev)
                b53_switch_remove(dev);
 
+       spi_set_drvdata(spi, NULL);
+
        return 0;
 }
 
+static void b53_spi_shutdown(struct spi_device *spi)
+{
+       struct b53_device *dev = spi_get_drvdata(spi);
+
+       if (dev)
+               b53_switch_shutdown(dev);
+
+       spi_set_drvdata(spi, NULL);
+}
+
 static const struct of_device_id b53_spi_of_match[] = {
        { .compatible = "brcm,bcm5325" },
        { .compatible = "brcm,bcm5365" },
@@ -344,6 +356,7 @@ static struct spi_driver b53_spi_driver = {
        },
        .probe  = b53_spi_probe,
        .remove = b53_spi_remove,
+       .shutdown = b53_spi_shutdown,
 };
 
 module_spi_driver(b53_spi_driver);
index 3f4249de70c568305281ecd6851762fa27a77eb9..4591bb1c05d2c7f4fba3d3805d077b77c5e66c8c 100644 (file)
@@ -629,17 +629,34 @@ static int b53_srab_probe(struct platform_device *pdev)
 static int b53_srab_remove(struct platform_device *pdev)
 {
        struct b53_device *dev = platform_get_drvdata(pdev);
-       struct b53_srab_priv *priv = dev->priv;
 
-       b53_srab_intr_set(priv, false);
+       if (!dev)
+               return 0;
+
+       b53_srab_intr_set(dev->priv, false);
        b53_switch_remove(dev);
 
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
+static void b53_srab_shutdown(struct platform_device *pdev)
+{
+       struct b53_device *dev = platform_get_drvdata(pdev);
+
+       if (!dev)
+               return;
+
+       b53_switch_shutdown(dev);
+
+       platform_set_drvdata(pdev, NULL);
+}
+
 static struct platform_driver b53_srab_driver = {
        .probe = b53_srab_probe,
        .remove = b53_srab_remove,
+       .shutdown = b53_srab_shutdown,
        .driver = {
                .name = "b53-srab-switch",
                .of_match_table = b53_srab_of_match,
index 6ce9ec1283e05d63f9a46629d9757b40121fda4a..7578a5c38df597a4ab7648004c8f8a3df7a665a1 100644 (file)
@@ -68,7 +68,7 @@ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        unsigned int port, count = 0;
 
-       for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
+       for (port = 0; port < ds->num_ports; port++) {
                if (dsa_is_cpu_port(ds, port))
                        continue;
                if (priv->port_sts[port].enabled)
@@ -1512,6 +1512,9 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
 {
        struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
 
+       if (!priv)
+               return 0;
+
        priv->wol_ports_mask = 0;
        /* Disable interrupts */
        bcm_sf2_intr_disable(priv);
@@ -1523,6 +1526,8 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
        if (priv->type == BCM7278_DEVICE_ID)
                reset_control_assert(priv->rcdev);
 
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
@@ -1530,6 +1535,9 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
 {
        struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
 
+       if (!priv)
+               return;
+
        /* For a kernel about to be kexec'd we want to keep the GPHY on for a
         * successful MDIO bus scan to occur. If we did turn off the GPHY
         * before (e.g: port_disable), this will also power it back on.
@@ -1538,6 +1546,10 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
         */
        if (priv->hw_params.num_gphy == 1)
                bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+
+       dsa_switch_shutdown(priv->dev->ds);
+
+       platform_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM_SLEEP
index bfdf3324aac3a24172b096ef48c05d656a633a38..e638e3eea9112d504a28173ad3527d67ff2e3aa8 100644 (file)
@@ -340,10 +340,29 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
 static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
 {
        struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
-       struct dsa_loop_priv *ps = ds->priv;
+       struct dsa_loop_priv *ps;
+
+       if (!ds)
+               return;
+
+       ps = ds->priv;
 
        dsa_unregister_switch(ds);
        dev_put(ps->netdev);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev)
+{
+       struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+       if (!ds)
+               return;
+
+       dsa_switch_shutdown(ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static struct mdio_driver dsa_loop_drv = {
@@ -352,6 +371,7 @@ static struct mdio_driver dsa_loop_drv = {
        },
        .probe  = dsa_loop_drv_probe,
        .remove = dsa_loop_drv_remove,
+       .shutdown = dsa_loop_drv_shutdown,
 };
 
 #define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
index 542cfc4ccb0806f2d6d36cfffa0a95e4fefd450e..354655f9ed0036a6ddb3cf47a54265070c720414 100644 (file)
@@ -1916,6 +1916,9 @@ static int hellcreek_remove(struct platform_device *pdev)
 {
        struct hellcreek *hellcreek = platform_get_drvdata(pdev);
 
+       if (!hellcreek)
+               return 0;
+
        hellcreek_hwtstamp_free(hellcreek);
        hellcreek_ptp_free(hellcreek);
        dsa_unregister_switch(hellcreek->ds);
@@ -1924,6 +1927,18 @@ static int hellcreek_remove(struct platform_device *pdev)
        return 0;
 }
 
+static void hellcreek_shutdown(struct platform_device *pdev)
+{
+       struct hellcreek *hellcreek = platform_get_drvdata(pdev);
+
+       if (!hellcreek)
+               return;
+
+       dsa_switch_shutdown(hellcreek->ds);
+
+       platform_set_drvdata(pdev, NULL);
+}
+
 static const struct hellcreek_platform_data de1soc_r1_pdata = {
        .name            = "r4c30",
        .num_ports       = 4,
@@ -1946,6 +1961,7 @@ MODULE_DEVICE_TABLE(of, hellcreek_of_match);
 static struct platform_driver hellcreek_driver = {
        .probe  = hellcreek_probe,
        .remove = hellcreek_remove,
+       .shutdown = hellcreek_shutdown,
        .driver = {
                .name = "hellcreek",
                .of_match_table = hellcreek_of_match,
index d7ce281570b5429b2369b03e8cf2da380422aa7f..89f920289ae217bc5d241b23bae2944e037b8c83 100644 (file)
@@ -1379,6 +1379,12 @@ int lan9303_remove(struct lan9303 *chip)
 }
 EXPORT_SYMBOL(lan9303_remove);
 
+void lan9303_shutdown(struct lan9303 *chip)
+{
+       dsa_switch_shutdown(chip->ds);
+}
+EXPORT_SYMBOL(lan9303_shutdown);
+
 MODULE_AUTHOR("Juergen Borleis <kernel@pengutronix.de>");
 MODULE_DESCRIPTION("Core driver for SMSC/Microchip LAN9303 three port ethernet switch");
 MODULE_LICENSE("GPL v2");
index 11f590b64701bcf12b1d3010dfa6128bbe8ab505..c7f73efa50f04dcd07bf198ce285250cf451fe37 100644 (file)
@@ -10,3 +10,4 @@ extern const struct lan9303_phy_ops lan9303_indirect_phy_ops;
 
 int lan9303_probe(struct lan9303 *chip, struct device_node *np);
 int lan9303_remove(struct lan9303 *chip);
+void lan9303_shutdown(struct lan9303 *chip);
index 9bffaef65a043e4bca9288fb9e3f659fad48390d..8ca4713310fa3d821297c6607137209398489ba9 100644 (file)
@@ -67,13 +67,28 @@ static int lan9303_i2c_probe(struct i2c_client *client,
 
 static int lan9303_i2c_remove(struct i2c_client *client)
 {
-       struct lan9303_i2c *sw_dev;
+       struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
 
-       sw_dev = i2c_get_clientdata(client);
        if (!sw_dev)
-               return -ENODEV;
+               return 0;
+
+       lan9303_remove(&sw_dev->chip);
+
+       i2c_set_clientdata(client, NULL);
+
+       return 0;
+}
+
+static void lan9303_i2c_shutdown(struct i2c_client *client)
+{
+       struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
+
+       if (!sw_dev)
+               return;
+
+       lan9303_shutdown(&sw_dev->chip);
 
-       return lan9303_remove(&sw_dev->chip);
+       i2c_set_clientdata(client, NULL);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -97,6 +112,7 @@ static struct i2c_driver lan9303_i2c_driver = {
        },
        .probe = lan9303_i2c_probe,
        .remove = lan9303_i2c_remove,
+       .shutdown = lan9303_i2c_shutdown,
        .id_table = lan9303_i2c_id,
 };
 module_i2c_driver(lan9303_i2c_driver);
index 9cbe80460b53c0d13838a9e5dc94f9a45e6012cd..bbb7032409baa2305653013713100a2d247ab853 100644 (file)
@@ -138,6 +138,20 @@ static void lan9303_mdio_remove(struct mdio_device *mdiodev)
                return;
 
        lan9303_remove(&sw_dev->chip);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
+{
+       struct lan9303_mdio *sw_dev = dev_get_drvdata(&mdiodev->dev);
+
+       if (!sw_dev)
+               return;
+
+       lan9303_shutdown(&sw_dev->chip);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -155,6 +169,7 @@ static struct mdio_driver lan9303_mdio_driver = {
        },
        .probe  = lan9303_mdio_probe,
        .remove = lan9303_mdio_remove,
+       .shutdown = lan9303_mdio_shutdown,
 };
 mdio_module_driver(lan9303_mdio_driver);
 
index 64d6dfa83122035810ddcd967c943bf8c40a224f..3ff4b7e177f3cb0d11ec59324df4e2222764dab0 100644 (file)
@@ -1885,6 +1885,12 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
 
        reset_control_assert(gphy_fw->reset);
 
+       /* The vendor BSP uses a 200ms delay after asserting the reset line.
+        * Without this some users are observing that the PHY is not coming up
+        * on the MDIO bus.
+        */
+       msleep(200);
+
        ret = request_firmware(&fw, gphy_fw->fw_name, dev);
        if (ret) {
                dev_err(dev, "failed to load firmware: %s, error: %i\n",
@@ -2178,6 +2184,9 @@ static int gswip_remove(struct platform_device *pdev)
        struct gswip_priv *priv = platform_get_drvdata(pdev);
        int i;
 
+       if (!priv)
+               return 0;
+
        /* disable the switch */
        gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
 
@@ -2191,9 +2200,23 @@ static int gswip_remove(struct platform_device *pdev)
        for (i = 0; i < priv->num_gphy_fw; i++)
                gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
 
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
+static void gswip_shutdown(struct platform_device *pdev)
+{
+       struct gswip_priv *priv = platform_get_drvdata(pdev);
+
+       if (!priv)
+               return;
+
+       dsa_switch_shutdown(priv->ds);
+
+       platform_set_drvdata(pdev, NULL);
+}
+
 static const struct gswip_hw_info gswip_xrx200 = {
        .max_ports = 7,
        .cpu_port = 6,
@@ -2217,6 +2240,7 @@ MODULE_DEVICE_TABLE(of, gswip_of_match);
 static struct platform_driver gswip_driver = {
        .probe = gswip_probe,
        .remove = gswip_remove,
+       .shutdown = gswip_shutdown,
        .driver = {
                .name = "gswip",
                .of_match_table = gswip_of_match,
index ea7550d1b634d416f207e256335ff458111a847e..866767b70d65bbc0097c748d81c7b8e463c0966c 100644 (file)
@@ -94,6 +94,8 @@ static int ksz8795_spi_remove(struct spi_device *spi)
        if (dev)
                ksz_switch_remove(dev);
 
+       spi_set_drvdata(spi, NULL);
+
        return 0;
 }
 
@@ -101,8 +103,15 @@ static void ksz8795_spi_shutdown(struct spi_device *spi)
 {
        struct ksz_device *dev = spi_get_drvdata(spi);
 
-       if (dev && dev->dev_ops->shutdown)
+       if (!dev)
+               return;
+
+       if (dev->dev_ops->shutdown)
                dev->dev_ops->shutdown(dev);
+
+       dsa_switch_shutdown(dev->ds);
+
+       spi_set_drvdata(spi, NULL);
 }
 
 static const struct of_device_id ksz8795_dt_ids[] = {
index 11293485138c260a2eec63772f74d613a39a40a6..5883fa7edda2271c59aa35ba6439fee24a4c23cf 100644 (file)
@@ -191,6 +191,18 @@ static void ksz8863_smi_remove(struct mdio_device *mdiodev)
 
        if (dev)
                ksz_switch_remove(dev);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
+{
+       struct ksz_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+       if (dev)
+               dsa_switch_shutdown(dev->ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static const struct of_device_id ksz8863_dt_ids[] = {
@@ -203,6 +215,7 @@ MODULE_DEVICE_TABLE(of, ksz8863_dt_ids);
 static struct mdio_driver ksz8863_driver = {
        .probe  = ksz8863_smi_probe,
        .remove = ksz8863_smi_remove,
+       .shutdown = ksz8863_smi_shutdown,
        .mdiodrv.driver = {
                .name   = "ksz8863-switch",
                .of_match_table = ksz8863_dt_ids,
index 4e053a25d07713f9abcd61e07695922c795632fa..f3afb8b8c4cc7d5b439e23604bb6c276202df5d8 100644 (file)
@@ -56,7 +56,10 @@ static int ksz9477_i2c_remove(struct i2c_client *i2c)
 {
        struct ksz_device *dev = i2c_get_clientdata(i2c);
 
-       ksz_switch_remove(dev);
+       if (dev)
+               ksz_switch_remove(dev);
+
+       i2c_set_clientdata(i2c, NULL);
 
        return 0;
 }
@@ -65,8 +68,15 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
 {
        struct ksz_device *dev = i2c_get_clientdata(i2c);
 
-       if (dev && dev->dev_ops->shutdown)
+       if (!dev)
+               return;
+
+       if (dev->dev_ops->shutdown)
                dev->dev_ops->shutdown(dev);
+
+       dsa_switch_shutdown(dev->ds);
+
+       i2c_set_clientdata(i2c, NULL);
 }
 
 static const struct i2c_device_id ksz9477_i2c_id[] = {
index 15bc11b3cda4343f91fb82f6407fd80704aef67c..e3cb0e6c9f6f2cbcb032b3b128d7aa0388552a39 100644 (file)
@@ -72,6 +72,8 @@ static int ksz9477_spi_remove(struct spi_device *spi)
        if (dev)
                ksz_switch_remove(dev);
 
+       spi_set_drvdata(spi, NULL);
+
        return 0;
 }
 
@@ -79,8 +81,10 @@ static void ksz9477_spi_shutdown(struct spi_device *spi)
 {
        struct ksz_device *dev = spi_get_drvdata(spi);
 
-       if (dev && dev->dev_ops->shutdown)
-               dev->dev_ops->shutdown(dev);
+       if (dev)
+               dsa_switch_shutdown(dev->ds);
+
+       spi_set_drvdata(spi, NULL);
 }
 
 static const struct of_device_id ksz9477_dt_ids[] = {
index 1542bfb8b5e54a595ef822a679d6725c8f7eb85e..7c2968a639eba552e552bb418fc3cbeabe8a9dea 100644 (file)
@@ -449,8 +449,10 @@ EXPORT_SYMBOL(ksz_switch_register);
 void ksz_switch_remove(struct ksz_device *dev)
 {
        /* timer started */
-       if (dev->mib_read_interval)
+       if (dev->mib_read_interval) {
+               dev->mib_read_interval = 0;
                cancel_delayed_work_sync(&dev->mib_read);
+       }
 
        dev->dev_ops->exit(dev);
        dsa_unregister_switch(dev->ds);
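In the ksz_common hunk above, dev->mib_read_interval doubles as the "timer started" flag noted in the existing comment, so zeroing it before cancel_delayed_work_sync() stops any path that checks the flag from re-queuing the MIB work while the cancel is in flight. A tiny standalone sketch of that clear-the-flag-then-cancel ordering, with illustrative names:

/* Sketch: a job is only (re)queued while a "started" flag is set; clearing
 * the flag before cancelling closes the window in which another path could
 * re-queue it.  Names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

static bool timer_started;
static bool work_queued;

static void maybe_queue_work(const char *who)
{
	if (timer_started) {		/* mirrors "if (dev->mib_read_interval)" */
		work_queued = true;
		printf("%s queued the work\n", who);
	} else {
		printf("%s skipped: teardown in progress\n", who);
	}
}

static void teardown(void)
{
	timer_started = false;		/* clear the flag first... */
	work_queued = false;		/* ...then cancel whatever was pending */
}

int main(void)
{
	timer_started = true;
	maybe_queue_work("link-down handler");
	teardown();
	maybe_queue_work("late link-down handler");	/* no longer queues */
	printf("work pending at exit: %s\n", work_queued ? "yes" : "no");
	return 0;
}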
index d0cba2d1cd681645415a83d681a79bd4d8428e21..094737e5084a49bd5e8ad66381a42df76a952baa 100644 (file)
@@ -3286,6 +3286,9 @@ mt7530_remove(struct mdio_device *mdiodev)
        struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
        int ret = 0;
 
+       if (!priv)
+               return;
+
        ret = regulator_disable(priv->core_pwr);
        if (ret < 0)
                dev_err(priv->dev,
@@ -3301,11 +3304,26 @@ mt7530_remove(struct mdio_device *mdiodev)
 
        dsa_unregister_switch(priv->ds);
        mutex_destroy(&priv->reg_mutex);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mt7530_shutdown(struct mdio_device *mdiodev)
+{
+       struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+       if (!priv)
+               return;
+
+       dsa_switch_shutdown(priv->ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static struct mdio_driver mt7530_mdio_driver = {
        .probe  = mt7530_probe,
        .remove = mt7530_remove,
+       .shutdown = mt7530_shutdown,
        .mdiodrv.driver = {
                .name = "mt7530",
                .of_match_table = mt7530_of_match,
index 24b8219fd607781d5fb042a8c24f8902b204ff0c..a4c6eb9a52d0df6fcbdce54f949d7702a06d28ed 100644 (file)
@@ -290,7 +290,24 @@ static void mv88e6060_remove(struct mdio_device *mdiodev)
 {
        struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
 
+       if (!ds)
+               return;
+
        dsa_unregister_switch(ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mv88e6060_shutdown(struct mdio_device *mdiodev)
+{
+       struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+       if (!ds)
+               return;
+
+       dsa_switch_shutdown(ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static const struct of_device_id mv88e6060_of_match[] = {
@@ -303,6 +320,7 @@ static const struct of_device_id mv88e6060_of_match[] = {
 static struct mdio_driver mv88e6060_driver = {
        .probe  = mv88e6060_probe,
        .remove = mv88e6060_remove,
+       .shutdown = mv88e6060_shutdown,
        .mdiodrv.driver = {
                .name = "mv88e6060",
                .of_match_table = mv88e6060_of_match,
index c45ca2473743886f29cc9dad927c99173dd9523d..8dadcae93c9b53ef8ad4f3440ff9d12630c8a13e 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/bitfield.h>
 #include <linux/delay.h>
+#include <linux/dsa/mv88e6xxx.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_bridge.h>
@@ -749,7 +750,11 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
        ops = chip->info->ops;
 
        mv88e6xxx_reg_lock(chip);
-       if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
+       /* Internal PHYs propagate their configuration directly to the MAC.
+        * External PHYs depend on whether the PPU is enabled for this port.
+        */
+       if (((!mv88e6xxx_phy_is_internal(ds, port) &&
+             !mv88e6xxx_port_ppu_updates(chip, port)) ||
             mode == MLO_AN_FIXED) && ops->port_sync_link)
                err = ops->port_sync_link(chip, port, mode, false);
        mv88e6xxx_reg_unlock(chip);
@@ -772,7 +777,12 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
        ops = chip->info->ops;
 
        mv88e6xxx_reg_lock(chip);
-       if (!mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED) {
+       /* Internal PHYs propagate their configuration directly to the MAC.
+        * External PHYs depend on whether the PPU is enabled for this port.
+        */
+       if ((!mv88e6xxx_phy_is_internal(ds, port) &&
+            !mv88e6xxx_port_ppu_updates(chip, port)) ||
+           mode == MLO_AN_FIXED) {
                /* FIXME: for an automedia port, should we force the link
                 * down here - what if the link comes up due to "other" media
                 * while we're bringing the port up, how is the exclusivity
@@ -1677,6 +1687,30 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
        return 0;
 }
 
+static int mv88e6xxx_port_commit_pvid(struct mv88e6xxx_chip *chip, int port)
+{
+       struct dsa_port *dp = dsa_to_port(chip->ds, port);
+       struct mv88e6xxx_port *p = &chip->ports[port];
+       u16 pvid = MV88E6XXX_VID_STANDALONE;
+       bool drop_untagged = false;
+       int err;
+
+       if (dp->bridge_dev) {
+               if (br_vlan_enabled(dp->bridge_dev)) {
+                       pvid = p->bridge_pvid.vid;
+                       drop_untagged = !p->bridge_pvid.valid;
+               } else {
+                       pvid = MV88E6XXX_VID_BRIDGED;
+               }
+       }
+
+       err = mv88e6xxx_port_set_pvid(chip, port, pvid);
+       if (err)
+               return err;
+
+       return mv88e6xxx_port_drop_untagged(chip, port, drop_untagged);
+}
+
 static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
                                         bool vlan_filtering,
                                         struct netlink_ext_ack *extack)
@@ -1690,7 +1724,16 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
                return -EOPNOTSUPP;
 
        mv88e6xxx_reg_lock(chip);
+
        err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
+       if (err)
+               goto unlock;
+
+       err = mv88e6xxx_port_commit_pvid(chip, port);
+       if (err)
+               goto unlock;
+
+unlock:
        mv88e6xxx_reg_unlock(chip);
 
        return err;
@@ -1725,11 +1768,15 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
        u16 fid;
        int err;
 
-       /* Null VLAN ID corresponds to the port private database */
+       /* Ports have two private address databases: one for when the port is
+        * standalone and one for when the port is under a bridge and the
+        * 802.1Q mode is disabled. When the port is standalone, DSA wants its
+        * address database to remain 100% empty, so we never load an ATU entry
+        * into a standalone port's database. Therefore, translate the null
+        * VLAN ID into the port's database used for VLAN-unaware bridging.
+        */
        if (vid == 0) {
-               err = mv88e6xxx_port_get_fid(chip, port, &fid);
-               if (err)
-                       return err;
+               fid = MV88E6XXX_FID_BRIDGED;
        } else {
                err = mv88e6xxx_vtu_get(chip, vid, &vlan);
                if (err)
@@ -2123,6 +2170,7 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
        struct mv88e6xxx_chip *chip = ds->priv;
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+       struct mv88e6xxx_port *p = &chip->ports[port];
        bool warn;
        u8 member;
        int err;
@@ -2156,13 +2204,21 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
        }
 
        if (pvid) {
-               err = mv88e6xxx_port_set_pvid(chip, port, vlan->vid);
-               if (err) {
-                       dev_err(ds->dev, "p%d: failed to set PVID %d\n",
-                               port, vlan->vid);
+               p->bridge_pvid.vid = vlan->vid;
+               p->bridge_pvid.valid = true;
+
+               err = mv88e6xxx_port_commit_pvid(chip, port);
+               if (err)
+                       goto out;
+       } else if (vlan->vid && p->bridge_pvid.vid == vlan->vid) {
+               /* The old pvid was reinstalled as a non-pvid VLAN */
+               p->bridge_pvid.valid = false;
+
+               err = mv88e6xxx_port_commit_pvid(chip, port);
+               if (err)
                        goto out;
-               }
        }
+
 out:
        mv88e6xxx_reg_unlock(chip);
 
@@ -2212,6 +2268,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
                                   const struct switchdev_obj_port_vlan *vlan)
 {
        struct mv88e6xxx_chip *chip = ds->priv;
+       struct mv88e6xxx_port *p = &chip->ports[port];
        int err = 0;
        u16 pvid;
 
@@ -2229,7 +2286,9 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
                goto unlock;
 
        if (vlan->vid == pvid) {
-               err = mv88e6xxx_port_set_pvid(chip, port, 0);
+               p->bridge_pvid.valid = false;
+
+               err = mv88e6xxx_port_commit_pvid(chip, port);
                if (err)
                        goto unlock;
        }
@@ -2393,7 +2452,16 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
        int err;
 
        mv88e6xxx_reg_lock(chip);
+
        err = mv88e6xxx_bridge_map(chip, br);
+       if (err)
+               goto unlock;
+
+       err = mv88e6xxx_port_commit_pvid(chip, port);
+       if (err)
+               goto unlock;
+
+unlock:
        mv88e6xxx_reg_unlock(chip);
 
        return err;
@@ -2403,11 +2471,20 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
                                        struct net_device *br)
 {
        struct mv88e6xxx_chip *chip = ds->priv;
+       int err;
 
        mv88e6xxx_reg_lock(chip);
+
        if (mv88e6xxx_bridge_map(chip, br) ||
            mv88e6xxx_port_vlan_map(chip, port))
                dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
+
+       err = mv88e6xxx_port_commit_pvid(chip, port);
+       if (err)
+               dev_err(ds->dev,
+                       "port %d failed to restore standalone pvid: %pe\n",
+                       port, ERR_PTR(err));
+
        mv88e6xxx_reg_unlock(chip);
 }
 
@@ -2834,8 +2911,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
        if (err)
                return err;
 
-       /* Port Control 2: don't force a good FCS, set the maximum frame size to
-        * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
+       /* Port Control 2: don't force a good FCS, set the MTU size to
+        * 10222 bytes, disable 802.1q tags checking, don't discard tagged or
         * untagged frames on this port, do a destination address lookup on all
         * received packets as usual, disable ARP mirroring and don't send a
         * copy of all transmitted/received frames on this port to the CPU.
@@ -2853,8 +2930,22 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
        if (err)
                return err;
 
+       /* Associate MV88E6XXX_VID_BRIDGED with MV88E6XXX_FID_BRIDGED in the
+        * ATU by virtue of the fact that mv88e6xxx_atu_new() will pick it as
+        * the first free FID after MV88E6XXX_FID_STANDALONE. This will be used
+        * as the private PVID on ports under a VLAN-unaware bridge.
+        * Shared (DSA and CPU) ports must also be members of it, to translate
+        * the VID from the DSA tag into MV88E6XXX_FID_BRIDGED, instead of
+        * relying on their port default FID.
+        */
+       err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_BRIDGED,
+                                      MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED,
+                                      false);
+       if (err)
+               return err;
+
        if (chip->info->ops->port_set_jumbo_size) {
-               err = chip->info->ops->port_set_jumbo_size(chip, port, 10240);
+               err = chip->info->ops->port_set_jumbo_size(chip, port, 10218);
                if (err)
                        return err;
        }
@@ -2925,7 +3016,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
         * database, and allow bidirectional communication between the
         * CPU and DSA port(s), and the other ports.
         */
-       err = mv88e6xxx_port_set_fid(chip, port, 0);
+       err = mv88e6xxx_port_set_fid(chip, port, MV88E6XXX_FID_STANDALONE);
        if (err)
                return err;
 
@@ -2944,10 +3035,10 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
        struct mv88e6xxx_chip *chip = ds->priv;
 
        if (chip->info->ops->port_set_jumbo_size)
-               return 10240;
+               return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
        else if (chip->info->ops->set_max_frame_size)
-               return 1632;
-       return 1522;
+               return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+       return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
 }
 
 static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
@@ -2955,6 +3046,9 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
        struct mv88e6xxx_chip *chip = ds->priv;
        int ret = 0;
 
+       if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+               new_mtu += EDSA_HLEN;
+
        mv88e6xxx_reg_lock(chip);
        if (chip->info->ops->port_set_jumbo_size)
                ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
@@ -3071,7 +3165,7 @@ static void mv88e6xxx_teardown(struct dsa_switch *ds)
 {
        mv88e6xxx_teardown_devlink_params(ds);
        dsa_devlink_resources_unregister(ds);
-       mv88e6xxx_teardown_devlink_regions(ds);
+       mv88e6xxx_teardown_devlink_regions_global(ds);
 }
 
 static int mv88e6xxx_setup(struct dsa_switch *ds)
@@ -3112,6 +3206,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
                }
        }
 
+       err = mv88e6xxx_vtu_setup(chip);
+       if (err)
+               goto unlock;
+
        /* Setup Switch Port Registers */
        for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
                if (dsa_is_unused_port(ds, i))
@@ -3141,10 +3239,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
        if (err)
                goto unlock;
 
-       err = mv88e6xxx_vtu_setup(chip);
-       if (err)
-               goto unlock;
-
        err = mv88e6xxx_pvt_setup(chip);
        if (err)
                goto unlock;
@@ -3215,7 +3309,7 @@ unlock:
        if (err)
                goto out_resources;
 
-       err = mv88e6xxx_setup_devlink_regions(ds);
+       err = mv88e6xxx_setup_devlink_regions_global(ds);
        if (err)
                goto out_params;
 
@@ -3229,6 +3323,16 @@ out_resources:
        return err;
 }
 
+static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
+{
+       return mv88e6xxx_setup_devlink_regions_port(ds, port);
+}
+
+static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
+{
+       mv88e6xxx_teardown_devlink_regions_port(ds, port);
+}
+
 /* prod_id for switch families which do not have a PHY model number */
 static const u16 family_prod_id_table[] = {
        [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
@@ -3715,7 +3819,6 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
        .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
        .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
@@ -3740,6 +3843,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
        .avb_ops = &mv88e6165_avb_ops,
        .ptp_ops = &mv88e6165_ptp_ops,
        .phylink_validate = mv88e6185_phylink_validate,
+       .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
 };
 
 static const struct mv88e6xxx_ops mv88e6165_ops = {
@@ -6116,6 +6220,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
        .change_tag_protocol    = mv88e6xxx_change_tag_protocol,
        .setup                  = mv88e6xxx_setup,
        .teardown               = mv88e6xxx_teardown,
+       .port_setup             = mv88e6xxx_port_setup,
+       .port_teardown          = mv88e6xxx_port_teardown,
        .phylink_validate       = mv88e6xxx_validate,
        .phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
        .phylink_mac_config     = mv88e6xxx_mac_config,
@@ -6389,7 +6495,12 @@ out:
 static void mv88e6xxx_remove(struct mdio_device *mdiodev)
 {
        struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
-       struct mv88e6xxx_chip *chip = ds->priv;
+       struct mv88e6xxx_chip *chip;
+
+       if (!ds)
+               return;
+
+       chip = ds->priv;
 
        if (chip->info->ptp_support) {
                mv88e6xxx_hwtstamp_free(chip);
@@ -6410,6 +6521,20 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
                mv88e6xxx_g1_irq_free(chip);
        else
                mv88e6xxx_irq_poll_free(chip);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
+{
+       struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+       if (!ds)
+               return;
+
+       dsa_switch_shutdown(ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static const struct of_device_id mv88e6xxx_of_match[] = {
@@ -6433,6 +6558,7 @@ MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match);
 static struct mdio_driver mv88e6xxx_driver = {
        .probe  = mv88e6xxx_probe,
        .remove = mv88e6xxx_remove,
+       .shutdown = mv88e6xxx_shutdown,
        .mdiodrv.driver = {
                .name = "mv88e6085",
                .of_match_table = mv88e6xxx_of_match,
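The pvid handling added throughout chip.c above funnels every decision through mv88e6xxx_port_commit_pvid(): a standalone port keeps the standalone VID, a port in a VLAN-unaware bridge gets the dedicated bridged VID, and a port in a VLAN-aware bridge uses whatever pvid the bridge installed, dropping untagged traffic when none is installed. A runnable truth-table of that decision; the VID values are stand-ins, not the driver's constants:

/* Standalone sketch of the decision made by mv88e6xxx_port_commit_pvid()
 * above.  VID values are illustrative stand-ins for the driver's
 * MV88E6XXX_VID_STANDALONE / MV88E6XXX_VID_BRIDGED constants.
 */
#include <stdbool.h>
#include <stdio.h>

#define VID_STANDALONE	0
#define VID_BRIDGED	4095

struct port_state {
	bool in_bridge;
	bool vlan_filtering;
	bool bridge_pvid_valid;
	unsigned short bridge_pvid;
};

static void commit_pvid(const struct port_state *p)
{
	unsigned short pvid = VID_STANDALONE;
	bool drop_untagged = false;

	if (p->in_bridge) {
		if (p->vlan_filtering) {
			pvid = p->bridge_pvid;
			drop_untagged = !p->bridge_pvid_valid;
		} else {
			pvid = VID_BRIDGED;
		}
	}

	printf("pvid %u, drop untagged: %s\n", pvid, drop_untagged ? "yes" : "no");
}

int main(void)
{
	struct port_state standalone = { 0 };
	struct port_state vlan_unaware = { .in_bridge = true };
	struct port_state vlan_aware_no_pvid = { .in_bridge = true,
						 .vlan_filtering = true };
	struct port_state vlan_aware_pvid = { .in_bridge = true,
					      .vlan_filtering = true,
					      .bridge_pvid_valid = true,
					      .bridge_pvid = 100 };

	commit_pvid(&standalone);		/* pvid 0, keep untagged */
	commit_pvid(&vlan_unaware);		/* pvid 4095, keep untagged */
	commit_pvid(&vlan_aware_no_pvid);	/* pvid removed: drop untagged */
	commit_pvid(&vlan_aware_pvid);		/* pvid 100, keep untagged */
	return 0;
}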
index 675b1f3e43b7bfc16694b226341710d06ab7744d..8271b8aa7b71eafad4fbcf16c7f550fa1a872e56 100644 (file)
 #include <linux/timecounter.h>
 #include <net/dsa.h>
 
+#define EDSA_HLEN              8
 #define MV88E6XXX_N_FID                4096
 
+#define MV88E6XXX_FID_STANDALONE       0
+#define MV88E6XXX_FID_BRIDGED          1
+
 /* PVT limits for 4-bit port and 5-bit switch */
 #define MV88E6XXX_MAX_PVT_SWITCHES     32
 #define MV88E6XXX_MAX_PVT_PORTS                16
@@ -245,9 +249,15 @@ struct mv88e6xxx_policy {
        u16 vid;
 };
 
+struct mv88e6xxx_vlan {
+       u16     vid;
+       bool    valid;
+};
+
 struct mv88e6xxx_port {
        struct mv88e6xxx_chip *chip;
        int port;
+       struct mv88e6xxx_vlan bridge_pvid;
        u64 serdes_stats[2];
        u64 atu_member_violation;
        u64 atu_miss_violation;
index 0c0f5ea6680c3ccd33c4f7d1bd7a22a1185ea647..381068395c63ba7cffad32c4cffbf85b5154dec3 100644 (file)
@@ -647,26 +647,25 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
        },
 };
 
-static void
-mv88e6xxx_teardown_devlink_regions_global(struct mv88e6xxx_chip *chip)
+void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
 {
+       struct mv88e6xxx_chip *chip = ds->priv;
        int i;
 
        for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
                dsa_devlink_region_destroy(chip->regions[i]);
 }
 
-static void
-mv88e6xxx_teardown_devlink_regions_port(struct mv88e6xxx_chip *chip,
-                                       int port)
+void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
 {
+       struct mv88e6xxx_chip *chip = ds->priv;
+
        dsa_devlink_region_destroy(chip->ports[port].region);
 }
 
-static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
-                                               struct mv88e6xxx_chip *chip,
-                                               int port)
+int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port)
 {
+       struct mv88e6xxx_chip *chip = ds->priv;
        struct devlink_region *region;
 
        region = dsa_devlink_port_region_create(ds,
@@ -681,40 +680,10 @@ static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
        return 0;
 }
 
-static void
-mv88e6xxx_teardown_devlink_regions_ports(struct mv88e6xxx_chip *chip)
-{
-       int port;
-
-       for (port = 0; port < mv88e6xxx_num_ports(chip); port++)
-               mv88e6xxx_teardown_devlink_regions_port(chip, port);
-}
-
-static int mv88e6xxx_setup_devlink_regions_ports(struct dsa_switch *ds,
-                                                struct mv88e6xxx_chip *chip)
-{
-       int port;
-       int err;
-
-       for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
-               err = mv88e6xxx_setup_devlink_regions_port(ds, chip, port);
-               if (err)
-                       goto out;
-       }
-
-       return 0;
-
-out:
-       while (port-- > 0)
-               mv88e6xxx_teardown_devlink_regions_port(chip, port);
-
-       return err;
-}
-
-static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
-                                                 struct mv88e6xxx_chip *chip)
+int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
 {
        bool (*cond)(struct mv88e6xxx_chip *chip);
+       struct mv88e6xxx_chip *chip = ds->priv;
        struct devlink_region_ops *ops;
        struct devlink_region *region;
        u64 size;
@@ -753,30 +722,6 @@ out:
        return PTR_ERR(region);
 }
 
-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds)
-{
-       struct mv88e6xxx_chip *chip = ds->priv;
-       int err;
-
-       err = mv88e6xxx_setup_devlink_regions_global(ds, chip);
-       if (err)
-               return err;
-
-       err = mv88e6xxx_setup_devlink_regions_ports(ds, chip);
-       if (err)
-               mv88e6xxx_teardown_devlink_regions_global(chip);
-
-       return err;
-}
-
-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds)
-{
-       struct mv88e6xxx_chip *chip = ds->priv;
-
-       mv88e6xxx_teardown_devlink_regions_ports(chip);
-       mv88e6xxx_teardown_devlink_regions_global(chip);
-}
-
 int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
                               struct devlink_info_req *req,
                               struct netlink_ext_ack *extack)
index 3d72db3dcf950ea327cc793b360ae8195ff89ebc..65ce6a6858b9faa9f7b3d9e4964c518ea8591c32 100644 (file)
@@ -12,8 +12,10 @@ int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
                                struct devlink_param_gset_ctx *ctx);
 int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
                                struct devlink_param_gset_ctx *ctx);
-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds);
-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds);
+void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port);
+void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port);
 
 int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
                               struct devlink_info_req *req,
index 815b0f681d698fc75848a16861f32524083456b2..5848112036b08d90ea584f49d9146638ea695158 100644 (file)
@@ -232,6 +232,8 @@ int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu)
        u16 val;
        int err;
 
+       mtu += ETH_HLEN + ETH_FCS_LEN;
+
        err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
        if (err)
                return err;
index f77e2ee64a607a8cae7c6c1c927c4a6f1a01d9b9..d9817b20ea641f9b642435e693b5ee7e86398a7e 100644 (file)
@@ -1257,6 +1257,27 @@ int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
        return 0;
 }
 
+int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
+                                bool drop_untagged)
+{
+       u16 old, new;
+       int err;
+
+       err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &old);
+       if (err)
+               return err;
+
+       if (drop_untagged)
+               new = old | MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED;
+       else
+               new = old & ~MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED;
+
+       if (new == old)
+               return 0;
+
+       return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, new);
+}
+
 int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
 {
        u16 reg;
@@ -1277,6 +1298,8 @@ int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
        u16 reg;
        int err;
 
+       size += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
        err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
        if (err)
                return err;
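
Note: both hunks above correct the same unit mismatch. The DSA core passes an L2 MTU (payload size), while the chip registers hold a full frame size, so the header and FCS must be added back before programming the hardware; the jumbo register additionally budgets for a VLAN tag. A small sketch of the conversion, assuming only the standard constants from the Ethernet headers:

#include <linux/if_ether.h>	/* ETH_HLEN = 14, ETH_FCS_LEN = 4 */
#include <linux/if_vlan.h>	/* VLAN_ETH_HLEN = 18 */

/* Illustration only: a 1500-byte MTU maps to a 1518-byte max frame size
 * (1500 + 14 + 4); a 9000-byte jumbo MTU maps to 9022 (9000 + 18 + 4).
 */
static inline int example_mtu_to_max_frame(int mtu)
{
	return mtu + ETH_HLEN + ETH_FCS_LEN;
}

static inline int example_mtu_to_jumbo_frame(int mtu)
{
	return mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
}
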
index b10e5aebacf6805916ddf6f28136e069e1947946..03382b66f80037dbf731178b3f6e1ee6c122db2a 100644 (file)
@@ -423,6 +423,8 @@ int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                              phy_interface_t mode);
 int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
 int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
+                                bool drop_untagged);
 int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
 int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
                                     int upstream_port);
index 3656e67af789c198b22ade89d8123aacbc99f620..341236dcbdb472b6d8a2fd19b9a8e3aebed625d8 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2019-2021 NXP Semiconductors
+/* Copyright 2019-2021 NXP
  *
  * This is an umbrella module for all network switches that are
  * register-compatible with Ocelot and that perform I/O to their host CPU
@@ -266,12 +266,12 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
  */
 static int felix_setup_mmio_filtering(struct felix *felix)
 {
-       unsigned long user_ports = 0, cpu_ports = 0;
+       unsigned long user_ports = dsa_user_ports(felix->ds);
        struct ocelot_vcap_filter *redirect_rule;
        struct ocelot_vcap_filter *tagging_rule;
        struct ocelot *ocelot = &felix->ocelot;
        struct dsa_switch *ds = felix->ds;
-       int port, ret;
+       int cpu = -1, port, ret;
 
        tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
        if (!tagging_rule)
@@ -284,12 +284,15 @@ static int felix_setup_mmio_filtering(struct felix *felix)
        }
 
        for (port = 0; port < ocelot->num_phys_ports; port++) {
-               if (dsa_is_user_port(ds, port))
-                       user_ports |= BIT(port);
-               if (dsa_is_cpu_port(ds, port))
-                       cpu_ports |= BIT(port);
+               if (dsa_is_cpu_port(ds, port)) {
+                       cpu = port;
+                       break;
+               }
        }
 
+       if (cpu < 0)
+               return -EINVAL;
+
        tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
        *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
        *(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
@@ -325,7 +328,7 @@ static int felix_setup_mmio_filtering(struct felix *felix)
                 * the CPU port module
                 */
                redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
-               redirect_rule->action.port_mask = cpu_ports;
+               redirect_rule->action.port_mask = BIT(cpu);
        } else {
                /* Trap PTP packets only to the CPU port module (which is
                 * redirected to the NPI port)
@@ -1074,6 +1077,101 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
        return 0;
 }
 
+static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
+                                          struct sk_buff *skb)
+{
+       struct ocelot_port *ocelot_port = ocelot->ports[port];
+       struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+       struct sk_buff *skb_match = NULL, *skb_tmp;
+       unsigned long flags;
+
+       if (!clone)
+               return;
+
+       spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);
+
+       skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
+               if (skb != clone)
+                       continue;
+               __skb_unlink(skb, &ocelot_port->tx_skbs);
+               skb_match = skb;
+               break;
+       }
+
+       spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);
+
+       WARN_ONCE(!skb_match,
+                 "Could not find skb clone in TX timestamping list\n");
+}
+
+#define work_to_xmit_work(w) \
+               container_of((w), struct felix_deferred_xmit_work, work)
+
+static void felix_port_deferred_xmit(struct kthread_work *work)
+{
+       struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+       struct dsa_switch *ds = xmit_work->dp->ds;
+       struct sk_buff *skb = xmit_work->skb;
+       u32 rew_op = ocelot_ptp_rew_op(skb);
+       struct ocelot *ocelot = ds->priv;
+       int port = xmit_work->dp->index;
+       int retries = 10;
+
+       do {
+               if (ocelot_can_inject(ocelot, 0))
+                       break;
+
+               cpu_relax();
+       } while (--retries);
+
+       if (!retries) {
+               dev_err(ocelot->dev, "port %d failed to inject skb\n",
+                       port);
+               ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
+               kfree_skb(skb);
+               return;
+       }
+
+       ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+
+       consume_skb(skb);
+       kfree(xmit_work);
+}
+
+static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
+{
+       struct dsa_port *dp = dsa_to_port(ds, port);
+       struct ocelot *ocelot = ds->priv;
+       struct felix *felix = ocelot_to_felix(ocelot);
+       struct felix_port *felix_port;
+
+       if (!dsa_port_is_user(dp))
+               return 0;
+
+       felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
+       if (!felix_port)
+               return -ENOMEM;
+
+       felix_port->xmit_worker = felix->xmit_worker;
+       felix_port->xmit_work_fn = felix_port_deferred_xmit;
+
+       dp->priv = felix_port;
+
+       return 0;
+}
+
+static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
+{
+       struct dsa_port *dp = dsa_to_port(ds, port);
+       struct felix_port *felix_port = dp->priv;
+
+       if (!felix_port)
+               return;
+
+       dp->priv = NULL;
+       kfree(felix_port);
+}
+
 /* Hardware initialization done here so that we can allocate structures with
  * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
  * us to allocate structures twice (leak memory) and map PCI memory twice
@@ -1102,6 +1200,12 @@ static int felix_setup(struct dsa_switch *ds)
                }
        }
 
+       felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
+       if (IS_ERR(felix->xmit_worker)) {
+               err = PTR_ERR(felix->xmit_worker);
+               goto out_deinit_timestamp;
+       }
+
        for (port = 0; port < ds->num_ports; port++) {
                if (dsa_is_unused_port(ds, port))
                        continue;
@@ -1112,6 +1216,14 @@ static int felix_setup(struct dsa_switch *ds)
                 * bits of vlan tag.
                 */
                felix_port_qos_map_init(ocelot, port);
+
+               err = felix_port_setup_tagger_data(ds, port);
+               if (err) {
+                       dev_err(ds->dev,
+                               "port %d failed to set up tagger data: %pe\n",
+                               port, ERR_PTR(err));
+                       goto out_deinit_ports;
+               }
        }
 
        err = ocelot_devlink_sb_register(ocelot);
@@ -1126,6 +1238,7 @@ static int felix_setup(struct dsa_switch *ds)
                 * there's no real point in checking for errors.
                 */
                felix_set_tag_protocol(ds, port, felix->tag_proto);
+               break;
        }
 
        ds->mtu_enforcement_ingress = true;
@@ -1138,9 +1251,13 @@ out_deinit_ports:
                if (dsa_is_unused_port(ds, port))
                        continue;
 
+               felix_port_teardown_tagger_data(ds, port);
                ocelot_deinit_port(ocelot, port);
        }
 
+       kthread_destroy_worker(felix->xmit_worker);
+
+out_deinit_timestamp:
        ocelot_deinit_timestamp(ocelot);
        ocelot_deinit(ocelot);
 
@@ -1162,19 +1279,23 @@ static void felix_teardown(struct dsa_switch *ds)
                        continue;
 
                felix_del_tag_protocol(ds, port, felix->tag_proto);
+               break;
        }
 
-       ocelot_devlink_sb_unregister(ocelot);
-       ocelot_deinit_timestamp(ocelot);
-       ocelot_deinit(ocelot);
-
        for (port = 0; port < ocelot->num_phys_ports; port++) {
                if (dsa_is_unused_port(ds, port))
                        continue;
 
+               felix_port_teardown_tagger_data(ds, port);
                ocelot_deinit_port(ocelot, port);
        }
 
+       kthread_destroy_worker(felix->xmit_worker);
+
+       ocelot_devlink_sb_unregister(ocelot);
+       ocelot_deinit_timestamp(ocelot);
+       ocelot_deinit(ocelot);
+
        if (felix->info->mdio_bus_free)
                felix->info->mdio_bus_free(ocelot);
 }
@@ -1291,8 +1412,12 @@ static void felix_txtstamp(struct dsa_switch *ds, int port,
        if (!ocelot->ptp)
                return;
 
-       if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone))
+       if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
+               dev_err_ratelimited(ds->dev,
+                                   "port %d delivering skb without TX timestamp\n",
+                                   port);
                return;
+       }
 
        if (clone)
                OCELOT_SKB_CB(skb)->clone = clone;
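
Note: felix_port_deferred_xmit() above is only the consumer half of the deferred-injection scheme. The per-port tagger data exposes the kthread worker and the work function, and the tagger is expected to queue a felix_deferred_xmit_work item when it cannot inject from its own context. The producer side is not part of this hunk; the following is a hedged sketch of what it might look like, using the field names the consumer dereferences (work, dp, skb) and assuming the shared struct definitions live in the ocelot tagger header.

#include <linux/dsa/ocelot.h>	/* assumed home of struct felix_port et al. */
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dsa.h>

/* Hypothetical producer: allocate a deferred-xmit item and hand it to the
 * per-port worker set up in felix_port_setup_tagger_data().
 */
static int example_defer_xmit(struct felix_port *felix_port,
			      struct dsa_port *dp, struct sk_buff *skb)
{
	struct felix_deferred_xmit_work *xmit_work;

	xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
	if (!xmit_work)
		return -ENOMEM;

	kthread_init_work(&xmit_work->work, felix_port->xmit_work_fn);
	xmit_work->dp = dp;
	xmit_work->skb = skb;

	kthread_queue_work(felix_port->xmit_worker, &xmit_work->work);

	return 0;
}
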
index 5854bab433273c7955dc035109427f45bf429bcc..be3e42e135c008213a51ec6d51f6277f5238fb39 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright 2019 NXP Semiconductors
+/* Copyright 2019 NXP
  */
 #ifndef _MSCC_FELIX_H
 #define _MSCC_FELIX_H
@@ -62,6 +62,7 @@ struct felix {
        resource_size_t                 switch_base;
        resource_size_t                 imdio_base;
        enum dsa_tag_protocol           tag_proto;
+       struct kthread_worker           *xmit_worker;
 };
 
 struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
index f966a253d1c77a9aa6af3b6f27327e61c9951d56..11b42fd812e4a9658edda1bf764704f12972d471 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Copyright 2017 Microsemi Corporation
- * Copyright 2018-2019 NXP Semiconductors
+ * Copyright 2018-2019 NXP
  */
 #include <linux/fsl/enetc_mdio.h>
 #include <soc/mscc/ocelot_qsys.h>
@@ -1472,9 +1472,10 @@ err_pci_enable:
 
 static void felix_pci_remove(struct pci_dev *pdev)
 {
-       struct felix *felix;
+       struct felix *felix = pci_get_drvdata(pdev);
 
-       felix = pci_get_drvdata(pdev);
+       if (!felix)
+               return;
 
        dsa_unregister_switch(felix->ds);
 
@@ -1482,6 +1483,20 @@ static void felix_pci_remove(struct pci_dev *pdev)
        kfree(felix);
 
        pci_disable_device(pdev);
+
+       pci_set_drvdata(pdev, NULL);
+}
+
+static void felix_pci_shutdown(struct pci_dev *pdev)
+{
+       struct felix *felix = pci_get_drvdata(pdev);
+
+       if (!felix)
+               return;
+
+       dsa_switch_shutdown(felix->ds);
+
+       pci_set_drvdata(pdev, NULL);
 }
 
 static struct pci_device_id felix_ids[] = {
@@ -1498,6 +1513,7 @@ static struct pci_driver felix_vsc9959_pci_driver = {
        .id_table       = felix_ids,
        .probe          = felix_pci_probe,
        .remove         = felix_pci_remove,
+       .shutdown       = felix_pci_shutdown,
 };
 module_pci_driver(felix_vsc9959_pci_driver);
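
Note: the felix_pci_remove()/felix_pci_shutdown() pair above establishes the convention repeated in every driver touched below: .remove fully unregisters the switch, .shutdown only quiesces it with dsa_switch_shutdown(), and both clear the driver data so that whichever callback runs second becomes a no-op. A condensed sketch of the pattern for a generic platform device; the names here are illustrative, not taken from any one driver.

#include <linux/platform_device.h>
#include <net/dsa.h>

struct example_priv {
	struct dsa_switch *ds;
};

static int example_remove(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	if (!priv)
		return 0;

	dsa_unregister_switch(priv->ds);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void example_shutdown(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	if (!priv)
		return;

	dsa_switch_shutdown(priv->ds);
	platform_set_drvdata(pdev, NULL);
}
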
 
index deae923c8b7a82bb4dda3a241f2a5e96f1fcd6ae..de1d34a1f1e47497f99f0e79528b445f6d73d990 100644 (file)
@@ -1245,18 +1245,33 @@ err_alloc_felix:
 
 static int seville_remove(struct platform_device *pdev)
 {
-       struct felix *felix;
+       struct felix *felix = platform_get_drvdata(pdev);
 
-       felix = platform_get_drvdata(pdev);
+       if (!felix)
+               return 0;
 
        dsa_unregister_switch(felix->ds);
 
        kfree(felix->ds);
        kfree(felix);
 
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
+static void seville_shutdown(struct platform_device *pdev)
+{
+       struct felix *felix = platform_get_drvdata(pdev);
+
+       if (!felix)
+               return;
+
+       dsa_switch_shutdown(felix->ds);
+
+       platform_set_drvdata(pdev, NULL);
+}
+
 static const struct of_device_id seville_of_match[] = {
        { .compatible = "mscc,vsc9953-switch" },
        { },
@@ -1266,6 +1281,7 @@ MODULE_DEVICE_TABLE(of, seville_of_match);
 static struct platform_driver seville_vsc9953_driver = {
        .probe          = seville_probe,
        .remove         = seville_remove,
+       .shutdown       = seville_shutdown,
        .driver = {
                .name           = "mscc_seville",
                .of_match_table = of_match_ptr(seville_of_match),
index 563d8a27903060ae86e2c39ede2b8cf522bd6014..a6bfb6abc51a7dd9e3393043cc648bc6c8f5874d 100644 (file)
@@ -1083,6 +1083,9 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
        struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
        unsigned int i;
 
+       if (!priv)
+               return;
+
        for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
                struct ar9331_sw_port *port = &priv->port[i];
 
@@ -1094,6 +1097,20 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
        dsa_unregister_switch(&priv->ds);
 
        reset_control_assert(priv->sw_reset);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void ar9331_sw_shutdown(struct mdio_device *mdiodev)
+{
+       struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+       if (!priv)
+               return;
+
+       dsa_switch_shutdown(&priv->ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static const struct of_device_id ar9331_sw_of_match[] = {
@@ -1104,6 +1121,7 @@ static const struct of_device_id ar9331_sw_of_match[] = {
 static struct mdio_driver ar9331_sw_mdio_driver = {
        .probe = ar9331_sw_probe,
        .remove = ar9331_sw_remove,
+       .shutdown = ar9331_sw_shutdown,
        .mdiodrv.driver = {
                .name = AR9331_SW_NAME,
                .of_match_table = ar9331_sw_of_match,
index 1f63f50f73f1733558b7867752d13b6d13a2b7c8..a984f06f6f04f36b733ca912d52935b54089f4a2 100644 (file)
@@ -643,10 +643,8 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
 }
 
 static int
-qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data)
+qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
 {
-       struct qca8k_priv *priv = salve_bus->priv;
-       struct mii_bus *bus = priv->bus;
        u16 r1, r2, page;
        u32 val;
        int ret;
@@ -682,10 +680,8 @@ exit:
 }
 
 static int
-qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum)
+qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
 {
-       struct qca8k_priv *priv = salve_bus->priv;
-       struct mii_bus *bus = priv->bus;
        u16 r1, r2, page;
        u32 val;
        int ret;
@@ -726,6 +722,24 @@ exit:
        return ret;
 }
 
+static int
+qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
+{
+       struct qca8k_priv *priv = slave_bus->priv;
+       struct mii_bus *bus = priv->bus;
+
+       return qca8k_mdio_write(bus, phy, regnum, data);
+}
+
+static int
+qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
+{
+       struct qca8k_priv *priv = slave_bus->priv;
+       struct mii_bus *bus = priv->bus;
+
+       return qca8k_mdio_read(bus, phy, regnum);
+}
+
 static int
 qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
 {
@@ -775,8 +789,8 @@ qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
 
        bus->priv = (void *)priv;
        bus->name = "qca8k slave mii";
-       bus->read = qca8k_mdio_read;
-       bus->write = qca8k_mdio_write;
+       bus->read = qca8k_internal_mdio_read;
+       bus->write = qca8k_internal_mdio_write;
        snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
                 ds->index);
 
@@ -1866,10 +1880,27 @@ qca8k_sw_remove(struct mdio_device *mdiodev)
        struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
        int i;
 
+       if (!priv)
+               return;
+
        for (i = 0; i < QCA8K_NUM_PORTS; i++)
                qca8k_port_set_status(priv, i, 0);
 
        dsa_unregister_switch(priv->ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
+{
+       struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+       if (!priv)
+               return;
+
+       dsa_switch_shutdown(priv->ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -1926,6 +1957,7 @@ static const struct of_device_id qca8k_of_match[] = {
 static struct mdio_driver qca8kmdio_driver = {
        .probe  = qca8k_sw_probe,
        .remove = qca8k_sw_remove,
+       .shutdown = qca8k_sw_shutdown,
        .mdiodrv.driver = {
                .name = "qca8k",
                .of_match_table = qca8k_of_match,
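
Note: the MDIO split above leaves the driver with two call paths: the registered slave MII bus goes through the qca8k_internal_mdio_*() wrappers, which recover the management bus from bus->priv, while code that already holds the qca8k_priv can talk to the management bus directly. A short sketch of the direct path; the helper name is illustrative and would have to live in the same file, since the low-level accessors are static.

/* Hypothetical in-file helper: bypass the slave-bus indirection when the
 * qca8k_priv is already at hand.
 */
static int example_phy_read_direct(struct qca8k_priv *priv, int phy, int regnum)
{
	return qca8k_mdio_read(priv->bus, phy, regnum);
}
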
index 8e49d4f85d48c31da45dd239d994d0cb0ac07d44..2fcfd917b876b5bb7c5f37f1328e5fdc99d169cc 100644 (file)
@@ -368,7 +368,7 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
        smi->slave_mii_bus->parent = smi->dev;
        smi->ds->slave_mii_bus = smi->slave_mii_bus;
 
-       ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np);
+       ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
        if (ret) {
                dev_err(smi->dev, "unable to register MDIO bus %s\n",
                        smi->slave_mii_bus->id);
@@ -464,16 +464,33 @@ static int realtek_smi_probe(struct platform_device *pdev)
 
 static int realtek_smi_remove(struct platform_device *pdev)
 {
-       struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
+       struct realtek_smi *smi = platform_get_drvdata(pdev);
+
+       if (!smi)
+               return 0;
 
        dsa_unregister_switch(smi->ds);
        if (smi->slave_mii_bus)
                of_node_put(smi->slave_mii_bus->dev.of_node);
        gpiod_set_value(smi->reset, 1);
 
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
+static void realtek_smi_shutdown(struct platform_device *pdev)
+{
+       struct realtek_smi *smi = platform_get_drvdata(pdev);
+
+       if (!smi)
+               return;
+
+       dsa_switch_shutdown(smi->ds);
+
+       platform_set_drvdata(pdev, NULL);
+}
+
 static const struct of_device_id realtek_smi_of_match[] = {
        {
                .compatible = "realtek,rtl8366rb",
@@ -495,6 +512,7 @@ static struct platform_driver realtek_smi_driver = {
        },
        .probe  = realtek_smi_probe,
        .remove = realtek_smi_remove,
+       .shutdown = realtek_smi_shutdown,
 };
 module_platform_driver(realtek_smi_driver);
 
index 387a1f2f161c7e4deffe66980e2ad5a77beb7541..5bbf1707f2af5c50e26024ac99ce431842803d44 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #include <linux/packing.h>
index 05c7f4ca3b1a31dad45061058c710d28562e41d8..0569ff066634dee718bfbc7e1b2b40c45bc080aa 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
- * Copyright 2020 NXP Semiconductors
+ * Copyright 2020 NXP
  */
 #include "sja1105.h"
 
index 6c10ffa968ce6a06876575f849232109daf2b075..72b9b39b0989d265d9902d18804e75f8502423b5 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
  */
 #include "sja1105.h"
 #include "sja1105_vl.h"
index 2f8cc6686c38caf67a116bd8469fcfc0ef683869..924c3f129992f27c134ed95873404ec9ae8695ed 100644 (file)
@@ -3117,7 +3117,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
        sja1105_static_config_free(&priv->static_config);
 }
 
-const struct dsa_switch_ops sja1105_switch_ops = {
+static const struct dsa_switch_ops sja1105_switch_ops = {
        .get_tag_protocol       = sja1105_get_tag_protocol,
        .setup                  = sja1105_setup,
        .teardown               = sja1105_teardown,
@@ -3166,7 +3166,6 @@ const struct dsa_switch_ops sja1105_switch_ops = {
        .port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload,
        .port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload,
 };
-EXPORT_SYMBOL_GPL(sja1105_switch_ops);
 
 static const struct of_device_id sja1105_dt_ids[];
 
@@ -3335,13 +3334,29 @@ static int sja1105_probe(struct spi_device *spi)
 static int sja1105_remove(struct spi_device *spi)
 {
        struct sja1105_private *priv = spi_get_drvdata(spi);
-       struct dsa_switch *ds = priv->ds;
 
-       dsa_unregister_switch(ds);
+       if (!priv)
+               return 0;
+
+       dsa_unregister_switch(priv->ds);
+
+       spi_set_drvdata(spi, NULL);
 
        return 0;
 }
 
+static void sja1105_shutdown(struct spi_device *spi)
+{
+       struct sja1105_private *priv = spi_get_drvdata(spi);
+
+       if (!priv)
+               return;
+
+       dsa_switch_shutdown(priv->ds);
+
+       spi_set_drvdata(spi, NULL);
+}
+
 static const struct of_device_id sja1105_dt_ids[] = {
        { .compatible = "nxp,sja1105e", .data = &sja1105e_info },
        { .compatible = "nxp,sja1105t", .data = &sja1105t_info },
@@ -3365,6 +3380,7 @@ static struct spi_driver sja1105_driver = {
        },
        .probe  = sja1105_probe,
        .remove = sja1105_remove,
+       .shutdown = sja1105_shutdown,
 };
 
 module_spi_driver(sja1105_driver);
index 705d3900e43a3bdd9bc2c14da5db5be1f5b6fdad..215dd17ca7906c4e358fc0c4b4105521c77d481b 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2021, NXP Semiconductors
+/* Copyright 2021 NXP
  */
 #include <linux/pcs/pcs-xpcs.h>
 #include <linux/of_mdio.h>
index 691f6dd7e669738e3316b0909ba578dfed78bcd4..54396992a9199c39b1abcabced620e5c6b4e9c58 100644 (file)
@@ -64,6 +64,7 @@ enum sja1105_ptp_clk_mode {
 static int sja1105_change_rxtstamping(struct sja1105_private *priv,
                                      bool on)
 {
+       struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
        struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
        struct sja1105_general_params_entry *general_params;
        struct sja1105_table *table;
@@ -79,7 +80,7 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
                priv->tagger_data.stampable_skb = NULL;
        }
        ptp_cancel_worker_sync(ptp_data->clock);
-       skb_queue_purge(&ptp_data->skb_txtstamp_queue);
+       skb_queue_purge(&tagger_data->skb_txtstamp_queue);
        skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
 
        return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
@@ -452,40 +453,6 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
        return priv->info->rxtstamp(ds, port, skb);
 }
 
-void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
-                                enum sja1110_meta_tstamp dir, u64 tstamp)
-{
-       struct sja1105_private *priv = ds->priv;
-       struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
-       struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
-       struct skb_shared_hwtstamps shwt = {0};
-
-       /* We don't care about RX timestamps on the CPU port */
-       if (dir == SJA1110_META_TSTAMP_RX)
-               return;
-
-       spin_lock(&ptp_data->skb_txtstamp_queue.lock);
-
-       skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) {
-               if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
-                       continue;
-
-               __skb_unlink(skb, &ptp_data->skb_txtstamp_queue);
-               skb_match = skb;
-
-               break;
-       }
-
-       spin_unlock(&ptp_data->skb_txtstamp_queue.lock);
-
-       if (WARN_ON(!skb_match))
-               return;
-
-       shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
-       skb_complete_tx_timestamp(skb_match, &shwt);
-}
-EXPORT_SYMBOL_GPL(sja1110_process_meta_tstamp);
-
 /* In addition to cloning the skb which is done by the common
  * sja1105_port_txtstamp, we need to generate a timestamp ID and save the
  * packet to the TX timestamping queue.
@@ -494,7 +461,6 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
 {
        struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
        struct sja1105_private *priv = ds->priv;
-       struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
        struct sja1105_port *sp = &priv->ports[port];
        u8 ts_id;
 
@@ -510,7 +476,7 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
 
        spin_unlock(&sp->data->meta_lock);
 
-       skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone);
+       skb_queue_tail(&sp->data->skb_txtstamp_queue, clone);
 }
 
 /* Called from dsa_skb_tx_timestamp. This callback is just to clone
@@ -953,7 +919,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
        /* Only used on SJA1105 */
        skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
        /* Only used on SJA1110 */
-       skb_queue_head_init(&ptp_data->skb_txtstamp_queue);
+       skb_queue_head_init(&tagger_data->skb_txtstamp_queue);
        spin_lock_init(&tagger_data->meta_lock);
 
        ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
@@ -971,6 +937,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
 void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
 {
        struct sja1105_private *priv = ds->priv;
+       struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
        struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
 
        if (IS_ERR_OR_NULL(ptp_data->clock))
@@ -978,7 +945,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
 
        del_timer_sync(&ptp_data->extts_timer);
        ptp_cancel_worker_sync(ptp_data->clock);
-       skb_queue_purge(&ptp_data->skb_txtstamp_queue);
+       skb_queue_purge(&tagger_data->skb_txtstamp_queue);
        skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
        ptp_clock_unregister(ptp_data->clock);
        ptp_data->clock = NULL;
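
Note: moving skb_txtstamp_queue from the PTP state into the tagger data lets the code that receives SJA1110 meta frames complete two-step TX timestamps without reaching into ptp_data. Below is a hedged sketch of that consumer, modelled on the removed sja1110_process_meta_tstamp() above but keyed off the tagger state; the conversion from ticks to nanoseconds is assumed to have happened before this point.

#include <linux/dsa/sja1105.h>
#include <linux/ktime.h>
#include <linux/skbuff.h>

static void example_complete_tx_tstamp(struct sja1105_tagger_data *tagger_data,
				       u8 ts_id, u64 ns)
{
	struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shwt = {0};

	spin_lock(&tagger_data->skb_txtstamp_queue.lock);

	skb_queue_walk_safe(&tagger_data->skb_txtstamp_queue, skb, skb_tmp) {
		if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
			continue;

		__skb_unlink(skb, &tagger_data->skb_txtstamp_queue);
		skb_match = skb;
		break;
	}

	spin_unlock(&tagger_data->skb_txtstamp_queue.lock);

	if (!skb_match)
		return;

	shwt.hwtstamp = ns_to_ktime(ns);
	skb_complete_tx_timestamp(skb_match, &shwt);
}
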
index 3c874bb4c17b7385b727c436c3482b4ea8529231..3ae6b9fdd492b5eaadc6e96b233ce8536fbddfb5 100644 (file)
@@ -8,21 +8,6 @@
 
 #if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
 
-/* Timestamps are in units of 8 ns clock ticks (equivalent to
- * a fixed 125 MHz clock).
- */
-#define SJA1105_TICK_NS                        8
-
-static inline s64 ns_to_sja1105_ticks(s64 ns)
-{
-       return ns / SJA1105_TICK_NS;
-}
-
-static inline s64 sja1105_ticks_to_ns(s64 ticks)
-{
-       return ticks * SJA1105_TICK_NS;
-}
-
 /* Calculate the first base_time in the future that satisfies this
  * relationship:
  *
@@ -77,10 +62,6 @@ struct sja1105_ptp_data {
        struct timer_list extts_timer;
        /* Used only on SJA1105 to reconstruct partial timestamps */
        struct sk_buff_head skb_rxtstamp_queue;
-       /* Used on SJA1110 where meta frames are generated only for
-        * 2-step TX timestamps
-        */
-       struct sk_buff_head skb_txtstamp_queue;
        struct ptp_clock_info caps;
        struct ptp_clock *clock;
        struct sja1105_ptp_cmd cmd;
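
Note: the tick helpers dropped from this header encoded the fact that SJA1105 hardware timestamps count 8 ns ticks of a fixed 125 MHz clock; the conversion presumably moves alongside the shared tagger definitions, outside this hunk. Restated for reference only, with placeholder names rather than the relocated kernel symbols:

#include <linux/types.h>

/* 1 tick = 8 ns (125 MHz), so e.g. 1250 ticks = 10 us. */
#define EXAMPLE_SJA1105_TICK_NS		8

static inline s64 example_sja1105_ticks_to_ns(s64 ticks)
{
	return ticks * EXAMPLE_SJA1105_TICK_NS;
}

static inline s64 example_ns_to_sja1105_ticks(s64 ns)
{
	return ns / EXAMPLE_SJA1105_TICK_NS;
}
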
index d60a530d0272f2c65d642dbccbef12f5c313ea87..d3c9ad6d39d469ae90989bc5f80c446119fccf0d 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
  * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
index 7a422ef4deb6f29c4a1a3d65001f59535af0ab40..baba204ad62f6b507a6ccf3337248dd02b777249 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #include "sja1105_static_config.h"
index bce0f5c03d0bfd0740a309f02490fa64243dc6cb..6a372d5f22aeb78ebf94b658def0b92f639faa96 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _SJA1105_STATIC_CONFIG_H
index ec7b65daec209a81367064edd2265f6b54737be9..6802f4057cc03835b5fece974da657ac9acc1281 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
  */
 #include <net/tc_act/tc_gate.h>
 #include <linux/dsa/8021q.h>
index 173d78963fed5dd4ab04a2600426f0fce9cdbf0b..51fba0dce91a90aa7b490cca940468d8189c8c50 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
  */
 #ifndef _SJA1105_VL_H
 #define _SJA1105_VL_H
index 19ce4aa0973b473e089e3998408ed31e2e825b13..a4b1447ff05570b24aeadb0111d73fa59c9d4a2e 100644 (file)
@@ -1225,6 +1225,12 @@ int vsc73xx_remove(struct vsc73xx *vsc)
 }
 EXPORT_SYMBOL(vsc73xx_remove);
 
+void vsc73xx_shutdown(struct vsc73xx *vsc)
+{
+       dsa_switch_shutdown(vsc->ds);
+}
+EXPORT_SYMBOL(vsc73xx_shutdown);
+
 MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
 MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver");
 MODULE_LICENSE("GPL v2");
index 2a57f337b2a2595197fde6190a5e8a965971d09d..fe4b154a0a575f8d363dc72df1aa98eb27d95f7f 100644 (file)
@@ -116,7 +116,26 @@ static int vsc73xx_platform_remove(struct platform_device *pdev)
 {
        struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
 
-       return vsc73xx_remove(&vsc_platform->vsc);
+       if (!vsc_platform)
+               return 0;
+
+       vsc73xx_remove(&vsc_platform->vsc);
+
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+static void vsc73xx_platform_shutdown(struct platform_device *pdev)
+{
+       struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
+
+       if (!vsc_platform)
+               return;
+
+       vsc73xx_shutdown(&vsc_platform->vsc);
+
+       platform_set_drvdata(pdev, NULL);
 }
 
 static const struct vsc73xx_ops vsc73xx_platform_ops = {
@@ -144,6 +163,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
 static struct platform_driver vsc73xx_platform_driver = {
        .probe = vsc73xx_platform_probe,
        .remove = vsc73xx_platform_remove,
+       .shutdown = vsc73xx_platform_shutdown,
        .driver = {
                .name = "vsc73xx-platform",
                .of_match_table = vsc73xx_of_match,
index 81eca4a5781de4266c31c7a954ea061fc6ff3962..645398901e05e928361e9a31bb80a8db9c050887 100644 (file)
@@ -163,7 +163,26 @@ static int vsc73xx_spi_remove(struct spi_device *spi)
 {
        struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
 
-       return vsc73xx_remove(&vsc_spi->vsc);
+       if (!vsc_spi)
+               return 0;
+
+       vsc73xx_remove(&vsc_spi->vsc);
+
+       spi_set_drvdata(spi, NULL);
+
+       return 0;
+}
+
+static void vsc73xx_spi_shutdown(struct spi_device *spi)
+{
+       struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
+
+       if (!vsc_spi)
+               return;
+
+       vsc73xx_shutdown(&vsc_spi->vsc);
+
+       spi_set_drvdata(spi, NULL);
 }
 
 static const struct vsc73xx_ops vsc73xx_spi_ops = {
@@ -191,6 +210,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
 static struct spi_driver vsc73xx_spi_driver = {
        .probe = vsc73xx_spi_probe,
        .remove = vsc73xx_spi_remove,
+       .shutdown = vsc73xx_spi_shutdown,
        .driver = {
                .name = "vsc73xx-spi",
                .of_match_table = vsc73xx_of_match,
index 7478f8d4e0a992be4294b3f2bf532acb1c4a667c..30b951504e6513a2bd96b9802a7d3938ea87f920 100644 (file)
@@ -27,3 +27,4 @@ struct vsc73xx_ops {
 int vsc73xx_is_addr_valid(u8 block, u8 subblock);
 int vsc73xx_probe(struct vsc73xx *vsc);
 int vsc73xx_remove(struct vsc73xx *vsc);
+void vsc73xx_shutdown(struct vsc73xx *vsc);
index 130abb0f1438e8f827a7877ae0a056d23a7430af..469420941054e9d4e6f9b62b75803d47164c110a 100644 (file)
@@ -822,6 +822,12 @@ void xrs700x_switch_remove(struct xrs700x *priv)
 }
 EXPORT_SYMBOL(xrs700x_switch_remove);
 
+void xrs700x_switch_shutdown(struct xrs700x *priv)
+{
+       dsa_switch_shutdown(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_shutdown);
+
 MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
 MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
 MODULE_LICENSE("GPL v2");
index ff62cf61b0915191c0fa3a689374b058bd98b6b7..4d58257471d24104909c0741c5e35b50659c93cc 100644 (file)
@@ -40,3 +40,4 @@ struct xrs700x {
 struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv);
 int xrs700x_switch_register(struct xrs700x *priv);
 void xrs700x_switch_remove(struct xrs700x *priv);
+void xrs700x_switch_shutdown(struct xrs700x *priv);
index 489d9385b4f0ab17231cae8548ec0fcc671978c6..6deae388a0d6f89809fab4df62478a263fa85231 100644 (file)
@@ -109,11 +109,28 @@ static int xrs700x_i2c_remove(struct i2c_client *i2c)
 {
        struct xrs700x *priv = i2c_get_clientdata(i2c);
 
+       if (!priv)
+               return 0;
+
        xrs700x_switch_remove(priv);
 
+       i2c_set_clientdata(i2c, NULL);
+
        return 0;
 }
 
+static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
+{
+       struct xrs700x *priv = i2c_get_clientdata(i2c);
+
+       if (!priv)
+               return;
+
+       xrs700x_switch_shutdown(priv);
+
+       i2c_set_clientdata(i2c, NULL);
+}
+
 static const struct i2c_device_id xrs700x_i2c_id[] = {
        { "xrs700x-switch", 0 },
        {},
@@ -137,6 +154,7 @@ static struct i2c_driver xrs700x_i2c_driver = {
        },
        .probe  = xrs700x_i2c_probe,
        .remove = xrs700x_i2c_remove,
+       .shutdown = xrs700x_i2c_shutdown,
        .id_table = xrs700x_i2c_id,
 };
 
index 44f58bee04a46f3584174ade6237414ad48b3ff0..d01cf1073d49d5dec47dbc80d08903688cb94bbd 100644 (file)
@@ -136,7 +136,24 @@ static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
 {
        struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
 
+       if (!priv)
+               return;
+
        xrs700x_switch_remove(priv);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev)
+{
+       struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
+
+       if (!priv)
+               return;
+
+       xrs700x_switch_shutdown(priv);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static const struct of_device_id __maybe_unused xrs700x_mdio_dt_ids[] = {
@@ -155,6 +172,7 @@ static struct mdio_driver xrs700x_mdio_driver = {
        },
        .probe  = xrs700x_mdio_probe,
        .remove = xrs700x_mdio_remove,
+       .shutdown = xrs700x_mdio_shutdown,
 };
 
 mdio_module_driver(xrs700x_mdio_driver);
index 8d90fed5d33eb6fa08d5d2024e4d7347e580e698..6f0ea2facea9e616976a886352d1ee746653323d 100644 (file)
@@ -1050,7 +1050,7 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
 #ifdef VORTEX_BUS_MASTER
        if (vp->bus_master) {
                /* Set the bus-master controller to transfer the packet. */
-               outl((int) (skb->data), ioaddr + Wn7_MasterAddr);
+               outl(isa_virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr);
                outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
                vp->tx_skb = skb;
                outw(StartDMADown, ioaddr + EL3_CMD);
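
Note: the one-line change above replaces a pointer truncated to int with a proper bus address: the ISA bus-master DMA engine is programmed with a bus (physical) address, not the kernel virtual address of the buffer. A minimal illustration, assuming an ISA-capable platform:

#include <asm/io.h>	/* isa_virt_to_bus(), available on ISA platforms */

static inline unsigned long example_isa_dma_addr(void *buf)
{
	return isa_virt_to_bus(buf);	/* was effectively: (int)buf */
}
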
index 53660bc8d6ff60b54754561518f2876933d38c2d..9afc712f594847ed93d6904a563b61cb23097223 100644 (file)
@@ -922,13 +922,16 @@ static void __init ne_add_devices(void)
        }
 }
 
-#ifdef MODULE
 static int __init ne_init(void)
 {
        int retval;
-       ne_add_devices();
+
+       if (IS_MODULE(CONFIG_NE2000))
+               ne_add_devices();
+
        retval = platform_driver_probe(&ne_driver, ne_drv_probe);
-       if (retval) {
+
+       if (IS_MODULE(CONFIG_NE2000) && retval) {
                if (io[0] == 0)
                        pr_notice("ne.c: You must supply \"io=0xNNN\""
                               " value(s) for ISA cards.\n");
@@ -941,18 +944,8 @@ static int __init ne_init(void)
        return retval;
 }
 module_init(ne_init);
-#else /* MODULE */
-static int __init ne_init(void)
-{
-       int retval = platform_driver_probe(&ne_driver, ne_drv_probe);
-
-       /* Unregister unused platform_devices. */
-       ne_loop_rm_unreg(0);
-       return retval;
-}
-module_init(ne_init);
 
-#ifdef CONFIG_NETDEV_LEGACY_INIT
+#if !defined(MODULE) && defined(CONFIG_NETDEV_LEGACY_INIT)
 struct net_device * __init ne_probe(int unit)
 {
        int this_dev;
@@ -994,7 +987,6 @@ struct net_device * __init ne_probe(int unit)
        return ERR_PTR(-ENODEV);
 }
 #endif
-#endif /* MODULE */
 
 static void __exit ne_exit(void)
 {
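
Note: the rewrite above folds the #ifdef MODULE / #else duplication of ne_init() into one function. IS_MODULE(CONFIG_NE2000) is a compile-time constant, so the module-only branches are discarded entirely in built-in kernels while the platform_driver_probe() path stays common. A tiny sketch of the idiom:

#include <linux/init.h>
#include <linux/kconfig.h>
#include <linux/printk.h>

static int __init example_init(void)
{
	/* Constant-folded: the branch vanishes when NE2000 is built in. */
	if (IS_MODULE(CONFIG_NE2000))
		pr_info("module-only setup would run here\n");

	return 0;
}
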
index d796684ec9ca00523b774881fbbcbb3d9f06263b..412ae3e43ffb71782fc810c9541ccec749aa053e 100644 (file)
@@ -100,6 +100,7 @@ config JME
 config KORINA
        tristate "Korina (IDT RC32434) Ethernet support"
        depends on MIKROTIK_RB532 || COMPILE_TEST
+       select CRC32
        select MII
        help
          If you have a Mikrotik RouterBoard 500 or IDT RC32434
index b5df7ad5a83f18372a541e65548bb0f910bd8b26..032e8922b4829b8e676c0bc35f6857ae552e14c3 100644 (file)
@@ -748,7 +748,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
 #ifdef XMT_VIA_SKB
                        skb_save[i] = p->tmd_skb[i];
 #endif
-                       buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
+                       buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer);
                        blen[i] = tmdp->blen;
                        tmdp->u.s.status = 0x0;
                }
index dee9ff74d6d6cd4a2c382d3974d77a40057387ef..d4b1976ee69b934d4a5dffac2d13da5697d78066 100644 (file)
@@ -413,13 +413,13 @@ static int atl_resume_common(struct device *dev, bool deep)
        if (deep) {
                /* Reinitialize Nic/Vecs objects */
                aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+       }
 
+       if (netif_running(nic->ndev)) {
                ret = aq_nic_init(nic);
                if (ret)
                        goto err_exit;
-       }
 
-       if (netif_running(nic->ndev)) {
                ret = aq_nic_start(nic);
                if (ret)
                        goto err_exit;
index 37a41773dd4350d96fded737601db7afef3ee521..92a79c4ffa2c7bc3313cf90cc2c8b8c513be47a2 100644 (file)
@@ -21,6 +21,7 @@ config ARC_EMAC_CORE
        depends on ARC || ARCH_ROCKCHIP || COMPILE_TEST
        select MII
        select PHYLIB
+       select CRC32
 
 config ARC_EMAC
        tristate "ARC EMAC support"
index 85fa0ab7201c75e6950dfbecc7f6a7fd0f6d756c..9513cfb5ba58c3d41a4198f2516223cf7c858062 100644 (file)
@@ -129,6 +129,8 @@ static int bgmac_probe(struct bcma_device *core)
        bcma_set_drvdata(core, bgmac);
 
        err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr);
+       if (err == -EPROBE_DEFER)
+               return err;
 
        /* If no MAC address assigned via device tree, check SPROM */
        if (err) {
index 4ab5bf64d353e35761d468c90394cb6a4e05cfd2..df8ff839cc62142cf7b2b94a11b76a5860c6306c 100644 (file)
@@ -192,6 +192,9 @@ static int bgmac_probe(struct platform_device *pdev)
        bgmac->dma_dev = &pdev->dev;
 
        ret = of_get_mac_address(np, bgmac->net_dev->dev_addr);
+       if (ret == -EPROBE_DEFER)
+               return ret;
+
        if (ret)
                dev_warn(&pdev->dev,
                         "MAC address not present in device tree\n");
index f255fd0b16dbd91608f05df09f9d3ef4865c362c..6fbf735fca31c2ad4e6e0af222098e7f808651cb 100644 (file)
@@ -1224,7 +1224,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
 
        /* SR-IOV capability was enabled but there are no VFs*/
        if (iov->total == 0) {
-               err = -EINVAL;
+               err = 0;
                goto failed;
        }
 
index ea0c45d33814d639ca455cf772ec78cc77bc5fcb..62f84cc91e4d10b504b439f93c11c453930077d7 100644 (file)
@@ -391,7 +391,7 @@ static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
         * netif_tx_queue_stopped().
         */
        smp_mb();
-       if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+       if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
                netif_tx_wake_queue(txq);
                return false;
        }
@@ -764,7 +764,7 @@ next_tx_int:
        smp_mb();
 
        if (unlikely(netif_tx_queue_stopped(txq)) &&
-           bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+           bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
            READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
                netif_tx_wake_queue(txq);
 }
@@ -2213,12 +2213,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
                        DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
                                     bp->current_interval * 10);
                fw_health->tmr_counter = fw_health->tmr_multiplier;
-               if (!fw_health->enabled) {
+               if (!fw_health->enabled)
                        fw_health->last_fw_heartbeat =
                                bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
-                       fw_health->last_fw_reset_cnt =
-                               bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
-               }
+               fw_health->last_fw_reset_cnt =
+                       bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
                netif_info(bp, drv, bp->dev,
                           "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
                           fw_health->master, fw_health->last_fw_reset_cnt,
@@ -2417,7 +2416,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
                        tx_pkts++;
                        /* return full budget so NAPI will complete. */
-                       if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
+                       if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
                                rx_pkts = budget;
                                raw_cons = NEXT_RAW_CMP(raw_cons);
                                if (budget)
@@ -2730,6 +2729,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
                int j;
 
+               if (!txr->tx_buf_ring)
+                       continue;
+
                for (j = 0; j < max_idx;) {
                        struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
                        struct sk_buff *skb;
@@ -2814,6 +2816,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
        }
 
 skip_rx_tpa_free:
+       if (!rxr->rx_buf_ring)
+               goto skip_rx_buf_free;
+
        for (i = 0; i < max_idx; i++) {
                struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
                dma_addr_t mapping = rx_buf->mapping;
@@ -2836,6 +2841,11 @@ skip_rx_tpa_free:
                        kfree(data);
                }
        }
+
+skip_rx_buf_free:
+       if (!rxr->rx_agg_ring)
+               goto skip_rx_agg_free;
+
        for (i = 0; i < max_agg_idx; i++) {
                struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
                struct page *page = rx_agg_buf->page;
@@ -2852,6 +2862,8 @@ skip_rx_tpa_free:
 
                __free_page(page);
        }
+
+skip_rx_agg_free:
        if (rxr->rx_page) {
                __free_page(rxr->rx_page);
                rxr->rx_page = NULL;
@@ -2900,6 +2912,9 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
        struct pci_dev *pdev = bp->pdev;
        int i;
 
+       if (!rmem->pg_arr)
+               goto skip_pages;
+
        for (i = 0; i < rmem->nr_pages; i++) {
                if (!rmem->pg_arr[i])
                        continue;
@@ -2909,6 +2924,7 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 
                rmem->pg_arr[i] = NULL;
        }
+skip_pages:
        if (rmem->pg_tbl) {
                size_t pg_tbl_size = rmem->nr_pages * 8;
 
@@ -3228,10 +3244,14 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
 
 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
 {
+       struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
        kfree(cpr->cp_desc_ring);
        cpr->cp_desc_ring = NULL;
+       ring->ring_mem.pg_arr = NULL;
        kfree(cpr->cp_desc_mapping);
        cpr->cp_desc_mapping = NULL;
+       ring->ring_mem.dma_arr = NULL;
 }
 
 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
@@ -3620,7 +3640,7 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
        u16 i;
 
        bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
-                                  MAX_SKB_FRAGS + 1);
+                                  BNXT_MIN_TX_DESC_CNT);
 
        for (i = 0; i < bp->tx_nr_rings; i++) {
                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
@@ -12207,6 +12227,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                        return;
                }
 
+               if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
+                   bp->fw_health->enabled) {
+                       bp->fw_health->last_fw_reset_cnt =
+                               bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+               }
                bp->fw_reset_state = 0;
                /* Make sure fw_reset_state is 0 before clearing the flag */
                smp_mb__before_atomic();
index ec046e7a2484d28b83c24306f5323de9fab694c1..19fe6478e9b4bc40f7e391cb318efe04c8140d46 100644 (file)
@@ -629,6 +629,11 @@ struct nqe_cn {
 #define BNXT_MAX_RX_JUM_DESC_CNT       (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
 #define BNXT_MAX_TX_DESC_CNT           (TX_DESC_CNT * MAX_TX_PAGES - 1)
 
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1.  We need one extra
+ * BD because the first TX BD is always a long BD.
+ */
+#define BNXT_MIN_TX_DESC_CNT           (MAX_SKB_FRAGS + 2)
+
 #define RX_RING(x)     (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
 #define RX_IDX(x)      ((x) & (RX_DESC_CNT - 1))
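
Note: the new minimum works out as follows: a maximally fragmented skb occupies MAX_SKB_FRAGS fragment BDs plus one for the linear head, and, as the comment above states, the first TX BD is always a long BD, which adds one more. With the common MAX_SKB_FRAGS value of 17 that is 19 descriptors, and the >= comparisons in bnxt.c plus the ethtool ring-size check both key off this same bound.

#include <linux/skbuff.h>	/* MAX_SKB_FRAGS */

/* Restating the bound for illustration: 17 frags + 1 linear head + 1 extra
 * for the long first BD = 19 descriptors must be free to wake the queue.
 */
#define EXAMPLE_MIN_TX_DESC_CNT		(MAX_SKB_FRAGS + 2)
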
 
index b056e3c29bbda42d9695ac404fae306faae0a1d9..7260910e75fb2a62eb74fcdc3aa08b64a9638737 100644 (file)
@@ -798,7 +798,7 @@ static int bnxt_set_ringparam(struct net_device *dev,
 
        if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
            (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
-           (ering->tx_pending <= MAX_SKB_FRAGS))
+           (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
                return -EINVAL;
 
        if (netif_running(dev))
index 46fae1acbeed69dd2182b6cd99b87fb9e35d62d3..e6a4a768b10b29a791db83567e9b34385b53e7dc 100644 (file)
@@ -1884,9 +1884,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
 {
        struct bnxt_flower_indr_block_cb_priv *cb_priv;
 
-       /* All callback list access should be protected by RTNL. */
-       ASSERT_RTNL();
-
        list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
                if (cb_priv->tunnel_netdev == netdev)
                        return cb_priv;
index 8b7b59908a1ab453a9afc4940d3a76779dc750be..f66d22de5168d2be6583f2b78d43a2003ab874c7 100644 (file)
@@ -111,9 +111,9 @@ static void macb_remove(struct pci_dev *pdev)
        struct platform_device *plat_dev = pci_get_drvdata(pdev);
        struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
 
-       platform_device_unregister(plat_dev);
        clk_unregister(plat_data->pclk);
        clk_unregister(plat_data->hclk);
+       platform_device_unregister(plat_dev);
 }
 
 static const struct pci_device_id dev_id_table[] = {
index 3ca93adb9662826e99f8f734cf4d408f1f4ee717..042327b9981fad3cbc309a022b8afe448ad99b46 100644 (file)
@@ -419,7 +419,7 @@ static void enetc_rx_dim_work(struct work_struct *w)
 
 static void enetc_rx_net_dim(struct enetc_int_vector *v)
 {
-       struct dim_sample dim_sample;
+       struct dim_sample dim_sample = {};
 
        v->comp_cnt++;
 
@@ -1879,7 +1879,6 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
 {
        struct pci_dev *pdev = priv->si->pdev;
-       cpumask_t cpu_mask;
        int i, j, err;
 
        for (i = 0; i < priv->bdr_int_num; i++) {
@@ -1908,9 +1907,7 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
 
                        enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
                }
-               cpumask_clear(&cpu_mask);
-               cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
-               irq_set_affinity_hint(irq, &cpu_mask);
+               irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
        }
 
        return 0;
index ee1468e3eaa3fc357c1e761918f3905ed31a018b..91f02c5050285ba63e84513dd980d0b144e900f6 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/* Copyright 2021 NXP Semiconductors
+/* Copyright 2021 NXP
  *
  * The Integrated Endpoint Register Block (IERB) is configured by pre-boot
  * software and is supposed to be to ENETC what a NVRAM is to a 'real' PCIe
index b3b774e0998ac83700112f4856213fba5f6ac380..c2ce47c4be9fde974008b34da40b8c1d243be9a0 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2021 NXP Semiconductors */
+/* Copyright 2021 NXP */
 
 #include <linux/pci.h>
 #include <linux/platform_device.h>
index 60d94e0a07d6e55a57b169bed3e920947fe1e853..4c977dfc44f0dd930a8fa2230a3562fb332f1001 100644 (file)
@@ -541,8 +541,7 @@ static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
 
        if (phy_interface_mode_is_rgmii(phy_mode)) {
                val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
-               val &= ~ENETC_PM0_IFM_EN_AUTO;
-               val &= ENETC_PM0_IFM_IFMODE_MASK;
+               val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
                val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
                enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
        }
index 80bd5c629fa00fd787b39cdf931edfb5c8b5296f..ec87b370bba1fb909784715a89a0637eb1ecc6ed 100644 (file)
@@ -4176,5 +4176,4 @@ static struct platform_driver fec_driver = {
 
 module_platform_driver(fec_driver);
 
-MODULE_ALIAS("platform:"DRIVER_NAME);
 MODULE_LICENSE("GPL");
index 1d3188e8e3b3c01caa84c3edbf332cd0c1fa5dd2..92dc18a4bcc41c233e97e693a85359d86f4b95cb 100644 (file)
@@ -780,7 +780,7 @@ struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
                                    gve_num_tx_qpls(priv));
 
        /* we are out of rx qpls */
-       if (id == priv->qpl_cfg.qpl_map_size)
+       if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
                return NULL;
 
        set_bit(id, priv->qpl_cfg.qpl_id_map);
index 099a2bc5ae6704553516561106a0bb6822fdb8e4..bf8a4a7c43f78001f74c80a2312ff5aef21aff90 100644 (file)
@@ -41,6 +41,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
 {
        struct gve_priv *priv = netdev_priv(dev);
        unsigned int start;
+       u64 packets, bytes;
        int ring;
 
        if (priv->rx) {
@@ -48,10 +49,12 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->rx[ring].statss);
-                               s->rx_packets += priv->rx[ring].rpackets;
-                               s->rx_bytes += priv->rx[ring].rbytes;
+                               packets = priv->rx[ring].rpackets;
+                               bytes = priv->rx[ring].rbytes;
                        } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
                                                       start));
+                       s->rx_packets += packets;
+                       s->rx_bytes += bytes;
                }
        }
        if (priv->tx) {
@@ -59,10 +62,12 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->tx[ring].statss);
-                               s->tx_packets += priv->tx[ring].pkt_done;
-                               s->tx_bytes += priv->tx[ring].bytes_done;
+                               packets = priv->tx[ring].pkt_done;
+                               bytes = priv->tx[ring].bytes_done;
                        } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
                                                       start));
+                       s->tx_packets += packets;
+                       s->tx_bytes += bytes;
                }
        }
 }
@@ -82,6 +87,9 @@ static int gve_alloc_counter_array(struct gve_priv *priv)
 
 static void gve_free_counter_array(struct gve_priv *priv)
 {
+       if (!priv->counter_array)
+               return;
+
        dma_free_coherent(&priv->pdev->dev,
                          priv->num_event_counters *
                          sizeof(*priv->counter_array),
@@ -142,6 +150,9 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
 
 static void gve_free_stats_report(struct gve_priv *priv)
 {
+       if (!priv->stats_report)
+               return;
+
        del_timer_sync(&priv->stats_report_timer);
        dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
                          priv->stats_report, priv->stats_report_bus);
@@ -370,18 +381,19 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
 {
        int i;
 
-       if (priv->msix_vectors) {
-               /* Free the irqs */
-               for (i = 0; i < priv->num_ntfy_blks; i++) {
-                       struct gve_notify_block *block = &priv->ntfy_blocks[i];
-                       int msix_idx = i;
+       if (!priv->msix_vectors)
+               return;
 
-                       irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
-                                             NULL);
-                       free_irq(priv->msix_vectors[msix_idx].vector, block);
-               }
-               free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
+       /* Free the irqs */
+       for (i = 0; i < priv->num_ntfy_blks; i++) {
+               struct gve_notify_block *block = &priv->ntfy_blocks[i];
+               int msix_idx = i;
+
+               irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+                                     NULL);
+               free_irq(priv->msix_vectors[msix_idx].vector, block);
        }
+       free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
        dma_free_coherent(&priv->pdev->dev,
                          priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
                          priv->ntfy_blocks, priv->ntfy_block_bus);
@@ -1185,9 +1197,10 @@ static void gve_handle_reset(struct gve_priv *priv)
 
 void gve_handle_report_stats(struct gve_priv *priv)
 {
-       int idx, stats_idx = 0, tx_bytes;
-       unsigned int start = 0;
        struct stats *stats = priv->stats_report->stats;
+       int idx, stats_idx = 0;
+       unsigned int start = 0;
+       u64 tx_bytes;
 
        if (!gve_get_report_stats(priv))
                return;
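
Note: the statistics fixes in this file follow one rule: inside a u64_stats_fetch_begin()/u64_stats_fetch_retry() loop, read into local variables only, and fold the snapshot into the running totals once the sequence count confirms it was consistent; accumulating directly into the output double-counts on retry, and a narrower type (as in the tx_bytes case) truncates. A generic sketch of the pattern, with made-up structure names:

#include <linux/u64_stats_sync.h>

struct example_ring_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

static void example_read_stats(struct example_ring_stats *rs,
			       u64 *tot_packets, u64 *tot_bytes)
{
	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin(&rs->syncp);
		packets = rs->packets;	/* snapshot into locals only */
		bytes = rs->bytes;
	} while (u64_stats_fetch_retry(&rs->syncp, start));

	*tot_packets += packets;	/* accumulate once, after a clean read */
	*tot_bytes += bytes;
}
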
index bb8261368250293a873565cd69da3838cc358c8c..94941d4e47449244a3c4f5fe2c8248cab24300ec 100644 (file)
@@ -104,8 +104,14 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
        if (!rx->data.page_info)
                return -ENOMEM;
 
-       if (!rx->data.raw_addressing)
+       if (!rx->data.raw_addressing) {
                rx->data.qpl = gve_assign_rx_qpl(priv);
+               if (!rx->data.qpl) {
+                       kvfree(rx->data.page_info);
+                       rx->data.page_info = NULL;
+                       return -ENOMEM;
+               }
+       }
        for (i = 0; i < slots; i++) {
                if (!rx->data.raw_addressing) {
                        struct page *page = rx->data.qpl->pages[i];
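
When assigning a queue-page-list fails after page_info was already allocated, the new error path frees it and clears the pointer before returning, so a later cleanup pass cannot free it twice. The shape of that unwind, with hypothetical types and the helper example_assign_qpl() standing in for the real assignment:

static int example_prefill(struct example_rx *rx, int slots)
{
        rx->page_info = kvcalloc(slots, sizeof(*rx->page_info), GFP_KERNEL);
        if (!rx->page_info)
                return -ENOMEM;

        rx->qpl = example_assign_qpl(rx);       /* hypothetical helper */
        if (!rx->qpl) {
                /* undo the earlier allocation and reset the pointer */
                kvfree(rx->page_info);
                rx->page_info = NULL;
                return -ENOMEM;
        }

        return 0;
}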
index 546a605303848aa1db81452707b4e89ab7b52bac..8ba21d6dc220caaeb1c75bc9d9e35f8e5c3f6a0e 100644 (file)
@@ -752,7 +752,6 @@ struct hnae3_tc_info {
        u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
        u16 tqp_count[HNAE3_MAX_TC];
        u16 tqp_offset[HNAE3_MAX_TC];
-       unsigned long tc_en; /* bitmap of TC enabled */
        u8 num_tc; /* Total number of enabled TCs */
        bool mqprio_active;
 };
index 22af3d6ce1783262070e1a51cea59acde51f406b..468b8f07bf47c57463b6f4d3174df5a2e84484cb 100644 (file)
@@ -61,6 +61,9 @@ static unsigned int tx_sgl = 1;
 module_param(tx_sgl, uint, 0600);
 MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
 
+static bool page_pool_enabled = true;
+module_param(page_pool_enabled, bool, 0400);
+
 #define HNS3_SGL_SIZE(nfrag)   (sizeof(struct scatterlist) * (nfrag) + \
                                 sizeof(struct sg_table))
 #define HNS3_MAX_SGL_SIZE      ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
@@ -73,6 +76,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt
 #define HNS3_OUTER_VLAN_TAG    2
 
 #define HNS3_MIN_TX_LEN                33U
+#define HNS3_MIN_TUN_PKT_LEN   65U
 
 /* hns3_pci_tbl - PCI Device ID Table
  *
@@ -619,13 +623,9 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
                        return ret;
                }
 
-               for (i = 0; i < HNAE3_MAX_TC; i++) {
-                       if (!test_bit(i, &tc_info->tc_en))
-                               continue;
-
+               for (i = 0; i < tc_info->num_tc; i++)
                        netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
                                            tc_info->tqp_offset[i]);
-               }
        }
 
        ret = netif_set_real_num_tx_queues(netdev, queue_size);
@@ -775,6 +775,11 @@ static int hns3_nic_net_open(struct net_device *netdev)
        if (hns3_nic_resetting(netdev))
                return -EBUSY;
 
+       if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+               netdev_warn(netdev, "net open repeatedly!\n");
+               return 0;
+       }
+
        netif_carrier_off(netdev);
 
        ret = hns3_nic_set_real_num_queue(netdev);
@@ -1424,8 +1429,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
                               l4.tcp->doff);
                break;
        case IPPROTO_UDP:
-               if (hns3_tunnel_csum_bug(skb))
-                       return skb_checksum_help(skb);
+               if (hns3_tunnel_csum_bug(skb)) {
+                       int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
+
+                       return ret ? ret : skb_checksum_help(skb);
+               }
 
                hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
                hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -4753,7 +4761,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
                goto out_with_desc_cb;
 
        if (!HNAE3_IS_TX_RING(ring)) {
-               hns3_alloc_page_pool(ring);
+               if (page_pool_enabled)
+                       hns3_alloc_page_pool(ring);
 
                ret = hns3_alloc_ring_buffers(ring);
                if (ret)
@@ -4857,12 +4866,9 @@ static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
        struct hnae3_tc_info *tc_info = &kinfo->tc_info;
        int i;
 
-       for (i = 0; i < HNAE3_MAX_TC; i++) {
+       for (i = 0; i < tc_info->num_tc; i++) {
                int j;
 
-               if (!test_bit(i, &tc_info->tc_en))
-                       continue;
-
                for (j = 0; j < tc_info->tqp_count[i]; j++) {
                        struct hnae3_queue *q;
 
index 7ea511d59e91ac62767928a14afa428fc7a199c6..5ebd96f6833d6ebb8dc176af3374379d0556d35f 100644 (file)
@@ -334,7 +334,8 @@ static void hns3_selftest_prepare(struct net_device *ndev,
 
 #if IS_ENABLED(CONFIG_VLAN_8021Q)
        /* Disable the vlan filter, as selftest does not support it */
-       if (h->ae_algo->ops->enable_vlan_filter)
+       if (h->ae_algo->ops->enable_vlan_filter &&
+           ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                h->ae_algo->ops->enable_vlan_filter(h, false);
 #endif
 
@@ -359,7 +360,8 @@ static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
                h->ae_algo->ops->halt_autoneg(h, false);
 
 #if IS_ENABLED(CONFIG_VLAN_8021Q)
-       if (h->ae_algo->ops->enable_vlan_filter)
+       if (h->ae_algo->ops->enable_vlan_filter &&
+           ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                h->ae_algo->ops->enable_vlan_filter(h, true);
 #endif
 
index ac9b69513332bf7c94bfa71d180cc13a52fc8013..9c2eeaa822944fd31c0f7192657db99b8c909a3f 100644 (file)
@@ -467,7 +467,7 @@ err_csq:
        return ret;
 }
 
-static int hclge_firmware_compat_config(struct hclge_dev *hdev)
+static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)
 {
        struct hclge_firmware_compat_cmd *req;
        struct hclge_desc desc;
@@ -475,13 +475,16 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev)
 
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
 
-       req = (struct hclge_firmware_compat_cmd *)desc.data;
+       if (en) {
+               req = (struct hclge_firmware_compat_cmd *)desc.data;
 
-       hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
-       hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
-       if (hnae3_dev_phy_imp_supported(hdev))
-               hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
-       req->compat = cpu_to_le32(compat);
+               hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
+               hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
+               if (hnae3_dev_phy_imp_supported(hdev))
+                       hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
+
+               req->compat = cpu_to_le32(compat);
+       }
 
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
@@ -538,7 +541,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
        /* ask the firmware to enable some features, driver can work without
         * it.
         */
-       ret = hclge_firmware_compat_config(hdev);
+       ret = hclge_firmware_compat_config(hdev, true);
        if (ret)
                dev_warn(&hdev->pdev->dev,
                         "Firmware compatible features not enabled(%d).\n",
@@ -568,6 +571,8 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
 
 void hclge_cmd_uninit(struct hclge_dev *hdev)
 {
+       hclge_firmware_compat_config(hdev, false);
+
        set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
        /* wait to ensure that the firmware completes the possible left
         * over commands.
index 4a619e5d3f35e3ca159685799b7e19eccf7f8b44..307c9e830510ae4d73b277d4298c3101e83fd587 100644 (file)
@@ -247,6 +247,10 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
        }
 
        hclge_tm_schd_info_update(hdev, num_tc);
+       if (num_tc > 1)
+               hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+       else
+               hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
 
        ret = hclge_ieee_ets_to_tm_info(hdev, ets);
        if (ret)
@@ -306,8 +310,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
        u8 i, j, pfc_map, *prio_tc;
        int ret;
 
-       if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
-           hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+       if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
                return -EINVAL;
 
        if (pfc->pfc_en == hdev->tm_info.pfc_en)
@@ -441,8 +444,6 @@ static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
 static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
                                   struct tc_mqprio_qopt_offload *mqprio_qopt)
 {
-       int i;
-
        memset(tc_info, 0, sizeof(*tc_info));
        tc_info->num_tc = mqprio_qopt->qopt.num_tc;
        memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
@@ -451,9 +452,6 @@ static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
               sizeof_field(struct hnae3_tc_info, tqp_count));
        memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
               sizeof_field(struct hnae3_tc_info, tqp_offset));
-
-       for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
-               set_bit(tc_info->prio_tc[i], &tc_info->tc_en);
 }
 
 static int hclge_config_tc(struct hclge_dev *hdev,
@@ -519,12 +517,17 @@ static int hclge_setup_tc(struct hnae3_handle *h,
        return hclge_notify_init_up(hdev);
 
 err_out:
-       /* roll-back */
-       memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
-       if (hclge_config_tc(hdev, &kinfo->tc_info))
-               dev_err(&hdev->pdev->dev,
-                       "failed to roll back tc configuration\n");
-
+       if (!tc) {
+               dev_warn(&hdev->pdev->dev,
+                        "failed to destroy mqprio, it will be active after reset, ret = %d\n",
+                        ret);
+       } else {
+               /* roll-back */
+               memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
+               if (hclge_config_tc(hdev, &kinfo->tc_info))
+                       dev_err(&hdev->pdev->dev,
+                               "failed to roll back tc configuration\n");
+       }
        hclge_notify_init_up(hdev);
 
        return ret;
index 68ed1715ac5285945b529e4ac85e6fc782da1341..32f62cd2dd99f4ebcfc0cae8c2a6cf244ccb73e5 100644 (file)
@@ -719,9 +719,9 @@ static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
        sprintf(result[(*index)++], "%6u", para->rate);
 }
 
-static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
+static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
+                                 char *buf, int len)
 {
-       char data_str[ARRAY_SIZE(tm_pg_items)][HCLGE_DBG_DATA_STR_LEN];
        struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
        char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
        u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
@@ -729,8 +729,10 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
        int pos = 0;
        int ret;
 
-       for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++)
-               result[i] = &data_str[i][0];
+       for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
+               result[i] = data_str;
+               data_str += HCLGE_DBG_DATA_STR_LEN;
+       }
 
        hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
                               NULL, ARRAY_SIZE(tm_pg_items));
@@ -781,6 +783,24 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
        return 0;
 }
 
+static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
+{
+       char *data_str;
+       int ret;
+
+       data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
+                          HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
+
+       if (!data_str)
+               return -ENOMEM;
+
+       ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
+
+       kfree(data_str);
+
+       return ret;
+}
+
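
The dump routine is split so its scratch strings (ARRAY_SIZE(tm_pg_items) * HCLGE_DBG_DATA_STR_LEN bytes) come from kcalloc() rather than a large on-stack array, with a thin wrapper owning the allocation and freeing it on every path. Roughly, with placeholder names:

static int example_dump(struct example_dev *dev, char *buf, int len)
{
        char *scratch;
        int ret;

        /* one heap allocation replaces a big two-dimensional stack array */
        scratch = kcalloc(EXAMPLE_NUM_ITEMS, EXAMPLE_STR_LEN, GFP_KERNEL);
        if (!scratch)
                return -ENOMEM;

        ret = __example_dump(dev, scratch, buf, len);   /* does the real work */

        kfree(scratch);
        return ret;
}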
 static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev,  char *buf, int len)
 {
        struct hclge_tm_shaper_para shaper_para;
@@ -1724,6 +1744,10 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
        }
 
        bd_num = le32_to_cpu(req->bd_num);
+       if (!bd_num) {
+               dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
+               return -EINVAL;
+       }
 
        desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
        if (!desc_src)
index 718c16d686fa9c6dea428a36db1d48b8db44efb6..bb9b026ae88e52661d1e02e11aa8389c9b400d8f 100644 (file)
@@ -2445,12 +2445,12 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
                return;
        }
 
-       dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
+       dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n",
                vf_id, q_id);
 
        if (vf_id) {
                if (vf_id >= hdev->num_alloc_vport) {
-                       dev_err(dev, "invalid vf id(%u)\n", vf_id);
+                       dev_err(dev, "invalid vport(%u)\n", vf_id);
                        return;
                }
 
@@ -2463,8 +2463,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
 
                ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
                if (ret)
-                       dev_err(dev, "inform reset to vf(%u) failed %d!\n",
-                               hdev->vport->vport_id, ret);
+                       dev_err(dev, "inform reset to vport(%u) failed %d!\n",
+                               vf_id, ret);
        } else {
                set_bit(HNAE3_FUNC_RESET, reset_requests);
        }
index e55ba2e511b1a3eafb73437d53ae613d919f7be0..f5b8d1fee0f1aecc56f94db11d7c3501f35d8249 100644 (file)
@@ -1528,9 +1528,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
 static int hclge_configure(struct hclge_dev *hdev)
 {
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+       const struct cpumask *cpumask = cpu_online_mask;
        struct hclge_cfg cfg;
        unsigned int i;
-       int ret;
+       int node, ret;
 
        ret = hclge_get_cfg(hdev, &cfg);
        if (ret)
@@ -1595,11 +1596,12 @@ static int hclge_configure(struct hclge_dev *hdev)
 
        hclge_init_kdump_kernel_config(hdev);
 
-       /* Set the init affinity based on pci func number */
-       i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
-       i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
-       cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
-                       &hdev->affinity_mask);
+       /* Set the affinity based on numa node */
+       node = dev_to_node(&hdev->pdev->dev);
+       if (node != NUMA_NO_NODE)
+               cpumask = cpumask_of_node(node);
+
+       cpumask_copy(&hdev->affinity_mask, cpumask);
 
        return ret;
 }
@@ -3659,7 +3661,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "set vf(%u) rst failed %d!\n",
-                               vport->vport_id, ret);
+                               vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+                               ret);
                        return ret;
                }
 
@@ -3674,7 +3677,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
                if (ret)
                        dev_warn(&hdev->pdev->dev,
                                 "inform reset to vf(%u) failed %d!\n",
-                                vport->vport_id, ret);
+                                vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+                                ret);
        }
 
        return 0;
@@ -4739,6 +4743,24 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
        return 0;
 }
 
+static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
+                                u8 *hash_algo)
+{
+       switch (hfunc) {
+       case ETH_RSS_HASH_TOP:
+               *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+               return 0;
+       case ETH_RSS_HASH_XOR:
+               *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+               return 0;
+       case ETH_RSS_HASH_NO_CHANGE:
+               *hash_algo = vport->rss_algo;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
                         const  u8 *key, const  u8 hfunc)
 {
@@ -4748,30 +4770,27 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
        u8 hash_algo;
        int ret, i;
 
+       ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
+       if (ret) {
+               dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
+               return ret;
+       }
+
        /* Set the RSS Hash Key if specified by the user */
        if (key) {
-               switch (hfunc) {
-               case ETH_RSS_HASH_TOP:
-                       hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
-                       break;
-               case ETH_RSS_HASH_XOR:
-                       hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
-                       break;
-               case ETH_RSS_HASH_NO_CHANGE:
-                       hash_algo = vport->rss_algo;
-                       break;
-               default:
-                       return -EINVAL;
-               }
-
                ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
                if (ret)
                        return ret;
 
                /* Update the shadow RSS key with user specified qids */
                memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
-               vport->rss_algo = hash_algo;
+       } else {
+               ret = hclge_set_rss_algo_key(hdev, hash_algo,
+                                            vport->rss_hash_key);
+               if (ret)
+                       return ret;
        }
+       vport->rss_algo = hash_algo;
 
        /* Update the shadow RSS table with user specified qids */
        for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
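
Net effect of the two hunks above: the hfunc value is validated by the new helper before any state is touched, and when the caller changes only the hash algorithm (no new key), the existing shadow key is reprogrammed together with the selected algorithm, so the change actually reaches the hardware; vport->rss_algo is updated only after the command succeeds.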
@@ -6625,10 +6644,13 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
                u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
                u16 tqps;
 
+               /* To stay consistent with the user's configuration, subtract 1
+                * when printing 'vf', because the vf id from ethtool is offset by 1.
+                */
                if (vf > hdev->num_req_vfs) {
                        dev_err(&hdev->pdev->dev,
-                               "Error: vf id (%u) > max vf num (%u)\n",
-                               vf, hdev->num_req_vfs);
+                               "Error: vf id (%u) should be less than %u\n",
+                               vf - 1, hdev->num_req_vfs);
                        return -EINVAL;
                }
 
@@ -8125,11 +8147,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
        hclge_clear_arfs_rules(hdev);
        spin_unlock_bh(&hdev->fd_rule_lock);
 
-       /* If it is not PF reset, the firmware will disable the MAC,
+       /* If it is not PF reset or FLR, the firmware will disable the MAC,
         * so it only need to stop phy here.
         */
        if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
-           hdev->reset_type != HNAE3_FUNC_RESET) {
+           hdev->reset_type != HNAE3_FUNC_RESET &&
+           hdev->reset_type != HNAE3_FLR_RESET) {
                hclge_mac_stop_phy(hdev);
                hclge_update_link_status(hdev);
                return;
@@ -8685,15 +8708,8 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
        }
 
        /* check if we just hit the duplicate */
-       if (!ret) {
-               dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
-                        vport->vport_id, addr);
-               return 0;
-       }
-
-       dev_err(&hdev->pdev->dev,
-               "PF failed to add unicast entry(%pM) in the MAC table\n",
-               addr);
+       if (!ret)
+               return -EEXIST;
 
        return ret;
 }
@@ -8845,7 +8861,13 @@ static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
                } else {
                        set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
                                &vport->state);
-                       break;
+
+                       /* If one unicast mac address already exists in hardware,
+                        * we still need to try the other unicast mac addresses,
+                        * since they may be new addresses that can be added.
+                        */
+                       if (ret != -EEXIST)
+                               break;
                }
        }
 }
@@ -9794,6 +9816,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
        if (is_kill && !vlan_id)
                return 0;
 
+       if (vlan_id >= VLAN_N_VID)
+               return -EINVAL;
+
        ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
        if (ret) {
                dev_err(&hdev->pdev->dev,
@@ -10700,7 +10725,8 @@ static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
        return 0;
 }
 
-static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
+static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
+                                 u8 *reset_status)
 {
        struct hclge_reset_tqp_queue_cmd *req;
        struct hclge_desc desc;
@@ -10718,7 +10744,9 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
                return ret;
        }
 
-       return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+       *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+
+       return 0;
 }
 
 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
@@ -10737,7 +10765,7 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u16 reset_try_times = 0;
-       int reset_status;
+       u8 reset_status;
        u16 queue_gid;
        int ret;
        u16 i;
@@ -10753,7 +10781,11 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
                }
 
                while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
-                       reset_status = hclge_get_reset_status(hdev, queue_gid);
+                       ret = hclge_get_reset_status(hdev, queue_gid,
+                                                    &reset_status);
+                       if (ret)
+                               return ret;
+
                        if (reset_status)
                                break;
 
@@ -11446,11 +11478,11 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
                struct hclge_vport *vport = &hdev->vport[i];
                int ret;
 
-                /* Send cmd to clear VF's FUNC_RST_ING */
+                /* Send cmd to clear vport's FUNC_RST_ING */
                ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
                if (ret)
                        dev_warn(&hdev->pdev->dev,
-                                "clear vf(%u) rst failed %d!\n",
+                                "clear vport(%u) rst failed %d!\n",
                                 vport->vport_id, ret);
        }
 }
@@ -12764,8 +12796,12 @@ static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
                        continue;
 
                if (vport->vf_info.trusted) {
-                       uc_en = vport->vf_info.request_uc_en > 0;
-                       mc_en = vport->vf_info.request_mc_en > 0;
+                       uc_en = vport->vf_info.request_uc_en > 0 ||
+                               vport->overflow_promisc_flags &
+                               HNAE3_OVERFLOW_UPE;
+                       mc_en = vport->vf_info.request_mc_en > 0 ||
+                               vport->overflow_promisc_flags &
+                               HNAE3_OVERFLOW_MPE;
                }
                bc_en = vport->vf_info.request_bc_en > 0;
 
index 2ce5302c5956f8dc09ec28595c4dd29cbc0f77d2..65d78ee4d65a0d6b820e54e4a7912a1d5c899927 100644 (file)
@@ -566,7 +566,7 @@ static int hclge_reset_vf(struct hclge_vport *vport)
        struct hclge_dev *hdev = vport->back;
 
        dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
-                vport->vport_id);
+                vport->vport_id - HCLGE_VF_VPORT_START_NUM);
 
        return hclge_func_reset_cmd(hdev, vport->vport_id);
 }
@@ -590,9 +590,17 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
                                     struct hclge_mbx_vf_to_pf_cmd *mbx_req,
                                     struct hclge_respond_to_vf_msg *resp_msg)
 {
+       struct hnae3_handle *handle = &vport->nic;
+       struct hclge_dev *hdev = vport->back;
        u16 queue_id, qid_in_pf;
 
        memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+       if (queue_id >= handle->kinfo.num_tqps) {
+               dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
+                       queue_id, mbx_req->mbx_src_vfid);
+               return;
+       }
+
        qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
        memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
        resp_msg->len = sizeof(qid_in_pf);
index 78d5bf1ea561039d33af9981e8d67ec9ef7ae6c0..f314dbd3ce11fb1584dbb23a51517fd42039574d 100644 (file)
@@ -581,7 +581,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
-                               "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+                               "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
                                vport->vport_id, shap_cfg_cmd->qs_id,
                                max_tx_rate, ret);
                        return ret;
@@ -687,12 +687,10 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 
        for (i = 0; i < HNAE3_MAX_TC; i++) {
                if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
-                       set_bit(i, &kinfo->tc_info.tc_en);
                        kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
                        kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
                } else {
                        /* Set to default queue if TC is disable */
-                       clear_bit(i, &kinfo->tc_info.tc_en);
                        kinfo->tc_info.tqp_offset[i] = 0;
                        kinfo->tc_info.tqp_count[i] = 1;
                }
@@ -729,14 +727,6 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
                hdev->tm_info.prio_tc[i] =
                        (i >= hdev->tm_info.num_tc) ? 0 : i;
-
-       /* DCB is enabled if we have more than 1 TC or pfc_en is
-        * non-zero.
-        */
-       if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
-               hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
-       else
-               hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
 }
 
 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
@@ -767,10 +757,10 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
 
 static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
 {
-       if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
+       if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
                if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
                        dev_warn(&hdev->pdev->dev,
-                                "DCB is disable, but last mode is FC_PFC\n");
+                                "Only 1 tc used, but last mode is FC_PFC\n");
 
                hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
        } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
@@ -796,7 +786,7 @@ static void hclge_update_fc_mode(struct hclge_dev *hdev)
        }
 }
 
-static void hclge_pfc_info_init(struct hclge_dev *hdev)
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
 {
        if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
                hclge_update_fc_mode(hdev);
@@ -812,7 +802,7 @@ static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
 
        hclge_tm_vport_info_update(hdev);
 
-       hclge_pfc_info_init(hdev);
+       hclge_tm_pfc_info_update(hdev);
 }
 
 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
@@ -1558,19 +1548,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
        hclge_tm_schd_info_init(hdev);
 }
 
-void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
-{
-       /* DCB is enabled if we have more than 1 TC or pfc_en is
-        * non-zero.
-        */
-       if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
-               hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
-       else
-               hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
-
-       hclge_pfc_info_init(hdev);
-}
-
 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
 {
        int ret;
@@ -1616,7 +1593,7 @@ int hclge_tm_vport_map_update(struct hclge_dev *hdev)
        if (ret)
                return ret;
 
-       if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
+       if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
                return 0;
 
        return hclge_tm_bp_setup(hdev);
index 82e72702012031ef3d719624fd2b4a11f6effeed..5fdac8685f9552ec9dae748c3ac60ad5a569abd7 100644 (file)
@@ -816,40 +816,56 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
        return 0;
 }
 
+static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
+                                  u8 *hash_algo)
+{
+       switch (hfunc) {
+       case ETH_RSS_HASH_TOP:
+               *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+               return 0;
+       case ETH_RSS_HASH_XOR:
+               *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+               return 0;
+       case ETH_RSS_HASH_NO_CHANGE:
+               *hash_algo = hdev->rss_cfg.hash_algo;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
                           const u8 *key, const u8 hfunc)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+       u8 hash_algo;
        int ret, i;
 
        if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+               ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
+               if (ret)
+                       return ret;
+
                /* Set the RSS Hash Key if specified by the user */
                if (key) {
-                       switch (hfunc) {
-                       case ETH_RSS_HASH_TOP:
-                               rss_cfg->hash_algo =
-                                       HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
-                               break;
-                       case ETH_RSS_HASH_XOR:
-                               rss_cfg->hash_algo =
-                                       HCLGEVF_RSS_HASH_ALGO_SIMPLE;
-                               break;
-                       case ETH_RSS_HASH_NO_CHANGE:
-                               break;
-                       default:
-                               return -EINVAL;
-                       }
-
-                       ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
-                                                      key);
-                       if (ret)
+                       ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
+                       if (ret) {
+                               dev_err(&hdev->pdev->dev,
+                                       "invalid hfunc type %u\n", hfunc);
                                return ret;
+                       }
 
                        /* Update the shadow RSS key with user specified qids */
                        memcpy(rss_cfg->rss_hash_key, key,
                               HCLGEVF_RSS_KEY_SIZE);
+               } else {
+                       ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
+                                                      rss_cfg->rss_hash_key);
+                       if (ret)
+                               return ret;
                }
+               rss_cfg->hash_algo = hash_algo;
        }
 
        /* update the shadow RSS table with user specified qids */
@@ -2465,6 +2481,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
 
        hclgevf_enable_vector(&hdev->misc_vector, false);
        event_cause = hclgevf_check_evt_cause(hdev, &clearval);
+       if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+               hclgevf_clear_event_cause(hdev, clearval);
 
        switch (event_cause) {
        case HCLGEVF_VECTOR0_EVENT_RST:
@@ -2477,10 +2495,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
                break;
        }
 
-       if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
-               hclgevf_clear_event_cause(hdev, clearval);
+       if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
                hclgevf_enable_vector(&hdev->misc_vector, true);
-       }
 
        return IRQ_HANDLED;
 }
index 3e54017a2a5ba8f8003b12f3723ecf65c8e04a3d..07fdab58001d9175a124be5ab70038c40f35add4 100644 (file)
@@ -354,7 +354,7 @@ static int hns_mdio_reset(struct mii_bus *bus)
 
        if (dev_of_node(bus->parent)) {
                if (!mdio_dev->subctrl_vbase) {
-                       dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
+                       dev_err(&bus->dev, "mdio sys ctl reg has not mapped\n");
                        return -ENODEV;
                }
 
index b8a40146b89585e477bb63c2c8ae348e87d054fe..b482f6f633bd558aeb5b88dd5f93c4bb4f7130a6 100644 (file)
@@ -1144,7 +1144,7 @@ static struct net_device * __init i82596_probe(void)
                        err = -ENODEV;
                        goto out;
                }
-               memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN);        /* YUCK! Get addr from NOVRAM */
+               memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */
                dev->base_addr = MVME_I596_BASE;
                dev->irq = (unsigned) MVME16x_IRQ_I596;
                goto found;
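
absolute_pointer() hides the provenance of the hard-coded NOVRAM address from the compiler, so the memcpy() from 0xfffc1f2c is no longer flagged by array-bounds/stringop-overread style warnings that assume the constant points at a tiny object; the generated access itself is unchanged.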
index a775c69e4fd7f089d56341fca8ad979bd98c3488..6aa6ff89a76511ff840e545e3bd0dc9ec5c4faff 100644 (file)
@@ -4700,6 +4700,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
                return 0;
        }
 
+       if (adapter->failover_pending) {
+               adapter->init_done_rc = -EAGAIN;
+               netdev_dbg(netdev, "Failover pending, ignoring login response\n");
+               complete(&adapter->init_done);
+               /* login response buffer will be released on reset */
+               return 0;
+       }
+
        netdev->mtu = adapter->req_mtu - ETH_HLEN;
 
        netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
index b0b6f90deb7d099346454824a7c03aafb6fbfef8..ed8ea63bb172419d219d17c73996ffea2d70ee5f 100644 (file)
@@ -335,6 +335,7 @@ config IGC
        tristate "Intel(R) Ethernet Controller I225-LM/I225-V support"
        default n
        depends on PCI
+       depends on PTP_1588_CLOCK_OPTIONAL
        help
          This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
          family of adapters.
index 373eb027b925207fdc70b47e413c4055559a2e2d..09ae1939e6db4c7cc32e89708aa871763a543d91 100644 (file)
@@ -2437,11 +2437,15 @@ static void e100_get_drvinfo(struct net_device *netdev,
                sizeof(info->bus_info));
 }
 
-#define E100_PHY_REGS 0x1C
+#define E100_PHY_REGS 0x1D
 static int e100_get_regs_len(struct net_device *netdev)
 {
        struct nic *nic = netdev_priv(netdev);
-       return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
+
+       /* We know the number of registers, and the size of the dump buffer.
+        * Calculate the total size in bytes.
+        */
+       return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
 }
 
 static void e100_get_regs(struct net_device *netdev,
@@ -2455,14 +2459,18 @@ static void e100_get_regs(struct net_device *netdev,
        buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
                ioread8(&nic->csr->scb.cmd_lo) << 16 |
                ioread16(&nic->csr->scb.status);
-       for (i = E100_PHY_REGS; i >= 0; i--)
-               buff[1 + E100_PHY_REGS - i] =
-                       mdio_read(netdev, nic->mii.phy_id, i);
+       for (i = 0; i < E100_PHY_REGS; i++)
+               /* Note that we read the registers in reverse order. This
+                * ordering is the ABI apparently used by ethtool and other
+                * applications.
+                */
+               buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,
+                                       E100_PHY_REGS - 1 - i);
        memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
        e100_exec_cb(nic, NULL, e100_dump);
        msleep(10);
-       memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
-               sizeof(nic->mem->dump_buf));
+       memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,
+              sizeof(nic->mem->dump_buf));
 }
 
 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
index 2f20980dd9a58aa8e4251c7b1cf6b69853e83175..e04b540cedc85c3c4a435600274b33e401809c67 100644 (file)
@@ -4871,7 +4871,8 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
 {
        int i;
 
-       i40e_free_misc_vector(pf);
+       if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
+               i40e_free_misc_vector(pf);
 
        i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
                      I40E_IWARP_IRQ_PILE_ID);
@@ -10113,7 +10114,7 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
                if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
                        /* retry with a larger buffer */
                        buf_len = data_size;
-               } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+               } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
                        dev_info(&pf->pdev->dev,
                                 "capability discovery failed, err %s aq_err %s\n",
                                 i40e_stat_str(&pf->hw, err),
index 23762a7ef740b261a950e2e454a5a6c0d82d682a..cada4e0e40b48eda0b40a3df56ab7576a4348123 100644 (file)
@@ -1965,7 +1965,6 @@ static void iavf_watchdog_task(struct work_struct *work)
                }
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-               mutex_unlock(&adapter->crit_lock);
                queue_delayed_work(iavf_wq,
                                   &adapter->watchdog_task,
                                   msecs_to_jiffies(10));
index eadcb99583464f04c69412212742379683a895a9..3c4f08d20414e063c59e69f6a96483fe2ea141bd 100644 (file)
@@ -695,6 +695,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
 {
        if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
                set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+               set_bit(ICE_FLAG_AUX_ENA, pf->flags);
                ice_plug_aux_dev(pf);
        }
 }
@@ -707,5 +708,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
 {
        ice_unplug_aux_dev(pf);
        clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+       clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
 }
 #endif /* _ICE_H_ */
index 1f2afdf6cd483d79fbe98d2a4a6fff2c9e5bb9d6..adcc9a251595a75a4e84d8590f8b923b6b982b4d 100644 (file)
@@ -271,6 +271,12 @@ int ice_plug_aux_dev(struct ice_pf *pf)
        struct auxiliary_device *adev;
        int ret;
 
+       /* if this PF doesn't support a technology that requires auxiliary
+        * devices, then gracefully exit
+        */
+       if (!ice_is_aux_ena(pf))
+               return 0;
+
        iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
        if (!iadev)
                return -ENOMEM;
index 05cc5870e4efb52b01ef1cf649b4bf318d8e498d..80380aed8882d5681411cd04648a38679877fbd0 100644 (file)
@@ -1313,22 +1313,21 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
 {
        u8 idx;
 
-       spin_lock(&tx->lock);
-
        for (idx = 0; idx < tx->len; idx++) {
                u8 phy_idx = idx + tx->quad_offset;
 
-               /* Clear any potential residual timestamp in the PHY block */
-               if (!pf->hw.reset_ongoing)
-                       ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
-
+               spin_lock(&tx->lock);
                if (tx->tstamps[idx].skb) {
                        dev_kfree_skb_any(tx->tstamps[idx].skb);
                        tx->tstamps[idx].skb = NULL;
                }
-       }
+               clear_bit(idx, tx->in_use);
+               spin_unlock(&tx->lock);
 
-       spin_unlock(&tx->lock);
+               /* Clear any potential residual timestamp in the PHY block */
+               if (!pf->hw.reset_ongoing)
+                       ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
+       }
 }
 
 /**
index b877efae61dfafe09970ff62b713e78d0c5cb41b..0e19b4d02e628ac6afecbc9a1fc19616e4cb4a80 100644 (file)
@@ -6350,7 +6350,9 @@ static int igc_probe(struct pci_dev *pdev,
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
-       netdev->vlan_features |= netdev->features;
+       netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+       netdev->mpls_features |= NETIF_F_HW_CSUM;
+       netdev->hw_enc_features |= netdev->vlan_features;
 
        /* MTU range: 68 - 9216 */
        netdev->min_mtu = ETH_MIN_MTU;
index fc26e4ddeb0dcf48922a1e91bbc5049874e6e5db..beda8e0ef7d421aa08c4521c3ac8cabe30539621 100644 (file)
@@ -3208,7 +3208,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
                max_combined = ixgbe_max_rss_indices(adapter);
        }
 
-       return max_combined;
+       return min_t(int, max_combined, num_online_cpus());
 }
 
 static void ixgbe_get_channels(struct net_device *dev,
index 24e06ba6f5e93de93fe167be3702421f68aadf66..13c4782b920a79f695553e5d3a11442ba74720f0 100644 (file)
@@ -10112,6 +10112,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct bpf_prog *old_prog;
        bool need_reset;
+       int num_queues;
 
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
                return -EINVAL;
@@ -10161,11 +10162,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
        /* Kick start the NAPI context if there is an AF_XDP socket open
         * on that queue id. This so that receiving will start.
         */
-       if (need_reset && prog)
-               for (i = 0; i < adapter->num_rx_queues; i++)
+       if (need_reset && prog) {
+               num_queues = min_t(int, adapter->num_rx_queues,
+                                  adapter->num_xdp_queues);
+               for (i = 0; i < num_queues; i++)
                        if (adapter->xdp_ring[i]->xsk_pool)
                                (void)ixgbe_xsk_wakeup(adapter->netdev, i,
                                                       XDP_WAKEUP_RX);
+       }
 
        return 0;
 }
index b5f68f66d42a8f9125a16f0f7e29f777a4838344..7bb1f20002b582dfa8e5afdcac82d2b3bc3a4b35 100644 (file)
@@ -186,6 +186,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
        int hash;
        int i;
 
+       if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
+               return -EEXIST;
+
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
                struct flow_match_meta match;
 
index a2f61a87cef805b38590fb2ec07b15f77c464f5f..8af7f282732255659a1b4dc388a68acbf8ff5a2c 100644 (file)
@@ -372,6 +372,9 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
        int nhoff = skb_network_offset(skb);
        int ret = 0;
 
+       if (skb->encapsulation)
+               return -EPROTONOSUPPORT;
+
        if (skb->protocol != htons(ETH_P_IP))
                return -EPROTONOSUPPORT;
 
@@ -1269,7 +1272,6 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
        if (!netif_carrier_ok(dev)) {
                if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
                        if (priv->port_state.link_state) {
-                               priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
                                netif_carrier_on(dev);
                                en_dbg(LINK, priv, "Link Up\n");
                        }
@@ -1557,26 +1559,36 @@ static void mlx4_en_service_task(struct work_struct *work)
        mutex_unlock(&mdev->state_lock);
 }
 
-static void mlx4_en_linkstate(struct work_struct *work)
+static void mlx4_en_linkstate(struct mlx4_en_priv *priv)
+{
+       struct mlx4_en_port_state *port_state = &priv->port_state;
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct net_device *dev = priv->dev;
+       bool up;
+
+       if (mlx4_en_QUERY_PORT(mdev, priv->port))
+               port_state->link_state = MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN;
+
+       up = port_state->link_state == MLX4_PORT_STATE_DEV_EVENT_PORT_UP;
+       if (up == netif_carrier_ok(dev))
+               netif_carrier_event(dev);
+       if (!up) {
+               en_info(priv, "Link Down\n");
+               netif_carrier_off(dev);
+       } else {
+               en_info(priv, "Link Up\n");
+               netif_carrier_on(dev);
+       }
+}
+
+static void mlx4_en_linkstate_work(struct work_struct *work)
 {
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 linkstate_task);
        struct mlx4_en_dev *mdev = priv->mdev;
-       int linkstate = priv->link_state;
 
        mutex_lock(&mdev->state_lock);
-       /* If observable port state changed set carrier state and
-        * report to system log */
-       if (priv->last_link_state != linkstate) {
-               if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
-                       en_info(priv, "Link Down\n");
-                       netif_carrier_off(priv->dev);
-               } else {
-                       en_info(priv, "Link Up\n");
-                       netif_carrier_on(priv->dev);
-               }
-       }
-       priv->last_link_state = linkstate;
+       mlx4_en_linkstate(priv);
        mutex_unlock(&mdev->state_lock);
 }
 
@@ -2079,9 +2091,11 @@ static int mlx4_en_open(struct net_device *dev)
        mlx4_en_clear_stats(dev);
 
        err = mlx4_en_start_port(dev);
-       if (err)
+       if (err) {
                en_err(priv, "Failed starting port:%d\n", priv->port);
-
+               goto out;
+       }
+       mlx4_en_linkstate(priv);
 out:
        mutex_unlock(&mdev->state_lock);
        return err;
@@ -3168,7 +3182,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        spin_lock_init(&priv->stats_lock);
        INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
        INIT_WORK(&priv->restart_task, mlx4_en_restart);
-       INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+       INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate_work);
        INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
        INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
 #ifdef CONFIG_RFS_ACCEL
index f3d1a20201ef34f860ef865143de5abc3349f04e..6bf558c5ec1070b93f9a38a8dac5cf72fa19401c 100644 (file)
@@ -552,7 +552,6 @@ struct mlx4_en_priv {
 
        struct mlx4_hwq_resources res;
        int link_state;
-       int last_link_state;
        bool port_up;
        int port;
        int registered;
index cf97985628ab91d4bf2940adb24cd70b18a34e1f..02e77ffe5c3e4f68e3828482f07c2f2a12fdee68 100644 (file)
@@ -155,6 +155,8 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
        u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
        int err;
 
+       mlx5_debug_cq_remove(dev, cq);
+
        mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
        mlx5_eq_del_cq(&cq->eq->core, cq);
 
@@ -162,16 +164,13 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
        MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
        MLX5_SET(destroy_cq_in, in, uid, cq->uid);
        err = mlx5_cmd_exec_in(dev, destroy_cq, in);
-       if (err)
-               return err;
 
        synchronize_irq(cq->irqn);
 
-       mlx5_debug_cq_remove(dev, cq);
        mlx5_cq_put(cq);
        wait_for_completion(&cq->free);
 
-       return 0;
+       return err;
 }
 EXPORT_SYMBOL(mlx5_core_destroy_cq);
 
index e84287ffc7cea46272bd5c3f55616431324b531e..dcf9f27ba2efdf9673e03976ed522a36c46bbc70 100644 (file)
@@ -658,11 +658,10 @@ static const struct devlink_param enable_rdma_param =
 
 static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
 {
-       struct mlx5_core_dev *dev = devlink_priv(devlink);
        union devlink_param_value value;
        int err;
 
-       if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+       if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return 0;
 
        err = devlink_param_register(devlink, &enable_rdma_param);
@@ -679,9 +678,7 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
 
 static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
 {
-       struct mlx5_core_dev *dev = devlink_priv(devlink);
-
-       if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+       if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return;
 
        devlink_param_unpublish(devlink, &enable_rdma_param);
index 3f8a98093f8cb79606698247da2ad4f2147f809a..f9cf9fb31547973e41808db83ce27500052f0758 100644 (file)
@@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
        err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
        if (err) {
                mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
-               return err;
+               goto err_cancel_work;
        }
 
        err = mlx5_fw_tracer_create_mkey(tracer);
@@ -1031,6 +1031,7 @@ err_notifier_unregister:
        mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
 err_dealloc_pd:
        mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+err_cancel_work:
        cancel_work_sync(&tracer->read_fw_strings_work);
        return err;
 }
index 669a75f3537a64833ac3378face27013fac2e7a6..03a7a4ce5cd5ec42ba18a80f642183cd37a15085 100644 (file)
@@ -252,6 +252,7 @@ struct mlx5e_params {
        struct {
                u16 mode;
                u8 num_tc;
+               struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
        } mqprio;
        bool rx_cqe_compress_def;
        bool tunneled_offload_en;
@@ -845,6 +846,7 @@ struct mlx5e_priv {
        struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
        struct mlx5e_channel_stats trap_stats;
        struct mlx5e_ptp_stats     ptp_stats;
+       u16                        stats_nch;
        u16                        max_nch;
        u8                         max_opened_tc;
        bool                       tx_ptp_opened;
@@ -922,7 +924,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work);
 
 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid);
@@ -1100,12 +1102,6 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
                                 struct ethtool_pauseparam *pauseparam);
 
 /* mlx5e generic netdev management API */
-static inline unsigned int
-mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
-{
-       return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
-}
-
 static inline bool
 mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
 {
@@ -1114,11 +1110,13 @@ mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
 }
 
 int mlx5e_priv_init(struct mlx5e_priv *priv,
+                   const struct mlx5e_profile *profile,
                    struct net_device *netdev,
                    struct mlx5_core_dev *mdev);
 void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
 struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs);
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
+                   unsigned int txqs, unsigned int rxqs);
 int mlx5e_attach_netdev(struct mlx5e_priv *priv);
 void mlx5e_detach_netdev(struct mlx5e_priv *priv);
 void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
index ac44bbe95c5c1b653366252662fddb54e92188fa..d290d7276b8d99aacaa93167085eaac55814535f 100644 (file)
@@ -35,7 +35,7 @@ static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, void *data,
 {
        int ch, i = 0;
 
-       for (ch = 0; ch < priv->max_nch; ch++) {
+       for (ch = 0; ch < priv->stats_nch; ch++) {
                void *buf = data + i;
 
                if (WARN_ON_ONCE(buf +
@@ -51,7 +51,7 @@ static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, void *data,
 static int mlx5e_hv_vhca_stats_buf_size(struct mlx5e_priv *priv)
 {
        return (sizeof(struct mlx5e_hv_vhca_per_ring_stats) *
-               priv->max_nch);
+               priv->stats_nch);
 }
 
 static void mlx5e_hv_vhca_stats_work(struct work_struct *work)
@@ -100,7 +100,7 @@ static void mlx5e_hv_vhca_stats_control(struct mlx5_hv_vhca_agent *agent,
        sagent = &priv->stats_agent;
 
        block->version = MLX5_HV_VHCA_STATS_VERSION;
-       block->rings   = priv->max_nch;
+       block->rings   = priv->stats_nch;
 
        if (!block->command) {
                cancel_delayed_work_sync(&priv->stats_agent.work);
index ee688dec67a99c87bf19022c07a0a930949d8e9c..3a86f66d1295588ac3e77e0e55b110cd619532bb 100644 (file)
@@ -13,8 +13,6 @@ struct mlx5e_ptp_fs {
        bool valid;
 };
 
-#define MLX5E_PTP_CHANNEL_IX 0
-
 struct mlx5e_ptp_params {
        struct mlx5e_params params;
        struct mlx5e_sq_param txq_sq_param;
@@ -509,6 +507,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
        rq->mdev         = mdev;
        rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        rq->stats        = &c->priv->ptp_stats.rq;
+       rq->ix           = MLX5E_PTP_CHANNEL_IX;
        rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
        err = mlx5e_rq_set_handlers(rq, params, false);
        if (err)
index c96668bd701cd8409b727b68d9d98ae4dfe9e3c7..a71a32e00ebb9f4ffd9c3704d0dc2e92e97c7f66 100644 (file)
@@ -8,6 +8,8 @@
 #include "en_stats.h"
 #include <linux/ptp_classify.h>
 
+#define MLX5E_PTP_CHANNEL_IX 0
+
 struct mlx5e_ptpsq {
        struct mlx5e_txqsq       txqsq;
        struct mlx5e_cq          ts_cq;
index 0c38c2e319be93c7ca082e432e019bb28351ff30..c6d2f8c78db71ab2342cbb368de174831724e102 100644 (file)
@@ -137,7 +137,7 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
        u16 vport_num, esw_owner_vhca_id;
        struct netlink_ext_ack *extack;
        int ifindex = upper->ifindex;
-       int err;
+       int err = 0;
 
        if (!netif_is_bridge_master(upper))
                return 0;
@@ -244,7 +244,7 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
        struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
        const struct switchdev_attr *attr = port_attr_info->attr;
        u16 vport_num, esw_owner_vhca_id;
-       int err;
+       int err = 0;
 
        if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                             &esw_owner_vhca_id))
@@ -475,9 +475,6 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
                esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
                goto err_alloc_wq;
        }
-       INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
-       queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
-                          msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
 
        br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
        err = register_switchdev_notifier(&br_offloads->nb);
@@ -500,6 +497,9 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
                         err);
                goto err_register_netdev;
        }
+       INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
+       queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+                          msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
        return;
 
 err_register_netdev:
@@ -523,10 +523,10 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
        if (!br_offloads)
                return;
 
+       cancel_delayed_work_sync(&br_offloads->update_work);
        unregister_netdevice_notifier(&br_offloads->netdev_nb);
        unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
        unregister_switchdev_notifier(&br_offloads->nb);
-       cancel_delayed_work(&br_offloads->update_work);
        destroy_workqueue(br_offloads->wq);
        rtnl_lock();
        mlx5_esw_bridge_cleanup(esw);
index 51a4d80f7fa34033d8229d1c9eb27bdc8e3c7669..de03684528bbf97fcd4502306e5483daa3fe759d 100644 (file)
@@ -300,9 +300,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
 {
        struct mlx5e_rep_indr_block_priv *cb_priv;
 
-       /* All callback list access should be protected by RTNL. */
-       ASSERT_RTNL();
-
        list_for_each_entry(cb_priv,
                            &rpriv->uplink_priv.tc_indr_block_priv_list,
                            list)
index bf0313e2682bafb098f5685181716e856835b956..13056cb9757d4b117ff7fbb83dd5a1cec3be40d0 100644 (file)
@@ -572,7 +572,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
        if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
                u32 rqn;
 
-               if (mlx5e_channels_get_ptp_rqn(chs, &rqn))
+               if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
                        rqn = res->drop_rqn;
 
                err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
index 2cfd12953909bd2cfbf66c95637583e417565083..9d451b8ee467c7b41d784093d933cd2b4f82fb05 100644 (file)
@@ -1884,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
        return set_pflag_cqe_based_moder(netdev, enable, true);
 }
 
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter)
 {
        bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
        struct mlx5e_params new_params;
@@ -1896,8 +1896,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
        if (curr_val == new_val)
                return 0;
 
-       if (new_val && !priv->profile->rx_ptp_support &&
-           priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+       if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
                netdev_err(priv->netdev,
                           "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
                return -EINVAL;
@@ -1905,7 +1904,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
 
        new_params = priv->channels.params;
        MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
-       if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
+       if (rx_filter)
                new_params.ptp_rx = new_val;
 
        if (new_params.ptp_rx == priv->channels.params.ptp_rx)
@@ -1928,12 +1927,14 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       bool rx_filter;
        int err;
 
        if (!MLX5_CAP_GEN(mdev, cqe_compression))
                return -EOPNOTSUPP;
 
-       err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+       rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE;
+       err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter);
        if (err)
                return err;
 
@@ -2035,6 +2036,17 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
        }
 
        new_params = priv->channels.params;
+       /* Don't allow enabling TX-port-TS if MQPRIO mode channel offload is
+        * active, since it defines explicitly which TC accepts the packet.
+        * This conflicts with TX-port-TS hijacking the PTP traffic to a specific
+        * HW TX-queue.
+        */
+       if (enable && new_params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
+               netdev_err(priv->netdev,
+                          "%s: MQPRIO mode channel offload is active, cannot set the TX-port-TS\n",
+                          __func__);
+               return -EINVAL;
+       }
        MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_PORT_TS, enable);
        /* No need to verify SQ stop room as
         * ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both
index 47efd858964d6b809b3c3539470ae4faa89e8c1a..09c8b71b186c72c7d6200c5b93762320a0bf16e8 100644 (file)
@@ -2264,7 +2264,7 @@ void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
 }
 
 static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
-                               struct tc_mqprio_qopt_offload *mqprio)
+                               struct netdev_tc_txq *tc_to_txq)
 {
        int tc, err;
 
@@ -2282,11 +2282,8 @@ static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
        for (tc = 0; tc < ntc; tc++) {
                u16 count, offset;
 
-               /* For DCB mode, map netdev TCs to offset 0
-                * We have our own UP to TXQ mapping for QoS
-                */
-               count = mqprio ? mqprio->qopt.count[tc] : nch;
-               offset = mqprio ? mqprio->qopt.offset[tc] : 0;
+               count = tc_to_txq[tc].count;
+               offset = tc_to_txq[tc].offset;
                netdev_set_tc_queue(netdev, tc, count, offset);
        }
 
@@ -2315,19 +2312,24 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
 
 static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 {
+       struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
        struct net_device *netdev = priv->netdev;
        int old_num_txqs, old_ntc;
        int num_rxqs, nch, ntc;
        int err;
+       int i;
 
        old_num_txqs = netdev->real_num_tx_queues;
        old_ntc = netdev->num_tc ? : 1;
+       for (i = 0; i < ARRAY_SIZE(old_tc_to_txq); i++)
+               old_tc_to_txq[i] = netdev->tc_to_txq[i];
 
        nch = priv->channels.params.num_channels;
-       ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
+       ntc = priv->channels.params.mqprio.num_tc;
        num_rxqs = nch * priv->profile->rq_groups;
+       tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
 
-       err = mlx5e_netdev_set_tcs(netdev, nch, ntc, NULL);
+       err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
        if (err)
                goto err_out;
        err = mlx5e_update_tx_netdev_queues(priv);
@@ -2350,11 +2352,14 @@ err_txqs:
        WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
 
 err_tcs:
-       mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc, NULL);
+       WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
+                                         old_tc_to_txq));
 err_out:
        return err;
 }
 
+static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
+
 static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
                                           struct mlx5e_params *params)
 {
@@ -2861,6 +2866,58 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
        return 0;
 }
 
+static void mlx5e_mqprio_build_default_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
+                                                int ntc, int nch)
+{
+       int tc;
+
+       memset(tc_to_txq, 0, sizeof(*tc_to_txq) * TC_MAX_QUEUE);
+
+       /* Map netdev TCs to offset 0.
+        * We have our own UP to TXQ mapping for DCB mode of QoS
+        */
+       for (tc = 0; tc < ntc; tc++) {
+               tc_to_txq[tc] = (struct netdev_tc_txq) {
+                       .count = nch,
+                       .offset = 0,
+               };
+       }
+}
+
+static void mlx5e_mqprio_build_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
+                                        struct tc_mqprio_qopt *qopt)
+{
+       int tc;
+
+       for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+               tc_to_txq[tc] = (struct netdev_tc_txq) {
+                       .count = qopt->count[tc],
+                       .offset = qopt->offset[tc],
+               };
+       }
+}
+
+static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
+{
+       params->mqprio.mode = TC_MQPRIO_MODE_DCB;
+       params->mqprio.num_tc = num_tc;
+       mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
+                                            params->num_channels);
+}
+
+static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
+                                           struct tc_mqprio_qopt *qopt)
+{
+       params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
+       params->mqprio.num_tc = qopt->num_tc;
+       mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
+}
+
+static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
+{
+       mlx5e_params_mqprio_dcb_set(params, 1);
+}
+
 static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
                                     struct tc_mqprio_qopt *mqprio)
 {
@@ -2874,8 +2931,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
                return -EINVAL;
 
        new_params = priv->channels.params;
-       new_params.mqprio.mode = TC_MQPRIO_MODE_DCB;
-       new_params.mqprio.num_tc = tc ? tc : 1;
+       mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
 
        err = mlx5e_safe_switch_params(priv, &new_params,
                                       mlx5e_num_channels_changed_ctx, NULL, true);
@@ -2889,9 +2945,17 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
                                         struct tc_mqprio_qopt_offload *mqprio)
 {
        struct net_device *netdev = priv->netdev;
+       struct mlx5e_ptp *ptp_channel;
        int agg_count = 0;
        int i;
 
+       ptp_channel = priv->channels.ptp;
+       if (ptp_channel && test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state)) {
+               netdev_err(netdev,
+                          "Cannot activate MQPRIO mode channel since it conflicts with TX port TS\n");
+               return -EINVAL;
+       }
+
        if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 ||
            mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC)
                return -EINVAL;
@@ -2917,8 +2981,8 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
                agg_count += mqprio->qopt.count[i];
        }
 
-       if (priv->channels.params.num_channels < agg_count) {
-               netdev_err(netdev, "Num of queues (%d) exceeds available (%d)\n",
+       if (priv->channels.params.num_channels != agg_count) {
+               netdev_err(netdev, "Num of queues (%d) does not match available (%d)\n",
                           agg_count, priv->channels.params.num_channels);
                return -EINVAL;
        }
@@ -2926,25 +2990,12 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
        return 0;
 }
 
-static int mlx5e_mqprio_channel_set_tcs_ctx(struct mlx5e_priv *priv, void *ctx)
-{
-       struct tc_mqprio_qopt_offload *mqprio = (struct tc_mqprio_qopt_offload *)ctx;
-       struct net_device *netdev = priv->netdev;
-       u8 num_tc;
-
-       if (priv->channels.params.mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
-               return -EINVAL;
-
-       num_tc = priv->channels.params.mqprio.num_tc;
-       mlx5e_netdev_set_tcs(netdev, 0, num_tc, mqprio);
-
-       return 0;
-}
-
 static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
                                         struct tc_mqprio_qopt_offload *mqprio)
 {
+       mlx5e_fp_preactivate preactivate;
        struct mlx5e_params new_params;
+       bool nch_changed;
        int err;
 
        err = mlx5e_mqprio_channel_validate(priv, mqprio);
@@ -2952,12 +3003,12 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
                return err;
 
        new_params = priv->channels.params;
-       new_params.mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
-       new_params.mqprio.num_tc = mqprio->qopt.num_tc;
-       err = mlx5e_safe_switch_params(priv, &new_params,
-                                      mlx5e_mqprio_channel_set_tcs_ctx, mqprio, true);
+       mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt);
 
-       return err;
+       nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
+       preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
+               mlx5e_update_netdev_queues_ctx;
+       return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
 }
 
 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -3065,7 +3116,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
 {
        int i;
 
-       for (i = 0; i < priv->max_nch; i++) {
+       for (i = 0; i < priv->stats_nch; i++) {
                struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
                struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
                struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
@@ -3274,20 +3325,67 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable)
        return mlx5_set_port_fcs(mdev, !enable);
 }
 
+static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
+{
+       u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
+       bool supported, curr_state;
+       int err;
+
+       if (!MLX5_CAP_GEN(mdev, ports_check))
+               return 0;
+
+       err = mlx5_query_ports_check(mdev, in, sizeof(in));
+       if (err)
+               return err;
+
+       supported = MLX5_GET(pcmr_reg, in, rx_ts_over_crc_cap);
+       curr_state = MLX5_GET(pcmr_reg, in, rx_ts_over_crc);
+
+       if (!supported || enable == curr_state)
+               return 0;
+
+       MLX5_SET(pcmr_reg, in, local_port, 1);
+       MLX5_SET(pcmr_reg, in, rx_ts_over_crc, enable);
+
+       return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
 static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5e_channels *chs = &priv->channels;
+       struct mlx5_core_dev *mdev = priv->mdev;
        int err;
 
        mutex_lock(&priv->state_lock);
 
-       priv->channels.params.scatter_fcs_en = enable;
-       err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
-       if (err)
-               priv->channels.params.scatter_fcs_en = !enable;
+       if (enable) {
+               err = mlx5e_set_rx_port_ts(mdev, false);
+               if (err)
+                       goto out;
 
-       mutex_unlock(&priv->state_lock);
+               chs->params.scatter_fcs_en = true;
+               err = mlx5e_modify_channels_scatter_fcs(chs, true);
+               if (err) {
+                       chs->params.scatter_fcs_en = false;
+                       mlx5e_set_rx_port_ts(mdev, true);
+               }
+       } else {
+               chs->params.scatter_fcs_en = false;
+               err = mlx5e_modify_channels_scatter_fcs(chs, false);
+               if (err) {
+                       chs->params.scatter_fcs_en = true;
+                       goto out;
+               }
+               err = mlx5e_set_rx_port_ts(mdev, true);
+               if (err) {
+                       mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
+                       err = 0;
+               }
+       }
 
+out:
+       mutex_unlock(&priv->state_lock);
        return err;
 }
 
@@ -3554,14 +3652,14 @@ static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filte
 
        if (!rx_filter)
                /* Reset CQE compression to Admin default */
-               return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+               return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);
 
        if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
                return 0;
 
        /* Disable CQE compression */
        netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
-       err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+       err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
        if (err)
                netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
 
@@ -4186,13 +4284,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 rx_cq_period_mode;
 
-       priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
-
        params->sw_mtu = mtu;
        params->hard_mtu = MLX5E_ETH_HARD_MTU;
        params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
                                     priv->max_nch);
-       params->mqprio.num_tc = 1;
+       mlx5e_params_mqprio_reset(params);
 
        /* Set an initial non-zero value, so that mlx5e_select_queue won't
         * divide by zero if called before first activating channels.
@@ -4682,8 +4778,35 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
        .rx_ptp_support    = true,
 };
 
+static unsigned int
+mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
+                  const struct mlx5e_profile *profile)
+
+{
+       unsigned int max_nch, tmp;
+
+       /* core resources */
+       max_nch = mlx5e_get_max_num_channels(mdev);
+
+       /* netdev rx queues */
+       tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
+       max_nch = min_t(unsigned int, max_nch, tmp);
+
+       /* netdev tx queues */
+       tmp = netdev->num_tx_queues;
+       if (mlx5_qos_is_supported(mdev))
+               tmp -= mlx5e_qos_max_leaf_nodes(mdev);
+       if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
+               tmp -= profile->max_tc;
+       tmp = tmp / profile->max_tc;
+       max_nch = min_t(unsigned int, max_nch, tmp);
+
+       return max_nch;
+}
+
 /* mlx5e generic netdev management API (move to en_common.c) */
 int mlx5e_priv_init(struct mlx5e_priv *priv,
+                   const struct mlx5e_profile *profile,
                    struct net_device *netdev,
                    struct mlx5_core_dev *mdev)
 {
@@ -4691,6 +4814,8 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
        priv->mdev        = mdev;
        priv->netdev      = netdev;
        priv->msglevel    = MLX5E_MSG_LEVEL;
+       priv->max_nch     = mlx5e_calc_max_nch(mdev, netdev, profile);
+       priv->stats_nch   = priv->max_nch;
        priv->max_opened_tc = 1;
 
        if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
@@ -4734,7 +4859,8 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
 }
 
 struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs)
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
+                   unsigned int txqs, unsigned int rxqs)
 {
        struct net_device *netdev;
        int err;
@@ -4745,7 +4871,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int
                return NULL;
        }
 
-       err = mlx5e_priv_init(netdev_priv(netdev), netdev, mdev);
+       err = mlx5e_priv_init(netdev_priv(netdev), profile, netdev, mdev);
        if (err) {
                mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
                goto err_free_netdev;
@@ -4787,7 +4913,7 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
        clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
        /* max number of channels may have changed */
-       max_nch = mlx5e_get_max_num_channels(priv->mdev);
+       max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
        if (priv->channels.params.num_channels > max_nch) {
                mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
                /* Reducing the number of channels - RXFH has to be reset, and
@@ -4795,7 +4921,18 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
                 */
                priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
                priv->channels.params.num_channels = max_nch;
+               if (priv->channels.params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
+                       mlx5_core_warn(priv->mdev, "MLX5E: Disabling MQPRIO channel mode\n");
+                       mlx5e_params_mqprio_reset(&priv->channels.params);
+               }
        }
+       if (max_nch != priv->max_nch) {
+               mlx5_core_warn(priv->mdev,
+                              "MLX5E: Updating max number of channels from %u to %u\n",
+                              priv->max_nch, max_nch);
+               priv->max_nch = max_nch;
+       }
+
        /* 1. Set the real number of queues in the kernel the first time.
         * 2. Set our default XPS cpumask.
         * 3. Build the RQT.
@@ -4860,7 +4997,7 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;
 
-       err = mlx5e_priv_init(priv, netdev, mdev);
+       err = mlx5e_priv_init(priv, new_profile, netdev, mdev);
        if (err) {
                mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
                return err;
@@ -4886,20 +5023,12 @@ priv_cleanup:
 int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
                                const struct mlx5e_profile *new_profile, void *new_ppriv)
 {
-       unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile);
        const struct mlx5e_profile *orig_profile = priv->profile;
        struct net_device *netdev = priv->netdev;
        struct mlx5_core_dev *mdev = priv->mdev;
        void *orig_ppriv = priv->ppriv;
        int err, rollback_err;
 
-       /* sanity */
-       if (new_max_nch != priv->max_nch) {
-               netdev_warn(netdev, "%s: Replacing profile with different max channels\n",
-                           __func__);
-               return -EINVAL;
-       }
-
        /* cleanup old profile */
        mlx5e_detach_netdev(priv);
        priv->profile->cleanup(priv);
@@ -4995,7 +5124,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
        nch = mlx5e_get_max_num_channels(mdev);
        txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
        rxqs = nch * profile->rq_groups;
-       netdev = mlx5e_create_netdev(mdev, txqs, rxqs);
+       netdev = mlx5e_create_netdev(mdev, profile, txqs, rxqs);
        if (!netdev) {
                mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
                return -ENOMEM;
index ae71a17fdb277bfed76ee9041a1c0fe2607f4e88..0684ac6699b2de8f832e6a7f29df1f31d94227f5 100644 (file)
@@ -596,7 +596,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
                                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                         MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
 
-       priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
        params = &priv->channels.params;
 
        params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
@@ -619,6 +618,11 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
        params->mqprio.num_tc       = 1;
        params->tunneled_offload_en = false;
 
+       /* Set an initial non-zero value, so that mlx5e_select_queue won't
+        * divide by zero if called before first activating channels.
+        */
+       priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
+
        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 }
 
@@ -644,7 +648,6 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
        netdev->hw_features    |= NETIF_F_RXCSUM;
 
        netdev->features |= netdev->hw_features;
-       netdev->features |= NETIF_F_VLAN_CHALLENGED;
        netdev->features |= NETIF_F_NETNS_LOCAL;
 }
 
@@ -1169,7 +1172,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
        nch = mlx5e_get_max_num_channels(dev);
        txqs = nch * profile->max_tc;
        rxqs = nch * profile->rq_groups;
-       netdev = mlx5e_create_netdev(dev, txqs, rxqs);
+       netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs);
        if (!netdev) {
                mlx5_core_warn(dev,
                               "Failed to create representor netdev for vport %d\n",
index 3c65fd0bcf31c56f67f41a2d65a91902776e7be2..29a6586ef28dc12848a4625e147b1f252c8cddab 100644 (file)
@@ -1001,14 +1001,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                goto csum_unnecessary;
 
        if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
-               u8 ipproto = get_ip_proto(skb, network_depth, proto);
-
-               if (unlikely(ipproto == IPPROTO_SCTP))
+               if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
                        goto csum_unnecessary;
 
-               if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
-                       goto csum_none;
-
                stats->csum_complete++;
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
index e4f5b63951482462f0842b18d684821e56ede381..e1dd17019030e6c339592095a0f10f860aba7216 100644 (file)
@@ -34,6 +34,7 @@
 #include "en.h"
 #include "en_accel/tls.h"
 #include "en_accel/en_accel.h"
+#include "en/ptp.h"
 
 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
 {
@@ -450,7 +451,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
 
        memset(s, 0, sizeof(*s));
 
-       for (i = 0; i < priv->max_nch; i++) {
+       for (i = 0; i < priv->stats_nch; i++) {
                struct mlx5e_channel_stats *channel_stats =
                        &priv->channel_stats[i];
                int j;
@@ -2076,7 +2077,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
        if (priv->rx_ptp_opened) {
                for (i = 0; i < NUM_PTP_RQ_STATS; i++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
-                               ptp_rq_stats_desc[i].format);
+                               ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
        }
        return idx;
 }
@@ -2119,7 +2120,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
 
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
 {
-       int max_nch = priv->max_nch;
+       int max_nch = priv->stats_nch;
 
        return (NUM_RQ_STATS * max_nch) +
               (NUM_CH_STATS * max_nch) +
@@ -2133,7 +2134,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
 {
        bool is_xsk = priv->xsk.ever_used;
-       int max_nch = priv->max_nch;
+       int max_nch = priv->stats_nch;
        int i, j, tc;
 
        for (i = 0; i < max_nch; i++)
@@ -2175,7 +2176,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
 {
        bool is_xsk = priv->xsk.ever_used;
-       int max_nch = priv->max_nch;
+       int max_nch = priv->stats_nch;
        int i, j, tc;
 
        for (i = 0; i < max_nch; i++)
index 0399a396d1662d7b553141cbb3f2646701ab0ebf..60a73990017c2a3683d112e0fc03a746276b2bbe 100644 (file)
@@ -79,12 +79,16 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
        int dest_num = 0;
        int err = 0;
 
-       if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+       if (vport->egress.legacy.drop_counter) {
+               drop_counter = vport->egress.legacy.drop_counter;
+       } else if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
                drop_counter = mlx5_fc_create(esw->dev, false);
-               if (IS_ERR(drop_counter))
+               if (IS_ERR(drop_counter)) {
                        esw_warn(esw->dev,
                                 "vport[%d] configure egress drop rule counter err(%ld)\n",
                                 vport->vport, PTR_ERR(drop_counter));
+                       drop_counter = NULL;
+               }
                vport->egress.legacy.drop_counter = drop_counter;
        }
 
@@ -123,7 +127,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
 
        /* Attach egress drop flow counter */
-       if (!IS_ERR_OR_NULL(drop_counter)) {
+       if (drop_counter) {
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
@@ -162,7 +166,7 @@ void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw,
        esw_acl_egress_table_destroy(vport);
 
 clean_drop_counter:
-       if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) {
+       if (vport->egress.legacy.drop_counter) {
                mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
                vport->egress.legacy.drop_counter = NULL;
        }
index f75b86abaf1cdf5d0bbd0a79fe1de296c1d3cbf5..b1a5199260f69627a151785eebcbad361d8100f3 100644 (file)
@@ -160,7 +160,9 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
 
        esw_acl_ingress_lgcy_rules_destroy(vport);
 
-       if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+       if (vport->ingress.legacy.drop_counter) {
+               counter = vport->ingress.legacy.drop_counter;
+       } else if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
                counter = mlx5_fc_create(esw->dev, false);
                if (IS_ERR(counter)) {
                        esw_warn(esw->dev,
index 9fe8e3c204d63a117a21c68f6cda37b463ba25fa..fe501ba88bea9cd11548851dab9eaf4e5503be98 100644 (file)
@@ -1682,14 +1682,13 @@ static int build_match_list(struct match_list *match_head,
 
                curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
                if (!curr_match) {
+                       rcu_read_unlock();
                        free_match_list(match_head, ft_locked);
-                       err = -ENOMEM;
-                       goto out;
+                       return -ENOMEM;
                }
                curr_match->g = g;
                list_add_tail(&curr_match->list, &match_head->list);
        }
-out:
        rcu_read_unlock();
        return err;
 }
index 67571e5040d68cf53df14de1c69dc72727e57c3c..269ebb53eda6760f9ecf50206b3ecd4ea818862b 100644 (file)
@@ -113,7 +113,7 @@ static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
        struct mlx5e_sw_stats s = { 0 };
        int i, j;
 
-       for (i = 0; i < priv->max_nch; i++) {
+       for (i = 0; i < priv->stats_nch; i++) {
                struct mlx5e_channel_stats *channel_stats;
                struct mlx5e_rq_stats *rq_stats;
 
@@ -711,7 +711,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
                        goto destroy_ht;
        }
 
-       err = mlx5e_priv_init(epriv, netdev, mdev);
+       err = mlx5e_priv_init(epriv, prof, netdev, mdev);
        if (err)
                goto destroy_mdev_resources;
 
index 49ca57c6d31dceeef31e77d3d06ab293cd304395..ca5690b0a7abbf3628e7d56d96f9d9ee6233099a 100644 (file)
@@ -927,9 +927,12 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
        struct mlx5_core_dev *dev1;
        struct mlx5_lag *ldev;
 
+       ldev = mlx5_lag_dev(dev);
+       if (!ldev)
+               return;
+
        mlx5_dev_list_lock();
 
-       ldev = mlx5_lag_dev(dev);
        dev0 = ldev->pf[MLX5_LAG_P1].dev;
        dev1 = ldev->pf[MLX5_LAG_P2].dev;
 
@@ -946,8 +949,11 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
 {
        struct mlx5_lag *ldev;
 
-       mlx5_dev_list_lock();
        ldev = mlx5_lag_dev(dev);
+       if (!ldev)
+               return;
+
+       mlx5_dev_list_lock();
        ldev->mode_changes_in_progress--;
        mlx5_dev_list_unlock();
        mlx5_queue_bond_work(ldev, 0);
index ffac8a0e7a2321659976e9dd1714f87617f8662e..91e806c1aa21126abe7bcdc9e8817a8efd1bc79c 100644 (file)
@@ -448,22 +448,20 @@ static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
        return cycles_now + cycles_delta;
 }
 
-static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev,
-                                     s64 sec, u32 nsec)
+static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
 {
-       struct timespec64 ts;
+       struct timespec64 ts = {};
        s64 target_ns;
 
        ts.tv_sec = sec;
-       ts.tv_nsec = nsec;
        target_ns = timespec64_to_ns(&ts);
 
        return find_target_cycles(mdev, target_ns);
 }
 
-static u64 perout_conf_real_time(s64 sec, u32 nsec)
+static u64 perout_conf_real_time(s64 sec)
 {
-       return (u64)nsec | (u64)sec << 32;
+       return (u64)sec << 32;
 }
 
 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
@@ -474,6 +472,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
                        container_of(ptp, struct mlx5_clock, ptp_info);
        struct mlx5_core_dev *mdev =
                        container_of(clock, struct mlx5_core_dev, clock);
+       bool rt_mode = mlx5_real_time_mode(mdev);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        struct timespec64 ts;
        u32 field_select = 0;
@@ -501,8 +500,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
 
        if (on) {
                bool rt_mode = mlx5_real_time_mode(mdev);
-               u32 nsec;
-               s64 sec;
+               s64 sec = rq->perout.start.sec;
+
+               if (rq->perout.start.nsec)
+                       return -EINVAL;
 
                pin_mode = MLX5_PIN_MODE_OUT;
                pattern = MLX5_OUT_PATTERN_PERIODIC;
@@ -513,14 +514,11 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
                if ((ns >> 1) != 500000000LL)
                        return -EINVAL;
 
-               nsec = rq->perout.start.nsec;
-               sec = rq->perout.start.sec;
-
                if (rt_mode && sec > U32_MAX)
                        return -EINVAL;
 
-               time_stamp = rt_mode ? perout_conf_real_time(sec, nsec) :
-                                      perout_conf_internal_timer(mdev, sec, nsec);
+               time_stamp = rt_mode ? perout_conf_real_time(sec) :
+                                      perout_conf_internal_timer(mdev, sec);
 
                field_select |= MLX5_MTPPS_FS_PIN_MODE |
                                MLX5_MTPPS_FS_PATTERN |
@@ -538,6 +536,9 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
        if (err)
                return err;
 
+       if (rt_mode)
+               return 0;
+
        return mlx5_set_mtppse(mdev, pin, 0,
                               MLX5_EVENT_MODE_REPETETIVE & on);
 }
@@ -705,20 +706,14 @@ static void ts_next_sec(struct timespec64 *ts)
 static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
                                        struct mlx5_clock *clock)
 {
-       bool rt_mode = mlx5_real_time_mode(mdev);
        struct timespec64 ts;
        s64 target_ns;
 
-       if (rt_mode)
-               ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
-       else
-               mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
-
+       mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
        ts_next_sec(&ts);
        target_ns = timespec64_to_ns(&ts);
 
-       return rt_mode ? perout_conf_real_time(ts.tv_sec, ts.tv_nsec) :
-                        find_target_cycles(mdev, target_ns);
+       return find_target_cycles(mdev, target_ns);
 }
 
 static int mlx5_pps_event(struct notifier_block *nb,
index c79a10b3454d48244d5f43cd012fd4d31452d420..763c83a0238091ef55b7e13d78bc4dc41212be60 100644 (file)
@@ -13,8 +13,8 @@
 #endif
 
 #define MLX5_MAX_IRQ_NAME (32)
-/* max irq_index is 255. three chars */
-#define MLX5_MAX_IRQ_IDX_CHARS (3)
+/* max irq_index is 2047, so four chars */
+#define MLX5_MAX_IRQ_IDX_CHARS (4)
 
 #define MLX5_SFS_PER_CTRL_IRQ 64
 #define MLX5_IRQ_CTRL_SF_MAX 8
@@ -633,8 +633,9 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
 int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
 {
        if (table->sf_comp_pool)
-               return table->sf_comp_pool->xa_num_irqs.max -
-                       table->sf_comp_pool->xa_num_irqs.min + 1;
+               return min_t(int, num_online_cpus(),
+                            table->sf_comp_pool->xa_num_irqs.max -
+                            table->sf_comp_pool->xa_num_irqs.min + 1);
        else
                return mlx5_irq_table_get_num_comp(table);
 }
index 3e85b17f5857303d9628f0ca68725d34cca75c11..6704f5c1aa32e78cb1b73ee8ab0093091bd19f78 100644 (file)
@@ -142,6 +142,13 @@ static int mlxbf_gige_open(struct net_device *netdev)
        err = mlxbf_gige_clean_port(priv);
        if (err)
                goto free_irqs;
+
+       /* Clear driver's valid_polarity to match hardware,
+        * since the above call to clean_port() resets the
+        * receive polarity used by hardware.
+        */
+       priv->valid_polarity = 0;
+
        err = mlxbf_gige_rx_init(priv);
        if (err)
                goto free_irqs;
index 0998dcc9cac04688bbefc092973543a7fb7493e5..b29824448aa858d078878af2ebb366c26611e3dc 100644 (file)
 #define MLXSW_THERMAL_ZONE_MAX_NAME    16
 #define MLXSW_THERMAL_TEMP_SCORE_MAX   GENMASK(31, 0)
 #define MLXSW_THERMAL_MAX_STATE        10
+#define MLXSW_THERMAL_MIN_STATE        2
 #define MLXSW_THERMAL_MAX_DUTY 255
-/* Minimum and maximum fan allowed speed in percent: from 20% to 100%. Values
- * MLXSW_THERMAL_MAX_STATE + x, where x is between 2 and 10 are used for
- * setting fan speed dynamic minimum. For example, if value is set to 14 (40%)
- * cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10 to
- * introduce PWM speed in percent: 40, 40, 40, 40, 40, 50, 60. 70, 80, 90, 100.
- */
-#define MLXSW_THERMAL_SPEED_MIN                (MLXSW_THERMAL_MAX_STATE + 2)
-#define MLXSW_THERMAL_SPEED_MAX                (MLXSW_THERMAL_MAX_STATE * 2)
-#define MLXSW_THERMAL_SPEED_MIN_LEVEL  2               /* 20% */
 
 /* External cooling devices, allowed for binding to mlxsw thermal zones. */
 static char * const mlxsw_thermal_external_allowed_cdev[] = {
@@ -646,49 +638,16 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
        struct mlxsw_thermal *thermal = cdev->devdata;
        struct device *dev = thermal->bus_info->dev;
        char mfsc_pl[MLXSW_REG_MFSC_LEN];
-       unsigned long cur_state, i;
        int idx;
-       u8 duty;
        int err;
 
+       if (state > MLXSW_THERMAL_MAX_STATE)
+               return -EINVAL;
+
        idx = mlxsw_get_cooling_device_idx(thermal, cdev);
        if (idx < 0)
                return idx;
 
-       /* Verify if this request is for changing allowed fan dynamical
-        * minimum. If it is - update cooling levels accordingly and update
-        * state, if current state is below the newly requested minimum state.
-        * For example, if current state is 5, and minimal state is to be
-        * changed from 4 to 6, thermal->cooling_levels[0 to 5] will be changed
-        * all from 4 to 6. And state 5 (thermal->cooling_levels[4]) should be
-        * overwritten.
-        */
-       if (state >= MLXSW_THERMAL_SPEED_MIN &&
-           state <= MLXSW_THERMAL_SPEED_MAX) {
-               state -= MLXSW_THERMAL_MAX_STATE;
-               for (i = 0; i <= MLXSW_THERMAL_MAX_STATE; i++)
-                       thermal->cooling_levels[i] = max(state, i);
-
-               mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
-               err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
-               if (err)
-                       return err;
-
-               duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl);
-               cur_state = mlxsw_duty_to_state(duty);
-
-               /* If current fan state is lower than requested dynamical
-                * minimum, increase fan speed up to dynamical minimum.
-                */
-               if (state < cur_state)
-                       return 0;
-
-               state = cur_state;
-       }
-
-       if (state > MLXSW_THERMAL_MAX_STATE)
-               return -EINVAL;
-
        /* Normalize the state to the valid speed range. */
        state = thermal->cooling_levels[state];
        mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
@@ -998,8 +957,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
 
        /* Initialize cooling levels per PWM state. */
        for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
-               thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL,
-                                                i);
+               thermal->cooling_levels[i] = max(MLXSW_THERMAL_MIN_STATE, i);
 
        thermal->polling_delay = bus_info->low_frequency ?
                                 MLXSW_THERMAL_SLOW_POLL_INT :
index 5cc00d22c708c46f37afbea48830223b53237cb9..6ecc4eb30e74b80582abde169a21afbacc577705 100644 (file)
@@ -4,8 +4,6 @@
 #
 
 obj-$(CONFIG_KS8842) += ks8842.o
-obj-$(CONFIG_KS8851) += ks8851.o
-ks8851-objs = ks8851_common.o ks8851_spi.o
-obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
-ks8851_mll-objs = ks8851_common.o ks8851_par.o
+obj-$(CONFIG_KS8851) += ks8851_common.o ks8851_spi.o
+obj-$(CONFIG_KS8851_MLL) += ks8851_common.o ks8851_par.o
 obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
index 3f69bb59ba49a8fb1b5e091261e26bffdbbeffdc..a6db1a8156e1a8ebf2b31ca26b9946ade628ec11 100644 (file)
@@ -1057,6 +1057,7 @@ int ks8851_suspend(struct device *dev)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(ks8851_suspend);
 
 int ks8851_resume(struct device *dev)
 {
@@ -1070,6 +1071,7 @@ int ks8851_resume(struct device *dev)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(ks8851_resume);
 #endif
 
 static int ks8851_register_mdiobus(struct ks8851_net *ks, struct device *dev)
@@ -1243,6 +1245,7 @@ err_reg:
 err_reg_io:
        return ret;
 }
+EXPORT_SYMBOL_GPL(ks8851_probe_common);
 
 int ks8851_remove_common(struct device *dev)
 {
@@ -1261,3 +1264,8 @@ int ks8851_remove_common(struct device *dev)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(ks8851_remove_common);
+
+MODULE_DESCRIPTION("KS8851 Network driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
index 796e46a5392699c932d1d9b94c09bd5749ad6b18..81a8ccca7e5e075de4e1c4d5dfc21cab6874a686 100644 (file)
@@ -497,13 +497,19 @@ static struct regmap_bus phymap_encx24j600 = {
        .reg_read = regmap_encx24j600_phy_reg_read,
 };
 
-void devm_regmap_init_encx24j600(struct device *dev,
-                                struct encx24j600_context *ctx)
+int devm_regmap_init_encx24j600(struct device *dev,
+                               struct encx24j600_context *ctx)
 {
        mutex_init(&ctx->mutex);
        regcfg.lock_arg = ctx;
        ctx->regmap = devm_regmap_init(dev, &regmap_encx24j600, ctx, &regcfg);
+       if (IS_ERR(ctx->regmap))
+               return PTR_ERR(ctx->regmap);
        ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg);
+       if (IS_ERR(ctx->phymap))
+               return PTR_ERR(ctx->phymap);
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
 
index ee921a99e439a23f42b27f34cca6a01a64b99957..0bc6b3176fbf0a5c4703c6a4b4b36e78f48be4b2 100644 (file)
@@ -1023,10 +1023,13 @@ static int encx24j600_spi_probe(struct spi_device *spi)
        priv->speed = SPEED_100;
 
        priv->ctx.spi = spi;
-       devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
        ndev->irq = spi->irq;
        ndev->netdev_ops = &encx24j600_netdev_ops;
 
+       ret = devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
+       if (ret)
+               goto out_free;
+
        mutex_init(&priv->lock);
 
        /* Reset device and check if it is connected */
index fac61a8fbd0205feef4e1ed3608b733fcad12722..34c5a289898c925c4cb477afb1d1a773796e2131 100644 (file)
@@ -15,8 +15,8 @@ struct encx24j600_context {
        int bank;
 };
 
-void devm_regmap_init_encx24j600(struct device *dev,
-                                struct encx24j600_context *ctx);
+int devm_regmap_init_encx24j600(struct device *dev,
+                               struct encx24j600_context *ctx);
 
 /* Single-byte instructions */
 #define BANK_SELECT(bank) (0xC0 | ((bank & (BANK_MASK >> BANK_SHIFT)) << 1))
index c1310ea1c216f2860a0b194e7f718e9e1e221090..d5c485a6d2845db469900b816b1b85db4cf6b417 100644 (file)
@@ -398,9 +398,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
        int err;
        u16 i;
 
-       dma_buf = kzalloc(sizeof(*dma_buf) +
-                         q_depth * sizeof(struct hwc_work_request),
-                         GFP_KERNEL);
+       dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
        if (!dma_buf)
                return -ENOMEM;
 
index 1b21030308e57c6004c299eb6e76de5a822fd8d6..030ae89f3a337abb52568185b494c15945e309f5 100644 (file)
@@ -1477,8 +1477,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
        if (err)
                goto out;
 
-       if (cq->gdma_id >= gc->max_num_cqs)
+       if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
+               err = -EINVAL;
                goto out;
+       }
 
        gc->cq_table[cq->gdma_id] = cq->gdma_cq;
 
index c581b955efb3c32db82e1385378425d425d9cb4d..a08e4f530c1c1186a55d8a1392a3786792c11396 100644 (file)
@@ -472,9 +472,9 @@ void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
            !(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP))
                ocelot_port_rmwl(ocelot_port,
                                 DEV_CLOCK_CFG_MAC_TX_RST |
-                                DEV_CLOCK_CFG_MAC_TX_RST,
+                                DEV_CLOCK_CFG_MAC_RX_RST,
                                 DEV_CLOCK_CFG_MAC_TX_RST |
-                                DEV_CLOCK_CFG_MAC_TX_RST,
+                                DEV_CLOCK_CFG_MAC_RX_RST,
                                 DEV_CLOCK_CFG);
 }
 EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down);
@@ -563,65 +563,50 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
        ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
                           DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
 
-       /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
-        * reset
-        */
-       ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed),
-                          DEV_CLOCK_CFG);
-
-       /* No PFC */
-       ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed),
-                        ANA_PFC_PFC_CFG, port);
-
        /* Core: Enable port for frame transfer */
        ocelot_fields_write(ocelot, port,
                            QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
 }
 EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
 
-static void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
-                                        struct sk_buff *clone)
+static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+                                       struct sk_buff *clone)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
+       unsigned long flags;
+
+       spin_lock_irqsave(&ocelot->ts_id_lock, flags);
 
-       spin_lock(&ocelot_port->ts_id_lock);
+       if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
+           ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
+               spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+               return -EBUSY;
+       }
 
        skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
        /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
        OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
-       ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
-       skb_queue_tail(&ocelot_port->tx_skbs, clone);
 
-       spin_unlock(&ocelot_port->ts_id_lock);
-}
+       ocelot_port->ts_id++;
+       if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
+               ocelot_port->ts_id = 0;
 
-u32 ocelot_ptp_rew_op(struct sk_buff *skb)
-{
-       struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
-       u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
-       u32 rew_op = 0;
+       ocelot_port->ptp_skbs_in_flight++;
+       ocelot->ptp_skbs_in_flight++;
 
-       if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
-               rew_op = ptp_cmd;
-               rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
-       } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
-               rew_op = ptp_cmd;
-       }
+       skb_queue_tail(&ocelot_port->tx_skbs, clone);
+
+       spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
 
-       return rew_op;
+       return 0;
 }
-EXPORT_SYMBOL(ocelot_ptp_rew_op);
 
-static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb)
+static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
+                                      unsigned int ptp_class)
 {
        struct ptp_header *hdr;
-       unsigned int ptp_class;
        u8 msgtype, twostep;
 
-       ptp_class = ptp_classify_raw(skb);
-       if (ptp_class == PTP_CLASS_NONE)
-               return false;
-
        hdr = ptp_parse_header(skb, ptp_class);
        if (!hdr)
                return false;
@@ -641,10 +626,20 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
        u8 ptp_cmd = ocelot_port->ptp_cmd;
+       unsigned int ptp_class;
+       int err;
+
+       /* Don't do anything if PTP timestamping not enabled */
+       if (!ptp_cmd)
+               return 0;
+
+       ptp_class = ptp_classify_raw(skb);
+       if (ptp_class == PTP_CLASS_NONE)
+               return -EINVAL;
 
        /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
        if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
-               if (ocelot_ptp_is_onestep_sync(skb)) {
+               if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
                        OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
                        return 0;
                }
@@ -658,8 +653,12 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
                if (!(*clone))
                        return -ENOMEM;
 
-               ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+               err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+               if (err)
+                       return err;
+
                OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+               OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
        }
 
        return 0;
@@ -693,6 +692,17 @@ static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
        spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
 }
 
+static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
+{
+       struct ptp_header *hdr;
+
+       hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
+       if (WARN_ON(!hdr))
+               return false;
+
+       return seqid == ntohs(hdr->sequence_id);
+}
+
 void ocelot_get_txtstamp(struct ocelot *ocelot)
 {
        int budget = OCELOT_PTP_QUEUE_SZ;
@@ -700,10 +710,10 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
        while (budget--) {
                struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
                struct skb_shared_hwtstamps shhwtstamps;
+               u32 val, id, seqid, txport;
                struct ocelot_port *port;
                struct timespec64 ts;
                unsigned long flags;
-               u32 val, id, txport;
 
                val = ocelot_read(ocelot, SYS_PTP_STATUS);
 
@@ -716,10 +726,17 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
                /* Retrieve the ts ID and Tx port */
                id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
                txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+               seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
 
-               /* Retrieve its associated skb */
                port = ocelot->ports[txport];
 
+               spin_lock(&ocelot->ts_id_lock);
+               port->ptp_skbs_in_flight--;
+               ocelot->ptp_skbs_in_flight--;
+               spin_unlock(&ocelot->ts_id_lock);
+
+               /* Retrieve its associated skb */
+try_again:
                spin_lock_irqsave(&port->tx_skbs.lock, flags);
 
                skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
@@ -732,12 +749,20 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
 
                spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
 
+               if (WARN_ON(!skb_match))
+                       continue;
+
+               if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
+                       dev_err_ratelimited(ocelot->dev,
+                                           "port %d received stale TX timestamp for seqid %d, discarding\n",
+                                           txport, seqid);
+                       dev_kfree_skb_any(skb);
+                       goto try_again;
+               }
+
                /* Get the h/w timestamp */
                ocelot_get_hwtimestamp(ocelot, &ts);
 
-               if (unlikely(!skb_match))
-                       continue;
-
                /* Set the timestamp into the skb */
                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
@@ -1303,14 +1328,19 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
        return mask;
 }
 
-static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot,
+static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
                                      struct net_device *bridge)
 {
+       struct ocelot_port *ocelot_port = ocelot->ports[src_port];
        u32 mask = 0;
        int port;
 
+       if (!ocelot_port || ocelot_port->bridge != bridge ||
+           ocelot_port->stp_state != BR_STATE_FORWARDING)
+               return 0;
+
        for (port = 0; port < ocelot->num_phys_ports; port++) {
-               struct ocelot_port *ocelot_port = ocelot->ports[port];
+               ocelot_port = ocelot->ports[port];
 
                if (!ocelot_port)
                        continue;
@@ -1376,7 +1406,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
                        struct net_device *bridge = ocelot_port->bridge;
                        struct net_device *bond = ocelot_port->bond;
 
-                       mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
+                       mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge);
                        mask |= cpu_fwd_mask;
                        mask &= ~BIT(port);
                        if (bond) {
@@ -1953,7 +1983,6 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
        struct ocelot_port *ocelot_port = ocelot->ports[port];
 
        skb_queue_head_init(&ocelot_port->tx_skbs);
-       spin_lock_init(&ocelot_port->ts_id_lock);
 
        /* Basic L2 initialization */
 
@@ -2086,6 +2115,7 @@ int ocelot_init(struct ocelot *ocelot)
        mutex_init(&ocelot->stats_lock);
        mutex_init(&ocelot->ptp_lock);
        spin_lock_init(&ocelot->ptp_clock_lock);
+       spin_lock_init(&ocelot->ts_id_lock);
        snprintf(queue_name, sizeof(queue_name), "%s-stats",
                 dev_name(ocelot->dev));
        ocelot->stats_queue = create_singlethread_workqueue(queue_name);
index edafbd37d12cbf0e6723195e5d0584688aa0b89e..b8737efd2a8531a0d91c2d1f41484fe9da417be3 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/* Copyright 2020-2021 NXP Semiconductors
+/* Copyright 2020-2021 NXP
  */
 #include <net/devlink.h>
 #include "ocelot.h"
index 08b481a934600350617390534bbb318566fa8442..4b0941f09f718ab25bbc526f56681e59e5a4d6b0 100644 (file)
@@ -2,7 +2,7 @@
 /* Microsemi Ocelot Switch driver
  *
  * Copyright (c) 2017, 2019 Microsemi Corporation
- * Copyright 2020-2021 NXP Semiconductors
+ * Copyright 2020-2021 NXP
  */
 
 #include <linux/if_bridge.h>
index c0c465a4a981ad294a8a89ac3b3dc3674cf7e43b..2545727fd5b2f31288683a8648629f4fb7cb9e3f 100644 (file)
@@ -5,9 +5,10 @@
  * mscc_ocelot_switch_lib.
  *
  * Copyright (c) 2017, 2019 Microsemi Corporation
- * Copyright 2020-2021 NXP Semiconductors
+ * Copyright 2020-2021 NXP
  */
 
+#include <linux/dsa/ocelot.h>
 #include <linux/if_bridge.h>
 #include <linux/of_net.h>
 #include <linux/phy/phy.h>
@@ -1625,7 +1626,7 @@ static int ocelot_port_phylink_create(struct ocelot *ocelot, int port,
        if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
                ocelot_port_rmwl(ocelot_port, 0,
                                 DEV_CLOCK_CFG_MAC_TX_RST |
-                                DEV_CLOCK_CFG_MAC_TX_RST,
+                                DEV_CLOCK_CFG_MAC_RX_RST,
                                 DEV_CLOCK_CFG);
 
        ocelot_port->phy_mode = phy_mode;
index 7945393a06557d972bf8600d6181eada7cfd4892..99d7376a70a748d50d123ace7797a722b5e7f372 100644 (file)
@@ -998,8 +998,8 @@ ocelot_vcap_block_find_filter_by_index(struct ocelot_vcap_block *block,
 }
 
 struct ocelot_vcap_filter *
-ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int cookie,
-                                   bool tc_offload)
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
+                                   unsigned long cookie, bool tc_offload)
 {
        struct ocelot_vcap_filter *filter;
 
index 09c0e839cca5822a4a9ea88774da5abb69672f6d..3b6b2e61139e65df9a719b49768c9d3bb8e36ccb 100644 (file)
@@ -8566,7 +8566,7 @@ static void s2io_io_resume(struct pci_dev *pdev)
                        return;
                }
 
-               if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
+               if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
                        s2io_card_down(sp);
                        pr_err("Can't restore mac addr after reset.\n");
                        return;
index c029950a81e202230ea8b8b4e427bf018643c258..ac1dcfa1d17908fae345db8555de8919fb40db14 100644 (file)
@@ -830,10 +830,6 @@ static int nfp_flower_init(struct nfp_app *app)
        if (err)
                goto err_cleanup;
 
-       err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
-       if (err)
-               goto err_cleanup;
-
        if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
                nfp_flower_qos_init(app);
 
@@ -942,7 +938,20 @@ static int nfp_flower_start(struct nfp_app *app)
                        return err;
        }
 
-       return nfp_tunnel_config_start(app);
+       err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
+       if (err)
+               return err;
+
+       err = nfp_tunnel_config_start(app);
+       if (err)
+               goto err_tunnel_config;
+
+       return 0;
+
+err_tunnel_config:
+       flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
+                                nfp_flower_setup_indr_tc_release);
+       return err;
 }
 
 static void nfp_flower_stop(struct nfp_app *app)
index 556c3495211da4fdce712821c7bc7f9334a94123..64c0ef57ad42634a0d521f5d521084e5d44a1662 100644 (file)
@@ -1767,9 +1767,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
        struct nfp_flower_indr_block_cb_priv *cb_priv;
        struct nfp_flower_priv *priv = app->priv;
 
-       /* All callback list access should be protected by RTNL. */
-       ASSERT_RTNL();
-
        list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
                if (cb_priv->netdev == netdev)
                        return cb_priv;
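
The nfp change above defers indirect TC callback registration from init to start, and the new error path unregisters it again when tunnel configuration fails, so a half-started app never leaves a dangling callback behind. The general shape of that unwind, as a small self-contained C sketch (the function names are stand-ins, not the driver's):

#include <stdio.h>

static int register_indirect_cb(void)    { puts("register indirect cb");   return 0; }
static void unregister_indirect_cb(void) { puts("unregister indirect cb"); }
static int start_tunnel_config(void)     { puts("tunnel config failed");   return -1; }

/* Acquire in order; on failure release only what was already acquired,
 * in reverse order, and report the original error.
 */
static int flower_start(void)
{
        int err;

        err = register_indirect_cb();
        if (err)
                return err;

        err = start_tunnel_config();
        if (err)
                goto err_unregister;

        return 0;

err_unregister:
        unregister_indirect_cb();
        return err;
}

int main(void)
{
        return flower_start() ? 1 : 0;
}
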
index 381966e8f55722188f96ab9e4290d3f57edab55b..7f3322ce044c7c5d91cffd6e2002daf1a9c26e5b 100644 (file)
@@ -1292,8 +1292,10 @@ int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
        if (err && err != -EEXIST) {
                /* set the state back to NEW so we can try again later */
                f = ionic_rx_filter_by_addr(lif, addr);
-               if (f && f->state == IONIC_FILTER_STATE_SYNCED)
+               if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
                        f->state = IONIC_FILTER_STATE_NEW;
+                       set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
+               }
 
                spin_unlock_bh(&lif->rx_filters.lock);
 
@@ -1377,6 +1379,10 @@ static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
 
 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
 {
+       /* Don't delete our own address from the uc list */
+       if (ether_addr_equal(addr, netdev->dev_addr))
+               return 0;
+
        return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
 }
 
index 25ecfcfa1281fd12256cd468e16c5fcdbe816289..69728f9013cbbfa2049973db8f9cc00b28be248b 100644 (file)
@@ -349,9 +349,6 @@ loop_out:
        list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
                (void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr);
 
-               if (sync_item->f.state != IONIC_FILTER_STATE_SYNCED)
-                       set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
-
                list_del(&sync_item->list);
                devm_kfree(dev, sync_item);
        }
index 58a854666c62b1d2a424df87442c4c57f0709c88..c14de5fcedea37c474e07e38b2ae33953ced0307 100644 (file)
@@ -380,15 +380,6 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
                                          &ionic_dbg_intr_stats_desc[i]);
                (*buf)++;
        }
-       for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
-               **buf = IONIC_READ_STAT64(&txqcq->napi_stats,
-                                         &ionic_dbg_napi_stats_desc[i]);
-               (*buf)++;
-       }
-       for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
-               **buf = txqcq->napi_stats.work_done_cntr[i];
-               (*buf)++;
-       }
        for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
                **buf = txstats->sg_cntr[i];
                (*buf)++;
index fc8b3e64f153a40602124a8316c2f0f525bb3444..186d0048a9d14e924ed84e9e8a4498f3d0c459c7 100644 (file)
@@ -1297,6 +1297,14 @@ qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
        prev_weight = weight;
 
        while (weight) {
+               /* If the HW device is during recovery, all resources are
+                * immediately reset without receiving a per-cid indication
+                * from HW. In this case we don't expect the cid_map to be
+                * cleared.
+                */
+               if (p_hwfn->cdev->recov_in_prog)
+                       return 0;
+
                msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
 
                weight = bitmap_weight(bmap->bitmap, bmap->max_count);
index 15ef59aa34ff56fbec1f31cbb21db3e44bd408a7..d10e1cd6d2ba96b1a75575f468c3a21ad1625d73 100644 (file)
@@ -1299,6 +1299,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
                        } else {
                                DP_NOTICE(cdev,
                                          "Failed to acquire PTT for aRFS\n");
+                               rc = -EINVAL;
                                goto err;
                        }
                }
index 6e5a6cc97d0eb694767183add5afdc3936bee499..24cd415677756984047c5d3307066664089c4c14 100644 (file)
@@ -3367,6 +3367,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
                          struct qed_nvm_image_att *p_image_att)
 {
        enum nvm_image_type type;
+       int rc;
        u32 i;
 
        /* Translate image_id into MFW definitions */
@@ -3395,7 +3396,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
                return -EINVAL;
        }
 
-       qed_mcp_nvm_info_populate(p_hwfn);
+       rc = qed_mcp_nvm_info_populate(p_hwfn);
+       if (rc)
+               return rc;
+
        for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
                if (type == p_hwfn->nvm_info.image_att[i].image_type)
                        break;
index f16a157bb95a08d58b84541921cdd16fac960d84..cf5baa5e59bcc91573ff5859f7826629c27767f8 100644 (file)
@@ -77,6 +77,14 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
         * Beyond the added delay we clear the bitmap anyway.
         */
        while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+               /* If the HW device is during recovery, all resources are
+                * immediately reset without receiving a per-cid indication
+                * from HW. In this case we don't expect the cid bitmap to be
+                * cleared.
+                */
+               if (p_hwfn->cdev->recov_in_prog)
+                       return;
+
                msleep(100);
                if (wait_count++ > 20) {
                        DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
index 0a2f34fc8b24a1d9e71ac16c7efd0c211223cdc9..27dffa299ca6f45dac1dcc6851e0378046334bff 100644 (file)
@@ -1354,10 +1354,10 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
        struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
        const struct firmware *fw = fw_info->fw;
        u32 dest, *p_cache, *temp;
-       int i, ret = -EIO;
        __le32 *temp_le;
        u8 data[16];
        size_t size;
+       int i, ret;
        u64 addr;
 
        temp = vzalloc(fw->size);
index 4b2eca5e08e29a43b5502c91bd9b6df0eef4ed28..01ef5efd7bc2ac36e12dfea5ea5b9841a1cfcac1 100644 (file)
 #define PHY_ST         0x8A    /* PHY status register */
 #define MAC_SM         0xAC    /* MAC status machine */
 #define  MAC_SM_RST    0x0002  /* MAC status machine reset */
+#define MD_CSC         0xb6    /* MDC speed control register */
+#define  MD_CSC_DEFAULT        0x0030
 #define MAC_ID         0xBE    /* Identifier register */
 
 #define TX_DCNT                0x80    /* TX descriptor count */
@@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
 {
        void __iomem *ioaddr = lp->base;
        int limit = MAC_DEF_TIMEOUT;
-       u16 cmd;
+       u16 cmd, md_csc;
 
+       md_csc = ioread16(ioaddr + MD_CSC);
        iowrite16(MAC_RST, ioaddr + MCR1);
        while (limit--) {
                cmd = ioread16(ioaddr + MCR1);
@@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
        iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
        iowrite16(0, ioaddr + MAC_SM);
        mdelay(5);
+
+       /* Restore MDIO clock frequency */
+       if (md_csc != MD_CSC_DEFAULT)
+               iowrite16(md_csc, ioaddr + MD_CSC);
 }
 
 static void r6040_init_mac_regs(struct net_device *dev)
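
The r6040 hunk above saves the MDC speed control register before soft-resetting the MAC and writes it back only when the boot firmware had programmed a non-default divider. The save/check/restore idea in isolation, as a compilable sketch with stand-in register accessors (the offsets mirror the defines above; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define MD_CSC          0xb6            /* MDC speed control register */
#define MD_CSC_DEFAULT  0x0030

static uint16_t regs[256];              /* fake register file */

static uint16_t reg_read(int off)             { return regs[off]; }
static void reg_write(int off, uint16_t val)  { regs[off] = val; }

static void reset_mac(void)
{
        uint16_t md_csc = reg_read(MD_CSC);     /* save before reset */

        regs[MD_CSC] = MD_CSC_DEFAULT;          /* the soft reset reverts it */

        /* Restore only if the firmware had set a non-default MDIO clock */
        if (md_csc != MD_CSC_DEFAULT)
                reg_write(MD_CSC, md_csc);
}

int main(void)
{
        regs[MD_CSC] = 0x0010;          /* pretend firmware chose a faster MDC */
        reset_mac();
        printf("MD_CSC after reset: 0x%04x\n", reg_read(MD_CSC));
        return 0;
}
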
index e5b0d795c3012c40a393395016a74a434d0ef4c7..3dbea028b325ceed9ad813263527cd8963740db8 100644 (file)
@@ -166,32 +166,46 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
         * We need a channel per event queue, plus a VI per tx queue.
         * This may be more pessimistic than it needs to be.
         */
-       if (n_channels + n_xdp_ev > max_channels) {
-               netif_err(efx, drv, efx->net_dev,
-                         "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
-                         n_xdp_ev, n_channels, max_channels);
-               netif_err(efx, drv, efx->net_dev,
-                         "XDP_TX and XDP_REDIRECT will not work on this interface");
-               efx->n_xdp_channels = 0;
-               efx->xdp_tx_per_channel = 0;
-               efx->xdp_tx_queue_count = 0;
+       if (n_channels >= max_channels) {
+               efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+               netif_warn(efx, drv, efx->net_dev,
+                          "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+                          n_xdp_ev, n_channels, max_channels);
+               netif_warn(efx, drv, efx->net_dev,
+                          "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
        } else if (n_channels + n_xdp_tx > efx->max_vis) {
-               netif_err(efx, drv, efx->net_dev,
-                         "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
-                         n_xdp_tx, n_channels, efx->max_vis);
-               netif_err(efx, drv, efx->net_dev,
-                         "XDP_TX and XDP_REDIRECT will not work on this interface");
-               efx->n_xdp_channels = 0;
-               efx->xdp_tx_per_channel = 0;
-               efx->xdp_tx_queue_count = 0;
+               efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+               netif_warn(efx, drv, efx->net_dev,
+                          "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
+                          n_xdp_tx, n_channels, efx->max_vis);
+               netif_warn(efx, drv, efx->net_dev,
+                          "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
+       } else if (n_channels + n_xdp_ev > max_channels) {
+               efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
+               netif_warn(efx, drv, efx->net_dev,
+                          "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+                          n_xdp_ev, n_channels, max_channels);
+
+               n_xdp_ev = max_channels - n_channels;
+               netif_warn(efx, drv, efx->net_dev,
+                          "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
+                          DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
        } else {
+               efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
+       }
+
+       if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
                efx->n_xdp_channels = n_xdp_ev;
                efx->xdp_tx_per_channel = tx_per_ev;
                efx->xdp_tx_queue_count = n_xdp_tx;
                n_channels += n_xdp_ev;
                netif_dbg(efx, drv, efx->net_dev,
                          "Allocating %d TX and %d event queues for XDP\n",
-                         n_xdp_tx, n_xdp_ev);
+                         n_xdp_ev * tx_per_ev, n_xdp_ev);
+       } else {
+               efx->n_xdp_channels = 0;
+               efx->xdp_tx_per_channel = 0;
+               efx->xdp_tx_queue_count = n_xdp_tx;
        }
 
        if (vec_count < n_channels) {
@@ -858,6 +872,20 @@ rollback:
        goto out;
 }
 
+static inline int
+efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+                    struct efx_tx_queue *tx_queue)
+{
+       if (xdp_queue_number >= efx->xdp_tx_queue_count)
+               return -EINVAL;
+
+       netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+                 tx_queue->channel->channel, tx_queue->label,
+                 xdp_queue_number, tx_queue->queue);
+       efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+       return 0;
+}
+
 int efx_set_channels(struct efx_nic *efx)
 {
        struct efx_tx_queue *tx_queue;
@@ -896,20 +924,9 @@ int efx_set_channels(struct efx_nic *efx)
                        if (efx_channel_is_xdp_tx(channel)) {
                                efx_for_each_channel_tx_queue(tx_queue, channel) {
                                        tx_queue->queue = next_queue++;
-
-                                       /* We may have a few left-over XDP TX
-                                        * queues owing to xdp_tx_queue_count
-                                        * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
-                                        * We still allocate and probe those
-                                        * TXQs, but never use them.
-                                        */
-                                       if (xdp_queue_number < efx->xdp_tx_queue_count) {
-                                               netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
-                                                         channel->channel, tx_queue->label,
-                                                         xdp_queue_number, tx_queue->queue);
-                                               efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+                                       rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+                                       if (rc == 0)
                                                xdp_queue_number++;
-                                       }
                                }
                        } else {
                                efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -918,10 +935,35 @@ int efx_set_channels(struct efx_nic *efx)
                                                  channel->channel, tx_queue->label,
                                                  tx_queue->queue);
                                }
+
+                               /* If XDP is borrowing queues from net stack, it must use the queue
+                                * with no csum offload, which is the first one of the channel
+                                * (note: channel->tx_queue_by_type is not initialized yet)
+                                */
+                               if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+                                       tx_queue = &channel->tx_queue[0];
+                                       rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+                                       if (rc == 0)
+                                               xdp_queue_number++;
+                               }
                        }
                }
        }
-       WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
+       WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+               xdp_queue_number != efx->xdp_tx_queue_count);
+       WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+               xdp_queue_number > efx->xdp_tx_queue_count);
+
+       /* If we have more CPUs than assigned XDP TX queues, assign the already
+        * existing queues to the exceeding CPUs
+        */
+       next_queue = 0;
+       while (xdp_queue_number < efx->xdp_tx_queue_count) {
+               tx_queue = efx->xdp_tx_queues[next_queue++];
+               rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+               if (rc == 0)
+                       xdp_queue_number++;
+       }
 
        rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
        if (rc)
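
The efx_channels change above introduces dedicated, shared and borrowed XDP TX queue modes; when fewer queues than CPUs are available, the final loop wraps the assignment so the extra CPUs reuse existing queues instead of losing XDP_TX entirely. A compact standalone model of that wrap-around mapping (plain C; a simplification of the logic, not the driver code):

#include <stdio.h>

/* Map each CPU to an XDP TX queue index.  When there are fewer queues
 * than CPUs the mapping wraps, which is exactly why the shared and
 * borrowed modes need per-queue locking on the transmit path.
 */
static void map_cpus_to_xdp_queues(int ncpus, int nqueues, int cpu_to_queue[])
{
        int cpu, next = 0;

        for (cpu = 0; cpu < ncpus; cpu++) {
                cpu_to_queue[cpu] = next;
                next = (next + 1) % nqueues;
        }
}

int main(void)
{
        int map[8];
        int cpu;

        map_cpus_to_xdp_queues(8, 3, map);      /* e.g. 8 CPUs, 3 XDP queues */
        for (cpu = 0; cpu < 8; cpu++)
                printf("cpu %d -> xdp txq %d\n", cpu, map[cpu]);
        return 0;
}
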
index 9b4b25704271041f3bebdc91d24d45ac4b2d0ff7..f6981810039d06dca945385e2796eda8bf57719d 100644 (file)
@@ -782,6 +782,12 @@ struct efx_async_filter_insertion {
 #define EFX_RPS_MAX_IN_FLIGHT  8
 #endif /* CONFIG_RFS_ACCEL */
 
+enum efx_xdp_tx_queues_mode {
+       EFX_XDP_TX_QUEUES_DEDICATED,    /* one queue per core, locking not needed */
+       EFX_XDP_TX_QUEUES_SHARED,       /* each queue used by more than 1 core */
+       EFX_XDP_TX_QUEUES_BORROWED      /* queues borrowed from net stack */
+};
+
 /**
  * struct efx_nic - an Efx NIC
  * @name: Device name (net device name or bus id before net device registered)
@@ -820,6 +826,7 @@ struct efx_async_filter_insertion {
  *     should be allocated for this NIC
  * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
  * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
+ * @xdp_txq_queues_mode: XDP TX queues sharing strategy.
  * @rxq_entries: Size of receive queues requested by user.
  * @txq_entries: Size of transmit queues requested by user.
  * @txq_stop_thresh: TX queue fill level at or above which we stop it.
@@ -979,6 +986,7 @@ struct efx_nic {
 
        unsigned int xdp_tx_queue_count;
        struct efx_tx_queue **xdp_tx_queues;
+       enum efx_xdp_tx_queues_mode xdp_txq_queues_mode;
 
        unsigned rxq_entries;
        unsigned txq_entries;
index 0c6650d2e23997cd97304a76c7cb739f2115199b..d16e031e95f44d09c4fcc37456ef68bf23e8aa6c 100644 (file)
@@ -428,23 +428,32 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
        unsigned int len;
        int space;
        int cpu;
-       int i;
+       int i = 0;
 
-       cpu = raw_smp_processor_id();
+       if (unlikely(n && !xdpfs))
+               return -EINVAL;
+       if (unlikely(!n))
+               return 0;
 
-       if (!efx->xdp_tx_queue_count ||
-           unlikely(cpu >= efx->xdp_tx_queue_count))
+       cpu = raw_smp_processor_id();
+       if (unlikely(cpu >= efx->xdp_tx_queue_count))
                return -EINVAL;
 
        tx_queue = efx->xdp_tx_queues[cpu];
        if (unlikely(!tx_queue))
                return -EINVAL;
 
-       if (unlikely(n && !xdpfs))
-               return -EINVAL;
+       if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+               HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
 
-       if (!n)
-               return 0;
+       /* If we're borrowing net stack queues we have to handle stop-restart
+        * or we might block the queue and it will be considered as frozen
+        */
+       if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+               if (netif_tx_queue_stopped(tx_queue->core_txq))
+                       goto unlock;
+               efx_tx_maybe_stop_queue(tx_queue);
+       }
 
        /* Check for available space. We should never need multiple
         * descriptors per frame.
@@ -484,6 +493,10 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
        if (flush && i > 0)
                efx_nic_push_buffers(tx_queue);
 
+unlock:
+       if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+               HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
+
        return i == 0 ? -EIO : i;
 }
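
In the efx_xdp_tx_buffers() change above, the netdev TX lock is taken only when the queue may be shared with other CPUs or borrowed from the regular stack. A condensed standalone model of that decision, with a pthread mutex standing in for HARD_TX_LOCK (illustrative only):

#include <pthread.h>
#include <stdio.h>

enum xdp_txq_mode { XDP_TXQ_DEDICATED, XDP_TXQ_SHARED, XDP_TXQ_BORROWED };

struct xdp_txq {
        enum xdp_txq_mode mode;
        pthread_mutex_t lock;           /* stands in for the core netdev TX lock */
        unsigned long sent;
};

static void xdp_xmit(struct xdp_txq *q, int nframes)
{
        /* A queue owned by exactly one CPU needs no locking; a shared or
         * borrowed queue can be hit by several CPUs, or by the regular
         * stack, so access must be serialized.
         */
        if (q->mode != XDP_TXQ_DEDICATED)
                pthread_mutex_lock(&q->lock);

        q->sent += nframes;

        if (q->mode != XDP_TXQ_DEDICATED)
                pthread_mutex_unlock(&q->lock);
}

int main(void)
{
        struct xdp_txq q = { .mode = XDP_TXQ_SHARED, .lock = PTHREAD_MUTEX_INITIALIZER };

        xdp_xmit(&q, 4);
        printf("sent %lu frames\n", q.sent);
        return 0;
}
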
 
index fbfda55b4c5263112070323a97f7baf39c516855..5e731a72cce81a4c2d50dbece319465588e3df70 100644 (file)
@@ -71,6 +71,7 @@ err_remove_config_dt:
 
 static const struct of_device_id dwmac_generic_match[] = {
        { .compatible = "st,spear600-gmac"},
+       { .compatible = "snps,dwmac-3.40a"},
        { .compatible = "snps,dwmac-3.50a"},
        { .compatible = "snps,dwmac-3.610"},
        { .compatible = "snps,dwmac-3.70a"},
index ed817011a94a09e68d353344756e322c84c1eb2c..6924a6aacbd53c8310a1476b82d5978f87094b5b 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/delay.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
+#include <linux/pm_runtime.h>
 
 #include "stmmac_platform.h"
 
@@ -1528,6 +1529,8 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
                return ret;
        }
 
+       pm_runtime_get_sync(dev);
+
        if (bsp_priv->integrated_phy)
                rk_gmac_integrated_phy_powerup(bsp_priv);
 
@@ -1539,6 +1542,8 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
        if (gmac->integrated_phy)
                rk_gmac_integrated_phy_powerdown(gmac);
 
+       pm_runtime_put_sync(&gmac->pdev->dev);
+
        phy_power_on(gmac, false);
        gmac_clk_enable(gmac, false);
 }
index 90383abafa66acbba638c95e076b6a7fc7598b3e..f5581db0ba9bacb040d5328866ab2bc47b2029e0 100644 (file)
@@ -218,11 +218,18 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
                                readl(ioaddr + DMA_BUS_MODE + i * 4);
 }
 
-static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
-                                    struct dma_features *dma_cap)
+static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
+                                   struct dma_features *dma_cap)
 {
        u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
 
+       if (!hw_cap) {
+               /* 0x00000000 is the value read on old hardware that does not
+                * implement this register
+                */
+               return -EOPNOTSUPP;
+       }
+
        dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
        dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
        dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
@@ -252,6 +259,8 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
        dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
        /* Alternate (enhanced) DESC mode */
        dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
+
+       return 0;
 }
 
 static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
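
The dwmac1000 hunk above turns a capability register that reads as all-zeroes on old cores into an explicit -EOPNOTSUPP instead of a silently empty feature set. A small self-contained model of what a caller gains from that (stand-in types and names, not the stmmac code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct dma_features { bool mbps_1000; };

/* Stand-in for the callback: a register value of 0 means the core
 * predates DMA_HW_FEATURE, so report -EOPNOTSUPP rather than a
 * capability set with every bit cleared.
 */
static int get_hw_feature(unsigned int hw_cap_reg, struct dma_features *cap)
{
        if (!hw_cap_reg)
                return -EOPNOTSUPP;

        cap->mbps_1000 = hw_cap_reg & (1u << 1);
        return 0;
}

int main(void)
{
        struct dma_features cap = { 0 };
        bool hw_cap_support = (get_hw_feature(0, &cap) == 0);

        printf("HW capability register %s\n",
               hw_cap_support ? "present" : "absent, falling back to platform defaults");
        return 0;
}
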
index 5be8e6a631d9b9ccdda9a25217ac6cc72efbea6a..d99fa028c6468ef988482c80dc5a941283e530f9 100644 (file)
@@ -347,8 +347,8 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
        writel(mtl_tx_op, ioaddr +  MTL_CHAN_TX_OP_MODE(channel));
 }
 
-static void dwmac4_get_hw_feature(void __iomem *ioaddr,
-                                 struct dma_features *dma_cap)
+static int dwmac4_get_hw_feature(void __iomem *ioaddr,
+                                struct dma_features *dma_cap)
 {
        u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
 
@@ -437,6 +437,8 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
        dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
        dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
        dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5;
+
+       return 0;
 }
 
 /* Enable/disable TSO feature and set MSS */
index 906e985441a93b17c11bb89bd8554ad4961fa917..5e98355f422b39e22110bded362de1939f85ce3f 100644 (file)
@@ -371,8 +371,8 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
        return ret;
 }
 
-static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
-                                   struct dma_features *dma_cap)
+static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
+                                  struct dma_features *dma_cap)
 {
        u32 hw_cap;
 
@@ -445,6 +445,8 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
        dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
        dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
        dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
+
+       return 0;
 }
 
 static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue)
index 6dc1c98ebec82aba543d80ee9489e62c5d724613..fe2660d5694d7994db088195c5126afd4a61a335 100644 (file)
@@ -203,8 +203,8 @@ struct stmmac_dma_ops {
        int (*dma_interrupt) (void __iomem *ioaddr,
                              struct stmmac_extra_stats *x, u32 chan, u32 dir);
        /* If supported then get the optional core features */
-       void (*get_hw_feature)(void __iomem *ioaddr,
-                              struct dma_features *dma_cap);
+       int (*get_hw_feature)(void __iomem *ioaddr,
+                             struct dma_features *dma_cap);
        /* Program the HW RX Watchdog */
        void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue);
        void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
@@ -255,7 +255,7 @@ struct stmmac_dma_ops {
 #define stmmac_dma_interrupt_status(__priv, __args...) \
        stmmac_do_callback(__priv, dma, dma_interrupt, __args)
 #define stmmac_get_hw_feature(__priv, __args...) \
-       stmmac_do_void_callback(__priv, dma, get_hw_feature, __args)
+       stmmac_do_callback(__priv, dma, get_hw_feature, __args)
 #define stmmac_rx_watchdog(__priv, __args...) \
        stmmac_do_void_callback(__priv, dma, rx_watchdog, __args)
 #define stmmac_set_tx_ring_len(__priv, __args...) \
index ece02b35a6cec4a7e6652f678dbac61705e2a3d5..eb3b7bf771d7b3a67d7aab18ce1478eeab3ce8e1 100644 (file)
@@ -309,7 +309,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
                        priv->clk_csr = STMMAC_CSR_100_150M;
                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
                        priv->clk_csr = STMMAC_CSR_150_250M;
-               else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+               else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
                        priv->clk_csr = STMMAC_CSR_250_300M;
        }
 
@@ -477,6 +477,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                        stmmac_lpi_entry_timer_config(priv, 0);
                        del_timer_sync(&priv->eee_ctrl_timer);
                        stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
+                       if (priv->hw->xpcs)
+                               xpcs_config_eee(priv->hw->xpcs,
+                                               priv->plat->mult_fact_100ns,
+                                               false);
                }
                mutex_unlock(&priv->lock);
                return false;
@@ -486,6 +490,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
                stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
                                     eee_tw_timer);
+               if (priv->hw->xpcs)
+                       xpcs_config_eee(priv->hw->xpcs,
+                                       priv->plat->mult_fact_100ns,
+                                       true);
        }
 
        if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
@@ -1034,7 +1042,7 @@ static void stmmac_mac_link_down(struct phylink_config *config,
        stmmac_mac_set(priv, priv->ioaddr, false);
        priv->eee_active = false;
        priv->tx_lpi_enabled = false;
-       stmmac_eee_init(priv);
+       priv->eee_enabled = stmmac_eee_init(priv);
        stmmac_set_eee_pls(priv, priv->hw, false);
 
        if (priv->dma_cap.fpesel)
@@ -7118,7 +7126,6 @@ int stmmac_suspend(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);
        u32 chan;
-       int ret;
 
        if (!ndev || !netif_running(ndev))
                return 0;
@@ -7150,13 +7157,6 @@ int stmmac_suspend(struct device *dev)
        } else {
                stmmac_mac_set(priv, priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
-               /* Disable clock in case of PWM is off */
-               clk_disable_unprepare(priv->plat->clk_ptp_ref);
-               ret = pm_runtime_force_suspend(dev);
-               if (ret) {
-                       mutex_unlock(&priv->lock);
-                       return ret;
-               }
        }
 
        mutex_unlock(&priv->lock);
@@ -7242,12 +7242,6 @@ int stmmac_resume(struct device *dev)
                priv->irq_wake = 0;
        } else {
                pinctrl_pm_select_default_state(priv->device);
-               /* enable the clk previously disabled */
-               ret = pm_runtime_force_resume(dev);
-               if (ret)
-                       return ret;
-               if (priv->plat->clk_ptp_ref)
-                       clk_prepare_enable(priv->plat->clk_ptp_ref);
                /* reset the phy so that it's ready */
                if (priv->mii)
                        stmmac_mdio_reset(priv->mii);
index 5ca710844cc1eb9a1c41dbcbca92bdd8d2218d14..232ac98943cd08a66a4bd65b53c2d100843d29f1 100644 (file)
@@ -9,6 +9,7 @@
 *******************************************************************************/
 
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/module.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -507,6 +508,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
                plat->pmt = 1;
        }
 
+       if (of_device_is_compatible(np, "snps,dwmac-3.40a")) {
+               plat->has_gmac = 1;
+               plat->enh_desc = 1;
+               plat->tx_coe = 1;
+               plat->bugged_jumbo = 1;
+               plat->pmt = 1;
+       }
+
        if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
            of_device_is_compatible(np, "snps,dwmac-4.10a") ||
            of_device_is_compatible(np, "snps,dwmac-4.20a") ||
@@ -771,9 +780,52 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev)
        return stmmac_bus_clks_config(priv, true);
 }
 
+static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       int ret;
+
+       if (!netif_running(ndev))
+               return 0;
+
+       if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+               /* Disable clock in case of PWM is off */
+               clk_disable_unprepare(priv->plat->clk_ptp_ref);
+
+               ret = pm_runtime_force_suspend(dev);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       int ret;
+
+       if (!netif_running(ndev))
+               return 0;
+
+       if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+               /* enable the clk previously disabled */
+               ret = pm_runtime_force_resume(dev);
+               if (ret)
+                       return ret;
+
+               clk_prepare_enable(priv->plat->clk_ptp_ref);
+       }
+
+       return 0;
+}
+
 const struct dev_pm_ops stmmac_pltfr_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
        SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
 };
 EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
 
index 309de38a7530432e6d0bf56337002ff4d694683b..b0d3f9a2950c023333a88dca8b8e44614af547f0 100644 (file)
@@ -73,6 +73,7 @@ config CASSINI
 config SUNVNET_COMMON
        tristate "Common routines to support Sun Virtual Networking"
        depends on SUN_LDOMS
+       depends on INET
        default m
 
 config SUNVNET
index 8fe8887d506a3e269d34542b75486624bdc03a3e..6192244b304ab976e0b17f7fd3eb568585f16a54 100644 (file)
@@ -68,9 +68,9 @@
 #define SIXP_DAMA_OFF          0
 
 /* default level 2 parameters */
-#define SIXP_TXDELAY                   (HZ/4)  /* in 1 s */
+#define SIXP_TXDELAY                   25      /* 250 ms */
 #define SIXP_PERSIST                   50      /* in 256ths */
-#define SIXP_SLOTTIME                  (HZ/10) /* in 1 s */
+#define SIXP_SLOTTIME                  10      /* 100 ms */
 #define SIXP_INIT_RESYNC_TIMEOUT       (3*HZ/2) /* in 1 s */
 #define SIXP_RESYNC_TIMEOUT            5*HZ    /* in 1 s */
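
The 6pack change above replaces HZ-based defaults with values expressed in the protocol's 10 ms ticks, as the patch's own comments state, so the effective delays no longer vary with the kernel's HZ setting. The arithmetic as a quick check (plain C):

#include <stdio.h>

int main(void)
{
        int txdelay = 25, slottime = 10;        /* protocol units of 10 ms */

        printf("txdelay  = %d ms\n", txdelay * 10);     /* 250 ms */
        printf("slottime = %d ms\n", slottime * 10);    /* 100 ms */
        return 0;
}
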
 
index f4843f9672c1a594bb60073ec77fd478ea7dc7a0..441da03c23ee46cf720260880c09acf6eaf1f6cd 100644 (file)
@@ -48,6 +48,7 @@ config BPQETHER
 config DMASCC
        tristate "High-speed (DMA) SCC driver for AX.25"
        depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API
+       depends on VIRT_TO_BUS
        help
          This is a driver for high-speed SCC boards, i.e. those supporting
          DMA on one port. You usually use those boards to connect your
index b50b7fafd8d6c525d34a05de696acefa496605ac..f4c3efc3e0745c621d8e1ff6bea66b1c8efb5be6 100644 (file)
@@ -973,7 +973,7 @@ static inline void tx_on(struct scc_priv *priv)
                flags = claim_dma_lock();
                set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
                set_dma_addr(priv->param.dma,
-                            (int) priv->tx_buf[priv->tx_tail] + n);
+                            virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
                set_dma_count(priv->param.dma,
                              priv->tx_len[priv->tx_tail] - n);
                release_dma_lock(flags);
@@ -1020,7 +1020,7 @@ static inline void rx_on(struct scc_priv *priv)
                flags = claim_dma_lock();
                set_dma_mode(priv->param.dma, DMA_MODE_READ);
                set_dma_addr(priv->param.dma,
-                            (int) priv->rx_buf[priv->rx_head]);
+                            virt_to_bus(priv->rx_buf[priv->rx_head]));
                set_dma_count(priv->param.dma, BUF_SIZE);
                release_dma_lock(flags);
                enable_dma(priv->param.dma);
@@ -1233,7 +1233,7 @@ static void special_condition(struct scc_priv *priv, int rc)
                if (priv->param.dma >= 0) {
                        flags = claim_dma_lock();
                        set_dma_addr(priv->param.dma,
-                                    (int) priv->rx_buf[priv->rx_head]);
+                                    virt_to_bus(priv->rx_buf[priv->rx_head]));
                        set_dma_count(priv->param.dma, BUF_SIZE);
                        release_dma_lock(flags);
                } else {
index 8f99cfa14680ae22bd81644b60187f4244ece5ae..d037682fb7adb8ab20e617fbc3053bd258b85e3c 100644 (file)
@@ -4,6 +4,7 @@ config QCOM_IPA
        depends on ARCH_QCOM || COMPILE_TEST
        depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
        select QCOM_MDT_LOADER if ARCH_QCOM
+       select QCOM_SCM
        select QCOM_QMI_HELPERS
        help
          Choose Y or M here to include support for the Qualcomm
index 2324e1b93e37dc9a95f95157c25117b03018246f..1da334f54944a0cbe36f96b13fd9a4e5fd2d9370 100644 (file)
@@ -430,7 +430,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
         * table region determines the number of entries it has.
         */
        if (filter) {
-               count = hweight32(ipa->filter_map);
+               /* Include one extra "slot" to hold the filter map itself */
+               count = 1 + hweight32(ipa->filter_map);
                hash_count = hash_mem->size ? count : 0;
        } else {
                count = mem->size / sizeof(__le64);
index 0d7d3e15d2f015d0c2543dcb3835d6045cd9b742..5f4cd24a0241d33f31450d7ee6a979ad34c5fa68 100644 (file)
@@ -207,6 +207,7 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
 {
        struct ipq4019_mdio_data *priv;
        struct mii_bus *bus;
+       struct resource *res;
        int ret;
 
        bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
@@ -224,7 +225,10 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
                return PTR_ERR(priv->mdio_clk);
 
        /* The platform resource is provided on the chipset IPQ5018 */
-       priv->eth_ldo_rdy = devm_platform_ioremap_resource(pdev, 1);
+       /* This resource is optional */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (res)
+               priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res);
 
        bus->name = "ipq4019_mdio";
        bus->read = ipq4019_mdio_read;
index 1ee592d3eae46dc0a755c534134c25664ac0dbe8..17f98f609ec823c0573538b7b00707e1f58d41b9 100644 (file)
@@ -134,8 +134,9 @@ static int mscc_miim_reset(struct mii_bus *bus)
 
 static int mscc_miim_probe(struct platform_device *pdev)
 {
-       struct mii_bus *bus;
        struct mscc_miim_dev *dev;
+       struct resource *res;
+       struct mii_bus *bus;
        int ret;
 
        bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev));
@@ -156,10 +157,14 @@ static int mscc_miim_probe(struct platform_device *pdev)
                return PTR_ERR(dev->regs);
        }
 
-       dev->phy_regs = devm_platform_ioremap_resource(pdev, 1);
-       if (IS_ERR(dev->phy_regs)) {
-               dev_err(&pdev->dev, "Unable to map internal phy registers\n");
-               return PTR_ERR(dev->phy_regs);
+       /* This resource is optional */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (res) {
+               dev->phy_regs = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(dev->phy_regs)) {
+                       dev_err(&pdev->dev, "Unable to map internal phy registers\n");
+                       return PTR_ERR(dev->phy_regs);
+               }
        }
 
        ret = of_mdiobus_register(bus, pdev->dev.of_node);
index d127eb6e9257f6e39acb6321ea0eb55ee64f6d5d..aaa628f859fd4f3af8591e90b9f5fe0a9e569350 100644 (file)
@@ -321,7 +321,7 @@ static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
        /* Start MHI channels */
        err = mhi_prepare_for_transfer(mhi_dev);
        if (err)
-               goto out_err;
+               return err;
 
        /* Number of transfer descriptors determines size of the queue */
        mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
@@ -331,10 +331,6 @@ static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
                return err;
 
        return 0;
-
-out_err:
-       free_netdev(ndev);
-       return err;
 }
 
 static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
index 984c9f7f16a83bb0733defb4b634d931dcae61bf..d16fc58cd48d3d8d877dc02ad2096c9a91244221 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2021 NXP Semiconductors
+/* Copyright 2021 NXP
  */
 #include <linux/pcs/pcs-xpcs.h>
 #include "pcs-xpcs.h"
index fb0a83dc09acc02010c6c5a2597b11911ec65b69..7de631f5356fc832588fe54aef0168f32c575966 100644 (file)
@@ -666,6 +666,10 @@ int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
 {
        int ret;
 
+       ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0);
+       if (ret < 0)
+               return ret;
+
        if (enable) {
        /* Enable EEE */
                ret = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
@@ -673,9 +677,6 @@ int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
                      DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
                      mult_fact_100ns << DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT;
        } else {
-               ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0);
-               if (ret < 0)
-                       return ret;
                ret &= ~(DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
                       DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
                       DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
@@ -690,21 +691,28 @@ int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
        if (ret < 0)
                return ret;
 
-       ret |= DW_VR_MII_EEE_TRN_LPI;
+       if (enable)
+               ret |= DW_VR_MII_EEE_TRN_LPI;
+       else
+               ret &= ~DW_VR_MII_EEE_TRN_LPI;
+
        return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1, ret);
 }
 EXPORT_SYMBOL_GPL(xpcs_config_eee);
 
 static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
 {
-       int ret;
+       int ret, mdio_ctrl;
 
        /* For AN for C37 SGMII mode, the settings are :-
-        * 1) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN)
-        * 2) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII)
+        * 1) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 0b (Disable SGMII AN in case
+             it is already enabled)
+        * 2) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN)
+        * 3) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII)
         *    DW xPCS used with DW EQoS MAC is always MAC side SGMII.
-        * 3) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic
+        * 4) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic
         *    speed/duplex mode change by HW after SGMII AN complete)
+        * 5) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 1b (Enable SGMII AN)
         *
         * Note: Since it is MAC side SGMII, there is no need to set
         *       SR_MII_AN_ADV. MAC side SGMII receives AN Tx Config from
@@ -712,6 +720,17 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
         *       between PHY and Link Partner. There is also no need to
         *       trigger AN restart for MAC-side SGMII.
         */
+       mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL);
+       if (mdio_ctrl < 0)
+               return mdio_ctrl;
+
+       if (mdio_ctrl & AN_CL37_EN) {
+               ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
+                                mdio_ctrl & ~AN_CL37_EN);
+               if (ret < 0)
+                       return ret;
+       }
+
        ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL);
        if (ret < 0)
                return ret;
@@ -736,7 +755,15 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
        else
                ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
 
-       return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
+       ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
+       if (ret < 0)
+               return ret;
+
+       if (phylink_autoneg_inband(mode))
+               ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL,
+                                mdio_ctrl | AN_CL37_EN);
+
+       return ret;
 }
 
 static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
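
The xpcs change above brackets the C37 SGMII programming with a disable and conditional re-enable of the autoneg bit, so the PCS/AN mode bits are never rewritten while autoneg is running. The sequence, reduced to a standalone model (plain C; the register variables and bit positions are illustrative stand-ins for VR_MII_MMD_CTRL and VR_MII_AN_CTRL):

#include <stdint.h>
#include <stdio.h>

#define AN_ENABLE       (1u << 12)      /* models the CL37 AN enable bit */

static uint16_t mmd_ctrl;               /* models VR_MII_MMD_CTRL */
static uint16_t an_ctrl;                /* models VR_MII_AN_CTRL */

static void configure_sgmii_an(int inband)
{
        /* 1) Quiesce autoneg before touching the mode bits */
        if (mmd_ctrl & AN_ENABLE)
                mmd_ctrl &= ~AN_ENABLE;

        /* 2-4) Program PCS_MODE, TX_CONFIG, MAC_AUTO_SW while AN is off */
        an_ctrl = (an_ctrl & ~0x6u) | 0x4u;     /* PCS_MODE = SGMII AN */

        /* 5) Re-enable autoneg, but only for in-band modes */
        if (inband)
                mmd_ctrl |= AN_ENABLE;
}

int main(void)
{
        mmd_ctrl = AN_ENABLE;                   /* AN left enabled by firmware */
        configure_sgmii_an(1);
        printf("MMD_CTRL=0x%04x AN_CTRL=0x%04x\n", mmd_ctrl, an_ctrl);
        return 0;
}
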
index e79297a4bae81dffb7095c0cdeb465f04e675e77..27b6a3f507ae60d9e7e836f35fd6c373be010182 100644 (file)
 #define MII_BCM7XXX_SHD_2_ADDR_CTRL    0xe
 #define MII_BCM7XXX_SHD_2_CTRL_STAT    0xf
 #define MII_BCM7XXX_SHD_2_BIAS_TRIM    0x1a
+#define MII_BCM7XXX_SHD_3_PCS_CTRL     0x0
+#define MII_BCM7XXX_SHD_3_PCS_STATUS   0x1
+#define MII_BCM7XXX_SHD_3_EEE_CAP      0x2
 #define MII_BCM7XXX_SHD_3_AN_EEE_ADV   0x3
+#define MII_BCM7XXX_SHD_3_EEE_LP       0x4
+#define MII_BCM7XXX_SHD_3_EEE_WK_ERR   0x5
 #define MII_BCM7XXX_SHD_3_PCS_CTRL_2   0x6
 #define  MII_BCM7XXX_PCS_CTRL_2_DEF    0x4400
 #define MII_BCM7XXX_SHD_3_AN_STAT      0xb
@@ -216,25 +221,37 @@ static int bcm7xxx_28nm_resume(struct phy_device *phydev)
        return genphy_config_aneg(phydev);
 }
 
-static int phy_set_clr_bits(struct phy_device *dev, int location,
-                                       int set_mask, int clr_mask)
+static int __phy_set_clr_bits(struct phy_device *dev, int location,
+                             int set_mask, int clr_mask)
 {
        int v, ret;
 
-       v = phy_read(dev, location);
+       v = __phy_read(dev, location);
        if (v < 0)
                return v;
 
        v &= ~clr_mask;
        v |= set_mask;
 
-       ret = phy_write(dev, location, v);
+       ret = __phy_write(dev, location, v);
        if (ret < 0)
                return ret;
 
        return v;
 }
 
+static int phy_set_clr_bits(struct phy_device *dev, int location,
+                           int set_mask, int clr_mask)
+{
+       int ret;
+
+       mutex_lock(&dev->mdio.bus->mdio_lock);
+       ret = __phy_set_clr_bits(dev, location, set_mask, clr_mask);
+       mutex_unlock(&dev->mdio.bus->mdio_lock);
+
+       return ret;
+}
+
 static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
 {
        int ret;
@@ -398,6 +415,93 @@ static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
        return bcm7xxx_28nm_ephy_apd_enable(phydev);
 }
 
+#define MII_BCM7XXX_REG_INVALID        0xff
+
+static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum)
+{
+       switch (regnum) {
+       case MDIO_CTRL1:
+               return MII_BCM7XXX_SHD_3_PCS_CTRL;
+       case MDIO_STAT1:
+               return MII_BCM7XXX_SHD_3_PCS_STATUS;
+       case MDIO_PCS_EEE_ABLE:
+               return MII_BCM7XXX_SHD_3_EEE_CAP;
+       case MDIO_AN_EEE_ADV:
+               return MII_BCM7XXX_SHD_3_AN_EEE_ADV;
+       case MDIO_AN_EEE_LPABLE:
+               return MII_BCM7XXX_SHD_3_EEE_LP;
+       case MDIO_PCS_EEE_WK_ERR:
+               return MII_BCM7XXX_SHD_3_EEE_WK_ERR;
+       default:
+               return MII_BCM7XXX_REG_INVALID;
+       }
+}
+
+static bool bcm7xxx_28nm_ephy_dev_valid(int devnum)
+{
+       return devnum == MDIO_MMD_AN || devnum == MDIO_MMD_PCS;
+}
+
+static int bcm7xxx_28nm_ephy_read_mmd(struct phy_device *phydev,
+                                     int devnum, u16 regnum)
+{
+       u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+       int ret;
+
+       if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+           shd == MII_BCM7XXX_REG_INVALID)
+               return -EOPNOTSUPP;
+
+       /* set shadow mode 2 */
+       ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+                                MII_BCM7XXX_SHD_MODE_2, 0);
+       if (ret < 0)
+               return ret;
+
+       /* Access the desired shadow register address */
+       ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+       if (ret < 0)
+               goto reset_shadow_mode;
+
+       ret = __phy_read(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT);
+
+reset_shadow_mode:
+       /* reset shadow mode 2 */
+       __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+                          MII_BCM7XXX_SHD_MODE_2);
+       return ret;
+}
+
+static int bcm7xxx_28nm_ephy_write_mmd(struct phy_device *phydev,
+                                      int devnum, u16 regnum, u16 val)
+{
+       u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+       int ret;
+
+       if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+           shd == MII_BCM7XXX_REG_INVALID)
+               return -EOPNOTSUPP;
+
+       /* set shadow mode 2 */
+       ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+                                MII_BCM7XXX_SHD_MODE_2, 0);
+       if (ret < 0)
+               return ret;
+
+       /* Access the desired shadow register address */
+       ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+       if (ret < 0)
+               goto reset_shadow_mode;
+
+       /* Write the desired value in the shadow register */
+       __phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val);
+
+reset_shadow_mode:
+       /* reset shadow mode 2 */
+       return __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+                                 MII_BCM7XXX_SHD_MODE_2);
+}
+
 static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
 {
        int ret;
@@ -595,6 +699,8 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
        .get_stats      = bcm7xxx_28nm_get_phy_stats,                   \
        .probe          = bcm7xxx_28nm_probe,                           \
        .remove         = bcm7xxx_28nm_remove,                          \
+       .read_mmd       = bcm7xxx_28nm_ephy_read_mmd,                   \
+       .write_mmd      = bcm7xxx_28nm_ephy_write_mmd,                  \
 }
 
 #define BCM7XXX_40NM_EPHY(_oui, _name)                                 \
index 21aa24c741b9671222e1baa63c92177b0ffa72d6..daae7fa58fb822886c5a374577225d572680b1de 100644 (file)
@@ -5,7 +5,7 @@
 #ifndef HAVE_DP83640_REGISTERS
 #define HAVE_DP83640_REGISTERS
 
-#define PAGE0                     0x0000
+/* #define PAGE0                  0x0000 */
 #define PHYCR2                    0x001c /* PHY Control Register 2 */
 
 #define PAGE4                     0x0004
index 53f034fc2ef7983527faa32fc7589e93eb8f04bc..6865d9319197f716c18aec179cb4db74ea78e8da 100644 (file)
@@ -525,6 +525,10 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
            NULL == bus->read || NULL == bus->write)
                return -EINVAL;
 
+       if (bus->parent && bus->parent->of_node)
+               bus->parent->of_node->fwnode.flags |=
+                                       FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
+
        BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
               bus->state != MDIOBUS_UNREGISTERED);
 
@@ -534,6 +538,13 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
        bus->dev.groups = NULL;
        dev_set_name(&bus->dev, "%s", bus->id);
 
+       /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
+        * the device in mdiobus_free()
+        *
+        * State will be updated later in this function in case of success
+        */
+       bus->state = MDIOBUS_UNREGISTERED;
+
        err = device_register(&bus->dev);
        if (err) {
                pr_err("mii_bus %s failed to register\n", bus->id);
index c94cb5382dc922e5cec73e053f863e8b28742974..250742ffdfd91961301020c5ea721fccb33093b9 100644 (file)
@@ -179,6 +179,16 @@ static int mdio_remove(struct device *dev)
        return 0;
 }
 
+static void mdio_shutdown(struct device *dev)
+{
+       struct mdio_device *mdiodev = to_mdio_device(dev);
+       struct device_driver *drv = mdiodev->dev.driver;
+       struct mdio_driver *mdiodrv = to_mdio_driver(drv);
+
+       if (mdiodrv->shutdown)
+               mdiodrv->shutdown(mdiodev);
+}
+
 /**
  * mdio_driver_register - register an mdio_driver with the MDIO layer
  * @drv: new mdio_driver to register
@@ -193,6 +203,7 @@ int mdio_driver_register(struct mdio_driver *drv)
        mdiodrv->driver.bus = &mdio_bus_type;
        mdiodrv->driver.probe = mdio_probe;
        mdiodrv->driver.remove = mdio_remove;
+       mdiodrv->driver.shutdown = mdio_shutdown;
 
        retval = driver_register(&mdiodrv->driver);
        if (retval) {
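
With mdio_shutdown() wired into the bus core above, an MDIO device driver can provide a .shutdown hook that is invoked on reboot or kexec. A hypothetical driver skeleton showing the hook; the "foo" names are made up, and only the mdio_driver structure and registration macro are real kernel interfaces:

/* Hypothetical MDIO device driver; illustrative only. */
#include <linux/mdio.h>
#include <linux/module.h>

static int foo_probe(struct mdio_device *mdiodev)
{
        return 0;
}

static void foo_remove(struct mdio_device *mdiodev)
{
}

static void foo_shutdown(struct mdio_device *mdiodev)
{
        /* quiesce the hardware on shutdown, mirroring what remove() does */
}

static struct mdio_driver foo_driver = {
        .probe          = foo_probe,
        .remove         = foo_remove,
        .shutdown       = foo_shutdown,
        .mdiodrv.driver = {
                .name   = "foo-mdio",
        },
};
mdio_module_driver(foo_driver);

MODULE_DESCRIPTION("Example MDIO driver with a shutdown hook");
MODULE_LICENSE("GPL");
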
index 2d5d5081c3b6b01bbeb86f19ad74e0ba1ae2f9d5..5ce1bf03bbd71a11a094c387820bc0bd813f45ae 100644 (file)
@@ -493,6 +493,25 @@ static int gpy_loopback(struct phy_device *phydev, bool enable)
        return ret;
 }
 
+static int gpy115_loopback(struct phy_device *phydev, bool enable)
+{
+       int ret;
+       int fw_minor;
+
+       if (enable)
+               return gpy_loopback(phydev, enable);
+
+       ret = phy_read(phydev, PHY_FWV);
+       if (ret < 0)
+               return ret;
+
+       fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, ret);
+       if (fw_minor > 0x0076)
+               return gpy_loopback(phydev, 0);
+
+       return genphy_soft_reset(phydev);
+}
+
 static struct phy_driver gpy_drivers[] = {
        {
                PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx),
@@ -527,7 +546,7 @@ static struct phy_driver gpy_drivers[] = {
                .handle_interrupt = gpy_handle_interrupt,
                .set_wol        = gpy_set_wol,
                .get_wol        = gpy_get_wol,
-               .set_loopback   = gpy_loopback,
+               .set_loopback   = gpy115_loopback,
        },
        {
                PHY_ID_MATCH_MODEL(PHY_ID_GPY115C),
@@ -544,7 +563,7 @@ static struct phy_driver gpy_drivers[] = {
                .handle_interrupt = gpy_handle_interrupt,
                .set_wol        = gpy_set_wol,
                .get_wol        = gpy_get_wol,
-               .set_loopback   = gpy_loopback,
+               .set_loopback   = gpy115_loopback,
        },
        {
                .phy_id         = PHY_ID_GPY211B,
index 9e2891d8e8dd98219caffce9db345f90190ac964..4f9990b47a377de462454378ce3888f6274d1a79 100644 (file)
@@ -233,9 +233,11 @@ static DEFINE_MUTEX(phy_fixup_lock);
 
 static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
 {
+       struct device_driver *drv = phydev->mdio.dev.driver;
+       struct phy_driver *phydrv = to_phy_driver(drv);
        struct net_device *netdev = phydev->attached_dev;
 
-       if (!phydev->drv->suspend)
+       if (!drv || !phydrv->suspend)
                return false;
 
        /* PHY not attached? May suspend if the PHY has not already been
@@ -3123,6 +3125,9 @@ static void phy_shutdown(struct device *dev)
 {
        struct phy_device *phydev = to_phy_device(dev);
 
+       if (phydev->state == PHY_READY || !phydev->attached_dev)
+               return;
+
        phy_disable_interrupts(phydev);
 }
 
index a1464b764d4dcd3209e25b9199dac96db68234bb..0a0abe8e4be0b64b5c093accec38e94c6988b4b1 100644 (file)
@@ -1607,6 +1607,32 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
        if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
                return -EINVAL;
 
+       /* If this link is with an SFP, ensure that changes to advertised modes
+        * also cause the associated interface to be selected such that the
+        * link can be configured correctly.
+        */
+       if (pl->sfp_port && pl->sfp_bus) {
+               config.interface = sfp_select_interface(pl->sfp_bus,
+                                                       config.advertising);
+               if (config.interface == PHY_INTERFACE_MODE_NA) {
+                       phylink_err(pl,
+                                   "selection of interface failed, advertisement %*pb\n",
+                                   __ETHTOOL_LINK_MODE_MASK_NBITS,
+                                   config.advertising);
+                       return -EINVAL;
+               }
+
+               /* Revalidate with the selected interface */
+               linkmode_copy(support, pl->supported);
+               if (phylink_validate(pl, support, &config)) {
+                       phylink_err(pl, "validation of %s/%s with support %*pb failed\n",
+                                   phylink_an_mode_str(pl->cur_link_an_mode),
+                                   phy_modes(config.interface),
+                                   __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+                       return -EINVAL;
+               }
+       }
+
        mutex_lock(&pl->state_mutex);
        pl->link_config.speed = config.speed;
        pl->link_config.duplex = config.duplex;
@@ -2186,7 +2212,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
        if (phy_interface_mode_is_8023z(iface) && pl->phydev)
                return -EINVAL;
 
-       changed = !linkmode_equal(pl->supported, support);
+       changed = !linkmode_equal(pl->supported, support) ||
+                 !linkmode_equal(pl->link_config.advertising,
+                                 config.advertising);
        if (changed) {
                linkmode_copy(pl->supported, support);
                linkmode_copy(pl->link_config.advertising, config.advertising);
index 34e90216bd2cb7fedcbf0f22637cffc853ffe46b..ab77a9f439ef9a51e6095fc371a39fd3437a2b7c 100644 (file)
@@ -134,7 +134,7 @@ static const char * const sm_state_strings[] = {
        [SFP_S_LINK_UP] = "link_up",
        [SFP_S_TX_FAULT] = "tx_fault",
        [SFP_S_REINIT] = "reinit",
-       [SFP_S_TX_DISABLE] = "rx_disable",
+       [SFP_S_TX_DISABLE] = "tx_disable",
 };
 
 static const char *sm_state_to_str(unsigned short sm_state)
index 4c5d69732a7e125528fdc6f519d873f9c8181503..f87f175033731a38b87fe9183b023185519c6a9d 100644 (file)
@@ -99,6 +99,10 @@ config USB_RTL8150
 config USB_RTL8152
        tristate "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
        select MII
+       select CRC32
+       select CRYPTO
+       select CRYPTO_HASH
+       select CRYPTO_SHA256
        help
          This option adds support for Realtek RTL8152 based USB 2.0
          10/100 Ethernet adapters and RTL8153 based USB 3.0 10/100/1000
index a57251ba5991fd97a7e2a0bedd4d88bdfeb95cfe..f97813a4e8d1b15b040c6a18fc80b5cb33943f5b 100644 (file)
@@ -2719,14 +2719,14 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
 
        serial = kzalloc(sizeof(*serial), GFP_KERNEL);
        if (!serial)
-               goto exit;
+               goto err_free_dev;
 
        hso_dev->port_data.dev_serial = serial;
        serial->parent = hso_dev;
 
        if (hso_serial_common_create
            (serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE))
-               goto exit;
+               goto err_free_serial;
 
        serial->tx_data_length--;
        serial->write_data = hso_mux_serial_write_data;
@@ -2742,11 +2742,9 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
        /* done, return it */
        return hso_dev;
 
-exit:
-       if (serial) {
-               tty_unregister_device(tty_drv, serial->minor);
-               kfree(serial);
-       }
+err_free_serial:
+       kfree(serial);
+err_free_dev:
        kfree(hso_dev);
        return NULL;
 
index 60ba9b734055ba3da6f36ac117021805a22a9edb..f329e39100a7dd0782310c98e8d63582791ed95a 100644 (file)
@@ -767,6 +767,7 @@ enum rtl8152_flags {
        PHY_RESET,
        SCHEDULE_TASKLET,
        GREEN_ETHERNET,
+       RX_EPROTO,
 };
 
 #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2      0x3082
@@ -1770,6 +1771,14 @@ static void read_bulk_callback(struct urb *urb)
                rtl_set_unplug(tp);
                netif_device_detach(tp->netdev);
                return;
+       case -EPROTO:
+               urb->actual_length = 0;
+               spin_lock_irqsave(&tp->rx_lock, flags);
+               list_add_tail(&agg->list, &tp->rx_done);
+               spin_unlock_irqrestore(&tp->rx_lock, flags);
+               set_bit(RX_EPROTO, &tp->flags);
+               schedule_delayed_work(&tp->schedule, 1);
+               return;
        case -ENOENT:
                return; /* the urb is in unlink state */
        case -ETIME:
@@ -2425,6 +2434,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
        if (list_empty(&tp->rx_done))
                goto out1;
 
+       clear_bit(RX_EPROTO, &tp->flags);
        INIT_LIST_HEAD(&rx_queue);
        spin_lock_irqsave(&tp->rx_lock, flags);
        list_splice_init(&tp->rx_done, &rx_queue);
@@ -2441,7 +2451,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
 
                agg = list_entry(cursor, struct rx_agg, list);
                urb = agg->urb;
-               if (urb->actual_length < ETH_ZLEN)
+               if (urb->status != 0 || urb->actual_length < ETH_ZLEN)
                        goto submit;
 
                agg_free = rtl_get_free_rx(tp, GFP_ATOMIC);
@@ -6643,6 +6653,10 @@ static void rtl_work_func_t(struct work_struct *work)
            netif_carrier_ok(tp->netdev))
                tasklet_schedule(&tp->tx_tl);
 
+       if (test_and_clear_bit(RX_EPROTO, &tp->flags) &&
+           !list_empty(&tp->rx_done))
+               napi_schedule(&tp->napi);
+
        mutex_unlock(&tp->control);
 
 out1:
index 7d953974eb9b5d61f326fae1a81448945bbbe2fb..26b1bd8e845b437b7fdff7e22d8c4803fc020268 100644 (file)
@@ -1178,7 +1178,10 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 
 static void smsc95xx_handle_link_change(struct net_device *net)
 {
+       struct usbnet *dev = netdev_priv(net);
+
        phy_print_status(net->phydev);
+       usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
 }
 
 static int smsc95xx_start_phy(struct usbnet *dev)
index 271d38c1d9f80673cf90cf2077461042a5f74179..4ad25a8b0870c67f40c2cc8a6a0f6eddaded09dd 100644 (file)
@@ -406,7 +406,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
         * add_recvbuf_mergeable() + get_mergeable_buf_len()
         */
        truesize = headroom ? PAGE_SIZE : truesize;
-       tailroom = truesize - len - headroom;
+       tailroom = truesize - len - headroom - (hdr_padded_len - hdr_len);
        buf = p - headroom;
 
        len -= hdr_len;
@@ -423,6 +423,10 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 
                skb_reserve(skb, p - buf);
                skb_put(skb, len);
+
+               page = (struct page *)page->private;
+               if (page)
+                       give_pages(rq, page);
                goto ok;
        }
 
index 5a8df5a195cb5700c45b4785355ef8ed84866052..141635a35c28a236f33c180dc2868c64338eccb1 100644 (file)
@@ -4756,12 +4756,12 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
        LIST_HEAD(list);
        unsigned int h;
 
-       rtnl_lock();
        list_for_each_entry(net, net_list, exit_list) {
                struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 
                unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
        }
+       rtnl_lock();
        list_for_each_entry(net, net_list, exit_list)
                vxlan_destroy_tunnels(net, &list);
 
index f6b92efffc94c340f758155a26a08391c30c0aac..480bcd1f6c1cb490ca40c1ff8afc92551d720c25 100644 (file)
@@ -34,6 +34,8 @@ obj-$(CONFIG_SLIC_DS26522)    += slic_ds26522.o
 clean-files := wanxlfw.inc
 $(obj)/wanxl.o:        $(obj)/wanxlfw.inc
 
+CROSS_COMPILE_M68K = m68k-linux-gnu-
+
 ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y)
 ifeq ($(ARCH),m68k)
   M68KCC = $(CC)
index 741289e385d59411715b60a43aac1ddaa9b50199..ca007b800f75607eaba3a0b842348e2617f5cf40 100644 (file)
@@ -44,7 +44,7 @@ config ATH10K_SNOC
        tristate "Qualcomm ath10k SNOC support"
        depends on ATH10K
        depends on ARCH_QCOM || COMPILE_TEST
-       depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
+       select QCOM_SCM
        select QCOM_QMI_HELPERS
        help
          This module adds support for integrated WCN3990 chip connected
index f35cd8de228e49f4db2c3fef795b131f5c30ad1a..6914b37bb0fbcf4e35334ec5e13c4c8d3b738f88 100644 (file)
@@ -3,9 +3,7 @@ config ATH5K
        tristate "Atheros 5xxx wireless cards support"
        depends on (PCI || ATH25) && MAC80211
        select ATH_COMMON
-       select MAC80211_LEDS
-       select LEDS_CLASS
-       select NEW_LEDS
+       select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
        select ATH5K_AHB if ATH25
        select ATH5K_PCI if !ATH25
        help
index 6a2a168567630fdbf8ea47818c3a336876b04df4..33e9928af36354dda8848a50f93ea53523a9e694 100644 (file)
@@ -89,7 +89,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
 
 void ath5k_led_enable(struct ath5k_hw *ah)
 {
-       if (test_bit(ATH_STAT_LEDSOFT, ah->status)) {
+       if (IS_ENABLED(CONFIG_MAC80211_LEDS) &&
+           test_bit(ATH_STAT_LEDSOFT, ah->status)) {
                ath5k_hw_set_gpio_output(ah, ah->led_pin);
                ath5k_led_off(ah);
        }
@@ -104,7 +105,8 @@ static void ath5k_led_on(struct ath5k_hw *ah)
 
 void ath5k_led_off(struct ath5k_hw *ah)
 {
-       if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
+       if (!IS_ENABLED(CONFIG_MAC80211_LEDS) ||
+           !test_bit(ATH_STAT_LEDSOFT, ah->status))
                return;
        ath5k_hw_set_gpio(ah, ah->led_pin, !ah->led_on);
 }
@@ -146,7 +148,7 @@ ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
 static void
 ath5k_unregister_led(struct ath5k_led *led)
 {
-       if (!led->ah)
+       if (!IS_ENABLED(CONFIG_MAC80211_LEDS) || !led->ah)
                return;
        led_classdev_unregister(&led->led_dev);
        ath5k_led_off(led->ah);
@@ -169,7 +171,7 @@ int ath5k_init_leds(struct ath5k_hw *ah)
        char name[ATH5K_LED_MAX_NAME_LEN + 1];
        const struct pci_device_id *match;
 
-       if (!ah->pdev)
+       if (!IS_ENABLED(CONFIG_MAC80211_LEDS) || !ah->pdev)
                return 0;
 
 #ifdef CONFIG_ATH5K_AHB
index f7b96cd69242d0fa38cb265a94490e48ca306236..9db12ffd2ff8010f4ea7a90a8c20a2f3d431850d 100644 (file)
@@ -7463,23 +7463,18 @@ static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
        s32 found_index;
        int i;
 
+       country_codes = drvr->settings->country_codes;
+       if (!country_codes) {
+               brcmf_dbg(TRACE, "No country codes configured for device\n");
+               return -EINVAL;
+       }
+
        if ((alpha2[0] == ccreq->country_abbrev[0]) &&
            (alpha2[1] == ccreq->country_abbrev[1])) {
                brcmf_dbg(TRACE, "Country code already set\n");
                return -EAGAIN;
        }
 
-       country_codes = drvr->settings->country_codes;
-       if (!country_codes) {
-               brcmf_dbg(TRACE, "No country codes configured for device, using ISO3166 code and 0 rev\n");
-               memset(ccreq, 0, sizeof(*ccreq));
-               ccreq->country_abbrev[0] = alpha2[0];
-               ccreq->country_abbrev[1] = alpha2[1];
-               ccreq->ccode[0] = alpha2[0];
-               ccreq->ccode[1] = alpha2[1];
-               return 0;
-       }
-
        found_index = -1;
        for (i = 0; i < country_codes->table_size; i++) {
                cc = &country_codes->table[i];
index 0e97d5e6c6448f64aba4356a81b857657871c9a8..9f706fffb5922003f578f960569539ce26425398 100644 (file)
@@ -160,6 +160,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
                mvm->ptk_icvlen = key->icv_len;
                mvm->gtk_ivlen = key->iv_len;
                mvm->gtk_icvlen = key->icv_len;
+               mutex_unlock(&mvm->mutex);
 
                /* don't upload key again */
                return;
@@ -360,11 +361,11 @@ static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw,
        if (sta) {
                rsc = data->rsc->ucast_rsc;
        } else {
-               if (WARN_ON(data->gtks > ARRAY_SIZE(data->gtk_ids)))
+               if (WARN_ON(data->gtks >= ARRAY_SIZE(data->gtk_ids)))
                        return;
                data->gtk_ids[data->gtks] = key->keyidx;
                rsc = data->rsc->mcast_rsc[data->gtks % 2];
-               if (WARN_ON(key->keyidx >
+               if (WARN_ON(key->keyidx >=
                                ARRAY_SIZE(data->rsc->mcast_key_id_map)))
                        return;
                data->rsc->mcast_key_id_map[key->keyidx] = data->gtks % 2;
index 25af88a3edcea428b2dc9bae94bdc7939adac7a7..e91f8e889df70b07dba339017ae9dc28f12cd8c1 100644 (file)
@@ -662,12 +662,13 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                                        u32 *uid)
 {
        u32 id;
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+       struct iwl_mvm_vif *mvmvif;
        enum nl80211_iftype iftype;
 
        if (!te_data->vif)
                return false;
 
+       mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
        iftype = te_data->vif->type;
 
        /*
index 61b2797a34a886ff1c1d3efd526a08b79a60e7d5..e3996ff99bad54c77616e93ba360728eddb367e4 100644 (file)
@@ -547,6 +547,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
        IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
        IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
        IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+       IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name),
+       IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name),
        IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
        IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
        IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
index ffa894f7312a47a0bfd4277fb3c5a02130fe86dd..0adae76eb8df1d37958122210cc63975f7a4adbb 100644 (file)
@@ -1867,8 +1867,8 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
                bcn_int -= data->bcn_delta;
                data->bcn_delta = 0;
        }
-       hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer),
-                       ns_to_ktime(bcn_int * NSEC_PER_USEC));
+       hrtimer_forward_now(&data->beacon_timer,
+                           ns_to_ktime(bcn_int * NSEC_PER_USEC));
        return HRTIMER_RESTART;
 }
 
index 241305377e2069ea460fe266cb264887d504f878..a9b5eb992220da5b896a2c5d087b4c3dbe24422c 100644 (file)
@@ -62,8 +62,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
 
        pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
 
-       pad = ((void *)skb->data - (sizeof(*local_tx_pd) + hroom)-
-                        NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
+       pad = ((uintptr_t)skb->data - (sizeof(*local_tx_pd) + hroom)) &
+              (MWIFIEX_DMA_ALIGN_SZ - 1);
        skb_push(skb, sizeof(*local_tx_pd) + pad);
 
        local_tx_pd = (struct txpd *) skb->data;
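
This hunk (and the matching one in the UAP TX path just below) replaces the old pointer-minus-NULL trick with a plain uintptr_t cast when computing how much extra headroom keeps the TX descriptor DMA-aligned. A small standalone sketch of that padding calculation, assuming an 8-byte alignment and a 16-byte descriptor size standing in for MWIFIEX_DMA_ALIGN_SZ and sizeof(*local_tx_pd) + hroom:

#include <stdint.h>
#include <stdio.h>

#define DMA_ALIGN_SZ 8	/* assumed stand-in for MWIFIEX_DMA_ALIGN_SZ; must be a power of two */

/* Extra bytes to push in front of data so the TX descriptor starts on an aligned address. */
static size_t align_pad(const void *data, size_t header_len)
{
	return ((uintptr_t)data - header_len) & (DMA_ALIGN_SZ - 1);
}

int main(void)
{
	unsigned char buf[64];
	const unsigned char *payload = buf + 3;		/* pretend skb->data */
	size_t pad = align_pad(payload, 16);		/* 16: made-up descriptor + headroom size */

	/* payload - 16 - pad is now DMA_ALIGN_SZ aligned, matching skb_push(skb, hdr + pad). */
	printf("payload %p -> pad %zu\n", (const void *)payload, pad);
	return 0;
}
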
index 9bbdb8dfce62ae1de23f142208c71bb5a16cb21f..245ff644f81e33418828552bc38067754e7410c4 100644 (file)
@@ -475,8 +475,8 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
 
        pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
 
-       pad = ((void *)skb->data - (sizeof(*txpd) + hroom) - NULL) &
-                       (MWIFIEX_DMA_ALIGN_SZ - 1);
+       pad = ((uintptr_t)skb->data - (sizeof(*txpd) + hroom)) &
+              (MWIFIEX_DMA_ALIGN_SZ - 1);
 
        skb_push(skb, sizeof(*txpd) + pad);
 
index 39a01c2a3058d6a8628d9dc47a7520b5f969cabe..32d5bc4919d8cc10216c625833b892385d40849a 100644 (file)
@@ -499,7 +499,7 @@ check_frags:
                                 * the header's copy failed, and they are
                                 * sharing a slot, send an error
                                 */
-                               if (i == 0 && sharedslot)
+                               if (i == 0 && !first_shinfo && sharedslot)
                                        xenvif_idx_release(queue, pending_idx,
                                                           XEN_NETIF_RSP_ERROR);
                                else
index a620c34790e6d7af34d456e2dd0c54f9fa2c15f7..0875b773fb413ac330850db58437995d202fc265 100644 (file)
@@ -278,6 +278,7 @@ static int st_nci_spi_remove(struct spi_device *dev)
 
 static struct spi_device_id st_nci_spi_id_table[] = {
        {ST_NCI_SPI_DRIVER_NAME, 0},
+       {"st21nfcb-spi", 0},
        {}
 };
 MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
index 72de88ff0d30d2bf359aa55bd032264119ee159a..ef4950f8083263c65f10888d92f9b314d2decdbf 100644 (file)
@@ -380,7 +380,6 @@ static int pmem_attach_disk(struct device *dev,
        struct nd_pfn_sb *pfn_sb;
        struct pmem_device *pmem;
        struct request_queue *q;
-       struct device *gendev;
        struct gendisk *disk;
        void *addr;
        int rc;
@@ -489,10 +488,8 @@ static int pmem_attach_disk(struct device *dev,
        }
        dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
        pmem->dax_dev = dax_dev;
-       gendev = disk_to_dev(disk);
-       gendev->groups = pmem_attribute_groups;
 
-       device_add_disk(dev, disk, NULL);
+       device_add_disk(dev, disk, pmem_attribute_groups);
        if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
                return -ENOMEM;
 
index 7efb31b87f37aa756124d52606d270a76f905ad2..7712a8f783379a73d3024179337851c1a19a42d7 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
-#include <linux/list_sort.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/pr.h>
@@ -979,6 +978,7 @@ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
 {
        struct nvme_command *cmd = nvme_req(req)->cmd;
+       struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
        blk_status_t ret = BLK_STS_OK;
 
        if (!(req->rq_flags & RQF_DONTPREP)) {
@@ -1027,7 +1027,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
                return BLK_STS_IOERR;
        }
 
-       nvme_req(req)->genctr++;
+       if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
+               nvme_req(req)->genctr++;
        cmd->common.command_id = nvme_cid(req);
        trace_nvme_setup_cmd(req, cmd);
        return ret;
@@ -3524,7 +3525,9 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
        lockdep_assert_held(&subsys->lock);
 
        list_for_each_entry(h, &subsys->nsheads, entry) {
-               if (h->ns_id == nsid && nvme_tryget_ns_head(h))
+               if (h->ns_id != nsid)
+                       continue;
+               if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
                        return h;
        }
 
@@ -3714,15 +3717,6 @@ out_unlock:
        return ret;
 }
 
-static int ns_cmp(void *priv, const struct list_head *a,
-               const struct list_head *b)
-{
-       struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
-       struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
-
-       return nsa->head->ns_id - nsb->head->ns_id;
-}
-
 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
        struct nvme_ns *ns, *ret = NULL;
@@ -3743,6 +3737,22 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 }
 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
 
+/*
+ * Add the namespace to the controller list while keeping the list ordered.
+ */
+static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
+{
+       struct nvme_ns *tmp;
+
+       list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
+               if (tmp->head->ns_id < ns->head->ns_id) {
+                       list_add(&ns->list, &tmp->list);
+                       return;
+               }
+       }
+       list_add(&ns->list, &ns->ctrl->namespaces);
+}
+
 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
                struct nvme_ns_ids *ids)
 {
@@ -3793,9 +3803,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
                goto out_unlink_ns;
 
        down_write(&ctrl->namespaces_rwsem);
-       list_add_tail(&ns->list, &ctrl->namespaces);
+       nvme_ns_add_to_ctrl_list(ns);
        up_write(&ctrl->namespaces_rwsem);
-
        nvme_get_ctrl(ctrl);
 
        if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
@@ -3843,6 +3852,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
        mutex_lock(&ns->ctrl->subsys->lock);
        list_del_rcu(&ns->siblings);
+       if (list_empty(&ns->head->list)) {
+               list_del_init(&ns->head->entry);
+               last_path = true;
+       }
        mutex_unlock(&ns->ctrl->subsys->lock);
 
        /* guarantee not available in head->list */
@@ -3856,20 +3869,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                nvme_cdev_del(&ns->cdev, &ns->cdev_device);
        del_gendisk(ns->disk);
        blk_cleanup_queue(ns->queue);
-       if (blk_get_integrity(ns->disk))
-               blk_integrity_unregister(ns->disk);
 
        down_write(&ns->ctrl->namespaces_rwsem);
        list_del_init(&ns->list);
        up_write(&ns->ctrl->namespaces_rwsem);
 
-       /* Synchronize with nvme_init_ns_head() */
-       mutex_lock(&ns->head->subsys->lock);
-       if (list_empty(&ns->head->list)) {
-               list_del_init(&ns->head->entry);
-               last_path = true;
-       }
-       mutex_unlock(&ns->head->subsys->lock);
        if (last_path)
                nvme_mpath_shutdown_disk(ns->head);
        nvme_put_ns(ns);
@@ -4083,10 +4087,6 @@ static void nvme_scan_work(struct work_struct *work)
        if (nvme_scan_ns_list(ctrl) != 0)
                nvme_scan_ns_sequential(ctrl);
        mutex_unlock(&ctrl->scan_lock);
-
-       down_write(&ctrl->namespaces_rwsem);
-       list_sort(NULL, &ctrl->namespaces, ns_cmp);
-       up_write(&ctrl->namespaces_rwsem);
 }
 
 /*
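
Before moving on, a standalone sketch of the ordered insert that nvme_ns_add_to_ctrl_list() (added above) performs with list_for_each_entry_reverse(). The hand-rolled list here is only a stand-in for the kernel's list_head, used to show the tail-first scan:

#include <stdio.h>

/* Hand-rolled circular doubly-linked list with a sentinel head (stand-in for list_head). */
struct node {
	int ns_id;
	struct node *prev, *next;
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add_after(struct node *entry, struct node *pos)
{
	entry->prev = pos;
	entry->next = pos->next;
	pos->next->prev = entry;
	pos->next = entry;
}

/*
 * Scan backwards from the tail and insert after the first entry with a smaller
 * id. New namespaces usually carry the highest id, so the common case is O(1),
 * the same idea as the kernel helper above.
 */
static void add_ordered(struct node *head, struct node *ns)
{
	struct node *tmp;

	for (tmp = head->prev; tmp != head; tmp = tmp->prev) {
		if (tmp->ns_id < ns->ns_id) {
			list_add_after(ns, tmp);
			return;
		}
	}
	list_add_after(ns, head);
}

int main(void)
{
	struct node head;
	struct node n[4] = { { .ns_id = 2 }, { .ns_id = 5 }, { .ns_id = 1 }, { .ns_id = 3 } };

	list_init(&head);
	for (int i = 0; i < 4; i++)
		add_ordered(&head, &n[i]);
	for (struct node *p = head.next; p != &head; p = p->next)
		printf("%d ", p->ns_id);	/* prints: 1 2 3 5 */
	printf("\n");
	return 0;
}
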
index b08a61ca283f26eb979b7953e82065337427c597..aa14ad963d910624385e40ee6442a49113cf2bd0 100644 (file)
@@ -2487,6 +2487,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
         */
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
+               nvme_sync_io_queues(&ctrl->ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                nvme_fc_terminate_exchange, &ctrl->ctrl);
                blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
@@ -2510,6 +2511,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
         * clean up the admin queue. Same thing as above.
         */
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       blk_sync_queue(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_fc_terminate_exchange, &ctrl->ctrl);
        blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
@@ -2951,6 +2953,13 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
        if (ctrl->ctrl.queue_count == 1)
                return 0;
 
+       if (prior_ioq_cnt != nr_io_queues) {
+               dev_info(ctrl->ctrl.device,
+                       "reconnect: revising io queue count from %d to %d\n",
+                       prior_ioq_cnt, nr_io_queues);
+               blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
+       }
+
        ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_free_io_queues;
@@ -2959,15 +2968,6 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
        if (ret)
                goto out_delete_hw_queues;
 
-       if (prior_ioq_cnt != nr_io_queues) {
-               dev_info(ctrl->ctrl.device,
-                       "reconnect: revising io queue count from %d to %d\n",
-                       prior_ioq_cnt, nr_io_queues);
-               nvme_wait_freeze(&ctrl->ctrl);
-               blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
-               nvme_unfreeze(&ctrl->ctrl);
-       }
-
        return 0;
 
 out_delete_hw_queues:
index 5d7bc58a27bd984dc7f19341dcdcf1a3d3af658d..e8ccdd398f784443241d659761414f0e622c111b 100644 (file)
@@ -600,14 +600,17 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
 
        down_read(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
-               unsigned nsid = le32_to_cpu(desc->nsids[n]);
-
+               unsigned nsid;
+again:
+               nsid = le32_to_cpu(desc->nsids[n]);
                if (ns->head->ns_id < nsid)
                        continue;
                if (ns->head->ns_id == nsid)
                        nvme_update_ns_ana_state(desc, ns);
                if (++n == nr_nsids)
                        break;
+               if (ns->head->ns_id > nsid)
+                       goto again;
        }
        up_read(&ctrl->namespaces_rwsem);
        return 0;
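
The multipath hunk above adds a "goto again" so a descriptor nsid that trails the current namespace id is consumed without advancing to the next namespace. A simplified stand-in for that lockstep walk over two sorted id sequences (the sample ids are made up):

#include <stdio.h>

/*
 * Lockstep walk over two sorted id sequences: namespaces attached to the
 * controller and the nsids in an ANA descriptor. When the descriptor id
 * trails the current namespace id, advance the descriptor without moving
 * on to the next namespace (the role of the "goto again" above).
 */
int main(void)
{
	int ns_ids[]   = { 1, 3, 4, 8 };	/* made-up ctrl->namespaces ids, sorted */
	int desc_ids[] = { 2, 3, 5, 8 };	/* made-up desc->nsids, sorted */
	int n = 0;

	for (int i = 0; i < 4 && n < 4; i++) {
		while (n < 4 && desc_ids[n] < ns_ids[i])
			n++;			/* descriptor id with no local namespace: skip it */
		if (n < 4 && desc_ids[n] == ns_ids[i])
			printf("update ANA state of nsid %d\n", ns_ids[i]);
	}
	return 0;
}
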
index 9871c0c9374c4c40d7672bab0f663f290b5f34ce..ed79a6c7e8043b50d9770d278d0b8316436e4bbf 100644 (file)
@@ -138,6 +138,12 @@ enum nvme_quirks {
         * 48 bits.
         */
        NVME_QUIRK_DMA_ADDRESS_BITS_48          = (1 << 16),
+
+       /*
+        * The controller requires the command_id value to be limited, so skip
+        * encoding the generation sequence number.
+        */
+       NVME_QUIRK_SKIP_CID_GEN                 = (1 << 17),
 };
 
 /*
index b82492cd750330803f48e58f6c3e1a3d7d7daaff..456a0e8a5718964dab22e5323ebbd5a6e1b2e0f8 100644 (file)
@@ -3369,7 +3369,8 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
                .driver_data = NVME_QUIRK_SINGLE_VECTOR |
                                NVME_QUIRK_128_BYTES_SQES |
-                               NVME_QUIRK_SHARED_TAGS },
+                               NVME_QUIRK_SHARED_TAGS |
+                               NVME_QUIRK_SKIP_CID_GEN },
 
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { 0, }
index a68704e39084e038e02acae5c46bd7725281449c..042c594bc57e21915363acee1521c473d217a0cb 100644 (file)
@@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
        if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
                return;
 
-       nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
+       nvme_rdma_destroy_queue_ib(queue);
        mutex_destroy(&queue->queue_lock);
 }
 
@@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
        for (i = 0; i < queue->queue_size; i++) {
                ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
                if (ret)
-                       goto out_destroy_queue_ib;
+                       return ret;
        }
 
        return 0;
-
-out_destroy_queue_ib:
-       nvme_rdma_destroy_queue_ib(queue);
-       return ret;
 }
 
 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
@@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
        if (ret) {
                dev_err(ctrl->ctrl.device,
                        "rdma_connect_locked failed (%d).\n", ret);
-               goto out_destroy_queue_ib;
+               return ret;
        }
 
        return 0;
-
-out_destroy_queue_ib:
-       nvme_rdma_destroy_queue_ib(queue);
-       return ret;
 }
 
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
        case RDMA_CM_EVENT_ROUTE_ERROR:
        case RDMA_CM_EVENT_CONNECT_ERROR:
        case RDMA_CM_EVENT_UNREACHABLE:
-               nvme_rdma_destroy_queue_ib(queue);
-               fallthrough;
        case RDMA_CM_EVENT_ADDR_ERROR:
                dev_dbg(queue->ctrl->ctrl.device,
                        "CM error event %d\n", ev->event);
index e2ab12f3f51c92a0fb676771e25d931fd30bc13c..3c1c29dd30207cd6fbf337a67aa72e8ef34cfddd 100644 (file)
@@ -274,6 +274,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
        } while (ret > 0);
 }
 
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+       return !list_empty(&queue->send_list) ||
+               !llist_empty(&queue->req_list) || queue->more_requests;
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                bool sync, bool last)
 {
@@ -294,9 +300,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                nvme_tcp_send_all(queue);
                queue->more_requests = false;
                mutex_unlock(&queue->send_mutex);
-       } else if (last) {
-               queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
        }
+
+       if (last && nvme_tcp_queue_more(queue))
+               queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 }
 
 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
@@ -613,7 +620,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
                cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
        data->ttag = pdu->ttag;
        data->command_id = nvme_cid(rq);
-       data->data_offset = cpu_to_le32(req->data_sent);
+       data->data_offset = pdu->r2t_offset;
        data->data_length = cpu_to_le32(req->pdu_len);
        return 0;
 }
@@ -906,12 +913,6 @@ done:
        read_unlock_bh(&sk->sk_callback_lock);
 }
 
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
-{
-       return !list_empty(&queue->send_list) ||
-               !llist_empty(&queue->req_list) || queue->more_requests;
-}
-
 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 {
        queue->request = NULL;
@@ -952,7 +953,15 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
                        nvme_tcp_ddgst_update(queue->snd_hash, page,
                                        offset, ret);
 
-               /* fully successful last write*/
+               /*
+                * update the request iterator except for the last payload send
+                * in the request where we don't want to modify it as we may
+                * compete with the RX path completing the request.
+                */
+               if (req->data_sent + ret < req->data_len)
+                       nvme_tcp_advance_req(req, ret);
+
+               /* fully successful last send in current PDU */
                if (last && ret == len) {
                        if (queue->data_digest) {
                                nvme_tcp_ddgst_final(queue->snd_hash,
@@ -964,7 +973,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
                        }
                        return 1;
                }
-               nvme_tcp_advance_req(req, ret);
        }
        return -EAGAIN;
 }
@@ -1145,8 +1153,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
                                pending = true;
                        else if (unlikely(result < 0))
                                break;
-               } else
-                       pending = !llist_empty(&queue->req_list);
+               }
 
                result = nvme_tcp_try_recv(queue);
                if (result > 0)
index d784f3c200b4cefcaab34c8466b7242a8cef7d4b..be5d82421e3a458a5dc2f37de7877320df998b5c 100644 (file)
@@ -1067,7 +1067,7 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
 {
        struct nvmet_subsys *subsys = to_subsys(item);
 
-       return snprintf(page, PAGE_SIZE, "%*s\n",
+       return snprintf(page, PAGE_SIZE, "%.*s\n",
                        NVMET_SN_MAX_SIZE, subsys->serial);
 }
 
index 39854d43758be3fbd794fcb4058d292fab9ef63e..da414617a54d4b9933703a83745fa40fb08328cd 100644 (file)
@@ -109,6 +109,7 @@ config MTK_EFUSE
 
 config NVMEM_NINTENDO_OTP
        tristate "Nintendo Wii and Wii U OTP Support"
+       depends on WII || COMPILE_TEST
        help
          This is a driver exposing the OTP of a Nintendo Wii or Wii U console.
 
index 3d87fadaa160d5242bb0c99a3c08693ad026b9f1..8976da38b375a1b481cc8b7b41c84d2297e1423d 100644 (file)
@@ -1383,7 +1383,8 @@ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
                *p-- = 0;
 
        /* clear msb bits if any leftover in the last byte */
-       *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
+       if (cell->nbits % BITS_PER_BYTE)
+               *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
 }
 
 static int __nvmem_cell_read(struct nvmem_device *nvmem,
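
The nvmem hunk above guards the masking step so that an nbits value which is an exact multiple of 8 is never fed to GENMASK() with a high bit of -1. A minimal userspace sketch of clearing the leftover MSBs of the final byte, with the GENMASK expression expanded by hand for a u8:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8
/* Userspace stand-in for the kernel's GENMASK(h, l) on a u8: bits h..l set. */
#define GENMASK_U8(h, l) ((uint8_t)((0xffu >> (7 - (h))) & (0xffu << (l))))

/* Clear the unused most-significant bits of the last byte of an nbits-wide cell. */
static uint8_t clear_leftover_msbs(uint8_t last_byte, unsigned int nbits)
{
	if (nbits % BITS_PER_BYTE)
		last_byte &= GENMASK_U8(nbits % BITS_PER_BYTE - 1, 0);
	return last_byte;	/* exact multiple of 8: nothing to clear */
}

int main(void)
{
	printf("0x%02x\n", clear_leftover_msbs(0xff, 12));	/* 12 % 8 = 4 -> keep bits 3..0 -> 0x0f */
	printf("0x%02x\n", clear_leftover_msbs(0xff, 16));	/* multiple of 8 -> left untouched -> 0xff */
	return 0;
}
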
index f720c0d246f2706f16ed9a1a84125314096653ed..0ac17256258d59b94adee28100bd2c2ef7af2d6b 100644 (file)
@@ -36,6 +36,7 @@ LIST_HEAD(aliases_lookup);
 struct device_node *of_root;
 EXPORT_SYMBOL(of_root);
 struct device_node *of_chosen;
+EXPORT_SYMBOL(of_chosen);
 struct device_node *of_aliases;
 struct device_node *of_stdout;
 static const char *of_stdout_options;
index 5b043ee308240bf0d389e92aebb462dcb044382a..b0800c260f64a219848888eee8762fafee0aef64 100644 (file)
@@ -85,7 +85,11 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
                        break;
        }
 
-       if (i != count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
+       /*
+        * Attempt to initialize a restricted-dma-pool region if one was found.
+        * Note that count can hold a negative error code.
+        */
+       if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
                dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
 }
 
index 3fd74bb34819b75e87fd7e60cac82d97ff160a9c..a3483484a5a2a4b6e7cdb6db27ce58a0b2f90944 100644 (file)
@@ -1291,7 +1291,6 @@ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
 DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
 DEFINE_SIMPLE_PROP(leds, "leds", NULL)
 DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
-DEFINE_SIMPLE_PROP(phy_handle, "phy-handle", NULL)
 DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
 DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
 
@@ -1380,7 +1379,6 @@ static const struct supplier_bindings of_supplier_bindings[] = {
        { .parse_prop = parse_resets, },
        { .parse_prop = parse_leds, },
        { .parse_prop = parse_backlight, },
-       { .parse_prop = parse_phy_handle, },
        { .parse_prop = parse_gpio_compat, },
        { .parse_prop = parse_interrupts, },
        { .parse_prop = parse_regulators, },
index 0c473d75e625c5bdd187d19f984b841803a6a656..43e615aa12ffac88e50733771f1737cb2e549525 100644 (file)
@@ -110,7 +110,7 @@ config PCI_PF_STUB
 
 config XEN_PCIDEV_FRONTEND
        tristate "Xen PCI Frontend"
-       depends on X86 && XEN
+       depends on XEN_PV
        select PCI_XEN
        select XEN_XENBUS_FRONTEND
        default y
index eaec915ffe62fac109579735c39b9c683e0826c2..67c46e52c0dc3812f4d7e65b14d661b62d824eef 100644 (file)
@@ -3301,9 +3301,17 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
                return 0;
 
        if (!keep_devs) {
-               /* Delete any children which might still exist. */
+               struct list_head removed;
+
+               /* Move all present children to the list on stack */
+               INIT_LIST_HEAD(&removed);
                spin_lock_irqsave(&hbus->device_list_lock, flags);
-               list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry) {
+               list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
+                       list_move_tail(&hpdev->list_entry, &removed);
+               spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+               /* Remove all children in the list */
+               list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
                        list_del(&hpdev->list_entry);
                        if (hpdev->pci_slot)
                                pci_destroy_slot(hpdev->pci_slot);
@@ -3311,7 +3319,6 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
                        put_pcichild(hpdev);
                        put_pcichild(hpdev);
                }
-               spin_unlock_irqrestore(&hbus->device_list_lock, flags);
        }
 
        ret = hv_send_resources_released(hdev);
index 014868752cd4dce46c9554862b50e78846bbf4ed..dcefdb42ac463d45dba6a32c80b0e075a4ab4752 100644 (file)
@@ -62,14 +62,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
        struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
                                             hotplug_slot);
 
-       switch (zdev->state) {
-       case ZPCI_FN_STATE_STANDBY:
-               *value = 0;
-               break;
-       default:
-               *value = 1;
-               break;
-       }
+       *value = zpci_is_device_configured(zdev) ? 1 : 0;
        return 0;
 }
 
index 0099a00af361ba657e04bb00cf8d9091f4154e3a..4b4792940e8691062affc2dd4868745c55b41fb8 100644 (file)
@@ -535,6 +535,7 @@ static int msi_verify_entries(struct pci_dev *dev)
 static int msi_capability_init(struct pci_dev *dev, int nvec,
                               struct irq_affinity *affd)
 {
+       const struct attribute_group **groups;
        struct msi_desc *entry;
        int ret;
 
@@ -558,12 +559,14 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
        if (ret)
                goto err;
 
-       dev->msi_irq_groups = msi_populate_sysfs(&dev->dev);
-       if (IS_ERR(dev->msi_irq_groups)) {
-               ret = PTR_ERR(dev->msi_irq_groups);
+       groups = msi_populate_sysfs(&dev->dev);
+       if (IS_ERR(groups)) {
+               ret = PTR_ERR(groups);
                goto err;
        }
 
+       dev->msi_irq_groups = groups;
+
        /* Set MSI enabled bits */
        pci_intx_for_msi(dev, 0);
        pci_msi_set_enable(dev, 1);
@@ -691,6 +694,7 @@ static void msix_mask_all(void __iomem *base, int tsize)
 static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
                                int nvec, struct irq_affinity *affd)
 {
+       const struct attribute_group **groups;
        void __iomem *base;
        int ret, tsize;
        u16 control;
@@ -730,12 +734,14 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
 
        msix_update_entries(dev, entries);
 
-       dev->msi_irq_groups = msi_populate_sysfs(&dev->dev);
-       if (IS_ERR(dev->msi_irq_groups)) {
-               ret = PTR_ERR(dev->msi_irq_groups);
+       groups = msi_populate_sysfs(&dev->dev);
+       if (IS_ERR(groups)) {
+               ret = PTR_ERR(groups);
                goto out_free;
        }
 
+       dev->msi_irq_groups = groups;
+
        /* Set MSI-X enabled bits and unmask the function */
        pci_intx_for_msi(dev, 0);
        dev->msix_enabled = 1;
index a1b1e2a0163299f7ecc78d382bed7df09a95f1fa..260a06fb78a6132b27ba839b082625e4c2e44e9f 100644 (file)
@@ -937,7 +937,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev);
 
 void pci_set_acpi_fwnode(struct pci_dev *dev)
 {
-       if (!ACPI_COMPANION(&dev->dev) && !pci_dev_is_added(dev))
+       if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
                ACPI_COMPANION_SET(&dev->dev,
                                   acpi_pci_find_companion(&dev->dev));
 }
@@ -1249,6 +1249,9 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
        bool check_children;
        u64 addr;
 
+       if (!dev->parent)
+               return NULL;
+
        down_read(&pci_acpi_companion_lookup_sem);
 
        adev = pci_acpi_find_companion_hook ?
index e5089af8ad90a00329adb01189c469e07da4b22c..4537d1ea14fdc034a4a0c72789ca8b745315c60b 100644 (file)
@@ -5435,7 +5435,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                              PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
 
 /*
- * Create device link for NVIDIA GPU with integrated USB xHCI Host
+ * Create device link for GPUs with integrated USB xHCI Host
  * controller to VGA.
  */
 static void quirk_gpu_usb(struct pci_dev *usb)
@@ -5444,9 +5444,11 @@ static void quirk_gpu_usb(struct pci_dev *usb)
 }
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                              PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+                             PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
 
 /*
- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
+ * Create device link for GPUs with integrated Type-C UCSI controller
  * to VGA. Currently there is no class code defined for UCSI device over PCI
  * so using UNKNOWN class for now and it will be updated when UCSI
  * over PCI gets a class code.
@@ -5459,6 +5461,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                              PCI_CLASS_SERIAL_UNKNOWN, 8,
                              quirk_gpu_usb_typec_ucsi);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+                             PCI_CLASS_SERIAL_UNKNOWN, 8,
+                             quirk_gpu_usb_typec_ucsi);
 
 /*
  * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
index 25557b272a4f9a630d174a664c015b85c74c137f..4be24890132ede060e4c12f3601864db4af867cc 100644 (file)
@@ -99,6 +99,24 @@ error:
        return off ?: PCI_VPD_SZ_INVALID;
 }
 
+static bool pci_vpd_available(struct pci_dev *dev)
+{
+       struct pci_vpd *vpd = &dev->vpd;
+
+       if (!vpd->cap)
+               return false;
+
+       if (vpd->len == 0) {
+               vpd->len = pci_vpd_size(dev);
+               if (vpd->len == PCI_VPD_SZ_INVALID) {
+                       vpd->cap = 0;
+                       return false;
+               }
+       }
+
+       return true;
+}
+
 /*
  * Wait for last operation to complete.
  * This code has to spin since there is no other notification from the PCI
@@ -145,7 +163,7 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
        loff_t end = pos + count;
        u8 *buf = arg;
 
-       if (!vpd->cap)
+       if (!pci_vpd_available(dev))
                return -ENODEV;
 
        if (pos < 0)
@@ -206,7 +224,7 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
        loff_t end = pos + count;
        int ret = 0;
 
-       if (!vpd->cap)
+       if (!pci_vpd_available(dev))
                return -ENODEV;
 
        if (pos < 0 || (pos & 3) || (count & 3))
@@ -242,14 +260,11 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
 
 void pci_vpd_init(struct pci_dev *dev)
 {
+       if (dev->vpd.len == PCI_VPD_SZ_INVALID)
+               return;
+
        dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
        mutex_init(&dev->vpd.lock);
-
-       if (!dev->vpd.len)
-               dev->vpd.len = pci_vpd_size(dev);
-
-       if (dev->vpd.len == PCI_VPD_SZ_INVALID)
-               dev->vpd.cap = 0;
 }
 
 static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
@@ -294,13 +309,14 @@ const struct attribute_group pci_dev_vpd_attr_group = {
 
 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size)
 {
-       unsigned int len = dev->vpd.len;
+       unsigned int len;
        void *buf;
        int cnt;
 
-       if (!dev->vpd.cap)
+       if (!pci_vpd_available(dev))
                return ERR_PTR(-ENODEV);
 
+       len = dev->vpd.len;
        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);
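
The VPD changes above move the size probe out of pci_vpd_init() into a lazy pci_vpd_available() check performed on the first read, write, or alloc. A small sketch of that lazy-sizing pattern (probe_vpd_size() and the constants below are placeholders, not the PCI core API):

#include <stdbool.h>
#include <stdio.h>

#define VPD_SZ_INVALID ((unsigned int)-1)	/* stand-in for PCI_VPD_SZ_INVALID */

struct vpd {
	bool cap;		/* capability advertised */
	unsigned int len;	/* 0 = not sized yet */
};

/* Placeholder for the expensive sizing work pci_vpd_size() does on first use. */
static unsigned int probe_vpd_size(void)
{
	return 256;
}

/* Lazy sizing on first access; drop the capability for good if sizing fails. */
static bool vpd_available(struct vpd *vpd)
{
	if (!vpd->cap)
		return false;
	if (vpd->len == 0) {
		vpd->len = probe_vpd_size();
		if (vpd->len == VPD_SZ_INVALID) {
			vpd->cap = false;
			return false;
		}
	}
	return true;
}

int main(void)
{
	struct vpd vpd = { .cap = true, .len = 0 };

	printf("first access:  %s, len=%u\n", vpd_available(&vpd) ? "ok" : "unavailable", vpd.len);
	printf("second access: %s, len=%u\n", vpd_available(&vpd) ? "ok" : "unavailable", vpd.len);
	return 0;
}
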
index 3cbc3baf087f3938d8bef85f79fb930d1a7f47f9..295cc7952d0edf9d622727357d1c3d607ac4aa0b 100644 (file)
@@ -952,6 +952,8 @@ int armpmu_register(struct arm_pmu *pmu)
                pmu->name, pmu->num_events,
                has_nmi ? ", using NMIs" : "");
 
+       kvm_host_pmu_init(pmu);
+
        return 0;
 
 out_destroy:
index a4ac87c8b4f8da380d1b04380e7a175351904c1b..5082102d7d0d973f8403b7c23d10a8fa3ad5f7a6 100644 (file)
@@ -2306,7 +2306,7 @@ EXPORT_SYMBOL_GPL(devm_pinctrl_register_and_init);
 
 /**
  * devm_pinctrl_unregister() - Resource managed version of pinctrl_unregister().
- * @dev: device for which which resource was allocated
+ * @dev: device for which resource was allocated
  * @pctldev: the pinctrl device to unregister.
  */
 void devm_pinctrl_unregister(struct device *dev, struct pinctrl_dev *pctldev)
index c001f2ed20f834fa84a05c47916fad967ec71843..8d0f88e9ca88f6f38972c7139b412df9268bffef 100644 (file)
@@ -445,6 +445,7 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
        u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3);
+       int err;
 
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
        pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
@@ -457,6 +458,15 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
        writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
+       if (on)
+               err = enable_irq_wake(gpio_dev->irq);
+       else
+               err = disable_irq_wake(gpio_dev->irq);
+
+       if (err)
+               dev_err(&gpio_dev->pdev->dev, "failed to %s wake-up interrupt\n",
+                       on ? "enable" : "disable");
+
        return 0;
 }
 
@@ -902,7 +912,6 @@ static struct pinctrl_desc amd_pinctrl_desc = {
 static int amd_gpio_probe(struct platform_device *pdev)
 {
        int ret = 0;
-       int irq_base;
        struct resource *res;
        struct amd_gpio *gpio_dev;
        struct gpio_irq_chip *girq;
@@ -925,9 +934,9 @@ static int amd_gpio_probe(struct platform_device *pdev)
        if (!gpio_dev->base)
                return -ENOMEM;
 
-       irq_base = platform_get_irq(pdev, 0);
-       if (irq_base < 0)
-               return irq_base;
+       gpio_dev->irq = platform_get_irq(pdev, 0);
+       if (gpio_dev->irq < 0)
+               return gpio_dev->irq;
 
 #ifdef CONFIG_PM_SLEEP
        gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins,
@@ -987,7 +996,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
                goto out2;
        }
 
-       ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler,
+       ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,
                               IRQF_SHARED, KBUILD_MODNAME, gpio_dev);
        if (ret)
                goto out2;
index 95e76342404222545efb3a721e60ba7c9ab8666a..1d431707365456af3df7cd6c6415a1a6ff44c89b 100644 (file)
@@ -98,6 +98,7 @@ struct amd_gpio {
        struct resource         *res;
        struct platform_device  *pdev;
        u32                     *saved_regs;
+       int                     irq;
 };
 
 /*  KERNCZ configuration*/
index ae33e376695fd75998aab478b8a1148cc95c51ba..5ce260f152ce568b32d0a80e66fd9aff59132f0d 100644 (file)
@@ -2092,6 +2092,23 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
        return false;
 }
 
+static int rockchip_pinconf_defer_output(struct rockchip_pin_bank *bank,
+                                        unsigned int pin, u32 arg)
+{
+       struct rockchip_pin_output_deferred *cfg;
+
+       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+       if (!cfg)
+               return -ENOMEM;
+
+       cfg->pin = pin;
+       cfg->arg = arg;
+
+       list_add_tail(&cfg->head, &bank->deferred_output);
+
+       return 0;
+}
+
 /* set the pin config settings for a specified pin */
 static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                                unsigned long *configs, unsigned num_configs)
@@ -2136,6 +2153,22 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                        if (rc != RK_FUNC_GPIO)
                                return -EINVAL;
 
+                       /*
+                        * Check for gpio driver not being probed yet.
+                        * The lock makes sure that either gpio-probe has completed
+                        * or the gpio driver hasn't probed yet.
+                        */
+                       mutex_lock(&bank->deferred_lock);
+                       if (!gpio || !gpio->direction_output) {
+                               rc = rockchip_pinconf_defer_output(bank, pin - bank->pin_base, arg);
+                               mutex_unlock(&bank->deferred_lock);
+                               if (rc)
+                                       return rc;
+
+                               break;
+                       }
+                       mutex_unlock(&bank->deferred_lock);
+
                        rc = gpio->direction_output(gpio, pin - bank->pin_base,
                                                    arg);
                        if (rc)
@@ -2204,6 +2237,11 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
                if (rc != RK_FUNC_GPIO)
                        return -EINVAL;
 
+               if (!gpio || !gpio->get) {
+                       arg = 0;
+                       break;
+               }
+
                rc = gpio->get(gpio, pin - bank->pin_base);
                if (rc < 0)
                        return rc;
@@ -2450,6 +2488,9 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
                                                pin_bank->name, pin);
                        pdesc++;
                }
+
+               INIT_LIST_HEAD(&pin_bank->deferred_output);
+               mutex_init(&pin_bank->deferred_lock);
        }
 
        ret = rockchip_pinctrl_parse_dt(pdev, info);
@@ -2716,6 +2757,31 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
        return 0;
 }
 
+static int rockchip_pinctrl_remove(struct platform_device *pdev)
+{
+       struct rockchip_pinctrl *info = platform_get_drvdata(pdev);
+       struct rockchip_pin_bank *bank;
+       struct rockchip_pin_output_deferred *cfg;
+       int i;
+
+       of_platform_depopulate(&pdev->dev);
+
+       for (i = 0; i < info->ctrl->nr_banks; i++) {
+               bank = &info->ctrl->pin_banks[i];
+
+               mutex_lock(&bank->deferred_lock);
+               while (!list_empty(&bank->deferred_output)) {
+                       cfg = list_first_entry(&bank->deferred_output,
+                                              struct rockchip_pin_output_deferred, head);
+                       list_del(&cfg->head);
+                       kfree(cfg);
+               }
+               mutex_unlock(&bank->deferred_lock);
+       }
+
+       return 0;
+}
+
 static struct rockchip_pin_bank px30_pin_banks[] = {
        PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU,
                                             IOMUX_SOURCE_PMU,
@@ -3175,6 +3241,7 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
 
 static struct platform_driver rockchip_pinctrl_driver = {
        .probe          = rockchip_pinctrl_probe,
+       .remove         = rockchip_pinctrl_remove,
        .driver = {
                .name   = "rockchip-pinctrl",
                .pm = &rockchip_pinctrl_dev_pm_ops,
index 589d4d2a98c9ef788f1216e337c822bfdb4bb661..91f10279d08442d1a4c441131649bea211771f74 100644 (file)
@@ -141,6 +141,8 @@ struct rockchip_drv {
  * @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode
  * @recalced_mask: bit mask to indicate a need to recalculate the mask
  * @route_mask: bits describing the routing pins of per bank
+ * @deferred_output: gpio output settings to be done after gpio bank probed
+ * @deferred_lock: mutex for the deferred_output list shared between gpio and pinctrl
  */
 struct rockchip_pin_bank {
        struct device                   *dev;
@@ -169,6 +171,8 @@ struct rockchip_pin_bank {
        u32                             toggle_edge_mode;
        u32                             recalced_mask;
        u32                             route_mask;
+       struct list_head                deferred_output;
+       struct mutex                    deferred_lock;
 };
 
 /**
@@ -243,6 +247,12 @@ struct rockchip_pin_config {
        unsigned int            nconfigs;
 };
 
+struct rockchip_pin_output_deferred {
+       struct list_head head;
+       unsigned int pin;
+       u32 arg;
+};
+
 /**
  * struct rockchip_pin_group: represent group of pins of a pinmux function.
  * @name: name of the pin group, used to lookup the group.
index 32ea2a8ec02b54a1b9323cd5cc6c1663403f774a..5ff4207df66e1d5f98ed0f1b59a1c867a2a7928e 100644 (file)
@@ -3,7 +3,8 @@ if (ARCH_QCOM || COMPILE_TEST)
 
 config PINCTRL_MSM
        tristate "Qualcomm core pin controller driver"
-       depends on GPIOLIB && (QCOM_SCM || !QCOM_SCM) #if QCOM_SCM=m this can't be =y
+       depends on GPIOLIB
+       select QCOM_SCM
        select PINMUX
        select PINCONF
        select GENERIC_PINCONF
index afddf6d60dbe650c60d92ad8ed98230da5b57379..9017ede409c9c7a3bd5410e2c9ccedaaf94dc525 100644 (file)
@@ -1496,6 +1496,7 @@ static const struct of_device_id sc7280_pinctrl_of_match[] = {
 static struct platform_driver sc7280_pinctrl_driver = {
        .driver = {
                .name = "sc7280-pinctrl",
+               .pm = &msm_pinctrl_dev_pm_ops,
                .of_match_table = sc7280_pinctrl_of_match,
        },
        .probe = sc7280_pinctrl_probe,
index 98bf0e2a2a8da5141271133ea9ac434daf0db524..b2562e8931397e594b9fe67f04b512202a68e381 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2016-2021 The Linux Foundation. All rights reserved.
  */
 
 #include <linux/gpio/driver.h>
@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
+#include <linux/spmi.h>
 #include <linux/types.h>
 
 #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
@@ -171,6 +172,8 @@ struct pmic_gpio_state {
        struct pinctrl_dev *ctrl;
        struct gpio_chip chip;
        struct irq_chip irq;
+       u8 usid;
+       u8 pid_base;
 };
 
 static const struct pinconf_generic_params pmic_gpio_bindings[] = {
@@ -949,12 +952,36 @@ static int pmic_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
                                           unsigned int *parent_hwirq,
                                           unsigned int *parent_type)
 {
-       *parent_hwirq = child_hwirq + 0xc0;
+       struct pmic_gpio_state *state = gpiochip_get_data(chip);
+
+       *parent_hwirq = child_hwirq + state->pid_base;
        *parent_type = child_type;
 
        return 0;
 }
 
+static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+                                            unsigned int parent_hwirq,
+                                            unsigned int parent_type)
+{
+       struct pmic_gpio_state *state = gpiochip_get_data(chip);
+       struct irq_fwspec *fwspec;
+
+       fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
+       if (!fwspec)
+               return NULL;
+
+       fwspec->fwnode = chip->irq.parent_domain->fwnode;
+
+       fwspec->param_count = 4;
+       fwspec->param[0] = state->usid;
+       fwspec->param[1] = parent_hwirq;
+       /* param[2] must be left as 0 */
+       fwspec->param[3] = parent_type;
+
+       return fwspec;
+}
+
 static int pmic_gpio_probe(struct platform_device *pdev)
 {
        struct irq_domain *parent_domain;
@@ -965,6 +992,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
        struct pmic_gpio_pad *pad, *pads;
        struct pmic_gpio_state *state;
        struct gpio_irq_chip *girq;
+       const struct spmi_device *parent_spmi_dev;
        int ret, npins, i;
        u32 reg;
 
@@ -984,6 +1012,9 @@ static int pmic_gpio_probe(struct platform_device *pdev)
 
        state->dev = &pdev->dev;
        state->map = dev_get_regmap(dev->parent, NULL);
+       parent_spmi_dev = to_spmi_device(dev->parent);
+       state->usid = parent_spmi_dev->usid;
+       state->pid_base = reg >> 8;
 
        pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
        if (!pindesc)
@@ -1059,7 +1090,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
        girq->fwnode = of_node_to_fwnode(state->dev->of_node);
        girq->parent_domain = parent_domain;
        girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;
-       girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
+       girq->populate_parent_alloc_arg = pmic_gpio_populate_parent_fwspec;
        girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq;
        girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate;
 
index 7646708d57e42bdc0ae57d2f0cdbf41bcc1a2fc2..a916cd89cbbeddf99272f26012a790d43ce08f7b 100644 (file)
@@ -98,7 +98,7 @@ mlxreg_io_get_reg(void *regmap, struct mlxreg_core_data *data, u32 in_val,
                        if (ret)
                                goto access_error;
 
-                       *regval |= rol32(val, regsize * i);
+                       *regval |= rol32(val, regsize * i * 8);
                }
        }
 
@@ -141,7 +141,7 @@ mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        /* Convert buffer to input value. */
-       ret = kstrtou32(buf, len, &input_val);
+       ret = kstrtou32(buf, 0, &input_val);
        if (ret)
                return ret;
 
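
Editor's note: the mlxreg_io_get_reg() and mlxreg_io_attr_store() hunks fix two unit mistakes: the rotate amount used to assemble a wide value from byte-wide register reads must be in bits rather than bytes, and kstrtou32() takes a numeric base (0 = auto-detect) rather than the buffer length as its second argument. Below is a user-space sketch of the bit-shift part, with a local rol32() standing in for the kernel helper; the byte values are made up.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's rol32() helper. */
static uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((32 - shift) & 31));
}

int main(void)
{
	/* Pretend each hardware access returned one byte of a 32-bit value. */
	uint32_t bytes[4] = { 0x11, 0x22, 0x33, 0x44 };
	unsigned int regsize = 1;           /* byte-wide registers */
	uint32_t regval = 0;

	for (unsigned int i = 0; i < 4; i++)
		regval |= rol32(bytes[i], regsize * i * 8);   /* shift in bits */

	printf("0x%08x\n", regval);         /* 0x44332211 */
	return 0;
}
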
index 3481479a2942fd8010be5e929c39586b20a7349c..fc95620101e85bf21b1201fb897c5098b3066ffd 100644 (file)
@@ -71,7 +71,7 @@
 #define AMD_CPU_ID_YC                  0x14B5
 
 #define PMC_MSG_DELAY_MIN_US           100
-#define RESPONSE_REGISTER_LOOP_MAX     200
+#define RESPONSE_REGISTER_LOOP_MAX     20000
 
 #define SOC_SUBSYSTEM_IP_MAX   12
 #define DELAY_MIN_US           2000
@@ -476,6 +476,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
        {"AMDI0006", 0},
        {"AMDI0007", 0},
        {"AMD0004", 0},
+       {"AMD0005", 0},
        { }
 };
 MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids);
index 821aba31821cc7a4c8281e84fea9b5b1e624fc92..2fffa57e596e4191c68301e5244518bd2474b759 100644 (file)
@@ -166,8 +166,8 @@ config DELL_WMI
 
 config DELL_WMI_PRIVACY
        bool "Dell WMI Hardware Privacy Support"
+       depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
        depends on DELL_WMI
-       depends on LEDS_TRIGGER_AUDIO
        help
          This option adds integration with the "Dell Hardware Privacy"
          feature of Dell laptops to the dell-wmi driver.
index 7f3a03f937f66564f6af5dec899eaf728f5eee18..658bab4b79648b7cdbcb9455c6053e2c322b0c8f 100644 (file)
@@ -141,9 +141,11 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
 
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
index a33a5826e81a144d42f133c48deb3fc70c54efcc..08598942a6d780388286ef501be45caa851625e5 100644 (file)
@@ -118,12 +118,30 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
        { }
 };
 
+/*
+ * Some devices, even non convertible ones, can send incorrect SW_TABLET_MODE
+ * reports. Accept such reports only from devices in this list.
+ */
+static const struct dmi_system_id dmi_auto_add_switch[] = {
+       {
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+               },
+       },
+       {
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+               },
+       },
+       {} /* Array terminator */
+};
+
 struct intel_hid_priv {
        struct input_dev *input_dev;
        struct input_dev *array;
        struct input_dev *switches;
        bool wakeup_mode;
-       bool dual_accel;
+       bool auto_add_switch;
 };
 
 #define HID_EVENT_FILTER_UUID  "eeec56b3-4442-408f-a792-4edd4d758054"
@@ -452,10 +470,8 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
         * Some convertible have unreliable VGBS return which could cause incorrect
         * SW_TABLET_MODE report, in these cases we enable support when receiving
         * the first event instead of during driver setup.
-        *
-        * See dual_accel_detect.h for more info on the dual_accel check.
         */
-       if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
+       if (!priv->switches && priv->auto_add_switch && (event == 0xcc || event == 0xcd)) {
                dev_info(&device->dev, "switch event received, enable switches supports\n");
                err = intel_hid_switches_setup(device);
                if (err)
@@ -596,7 +612,8 @@ static int intel_hid_probe(struct platform_device *device)
                return -ENOMEM;
        dev_set_drvdata(&device->dev, priv);
 
-       priv->dual_accel = dual_accel_detect();
+       /* See dual_accel_detect.h for more info on the dual_accel check. */
+       priv->auto_add_switch = dmi_check_system(dmi_auto_add_switch) && !dual_accel_detect();
 
        err = intel_hid_input_setup(device);
        if (err) {
index 379560fe5df96db2fad30d809e2d94db6730e75d..e03943e6380a6dfec2438c379d27674f0d69293b 100644 (file)
@@ -42,12 +42,20 @@ static void update_sar_data(struct wwan_sar_context *context)
 
        if (config->device_mode_info &&
            context->sar_data.device_mode < config->total_dev_mode) {
-               struct wwan_device_mode_info *dev_mode =
-                       &config->device_mode_info[context->sar_data.device_mode];
-
-               context->sar_data.antennatable_index = dev_mode->antennatable_index;
-               context->sar_data.bandtable_index = dev_mode->bandtable_index;
-               context->sar_data.sartable_index = dev_mode->sartable_index;
+               int itr = 0;
+
+               for (itr = 0; itr < config->total_dev_mode; itr++) {
+                       if (context->sar_data.device_mode ==
+                               config->device_mode_info[itr].device_mode) {
+                               struct wwan_device_mode_info *dev_mode =
+                               &config->device_mode_info[itr];
+
+                               context->sar_data.antennatable_index = dev_mode->antennatable_index;
+                               context->sar_data.bandtable_index = dev_mode->bandtable_index;
+                               context->sar_data.sartable_index = dev_mode->sartable_index;
+                               break;
+                       }
+               }
        }
 }
 
@@ -305,7 +313,6 @@ static struct platform_driver sar_driver = {
        .remove = sar_remove,
        .driver = {
                .name = DRVNAME,
-               .owner = THIS_MODULE,
                .acpi_match_table = ACPI_PTR(sar_device_ids)
        }
 };
@@ -313,4 +320,4 @@ module_platform_driver(sar_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Platform device driver for INTEL MODEM BIOS SAR");
-MODULE_AUTHOR("Shravan S <s.shravan@intel.com>");
+MODULE_AUTHOR("Shravan Sudhakar <s.shravan@intel.com>");
index 9fe0a2527e1cd2c5d449d7f2468c9c7b81177b61..e59d79c7e82f866f665bdcd6932f48a01ee7a878 100644 (file)
@@ -401,7 +401,7 @@ int skl_int3472_discrete_remove(struct platform_device *pdev)
 
        gpiod_remove_lookup_table(&int3472->gpios);
 
-       if (int3472->clock.ena_gpio)
+       if (int3472->clock.cl)
                skl_int3472_unregister_clock(int3472);
 
        gpiod_put(int3472->clock.ena_gpio);
index f58b8543f6ac576243bc1734f3a0ba52a7107dac..66bb39fd0ef9022988f7e8ffbe63944acc53f939 100644 (file)
@@ -8,7 +8,6 @@
  * which provide mailbox interface for power management usage.
  */
 
-#include <linux/acpi.h>
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -319,7 +318,7 @@ static struct platform_driver intel_punit_ipc_driver = {
        .remove = intel_punit_ipc_remove,
        .driver = {
                .name = "intel_punit_ipc",
-               .acpi_match_table = ACPI_PTR(punit_ipc_acpi_ids),
+               .acpi_match_table = punit_ipc_acpi_ids,
        },
 };
 
index bfa0cc20750d3621996142917f62c6cf7dee48f2..7cc9089d1e14f4c16e332321ecf24327aaeb9268 100644 (file)
@@ -75,7 +75,7 @@ struct intel_scu_ipc_dev {
 #define IPC_READ_BUFFER                0x90
 
 /* Timeout in jiffies */
-#define IPC_TIMEOUT            (5 * HZ)
+#define IPC_TIMEOUT            (10 * HZ)
 
 static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
 static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
@@ -232,7 +232,7 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
 /* Wait till scu status is busy */
 static inline int busy_loop(struct intel_scu_ipc_dev *scu)
 {
-       unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT);
+       unsigned long end = jiffies + IPC_TIMEOUT;
 
        do {
                u32 status;
@@ -247,7 +247,7 @@ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
        return -ETIMEDOUT;
 }
 
-/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */
+/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
 static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
 {
        int status;
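
Editor's note: besides doubling IPC_TIMEOUT, the busy_loop() hunk removes a unit bug: IPC_TIMEOUT is already expressed in jiffies (10 * HZ), so wrapping it in msecs_to_jiffies() re-converted it as if it were milliseconds and made the effective timeout depend on HZ. A user-space arithmetic sketch, with HZ set to an example value and a simplified conversion helper:

#include <stdio.h>

#define HZ 250                              /* example tick rate */
#define IPC_TIMEOUT (10 * HZ)               /* already in jiffies */

/* Simplified stand-in for the kernel's msecs_to_jiffies(). */
static unsigned long msecs_to_jiffies(unsigned long ms)
{
	return ms * HZ / 1000;
}

int main(void)
{
	printf("intended timeout: %d jiffies (%d s)\n",
	       IPC_TIMEOUT, IPC_TIMEOUT / HZ);
	printf("double-converted: %lu jiffies (%lu s)\n",
	       msecs_to_jiffies(IPC_TIMEOUT), msecs_to_jiffies(IPC_TIMEOUT) / HZ);
	return 0;
}

With HZ=250 the double conversion shrinks the intended 10-second wait to 2 seconds, which is the behaviour the patch removes.
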
index 3e520d5bca07fb84fb0be9a81b67a2f0c9f37ba8..88b551caeaaf41590178336b2f10e4bf003dd587 100644 (file)
@@ -655,7 +655,7 @@ static int acpi_add(struct acpi_device *device)
                goto out_platform_registered;
        }
        product = dmi_get_system_info(DMI_PRODUCT_NAME);
-       if (strlen(product) > 4)
+       if (product && strlen(product) > 4)
                switch (product[4]) {
                case '5':
                case '6':
index 0e1451b1d9c6cde93ee3f95e5af26bd6edcd85e3..033f797861d8a1b1649ad7e10140875952b74ef6 100644 (file)
@@ -100,10 +100,10 @@ static const struct ts_dmi_data chuwi_hi10_air_data = {
 };
 
 static const struct property_entry chuwi_hi10_plus_props[] = {
-       PROPERTY_ENTRY_U32("touchscreen-min-x", 0),
-       PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
-       PROPERTY_ENTRY_U32("touchscreen-size-x", 1914),
-       PROPERTY_ENTRY_U32("touchscreen-size-y", 1283),
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 12),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1908),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
@@ -111,6 +111,15 @@ static const struct property_entry chuwi_hi10_plus_props[] = {
 };
 
 static const struct ts_dmi_data chuwi_hi10_plus_data = {
+       .embedded_fw = {
+               .name   = "silead/gsl1680-chuwi-hi10plus.fw",
+               .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+               .length = 34056,
+               .sha256 = { 0xfd, 0x0a, 0x08, 0x08, 0x3c, 0xa6, 0x34, 0x4e,
+                           0x2c, 0x49, 0x9c, 0xcd, 0x7d, 0x44, 0x9d, 0x38,
+                           0x10, 0x68, 0xb5, 0xbd, 0xb7, 0x2a, 0x63, 0xb5,
+                           0x67, 0x0b, 0x96, 0xbd, 0x89, 0x67, 0x85, 0x09 },
+       },
        .acpi_name      = "MSSL0017:00",
        .properties     = chuwi_hi10_plus_props,
 };
@@ -141,6 +150,33 @@ static const struct ts_dmi_data chuwi_hi10_pro_data = {
        .properties     = chuwi_hi10_pro_props,
 };
 
+static const struct property_entry chuwi_hibook_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 30),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 4),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1892),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1276),
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+       PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
+       { }
+};
+
+static const struct ts_dmi_data chuwi_hibook_data = {
+       .embedded_fw = {
+               .name   = "silead/gsl1680-chuwi-hibook.fw",
+               .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+               .length = 40392,
+               .sha256 = { 0xf7, 0xc0, 0xe8, 0x5a, 0x6c, 0xf2, 0xeb, 0x8d,
+                           0x12, 0xc4, 0x45, 0xbf, 0x55, 0x13, 0x4c, 0x1a,
+                           0x13, 0x04, 0x31, 0x08, 0x65, 0x73, 0xf7, 0xa8,
+                           0x1b, 0x7d, 0x59, 0xc9, 0xe6, 0x97, 0xf7, 0x38 },
+       },
+       .acpi_name      = "MSSL0017:00",
+       .properties     = chuwi_hibook_props,
+};
+
 static const struct property_entry chuwi_vi8_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
        PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
@@ -979,6 +1015,16 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
                },
        },
+       {
+               /* Chuwi HiBook (CWI514) */
+               .driver_data = (void *)&chuwi_hibook_data,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+                       DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+                       /* Above matches are too generic, add bios-date match */
+                       DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+               },
+       },
        {
                /* Chuwi Vi8 (CWI506) */
                .driver_data = (void *)&chuwi_vi8_data,
index f02bedf412644e01b72730e62dde8a09e9d2038a..458218f88c5eb9cb80cb39864fbfb3f7384ce4b4 100644 (file)
@@ -174,6 +174,7 @@ config PTP_1588_CLOCK_OCP
        depends on I2C && MTD
        depends on SERIAL_8250
        depends on !S390
+       depends on COMMON_CLK
        select NET_DEVLINK
        help
          This driver adds support for an OpenCompute time card.
index 3dd519dfc473c7719da1e7f35f532ac548012473..d0096cd7096a8192b92ae356279f6de37edf93b0 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/ptp_kvm.h>
 
-struct pvclock_vsyscall_time_info *hv_clock;
-
 static phys_addr_t clock_pair_gpa;
 static struct kvm_clock_pairing clock_pair;
 
@@ -28,8 +26,7 @@ int kvm_arch_ptp_init(void)
                return -ENODEV;
 
        clock_pair_gpa = slow_virt_to_phys(&clock_pair);
-       hv_clock = pvclock_get_pvti_cpu0_va();
-       if (!hv_clock)
+       if (!pvclock_get_pvti_cpu0_va())
                return -ENODEV;
 
        ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
@@ -64,10 +61,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
        struct pvclock_vcpu_time_info *src;
        unsigned int version;
        long ret;
-       int cpu;
 
-       cpu = smp_processor_id();
-       src = &hv_clock[cpu].pvti;
+       src = this_cpu_pvti();
 
        do {
                /*
index a17e8cc642c5f2ca5b1c43ef4b142f2bbd566f2c..8070f3fd98f01d0de87f6dfc87ac87fde7d28446 100644 (file)
@@ -644,6 +644,7 @@ static const struct pci_device_id pch_ieee1588_pcidev_id[] = {
         },
        {0}
 };
+MODULE_DEVICE_TABLE(pci, pch_ieee1588_pcidev_id);
 
 static SIMPLE_DEV_PM_OPS(pch_pm_ops, pch_suspend, pch_resume);
 
index 1d78b455cc48cb72d1e3f508e778d3c5d6fe5b65..e34face736f4876a2695ee610c810f7514cb4511 100644 (file)
@@ -269,5 +269,3 @@ module_exit(max14577_regulator_exit);
 MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
 MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:max14577-regulator");
-MODULE_ALIAS("platform:max77836-regulator");
index 6cca910a76ded1a3e337c756d1333f5f8cfe1b69..7f458d510483f9f394cb62264b7d2889998f3ff0 100644 (file)
@@ -991,7 +991,7 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
        RPMH_VREG("ldo4",   "ldo%s4",  &pmic5_nldo,      "vdd-l4"),
        RPMH_VREG("ldo5",   "ldo%s5",  &pmic5_pldo,      "vdd-l5-l6"),
        RPMH_VREG("ldo6",   "ldo%s6",  &pmic5_pldo,      "vdd-l5-l6"),
-       RPMH_VREG("ldo7",   "ldo%s6",  &pmic5_pldo_lv,   "vdd-l7"),
+       RPMH_VREG("ldo7",   "ldo%s7",  &pmic5_pldo_lv,   "vdd-l7"),
        {}
 };
 
index eb15067a605eff35cb11856439683963512a1cfa..4eb53412b8085e12d668c2aef3b215b939094143 100644 (file)
@@ -1047,7 +1047,9 @@ static void cmos_check_wkalrm(struct device *dev)
         * ACK the rtc irq here
         */
        if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) {
+               local_irq_disable();
                cmos_interrupt(0, (void *)cmos->rtc);
+               local_irq_enable();
                return;
        }
 
index 2f3515fa242a3dc18284bd2b321c7b76ae679ad6..f3d5c7f4c13d29aa48be077cc9e03456316cdb1a 100644 (file)
@@ -45,13 +45,14 @@ static void __init sclp_early_facilities_detect(void)
        sclp.has_gisaf = !!(sccb->fac118 & 0x08);
        sclp.has_hvs = !!(sccb->fac119 & 0x80);
        sclp.has_kss = !!(sccb->fac98 & 0x01);
-       sclp.has_sipl = !!(sccb->cbl & 0x4000);
        if (sccb->fac85 & 0x02)
                S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
        if (sccb->fac91 & 0x40)
                S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
        if (sccb->cpuoff > 134)
                sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
+       if (sccb->cpuoff > 137)
+               sclp.has_sipl = !!(sccb->cbl & 0x4000);
        sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
        sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
        sclp.rzm <<= 20;
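
Editor's note: the sclp_early_facilities_detect() hunk reads sccb->cbl (and sets has_sipl) only when the reported cpuoff shows the response is long enough to contain that field, mirroring the existing byte_134 check. A loose stand-alone analogy of the pattern, with a hypothetical length-prefixed response:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical response: a length field followed by optional payload bytes. */
struct resp {
	uint8_t len;            /* number of valid bytes in data[] */
	uint8_t data[8];
};

int main(void)
{
	struct resp r = { .len = 3 };

	memcpy(r.data, "\x01\x02\x03", 3);

	/* Only touch data[4] if the sender says it is present. */
	if (r.len > 4)
		printf("optional field: %d\n", r.data[4]);
	else
		printf("optional field absent, using default\n");
	return 0;
}
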
index f3c656975e054ec5ce0d2ec95ab7ee7d9aae3feb..93695d535380bcc08442c31376d14c4c5084e508 100644 (file)
@@ -262,10 +262,12 @@ static int blacklist_parse_proc_parameters(char *buf)
 
        if (strcmp("free", parm) == 0) {
                rc = blacklist_parse_parameters(buf, free, 0);
-               /* There could be subchannels without proper devices connected.
-                * evaluate all the entries
+               /*
+                * Evaluate the subchannels without an online device. This way,
+                * no path-verification will be triggered on those subchannels
+                * and it avoids unnecessary delays.
                 */
-               css_schedule_eval_all();
+               css_schedule_eval_cond(CSS_EVAL_NOT_ONLINE, 0);
        } else if (strcmp("add", parm) == 0)
                rc = blacklist_parse_parameters(buf, add, 0);
        else if (strcmp("purge", parm) == 0)
index 2ec741106cb67ef9e4d140a2d16473a4f71bccdd..f0538609dfe429b6caec78c766116de6a8b97d8c 100644 (file)
@@ -77,12 +77,13 @@ EXPORT_SYMBOL(ccwgroup_set_online);
 /**
  * ccwgroup_set_offline() - disable a ccwgroup device
  * @gdev: target ccwgroup device
+ * @call_gdrv: Call the registered gdrv set_offline function
  *
  * This function attempts to put the ccwgroup device into the offline state.
  * Returns:
  *  %0 on success and a negative error value on failure.
  */
-int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv)
 {
        struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
        int ret = -EINVAL;
@@ -91,11 +92,16 @@ int ccwgroup_set_offline(struct ccwgroup_device *gdev)
                return -EAGAIN;
        if (gdev->state == CCWGROUP_OFFLINE)
                goto out;
+       if (!call_gdrv) {
+               ret = 0;
+               goto offline;
+       }
        if (gdrv->set_offline)
                ret = gdrv->set_offline(gdev);
        if (ret)
                goto out;
 
+offline:
        gdev->state = CCWGROUP_OFFLINE;
 out:
        atomic_set(&gdev->onoff, 0);
@@ -124,7 +130,7 @@ static ssize_t ccwgroup_online_store(struct device *dev,
        if (value == 1)
                ret = ccwgroup_set_online(gdev);
        else if (value == 0)
-               ret = ccwgroup_set_offline(gdev);
+               ret = ccwgroup_set_offline(gdev, true);
        else
                ret = -EINVAL;
 out:
index 3377097e65de68a0f3c9c9422846fde18e444452..44461928aab8afd3b3042a31c2640d39dec931ee 100644 (file)
@@ -788,27 +788,49 @@ static int __unset_registered(struct device *dev, void *data)
        return 0;
 }
 
-void css_schedule_eval_all_unreg(unsigned long delay)
+static int __unset_online(struct device *dev, void *data)
+{
+       struct idset *set = data;
+       struct subchannel *sch = to_subchannel(dev);
+       struct ccw_device *cdev = sch_get_cdev(sch);
+
+       if (cdev && cdev->online)
+               idset_sch_del(set, sch->schid);
+
+       return 0;
+}
+
+void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
 {
        unsigned long flags;
-       struct idset *unreg_set;
+       struct idset *set;
 
        /* Find unregistered subchannels. */
-       unreg_set = idset_sch_new();
-       if (!unreg_set) {
+       set = idset_sch_new();
+       if (!set) {
                /* Fallback. */
                css_schedule_eval_all();
                return;
        }
-       idset_fill(unreg_set);
-       bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+       idset_fill(set);
+       switch (cond) {
+       case CSS_EVAL_UNREG:
+               bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
+               break;
+       case CSS_EVAL_NOT_ONLINE:
+               bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
+               break;
+       default:
+               break;
+       }
+
        /* Apply to slow_subchannel_set. */
        spin_lock_irqsave(&slow_subchannel_lock, flags);
-       idset_add_set(slow_subchannel_set, unreg_set);
+       idset_add_set(slow_subchannel_set, set);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, delay);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-       idset_free(unreg_set);
+       idset_free(set);
 }
 
 void css_wait_for_slow_path(void)
@@ -820,7 +842,7 @@ void css_wait_for_slow_path(void)
 void css_schedule_reprobe(void)
 {
        /* Schedule with a delay to allow merging of subsequent calls. */
-       css_schedule_eval_all_unreg(1 * HZ);
+       css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
 }
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
index c98522cbe276f13019f990e03ab015a039e46747..ede0b905bc6ff7c85c592287889c345ce69d3d15 100644 (file)
 #define SNID_STATE3_MULTI_PATH    1
 #define SNID_STATE3_SINGLE_PATH           0
 
+/*
+ * Conditions used to specify which subchannels need evaluation
+ */
+enum css_eval_cond {
+       CSS_EVAL_UNREG,         /* unregistered subchannels */
+       CSS_EVAL_NOT_ONLINE     /* sch without an online-device */
+};
+
 struct path_state {
        __u8  state1 : 2;       /* path state value 1 */
        __u8  state2 : 2;       /* path state value 2 */
@@ -136,7 +144,7 @@ static inline struct channel_subsystem *css_by_id(u8 cssid)
 /* Helper functions to build lists for the slow path. */
 void css_schedule_eval(struct subchannel_id schid);
 void css_schedule_eval_all(void);
-void css_schedule_eval_all_unreg(unsigned long delay);
+void css_schedule_eval_cond(enum css_eval_cond, unsigned long delay);
 int css_complete_work(void);
 
 int sch_is_pseudo_sch(struct subchannel *);
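
Editor's note: the cio hunks generalize css_schedule_eval_all_unreg() into css_schedule_eval_cond(), which fills an ID set and then prunes it with a per-condition callback selected by the new css_eval_cond enum. A compact stand-alone illustration of that "select a predicate by enum, then filter" pattern, with invented device records (not the kernel data structures):

#include <stdbool.h>
#include <stdio.h>

enum eval_cond { EVAL_UNREG, EVAL_NOT_ONLINE };     /* mirrors the new enum */

struct dev { int id; bool registered; bool online; };

/* Decide whether a device still needs evaluation for this condition. */
static bool needs_eval(const struct dev *d, enum eval_cond cond)
{
	switch (cond) {
	case EVAL_UNREG:
		return !d->registered;
	case EVAL_NOT_ONLINE:
		return !d->online;
	}
	return false;
}

int main(void)
{
	struct dev devs[] = {
		{ 0, true,  true  },
		{ 1, true,  false },
		{ 2, false, false },
	};

	for (unsigned int i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
		if (needs_eval(&devs[i], EVAL_NOT_ONLINE))
			printf("evaluate subchannel %d\n", devs[i].id);
	return 0;
}
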
index f433428057d9ce1e8461a6aba742a9d66e50ab83..d9b804943d1923a60d187f3bb733c6405d7652b4 100644 (file)
@@ -213,7 +213,6 @@ static inline int ap_fetch_qci_info(struct ap_config_info *info)
  * ap_init_qci_info(): Allocate and query qci config info.
  * Does also update the static variables ap_max_domain_id
  * and ap_max_adapter_id if this info is available.
-
  */
 static void __init ap_init_qci_info(void)
 {
@@ -439,6 +438,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
 /**
  * ap_interrupt_handler() - Schedule ap_tasklet on interrupt
  * @airq: pointer to adapter interrupt descriptor
+ * @floating: ignored
  */
 static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
 {
@@ -1786,6 +1786,7 @@ static inline void ap_scan_adapter(int ap)
 /**
  * ap_scan_bus(): Scan the AP bus for new devices
  * Runs periodically, workqueue timer (ap_config_time)
+ * @unused: Unused pointer.
  */
 static void ap_scan_bus(struct work_struct *unused)
 {
index d70c4d3d0907f385a1042fc5158ad74cbc1afea6..9ea48bf0ee40d18b33df24c451333b561c738434 100644 (file)
@@ -20,7 +20,7 @@ static void __ap_flush_queue(struct ap_queue *aq);
 
 /**
  * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
  * @ind: the notification indicator byte
  *
  * Enables interruption on AP queue via ap_aqic(). Based on the return
@@ -311,7 +311,7 @@ static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
 
 /**
  * ap_sm_reset(): Reset an AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
  *
  * Submit the Reset command to an AP queue.
  */
index 118939a7729a1e46176e440a9975cc92642937da..623d5269a52ce595fdbeac56d845a917beb7a54b 100644 (file)
@@ -361,6 +361,7 @@ err_list:
        mutex_lock(&matrix_dev->lock);
        list_del(&matrix_mdev->node);
        mutex_unlock(&matrix_dev->lock);
+       vfio_uninit_group_dev(&matrix_mdev->vdev);
        kfree(matrix_mdev);
 err_dec_available:
        atomic_inc(&matrix_dev->available_instances);
@@ -376,9 +377,10 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
        mutex_lock(&matrix_dev->lock);
        vfio_ap_mdev_reset_queues(matrix_mdev);
        list_del(&matrix_mdev->node);
+       mutex_unlock(&matrix_dev->lock);
+       vfio_uninit_group_dev(&matrix_mdev->vdev);
        kfree(matrix_mdev);
        atomic_inc(&matrix_dev->available_instances);
-       mutex_unlock(&matrix_dev->lock);
 }
 
 static ssize_t name_show(struct mdev_type *mtype,
index 535a60b3946d4a02c41187aa646210d83bc83195..a5aa0bdc61d69587eb2e2e7becb6c2182ee990e3 100644 (file)
@@ -858,7 +858,6 @@ struct qeth_card {
        struct napi_struct napi;
        struct qeth_rx rx;
        struct delayed_work buffer_reclaim_work;
-       struct work_struct close_dev_work;
 };
 
 static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
index 41ca6273b750522f0a5a0b21422e8a0af00f9f10..e9807d2996a9d77053b997594e8753a53647e9bc 100644 (file)
@@ -70,15 +70,6 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
 static int qeth_qdio_establish(struct qeth_card *);
 static void qeth_free_qdio_queues(struct qeth_card *card);
 
-static void qeth_close_dev_handler(struct work_struct *work)
-{
-       struct qeth_card *card;
-
-       card = container_of(work, struct qeth_card, close_dev_work);
-       QETH_CARD_TEXT(card, 2, "cldevhdl");
-       ccwgroup_set_offline(card->gdev);
-}
-
 static const char *qeth_get_cardname(struct qeth_card *card)
 {
        if (IS_VM_NIC(card)) {
@@ -202,6 +193,9 @@ static void qeth_clear_working_pool_list(struct qeth_card *card)
                                 &card->qdio.in_buf_pool.entry_list, list)
                list_del(&pool_entry->list);
 
+       if (!queue)
+               return;
+
        for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
                queue->bufs[i].pool_entry = NULL;
 }
@@ -792,10 +786,12 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
        case IPA_CMD_STOPLAN:
                if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
                        dev_err(&card->gdev->dev,
-                               "Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
+                               "Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
                                netdev_name(card->dev));
-                       schedule_work(&card->close_dev_work);
+                       /* Set offline, then probably fail to set online: */
+                       qeth_schedule_recovery(card);
                } else {
+                       /* stay online for subsequent STARTLAN */
                        dev_warn(&card->gdev->dev,
                                 "The link for interface %s on CHPID 0x%X failed\n",
                                 netdev_name(card->dev), card->info.chpid);
@@ -1537,7 +1533,6 @@ static void qeth_setup_card(struct qeth_card *card)
        INIT_LIST_HEAD(&card->ipato.entries);
        qeth_init_qdio_info(card);
        INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
-       INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
        hash_init(card->rx_mode_addrs);
        hash_init(card->local_addrs4);
        hash_init(card->local_addrs6);
@@ -5519,7 +5514,8 @@ static int qeth_do_reset(void *data)
                dev_info(&card->gdev->dev,
                         "Device successfully recovered!\n");
        } else {
-               ccwgroup_set_offline(card->gdev);
+               qeth_set_offline(card, disc, true);
+               ccwgroup_set_offline(card->gdev, false);
                dev_warn(&card->gdev->dev,
                         "The qeth device driver failed to recover an error on the device\n");
        }
index 72e84ff9fea553f25aac2f9ce8f849c4f6055f1e..dc6c00768d9195713ff53a9e152e89da6b839ab4 100644 (file)
@@ -2307,7 +2307,6 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
        if (gdev->state == CCWGROUP_ONLINE)
                qeth_set_offline(card, card->discipline, false);
 
-       cancel_work_sync(&card->close_dev_work);
        if (card->dev->reg_state == NETREG_REGISTERED) {
                priv = netdev_priv(card->dev);
                if (priv->brport_features & BR_LEARNING_SYNC) {
index 3a523e700a5a6f4bf15a47a51318ea4ce7c47e78..6fd3e288f059554c1aec07f8f773f1258a04659b 100644 (file)
@@ -1969,7 +1969,6 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
        if (cgdev->state == CCWGROUP_ONLINE)
                qeth_set_offline(card, card->discipline, false);
 
-       cancel_work_sync(&card->close_dev_work);
        if (card->dev->reg_state == NETREG_REGISTERED)
                unregister_netdev(card->dev);
 
index f34badc75196e20842669486472071fd35f7ec4c..9f64133f976adb967bd2837fbf77d6f6220bae2b 100644 (file)
@@ -10,17 +10,6 @@ config SCSI_ACORNSCSI_3
          This enables support for the Acorn SCSI card (aka30). If you have an
          Acorn system with one of these, say Y. If unsure, say N.
 
-config SCSI_ACORNSCSI_TAGGED_QUEUE
-       bool "Support SCSI 2 Tagged queueing"
-       depends on SCSI_ACORNSCSI_3
-       help
-         Say Y here to enable tagged queuing support on the Acorn SCSI card.
-
-         This is a feature of SCSI-2 which improves performance: the host
-         adapter can send several SCSI commands to a device's queue even if
-         previous commands haven't finished yet. Some SCSI devices don't
-         implement this properly, so the safe answer is N.
-
 config SCSI_ACORNSCSI_SYNC
        bool "Support SCSI 2 Synchronous Transfers"
        depends on SCSI_ACORNSCSI_3
index 4a84599ff491545a0058fff97bcac548a8b9e645..0cc62c1b082544d1b644bc1f4a6c33c7c665f0c9 100644 (file)
  * You can tell if you have a device that supports tagged queueing my
  * cating (eg) /proc/scsi/acornscsi/0 and see if the SCSI revision is reported
  * as '2 TAG'.
- *
- * Also note that CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE is normally set in the config
- * scripts, but disabled here.  Once debugged, remove the #undef, otherwise to debug,
- * comment out the undef.
  */
-#undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+
 /*
  * SCSI-II Synchronous transfer support.
  *
@@ -171,7 +167,7 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
                           unsigned int result);
 static int acornscsi_reconnect_finish(AS_Host *host);
 static void acornscsi_dma_cleanup(AS_Host *host);
-static void acornscsi_abortcmd(AS_Host *host, unsigned char tag);
+static void acornscsi_abortcmd(AS_Host *host);
 
 /* ====================================================================================
  * Miscellaneous
@@ -741,17 +737,6 @@ intr_ret_t acornscsi_kick(AS_Host *host)
 #endif
 
     if (from_queue) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-       /*
-        * tagged queueing - allocate a new tag to this command
-        */
-       if (SCpnt->device->simple_tags) {
-           SCpnt->device->current_tag += 1;
-           if (SCpnt->device->current_tag == 0)
-               SCpnt->device->current_tag = 1;
-           SCpnt->tag = SCpnt->device->current_tag;
-       } else
-#endif
            set_bit(SCpnt->device->id * 8 +
                    (u8)(SCpnt->device->lun & 0x07), host->busyluns);
 
@@ -1192,7 +1177,7 @@ void acornscsi_dma_intr(AS_Host *host)
         * the device recognises the attention.
         */
        if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) {
-           acornscsi_abortcmd(host, host->SCpnt->tag);
+           acornscsi_abortcmd(host);
 
            dmac_write(host, DMAC_TXCNTLO, 0);
            dmac_write(host, DMAC_TXCNTHI, 0);
@@ -1560,23 +1545,6 @@ void acornscsi_message(AS_Host *host)
            acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
 
        switch (host->scsi.last_message) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-       case HEAD_OF_QUEUE_TAG:
-       case ORDERED_QUEUE_TAG:
-       case SIMPLE_QUEUE_TAG:
-           /*
-            * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17)
-            *  If a target does not implement tagged queuing and a queue tag
-            *  message is received, it shall respond with a MESSAGE REJECT
-            *  message and accept the I/O process as if it were untagged.
-            */
-           printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n",
-                   host->host->host_no, acornscsi_target(host));
-           host->SCpnt->device->simple_tags = 0;
-           set_bit(host->SCpnt->device->id * 8 +
-                   (u8)(host->SCpnt->device->lun & 0x7), host->busyluns);
-           break;
-#endif
        case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8):
            /*
             * Target can't handle synchronous transfers
@@ -1687,24 +1655,11 @@ void acornscsi_buildmessages(AS_Host *host)
 #if 0
     /* does the device need the current command aborted */
     if (cmd_aborted) {
-       acornscsi_abortcmd(host->SCpnt->tag);
+       acornscsi_abortcmd(host);
        return;
     }
 #endif
 
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    if (host->SCpnt->tag) {
-       unsigned int tag_type;
-
-       if (host->SCpnt->cmnd[0] == REQUEST_SENSE ||
-           host->SCpnt->cmnd[0] == TEST_UNIT_READY ||
-           host->SCpnt->cmnd[0] == INQUIRY)
-           tag_type = HEAD_OF_QUEUE_TAG;
-       else
-           tag_type = SIMPLE_QUEUE_TAG;
-       msgqueue_addmsg(&host->scsi.msgs, 2, tag_type, host->SCpnt->tag);
-    }
-#endif
 
 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
     if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) {
@@ -1798,7 +1753,7 @@ int acornscsi_reconnect(AS_Host *host)
                "to reconnect with\n",
                host->host->host_no, '0' + target);
        acornscsi_dumplog(host, target);
-       acornscsi_abortcmd(host, 0);
+       acornscsi_abortcmd(host);
        if (host->SCpnt) {
            queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt);
            host->SCpnt = NULL;
@@ -1821,7 +1776,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
        host->scsi.disconnectable = 0;
        if (host->SCpnt->device->id  == host->scsi.reconnected.target &&
            host->SCpnt->device->lun == host->scsi.reconnected.lun &&
-           host->SCpnt->tag         == host->scsi.reconnected.tag) {
+           scsi_cmd_to_rq(host->SCpnt)->tag == host->scsi.reconnected.tag) {
 #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
            DBG(host->SCpnt, printk("scsi%d.%c: reconnected",
                    host->host->host_no, acornscsi_target(host)));
@@ -1848,7 +1803,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
     }
 
     if (!host->SCpnt)
-       acornscsi_abortcmd(host, host->scsi.reconnected.tag);
+       acornscsi_abortcmd(host);
     else {
        /*
         * Restore data pointer from SAVED pointers.
@@ -1889,21 +1844,15 @@ void acornscsi_disconnect_unexpected(AS_Host *host)
  * Function: void acornscsi_abortcmd(AS_host *host, unsigned char tag)
  * Purpose : abort a currently executing command
  * Params  : host - host with connected command to abort
- *          tag  - tag to abort
  */
 static
-void acornscsi_abortcmd(AS_Host *host, unsigned char tag)
+void acornscsi_abortcmd(AS_Host *host)
 {
     host->scsi.phase = PHASE_ABORTED;
     sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN);
 
     msgqueue_flush(&host->scsi.msgs);
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    if (tag)
-       msgqueue_addmsg(&host->scsi.msgs, 2, ABORT_TAG, tag);
-    else
-#endif
-       msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
+    msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
 }
 
 /* ==========================================================================================
@@ -1993,7 +1942,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
            printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n",
                    host->host->host_no, acornscsi_target(host), ssr);
            acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
-           acornscsi_abortcmd(host, host->SCpnt->tag);
+           acornscsi_abortcmd(host);
        }
        return INTR_PROCESSING;
 
@@ -2029,7 +1978,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
            printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n",
                    host->host->host_no, acornscsi_target(host), ssr);
            acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
-           acornscsi_abortcmd(host, host->SCpnt->tag);
+           acornscsi_abortcmd(host);
        }
        return INTR_PROCESSING;
 
@@ -2075,20 +2024,20 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
        case 0x18:                      /* -> PHASE_DATAOUT                             */
            /* COMMAND -> DATA OUT */
            if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
-               acornscsi_abortcmd(host, host->SCpnt->tag);
+               acornscsi_abortcmd(host);
            acornscsi_dma_setup(host, DMA_OUT);
            if (!acornscsi_starttransfer(host))
-               acornscsi_abortcmd(host, host->SCpnt->tag);
+               acornscsi_abortcmd(host);
            host->scsi.phase = PHASE_DATAOUT;
            return INTR_IDLE;
 
        case 0x19:                      /* -> PHASE_DATAIN                              */
            /* COMMAND -> DATA IN */
            if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
-               acornscsi_abortcmd(host, host->SCpnt->tag);
+               acornscsi_abortcmd(host);
            acornscsi_dma_setup(host, DMA_IN);
            if (!acornscsi_starttransfer(host))
-               acornscsi_abortcmd(host, host->SCpnt->tag);
+               acornscsi_abortcmd(host);
            host->scsi.phase = PHASE_DATAIN;
            return INTR_IDLE;
 
@@ -2156,7 +2105,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
            /* MESSAGE IN -> DATA OUT */
            acornscsi_dma_setup(host, DMA_OUT);
            if (!acornscsi_starttransfer(host))
-               acornscsi_abortcmd(host, host->SCpnt->tag);
+               acornscsi_abortcmd(host);
            host->scsi.phase = PHASE_DATAOUT;
            return INTR_IDLE;
 
@@ -2165,7 +2114,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
            /* MESSAGE IN -> DATA IN */
            acornscsi_dma_setup(host, DMA_IN);
            if (!acornscsi_starttransfer(host))
-               acornscsi_abortcmd(host, host->SCpnt->tag);
+               acornscsi_abortcmd(host);
            host->scsi.phase = PHASE_DATAIN;
            return INTR_IDLE;
 
@@ -2206,7 +2155,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
        switch (ssr) {
        case 0x19:                      /* -> PHASE_DATAIN                              */
        case 0x89:                      /* -> PHASE_DATAIN                              */
-           acornscsi_abortcmd(host, host->SCpnt->tag);
+           acornscsi_abortcmd(host);
            return INTR_IDLE;
 
        case 0x1b:                      /* -> PHASE_STATUSIN                            */
@@ -2255,7 +2204,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
        switch (ssr) {
        case 0x18:                      /* -> PHASE_DATAOUT                             */
        case 0x88:                      /* -> PHASE_DATAOUT                             */
-           acornscsi_abortcmd(host, host->SCpnt->tag);
+           acornscsi_abortcmd(host);
            return INTR_IDLE;
 
        case 0x1b:                      /* -> PHASE_STATUSIN                            */
@@ -2482,7 +2431,6 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
     SCpnt->scsi_done = done;
     SCpnt->host_scribble = NULL;
     SCpnt->result = 0;
-    SCpnt->tag = 0;
     SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
     SCpnt->SCp.sent_command = 0;
     SCpnt->SCp.scsi_xferred = 0;
@@ -2581,7 +2529,7 @@ static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt)
                        break;
 
                default:
-                       acornscsi_abortcmd(host, host->SCpnt->tag);
+                       acornscsi_abortcmd(host);
                        res = res_snooze;
                }
                local_irq_restore(flags);
@@ -2747,9 +2695,6 @@ char *acornscsi_info(struct Scsi_Host *host)
 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
     " SYNC"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    " TAG"
-#endif
 #if (DEBUG & DEBUG_NO_WRITE)
     " NOWRITE (" __stringify(NO_WRITE) ")"
 #endif
@@ -2770,9 +2715,6 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
     " SYNC"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    " TAG"
-#endif
 #if (DEBUG & DEBUG_NO_WRITE)
     " NOWRITE (" __stringify(NO_WRITE) ")"
 #endif
@@ -2827,9 +2769,8 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
        seq_printf(m, "Device/Lun TaggedQ      Sync\n");
        seq_printf(m, "     %d/%llu   ", scd->id, scd->lun);
        if (scd->tagged_supported)
-               seq_printf(m, "%3sabled(%3d) ",
-                            scd->simple_tags ? "en" : "dis",
-                            scd->current_tag);
+               seq_printf(m, "%3sabled ",
+                            scd->simple_tags ? "en" : "dis");
        else
                seq_printf(m, "unsupported  ");
 
index 9c4458a99025ad88fd562dc0d4996cf1ac966a46..cf71ef488e360190c61b325c72c40b93b532bffd 100644 (file)
@@ -77,7 +77,6 @@
  *  I was thinking that this was a good chip until I found this restriction ;(
  */
 #define SCSI2_SYNC
-#undef  SCSI2_TAG
 
 #undef DEBUG_CONNECT
 #undef DEBUG_MESSAGES
@@ -990,7 +989,7 @@ fas216_reselected_intr(FAS216_Info *info)
                info->scsi.disconnectable = 0;
                if (info->SCpnt->device->id  == target &&
                    info->SCpnt->device->lun == lun &&
-                   info->SCpnt->tag         == tag) {
+                   scsi_cmd_to_rq(info->SCpnt)->tag == tag) {
                        fas216_log(info, LOG_CONNECT, "reconnected previously executing command");
                } else {
                        queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt);
@@ -1791,8 +1790,9 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
        /*
         * add tag message if required
         */
-       if (SCpnt->tag)
-               msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, SCpnt->tag);
+       if (SCpnt->device->simple_tags)
+               msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG,
+                               scsi_cmd_to_rq(SCpnt)->tag);
 
        do {
 #ifdef SCSI2_SYNC
@@ -1815,20 +1815,8 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
 
 static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt)
 {
-#ifdef SCSI2_TAG
-       /*
-        * tagged queuing - allocate a new tag to this command
-        */
-       if (SCpnt->device->simple_tags && SCpnt->cmnd[0] != REQUEST_SENSE &&
-           SCpnt->cmnd[0] != INQUIRY) {
-           SCpnt->device->current_tag += 1;
-               if (SCpnt->device->current_tag == 0)
-                   SCpnt->device->current_tag = 1;
-                       SCpnt->tag = SCpnt->device->current_tag;
-       } else
-#endif
-               set_bit(SCpnt->device->id * 8 +
-                       (u8)(SCpnt->device->lun & 0x7), info->busyluns);
+       set_bit(SCpnt->device->id * 8 +
+               (u8)(SCpnt->device->lun & 0x7), info->busyluns);
 
        info->stats.removes += 1;
        switch (SCpnt->cmnd[0]) {
@@ -2117,7 +2105,6 @@ request_sense:
        init_SCp(SCpnt);
        SCpnt->SCp.Message = 0;
        SCpnt->SCp.Status = 0;
-       SCpnt->tag = 0;
        SCpnt->host_scribble = (void *)fas216_rq_sns_done;
 
        /*
@@ -2223,7 +2210,6 @@ static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
        init_SCp(SCpnt);
 
        info->stats.queues += 1;
-       SCpnt->tag = 0;
 
        spin_lock(&info->host_lock);
 
@@ -3003,9 +2989,8 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
                dev = &info->device[scd->id];
                seq_printf(m, "     %d/%llu   ", scd->id, scd->lun);
                if (scd->tagged_supported)
-                       seq_printf(m, "%3sabled(%3d) ",
-                                    scd->simple_tags ? "en" : "dis",
-                                    scd->current_tag);
+                       seq_printf(m, "%3sabled ",
+                                    scd->simple_tags ? "en" : "dis");
                else
                        seq_puts(m, "unsupported   ");
 
index e5559f27669d646876cb6770353d45f22af0f37b..c6f71a7d1b8ebf3b380c3629d8dc5338efde512f 100644 (file)
@@ -214,7 +214,7 @@ struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun,
        list_for_each(l, &queue->head) {
                QE_t *q = list_entry(l, QE_t, list);
                if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun &&
-                   q->SCpnt->tag == tag) {
+                   scsi_cmd_to_rq(q->SCpnt)->tag == tag) {
                        SCpnt = __queue_remove(queue, l);
                        break;
                }
index 390b07bf92b9794d0727de8e942c4d5e9eba3834..ccbded3353bd0ea0b8d239b73608fbb075508c2a 100644 (file)
@@ -1254,3 +1254,4 @@ MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
 MODULE_VERSION(CSIO_DRV_VERSION);
 MODULE_FIRMWARE(FW_FNAME_T5);
 MODULE_FIRMWARE(FW_FNAME_T6);
+MODULE_SOFTDEP("pre: cxgb4");
index bb3b460dc0bcd5a3db52f3b2cc488fd381ed399a..4d73e92909ab4f30e629caf7fb304ef6357f10de 100644 (file)
@@ -880,11 +880,11 @@ efct_lio_npiv_drop_nport(struct se_wwn *wwn)
        struct efct *efct = lio_vport->efct;
        unsigned long flags = 0;
 
-       spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
-
        if (lio_vport->fc_vport)
                fc_vport_terminate(lio_vport->fc_vport);
 
+       spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+
        list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
                                 list_entry) {
                if (vport->lio_vport == lio_vport) {
index 40fb3a724c76d019ce14dc58e6679beca080300c..cf2e41dd354cbc83940ed81382b6ec79e1a2dc44 100644 (file)
@@ -32,7 +32,7 @@ efct_scsi_io_alloc(struct efct_node *node)
        struct efct *efct;
        struct efct_xport *xport;
        struct efct_io *io;
-       unsigned long flags = 0;
+       unsigned long flags;
 
        efct = node->efct;
 
@@ -44,7 +44,6 @@ efct_scsi_io_alloc(struct efct_node *node)
        if (!io) {
                efc_log_err(efct, "IO alloc Failed\n");
                atomic_add_return(1, &xport->io_alloc_failed_count);
-               spin_unlock_irqrestore(&node->active_ios_lock, flags);
                return NULL;
        }
 
index 725ca2a23fb2a39c855d54d7f670820e28809648..52be01333c6e36c75fcb55c07579ca60a4c1e6ec 100644 (file)
@@ -928,22 +928,21 @@ __efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
                break;
 
        case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
-               enum efc_nport_topology topology =
-                                       (enum efc_nport_topology)arg;
+               enum efc_nport_topology *topology = arg;
 
                WARN_ON(node->nport->domain->attached);
 
                WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
 
                node_printf(node, "topology notification, topology=%d\n",
-                           topology);
+                           *topology);
 
                /* At the time the PLOGI was received, the topology was unknown,
                 * so we didn't know which node would perform the domain attach:
                 * 1. The node from which the PLOGI was sent (p2p) or
                 * 2. The node to which the FLOGI was sent (fabric).
                 */
-               if (topology == EFC_NPORT_TOPO_P2P) {
+               if (*topology == EFC_NPORT_TOPO_P2P) {
                        /* if this is p2p, need to attach to the domain using
                         * the d_id from the PLOGI received
                         */
index d397220d9e543258f636fcf3cfee55f3e0aa13b6..3270ce40196c6f5ba6f7f6f36400de179f672470 100644 (file)
@@ -107,7 +107,6 @@ void
 efc_fabric_notify_topology(struct efc_node *node)
 {
        struct efc_node *tmp_node;
-       enum efc_nport_topology topology = node->nport->topology;
        unsigned long index;
 
        /*
@@ -118,7 +117,7 @@ efc_fabric_notify_topology(struct efc_node *node)
                if (tmp_node != node) {
                        efc_node_post_event(tmp_node,
                                            EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
-                                           (void *)topology);
+                                           &node->nport->topology);
                }
        }
 }
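
Editor's note: the __efc_d_wait_topology_notify()/efc_fabric_notify_topology() change stops casting the enum value itself to void * for the EFC_EVT_NPORT_TOPOLOGY_NOTIFY event and instead passes the address of nport->topology, which the handler then dereferences. A small stand-alone example of carrying a value through a void * callback argument by address; the callback plumbing here is invented for illustration:

#include <stdio.h>

enum topology { TOPO_UNKNOWN, TOPO_P2P, TOPO_FABRIC };

/* Generic event dispatch: the payload arrives as an untyped pointer. */
static void notify(void (*handler)(void *arg), void *arg)
{
	handler(arg);
}

static void on_topology(void *arg)
{
	enum topology *topo = arg;          /* recover the typed value */

	if (*topo == TOPO_P2P)
		printf("point-to-point attach\n");
	else
		printf("fabric attach\n");
}

int main(void)
{
	enum topology topo = TOPO_P2P;

	/* Pass the address, not the value cast to a pointer. */
	notify(on_topology, &topo);
	return 0;
}
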
index 4683c183e9d411e07a083733c4dd1a7e7b4ed0f1..5bc91d34df634db65d06cf03ab2250aef2940038 100644 (file)
@@ -2281,11 +2281,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
                return FAILED;
        }
 
-       conn = session->leadconn;
-       iscsi_get_conn(conn->cls_conn);
-       conn->eh_abort_cnt++;
-       age = session->age;
-
        spin_lock(&session->back_lock);
        task = (struct iscsi_task *)sc->SCp.ptr;
        if (!task || !task->sc) {
@@ -2293,8 +2288,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
                ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
 
                spin_unlock(&session->back_lock);
-               goto success;
+               spin_unlock_bh(&session->frwd_lock);
+               mutex_unlock(&session->eh_mutex);
+               return SUCCESS;
        }
+
+       conn = session->leadconn;
+       iscsi_get_conn(conn->cls_conn);
+       conn->eh_abort_cnt++;
+       age = session->age;
+
        ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
        __iscsi_get_task(task);
        spin_unlock(&session->back_lock);
index b35bf70a8c0d7b38471c67d9a7a1d19505ffb020..ebe417921dac05257d7ff0a58c6c7991464d79c9 100644 (file)
@@ -285,11 +285,8 @@ buffer_done:
                                "6312 Catching potential buffer "
                                "overflow > PAGE_SIZE = %lu bytes\n",
                                PAGE_SIZE);
-               strscpy(buf + PAGE_SIZE - 1 -
-                       strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1),
-                       LPFC_INFO_MORE_STR,
-                       strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1)
-                       + 1);
+               strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
+                       LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1);
        }
        return len;
 }
@@ -6204,7 +6201,8 @@ lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
        len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d  total SGEs: %d\n",
                       phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
 
-       len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d  SCSI: %d  NVME: %d\n",
+       len += scnprintf(buf + len, PAGE_SIZE - len,
+                       "Cfg: %d  SCSI: %d  NVME: %d\n",
                        phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
                        phba->cfg_nvme_seg_cnt);
        return len;
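
Editor's note: both hunks above are buffer-length fixes: strscpy() is sized from sizeof() the string literal, and the second scnprintf() in lpfc_sg_seg_cnt_show() is given the space remaining after the first write (PAGE_SIZE - len) rather than the full page. The same append pattern with plain snprintf() in user space (scnprintf differs only in its return-value semantics); the numbers printed are placeholders:

#include <stdio.h>

#define BUF_SIZE 64                         /* stand-in for PAGE_SIZE */

int main(void)
{
	char buf[BUF_SIZE];
	int len;

	/* The first line consumes part of the buffer... */
	len = snprintf(buf, BUF_SIZE, "SGL sz: %d total SGEs: %d\n", 4096, 256);

	/* ...so the second write may only use what is left. */
	len += snprintf(buf + len, BUF_SIZE - len,
			"Cfg: %d SCSI: %d NVME: %d\n", 64, 64, 128);

	fputs(buf, stdout);
	printf("total length: %d\n", len);
	return 0;
}
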
index 1254a575fd473104aed1ac6583ef9b4414030d07..052c0e5b11195f3f94559d8a027ff5a6128b1312 100644 (file)
@@ -4015,11 +4015,11 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                be32_to_cpu(pcgd->desc_tag),
                                be32_to_cpu(pcgd->desc_len),
                                be32_to_cpu(pcgd->xmt_signal_capability),
-                               be32_to_cpu(pcgd->xmt_signal_frequency.count),
-                               be32_to_cpu(pcgd->xmt_signal_frequency.units),
+                               be16_to_cpu(pcgd->xmt_signal_frequency.count),
+                               be16_to_cpu(pcgd->xmt_signal_frequency.units),
                                be32_to_cpu(pcgd->rcv_signal_capability),
-                               be32_to_cpu(pcgd->rcv_signal_frequency.count),
-                               be32_to_cpu(pcgd->rcv_signal_frequency.units));
+                               be16_to_cpu(pcgd->rcv_signal_frequency.count),
+                               be16_to_cpu(pcgd->rcv_signal_frequency.units));
 
                        /* Compare driver and Fport capabilities and choose
                         * least common.
@@ -9387,7 +9387,7 @@ lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt)
                /* Extract the next WWPN from the payload */
                wwn = *wwnlist++;
                wwpn = be64_to_cpu(wwn);
-               len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ,
+               len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len,
                                 " %016llx", wwpn);
 
                /* Log a message if we are on the last WWPN
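
Editor's note: the lpfc_cmpl_els_edc() hunk switches the signal-frequency fields to be16_to_cpu(), matching their 16-bit width on the wire; swapping a 16-bit big-endian field with the 32-bit helper pulls in two adjacent bytes and misplaces the value. A user-space illustration using the endian.h conversion helpers (a glibc/BSD extension), with made-up wire bytes:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A 16-bit counter as it appears on the wire, followed by other data. */
	unsigned char wire[4] = { 0x00, 0x05, 0xab, 0xcd };
	uint16_t be16;
	uint32_t be32;

	memcpy(&be16, wire, sizeof(be16));
	memcpy(&be32, wire, sizeof(be32));

	printf("16-bit swap: %u\n", (unsigned)be16toh(be16));   /* 5, as intended */
	printf("32-bit swap: %u\n", (unsigned)be32toh(be32));   /* 0x0005abcd - wrong width */
	return 0;
}
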
index 79a4872c2edb062b3d8927189f25d6c1a41277c3..7359505e604194ccb5bdf9faa6d4eb92200e3d1a 100644 (file)
@@ -1167,7 +1167,7 @@ struct lpfc_mbx_read_object {  /* Version 0 */
 #define lpfc_mbx_rd_object_rlen_MASK   0x00FFFFFF
 #define lpfc_mbx_rd_object_rlen_WORD   word0
                        uint32_t rd_object_offset;
-                       uint32_t rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
+                       __le32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
 #define LPFC_OBJ_NAME_SZ 104   /* 26 x sizeof(uint32_t) is 104. */
                        uint32_t rd_object_cnt;
                        struct lpfc_mbx_host_buf rd_object_hbuf[4];
index 0ec322f0e3cb99f602a42fab0344529c05260e3b..195169badb3721196905f9231038b19c0b0815bc 100644 (file)
@@ -5518,7 +5518,7 @@ lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
        if (phba->cgn_fpin_frequency &&
            phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
                value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
-               cp->cgn_stat_npm = cpu_to_le32(value);
+               cp->cgn_stat_npm = value;
        }
        value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
                                    LPFC_CGN_CRC32_SEED);
@@ -5547,9 +5547,9 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
        uint32_t mbps;
        uint32_t dvalue, wvalue, lvalue, avalue;
        uint64_t latsum;
-       uint16_t *ptr;
-       uint32_t *lptr;
-       uint16_t *mptr;
+       __le16 *ptr;
+       __le32 *lptr;
+       __le16 *mptr;
 
        /* Make sure we have a congestion info buffer */
        if (!phba->cgn_i)
@@ -5570,7 +5570,7 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
        if (phba->cgn_fpin_frequency &&
            phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
                value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
-               cp->cgn_stat_npm = cpu_to_le32(value);
+               cp->cgn_stat_npm = value;
        }
 
        /* Read and clear the latency counters for this minute */
@@ -5753,7 +5753,7 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
                        dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
                        wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
                        lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
-                       mbps += le32_to_cpu(cp->cgn_bw_hr[i]);
+                       mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
                        avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
                }
                if (lvalue)             /* Avg of latency averages */
@@ -8277,11 +8277,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        return 0;
 
 out_free_hba_hdwq_info:
-       free_percpu(phba->sli4_hba.c_stat);
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       free_percpu(phba->sli4_hba.c_stat);
 out_free_hba_idle_stat:
-       kfree(phba->sli4_hba.idle_stat);
 #endif
+       kfree(phba->sli4_hba.idle_stat);
 out_free_hba_eq_info:
        free_percpu(phba->sli4_hba.eq_info);
 out_free_hba_cpu_map:
@@ -13411,8 +13411,8 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
 
        /* last used Index initialized to 0xff already */
 
-       cp->cgn_warn_freq = LPFC_FPIN_INIT_FREQ;
-       cp->cgn_alarm_freq = LPFC_FPIN_INIT_FREQ;
+       cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
+       cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
        crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
        cp->cgn_info_crc = cpu_to_le32(crc);
 
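
The congestion-buffer hunks above store firmware-shared fields in explicit little-endian form (cpu_to_le16/cpu_to_le32) and read them back with the conversion matching the field width (le16_to_cpu for the 16-bit bandwidth counters). A rough userspace equivalent, assuming a glibc-style <endian.h> and an illustrative two-field record:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical firmware-shared record; both fields are little-endian on the wire */
    struct cgn_rec {
        uint16_t warn_freq;    /* __le16 in the driver structure */
        uint16_t alarm_freq;   /* __le16 in the driver structure */
    };

    #define INIT_FREQ 500      /* stand-in for LPFC_FPIN_INIT_FREQ */

    int main(void)
    {
        struct cgn_rec cp;

        /* store in explicit little-endian form at the point of assignment */
        cp.warn_freq  = htole16(INIT_FREQ);
        cp.alarm_freq = htole16(INIT_FREQ);

        /* convert back whenever the CPU consumes the value */
        printf("warn=%u alarm=%u\n",
               (unsigned)le16toh(cp.warn_freq),
               (unsigned)le16toh(cp.alarm_freq));
        return 0;
    }
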
index 73a3568ff17ec429b39cef3668acf21eca3869c9..479b3eed62085714b83e76c44976ce7ce7cc0bbc 100644 (file)
@@ -1489,9 +1489,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
        struct lpfc_nvme_qhandle *lpfc_queue_info;
        struct lpfc_nvme_fcpreq_priv *freqpriv;
        struct nvme_common_command *sqe;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint64_t start = 0;
-#endif
 
        /* Validate pointers. LLDD fault handling with transport does
         * have timing races.
index 0fde1e874c7a308e0850bc7bf6acd332415686cb..befdf864c43bdc8f4a7c4310abd6750c08661f14 100644 (file)
@@ -1495,7 +1495,6 @@ static int
 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                uint8_t *txop, uint8_t *rxop)
 {
-       uint8_t ret = 0;
 
        if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
                switch (scsi_get_prot_op(sc)) {
@@ -1548,7 +1547,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                }
        }
 
-       return ret;
+       return 0;
 }
 #endif
 
@@ -5578,12 +5577,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
        int err, idx;
        u8 *uuid = NULL;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-       uint64_t start = 0L;
+       uint64_t start;
 
-       if (phba->ktime_on)
-               start = ktime_get_ns();
-#endif
        start = ktime_get_ns();
        rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 
index ffd8a140638c4ff6e1e24122f1f01dd58bdbd352..026a1196a54d5e109a478a7631e6285f5b9a5e0c 100644 (file)
@@ -12292,12 +12292,12 @@ void
 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                     struct lpfc_iocbq *rspiocb)
 {
-       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       struct lpfc_nodelist *ndlp = NULL;
        IOCB_t *irsp = &rspiocb->iocb;
 
        /* ELS cmd tag <ulpIoTag> completes */
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-                       "0139 Ignoring ELS cmd tag x%x completion Data: "
+                       "0139 Ignoring ELS cmd code x%x completion Data: "
                        "x%x x%x x%x\n",
                        irsp->ulpIoTag, irsp->ulpStatus,
                        irsp->un.ulpWord[4], irsp->ulpTimeout);
@@ -12305,10 +12305,13 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
         * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
         * if exchange is busy.
         */
-       if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
+       if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
+               ndlp = cmdiocb->context_un.ndlp;
                lpfc_ct_free_iocb(phba, cmdiocb);
-       else
+       } else {
+               ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
                lpfc_els_free_iocb(phba, cmdiocb);
+       }
 
        lpfc_nlp_put(ndlp);
 }
@@ -22090,6 +22093,7 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
        struct lpfc_dmabuf *pcmd;
+       u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
 
        /* sanity check on queue memory */
        if (!datap)
@@ -22113,10 +22117,10 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
 
        memset((void *)read_object->u.request.rd_object_name, 0,
               LPFC_OBJ_NAME_SZ);
-       sprintf((uint8_t *)read_object->u.request.rd_object_name, rdobject);
+       scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject);
        for (j = 0; j < strlen(rdobject); j++)
                read_object->u.request.rd_object_name[j] =
-                       cpu_to_le32(read_object->u.request.rd_object_name[j]);
+                       cpu_to_le32(rd_object_name[j]);
 
        pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
        if (pcmd)
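
In the lpfc_read_object() hunk above, the object name is now built in a bounded, naturally aligned scratch array and each 32-bit word is byte-swapped before being placed in the mailbox. An approximate userspace sketch — NAME_LEN_DW and the object name are illustrative, and a constant "%s" format is used here, whereas the hunk passes the name string directly:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NAME_LEN_DW 26    /* stand-in for LPFC_MBX_OBJECT_NAME_LEN_DW */

    int main(void)
    {
        const char *rdobject = "/driver/objname.grp";   /* illustrative name */
        uint32_t name[NAME_LEN_DW] = { 0 };              /* aligned scratch buffer */
        uint32_t wire[NAME_LEN_DW];

        /* bounded copy into the scratch buffer */
        snprintf((char *)name, sizeof(name), "%s", rdobject);

        /* the mailbox expects each 32-bit word in little-endian byte order */
        for (int j = 0; j < NAME_LEN_DW; j++)
            wire[j] = htole32(name[j]);

        printf("first word on the wire: 0x%08x\n", (unsigned)wire[0]);
        return 0;
    }
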
index e4298bf4a482f8257f8853706898bb232cb41016..39d8754e63acf5cb80518ffd8ef3f83b0db1f073 100644 (file)
@@ -1916,7 +1916,7 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
                raid = MR_LdRaidGet(ld, local_map_ptr);
 
                if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
-               blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+                       blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
 
                mr_device_priv_data->is_tm_capable =
                        raid->capability.tmCapable;
@@ -8033,7 +8033,7 @@ skip_firing_dcmds:
 
        if (instance->adapter_type != MFI_SERIES) {
                megasas_release_fusion(instance);
-                       pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
+               pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
                                (sizeof(struct MR_PD_CFG_SEQ) *
                                        (MAX_PHYSICAL_DEVICES - 1));
                for (i = 0; i < 2 ; i++) {
@@ -8773,8 +8773,7 @@ int megasas_update_device_list(struct megasas_instance *instance,
 
                if (event_type & SCAN_VD_CHANNEL) {
                        if (!instance->requestorId ||
-                           (instance->requestorId &&
-                            megasas_get_ld_vf_affiliation(instance, 0))) {
+                       megasas_get_ld_vf_affiliation(instance, 0)) {
                                dcmd_ret = megasas_ld_list_query(instance,
                                                MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
                                if (dcmd_ret != DCMD_SUCCESS)
index 6c82435bc9cc65167854fcf727e5b9c96b07971d..27eb652b564f54117e02d257d92ab6111c9412d5 100644 (file)
@@ -1582,8 +1582,10 @@ mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
         * wait for current poll to complete.
         */
        for (qid = 0; qid < iopoll_q_count; qid++) {
-               while (atomic_read(&ioc->io_uring_poll_queues[qid].busy))
+               while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
+                       cpu_relax();
                        udelay(500);
+               }
        }
 }
 
index 770b241d7bb221084fd12d3c66a06ad6674feaf1..1b79f01f03a4ada6ae0848bc8d89e0e123c8f88d 100644 (file)
@@ -2178,7 +2178,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
                mpt3sas_check_cmd_timeout(ioc,
                    ioc->ctl_cmds.status, mpi_request,
                    sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
-                *issue_reset = reset_needed;
+               *issue_reset = reset_needed;
                rc = -EFAULT;
                goto out;
        }
index 2f82b1e629af14293f02e43392f560242eaf9936..d383d4a034366ecf3a6eaf4a5ca61fd870efa48d 100644 (file)
@@ -10749,8 +10749,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
        case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
                _scsih_pcie_topology_change_event(ioc, fw_event);
                ioc->current_event = NULL;
-                       return;
-       break;
+               return;
        }
 out:
        fw_event_work_put(fw_event);
index 7a4f5d4dd6705908434b98c1cf0a4dd69edbddd7..2b8c6fa5e77569511677892831a3caea50a19933 100644 (file)
@@ -1939,11 +1939,8 @@ static   void    ncr_start_next_ccb (struct ncb *np, struct lcb * lp, int maxn);
 static void    ncr_put_start_queue(struct ncb *np, struct ccb *cp);
 
 static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd);
-static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd);
 static void process_waiting_list(struct ncb *np, int sts);
 
-#define remove_from_waiting_list(np, cmd) \
-               retrieve_from_waiting_list(1, (np), (cmd))
 #define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
 #define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
 
@@ -7997,26 +7994,6 @@ static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd)
        }
 }
 
-static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd)
-{
-       struct scsi_cmnd **pcmd = &np->waiting_list;
-
-       while (*pcmd) {
-               if (cmd == *pcmd) {
-                       if (to_remove) {
-                               *pcmd = (struct scsi_cmnd *) cmd->next_wcmd;
-                               cmd->next_wcmd = NULL;
-                       }
-#ifdef DEBUG_WAITING_LIST
-       printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
-#endif
-                       return cmd;
-               }
-               pcmd = (struct scsi_cmnd **) &(*pcmd)->next_wcmd;
-       }
-       return NULL;
-}
-
 static void process_waiting_list(struct ncb *np, int sts)
 {
        struct scsi_cmnd *waiting_list, *wcmd;
index 1e4e3e83b5c747bec8f2abea96e07f359f204ad2..5fc7697f0af4c1f979f6d753ab81bdecd745671b 100644 (file)
@@ -7169,7 +7169,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
                                return 0;
                        break;
                case QLA2XXX_INI_MODE_DUAL:
-                       if (!qla_dual_mode_enabled(vha))
+                       if (!qla_dual_mode_enabled(vha) &&
+                           !qla_ini_mode_enabled(vha))
                                return 0;
                        break;
                case QLA2XXX_INI_MODE_ENABLED:
index ece60267b971e89b803681a05f6ae634ab570cb0..b26f2699adb2702394cdafa862113a56ee591c4d 100644 (file)
@@ -2634,7 +2634,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
        }
 
        if (unlikely(logit))
-               ql_log(ql_log_warn, fcport->vha, 0x5060,
+               ql_log(ql_dbg_io, fcport->vha, 0x5060,
                   "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x  ox_id=%x\n",
                   sp->name, sp->handle, comp_status,
                   fd->transferred_length, le32_to_cpu(sts->residual_len),
@@ -3491,7 +3491,7 @@ check_scsi_status:
 
 out:
        if (logit)
-               ql_log(ql_log_warn, fcport->vha, 0x3022,
+               ql_log(ql_dbg_io, fcport->vha, 0x3022,
                       "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
                       comp_status, scsi_status, res, vha->host_no,
                       cp->device->id, cp->device->lun, fcport->d_id.b.domain,
index d8b05d8b54708a76b945761f91c06c9b493f6b17..922e4c7bd88e4d9a5ff80e61c09cbabe68cb4f49 100644 (file)
@@ -441,9 +441,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
        struct iscsi_transport *t = iface->transport;
        int param = -1;
 
-       if (attr == &dev_attr_iface_enabled.attr)
-               param = ISCSI_NET_PARAM_IFACE_ENABLE;
-       else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
+       if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
                param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
        else if (attr == &dev_attr_iface_header_digest.attr)
                param = ISCSI_IFACE_PARAM_HDRDGST_EN;
@@ -483,7 +481,9 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
        if (param != -1)
                return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
 
-       if (attr == &dev_attr_iface_vlan_id.attr)
+       if (attr == &dev_attr_iface_enabled.attr)
+               param = ISCSI_NET_PARAM_IFACE_ENABLE;
+       else if (attr == &dev_attr_iface_vlan_id.attr)
                param = ISCSI_NET_PARAM_VLAN_ID;
        else if (attr == &dev_attr_iface_vlan_priority.attr)
                param = ISCSI_NET_PARAM_VLAN_PRIORITY;
index cbd9999f93a6b2a08c5f625b2184a21dd8bbb407..523bf2fdc25321e3444e04b6ab4f33fffb14041c 100644 (file)
@@ -2124,6 +2124,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
                retries = 0;
 
                do {
+                       bool media_was_present = sdkp->media_present;
+
                        cmd[0] = TEST_UNIT_READY;
                        memset((void *) &cmd[1], 0, 9);
 
@@ -2138,7 +2140,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
                         * with any more polling.
                         */
                        if (media_not_present(sdkp, &sshdr)) {
-                               sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
+                               if (media_was_present)
+                                       sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
                                return;
                        }
 
@@ -3401,15 +3404,16 @@ static int sd_probe(struct device *dev)
        }
 
        device_initialize(&sdkp->dev);
-       sdkp->dev.parent = dev;
+       sdkp->dev.parent = get_device(dev);
        sdkp->dev.class = &sd_disk_class;
        dev_set_name(&sdkp->dev, "%s", dev_name(dev));
 
        error = device_add(&sdkp->dev);
-       if (error)
-               goto out_free_index;
+       if (error) {
+               put_device(&sdkp->dev);
+               goto out;
+       }
 
-       get_device(dev);
        dev_set_drvdata(dev, sdkp);
 
        gd->major = sd_major((index & 0xf0) >> 4);
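
The first sd.c hunk above logs "Media removed" only on the present-to-absent transition rather than on every poll. A toy sketch of that edge-triggered logging, with a made-up sequence of poll results:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        bool samples[] = { true, true, false, false, false };   /* media present? */
        bool present = true;

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            bool was_present = present;

            present = samples[i];
            /* log the removal once, on the transition, not on every poll */
            if (!present && was_present)
                printf("Media removed, stopped polling\n");
        }
        return 0;
    }
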
index b9757f24b0d6f31e84e8688744ce066a64073fca..ed06798983f87f6c06accc0c5f6c3ec08408d0cb 100644 (file)
@@ -154,8 +154,8 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
 
        /*
         * Report zone buffer size should be at most 64B times the number of
-        * zones requested plus the 64B reply header, but should be at least
-        * SECTOR_SIZE for ATA devices.
+        * zones requested plus the 64B reply header, but should be aligned
+        * to SECTOR_SIZE for ATA devices.
         * Make sure that this size does not exceed the hardware capabilities.
         * Furthermore, since the report zone command cannot be split, make
         * sure that the allocated buffer can always be mapped by limiting the
@@ -174,7 +174,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
                        *buflen = bufsize;
                        return buf;
                }
-               bufsize >>= 1;
+               bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
        }
 
        return NULL;
@@ -280,7 +280,7 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
 {
        struct scsi_disk *sdkp;
        unsigned long flags;
-       unsigned int zno;
+       sector_t zno;
        int ret;
 
        sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
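
The sd_zbc hunk above keeps the shrinking report buffer aligned to SECTOR_SIZE as it halves. A standalone sketch of the size progression, with a local stand-in for the kernel's rounddown() macro:

    #include <stddef.h>
    #include <stdio.h>

    #define SECTOR_SIZE 512

    /* userspace stand-in for the kernel's rounddown() macro */
    static size_t rounddown_to(size_t x, size_t align)
    {
        return x - (x % align);
    }

    int main(void)
    {
        size_t bufsize = 128 * 1024 + 300;   /* arbitrary starting size */

        /* halve and re-align until the allocation would fit (fit test omitted) */
        while (bufsize >= SECTOR_SIZE) {
            printf("try %zu bytes\n", bufsize);
            bufsize = rounddown_to(bufsize >> 1, SECTOR_SIZE);
        }
        return 0;
    }
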
index c2afba2a5414df7115ed16a289b33fde9002487a..0a1734f34587dd4dc9e8b78468b05d37336e86b5 100644 (file)
@@ -87,9 +87,16 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
                0
        };
        unsigned char recv_page_code;
+       unsigned int retries = SES_RETRIES;
+       struct scsi_sense_hdr sshdr;
+
+       do {
+               ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
+                                      &sshdr, SES_TIMEOUT, 1, NULL);
+       } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
+                (sshdr.sense_key == NOT_READY ||
+                 (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
 
-       ret =  scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
-                               NULL, SES_TIMEOUT, SES_RETRIES, NULL);
        if (unlikely(ret))
                return ret;
 
@@ -111,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
 static int ses_send_diag(struct scsi_device *sdev, int page_code,
                         void *buf, int bufflen)
 {
-       u32 result;
+       int result;
 
        unsigned char cmd[] = {
                SEND_DIAGNOSTIC,
@@ -121,9 +128,16 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
                bufflen & 0xff,
                0
        };
+       struct scsi_sense_hdr sshdr;
+       unsigned int retries = SES_RETRIES;
+
+       do {
+               result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
+                                         &sshdr, SES_TIMEOUT, 1, NULL);
+       } while (result > 0 && --retries && scsi_sense_valid(&sshdr) &&
+                (sshdr.sense_key == NOT_READY ||
+                 (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
 
-       result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
-                                 NULL, SES_TIMEOUT, SES_RETRIES, NULL);
        if (result)
                sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
                            result);
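
The ses.c hunks above issue a single command per attempt and retry in the driver only for NOT_READY and UNIT_ATTENTION/0x29 sense data, instead of letting scsi_execute_req() retry everything SES_RETRIES times. A very rough sketch of that retry shape, collapsing driver status and sense key into one illustrative return code:

    #include <stdio.h>

    /* stand-ins for the sense conditions the loop above retries on */
    enum { OK = 0, NOT_READY = 1, UNIT_ATTENTION = 2 };

    static int fake_execute(int attempt)
    {
        /* pretend the enclosure reports NOT_READY for the first two attempts */
        return attempt < 2 ? NOT_READY : OK;
    }

    int main(void)
    {
        unsigned int retries = 5;   /* stand-in for SES_RETRIES */
        int attempt = 0, ret;

        do {
            ret = fake_execute(attempt++);
        } while (ret > 0 && --retries &&
                 (ret == NOT_READY || ret == UNIT_ATTENTION));

        printf("ret=%d after %d attempts\n", ret, attempt);
        return 0;
    }
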
index 79d9aa2df528b35e89f2014d1da29503f6599d45..ddd00efc488252644e732a481d29d3ee5ecc8e3c 100644 (file)
@@ -523,7 +523,7 @@ static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest
                        return rc;
                cd->readcd_known = 0;
                sr_printk(KERN_INFO, cd,
-                         "CDROM does'nt support READ CD (0xbe) command\n");
+                         "CDROM doesn't support READ CD (0xbe) command\n");
                /* fall & retry the other way */
        }
        /* ... if this fails, we switch the blocksize using MODE SELECT */
index 9d04929f03a10af482b001a2d35cef6ca0f8d820..ae8636d3780b65e28ce1e1b6ec4db286776a9985 100644 (file)
@@ -3823,6 +3823,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
        case CDROM_SEND_PACKET:
                if (!capable(CAP_SYS_RAWIO))
                        return -EPERM;
+               break;
        default:
                break;
        }
index b3bcc5c882da15e601916524bece3149ee4648ee..149c1aa0910357765935e58835ac3f3085ff9644 100644 (file)
@@ -128,6 +128,81 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
        return err;
 }
 
+static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
+{
+       struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
+       int ret;
+
+       pwr_info.lane_rx = lanes;
+       pwr_info.lane_tx = lanes;
+       ret = ufshcd_config_pwr_mode(hba, &pwr_info);
+       if (ret)
+               dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
+                       __func__, lanes, ret);
+       return ret;
+}
+
+static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
+                               enum ufs_notify_change_status status,
+                               struct ufs_pa_layer_attr *dev_max_params,
+                               struct ufs_pa_layer_attr *dev_req_params)
+{
+       int err = 0;
+
+       switch (status) {
+       case PRE_CHANGE:
+               if (ufshcd_is_hs_mode(dev_max_params) &&
+                   (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
+                       ufs_intel_set_lanes(hba, 2);
+               memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
+               break;
+       case POST_CHANGE:
+               if (ufshcd_is_hs_mode(dev_req_params)) {
+                       u32 peer_granularity;
+
+                       usleep_range(1000, 1250);
+                       err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+                                                 &peer_granularity);
+               }
+               break;
+       default:
+               break;
+       }
+
+       return err;
+}
+
+static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
+{
+       u32 granularity, peer_granularity;
+       u32 pa_tactivate, peer_pa_tactivate;
+       int ret;
+
+       ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
+       if (ret)
+               goto out;
+
+       if (granularity == peer_granularity) {
+               u32 new_peer_pa_tactivate = pa_tactivate + 2;
+
+               ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
+       }
+out:
+       return ret;
+}
+
 #define INTEL_ACTIVELTR                0x804
 #define INTEL_IDLELTR          0x808
 
@@ -351,6 +426,7 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba)
        struct ufs_host *ufs_host;
        int err;
 
+       hba->nop_out_timeout = 200;
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
        hba->caps |= UFSHCD_CAP_CRYPTO;
        err = ufs_intel_common_init(hba);
@@ -381,6 +457,8 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
        .exit                   = ufs_intel_common_exit,
        .hce_enable_notify      = ufs_intel_hce_enable_notify,
        .link_startup_notify    = ufs_intel_link_startup_notify,
+       .pwr_change_notify      = ufs_intel_lkf_pwr_change_notify,
+       .apply_dev_quirks       = ufs_intel_lkf_apply_dev_quirks,
        .resume                 = ufs_intel_resume,
        .device_reset           = ufs_intel_device_reset,
 };
index 3841ab49f55604f3cf4f9f45178ed4a0d293e033..95be7ecdfe10b8c00d166535e2ec54bfe4c4f72e 100644 (file)
@@ -17,8 +17,6 @@
 #include <linux/blk-pm.h>
 #include <linux/blkdev.h>
 #include <scsi/scsi_driver.h>
-#include <scsi/scsi_transport.h>
-#include "../scsi_transport_api.h"
 #include "ufshcd.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
@@ -237,6 +235,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static int ufshcd_change_power_mode(struct ufs_hba *hba,
                             struct ufs_pa_layer_attr *pwr_mode);
+static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -319,8 +318,7 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
                                     enum ufs_trace_str_t str_t)
 {
-       int off = (int)tag - hba->nutrs;
-       struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
+       struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
 
        if (!trace_ufshcd_upiu_enabled())
                return;
@@ -2759,8 +2757,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 out:
        up_read(&hba->clk_scaling_lock);
 
-       if (ufs_trigger_eh())
-               scsi_schedule_eh(hba->host);
+       if (ufs_trigger_eh()) {
+               unsigned long flags;
+
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               ufshcd_schedule_eh_work(hba);
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       }
 
        return err;
 }
@@ -3919,35 +3922,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
 
-static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
-{
-       lockdep_assert_held(hba->host->host_lock);
-
-       return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
-              (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
-}
-
-static void ufshcd_schedule_eh(struct ufs_hba *hba)
-{
-       bool schedule_eh = false;
-       unsigned long flags;
-
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       /* handle fatal errors only when link is not in error state */
-       if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
-               if (hba->force_reset || ufshcd_is_link_broken(hba) ||
-                   ufshcd_is_saved_err_fatal(hba))
-                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
-               else
-                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
-               schedule_eh = true;
-       }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-       if (schedule_eh)
-               scsi_schedule_eh(hba->host);
-}
-
 /**
  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
  * state) and waits for it to take effect.
@@ -3968,7 +3942,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 {
        DECLARE_COMPLETION_ONSTACK(uic_async_done);
        unsigned long flags;
-       bool schedule_eh = false;
        u8 status;
        int ret;
        bool reenable_intr = false;
@@ -4038,14 +4011,10 @@ out:
                ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
        if (ret) {
                ufshcd_set_link_broken(hba);
-               schedule_eh = true;
+               ufshcd_schedule_eh_work(hba);
        }
-
 out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-       if (schedule_eh)
-               ufshcd_schedule_eh(hba);
        mutex_unlock(&hba->uic_cmd_mutex);
 
        return ret;
@@ -4776,7 +4745,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
        mutex_lock(&hba->dev_cmd.lock);
        for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
                err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
-                                              NOP_OUT_TIMEOUT);
+                                         hba->nop_out_timeout);
 
                if (!err || err == -ETIMEDOUT)
                        break;
@@ -5911,6 +5880,27 @@ out:
        return err_handling;
 }
 
+/* host lock must be held before calling this func */
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+       return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+              (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+/* host lock must be held before calling this func */
+static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+{
+       /* handle fatal errors only when link is not in error state */
+       if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+               if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+                   ufshcd_is_saved_err_fatal(hba))
+                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
+               else
+                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
+               queue_work(hba->eh_wq, &hba->eh_work);
+       }
+}
+
 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
 {
        down_write(&hba->clk_scaling_lock);
@@ -6044,11 +6034,11 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
 
 /**
  * ufshcd_err_handler - handle UFS errors that require s/w attention
- * @host: SCSI host pointer
+ * @work: pointer to work structure
  */
-static void ufshcd_err_handler(struct Scsi_Host *host)
+static void ufshcd_err_handler(struct work_struct *work)
 {
-       struct ufs_hba *hba = shost_priv(host);
+       struct ufs_hba *hba;
        unsigned long flags;
        bool err_xfer = false;
        bool err_tm = false;
@@ -6056,9 +6046,10 @@ static void ufshcd_err_handler(struct Scsi_Host *host)
        int tag;
        bool needs_reset = false, needs_restore = false;
 
+       hba = container_of(work, struct ufs_hba, eh_work);
+
        down(&hba->host_sem);
        spin_lock_irqsave(hba->host->host_lock, flags);
-       hba->host->host_eh_scheduled = 0;
        if (ufshcd_err_handling_should_stop(hba)) {
                if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
                        hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
@@ -6371,6 +6362,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
                                         "host_regs: ");
                        ufshcd_print_pwr_info(hba);
                }
+               ufshcd_schedule_eh_work(hba);
                retval |= IRQ_HANDLED;
        }
        /*
@@ -6382,34 +6374,9 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
        hba->errors = 0;
        hba->uic_error = 0;
        spin_unlock(hba->host->host_lock);
-
-       if (queue_eh_work)
-               ufshcd_schedule_eh(hba);
-
        return retval;
 }
 
-struct ctm_info {
-       struct ufs_hba  *hba;
-       unsigned long   pending;
-       unsigned int    ncpl;
-};
-
-static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
-{
-       struct ctm_info *const ci = priv;
-       struct completion *c;
-
-       WARN_ON_ONCE(reserved);
-       if (test_bit(req->tag, &ci->pending))
-               return true;
-       ci->ncpl++;
-       c = req->end_io_data;
-       if (c)
-               complete(c);
-       return true;
-}
-
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
@@ -6420,18 +6387,24 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
  */
 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
-       unsigned long flags;
-       struct request_queue *q = hba->tmf_queue;
-       struct ctm_info ci = {
-               .hba     = hba,
-       };
+       unsigned long flags, pending, issued;
+       irqreturn_t ret = IRQ_NONE;
+       int tag;
+
+       pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-       blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+       issued = hba->outstanding_tasks & ~pending;
+       for_each_set_bit(tag, &issued, hba->nutmrs) {
+               struct request *req = hba->tmf_rqs[tag];
+               struct completion *c = req->end_io_data;
+
+               complete(c);
+               ret = IRQ_HANDLED;
+       }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-       return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
+       return ret;
 }
 
 /**
@@ -6554,9 +6527,9 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        ufshcd_hold(hba, false);
 
        spin_lock_irqsave(host->host_lock, flags);
-       blk_mq_start_request(req);
 
        task_tag = req->tag;
+       hba->tmf_rqs[req->tag] = req;
        treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
 
        memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
@@ -6597,6 +6570,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        }
 
        spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->tmf_rqs[req->tag] = NULL;
        __clear_bit(task_tag, &hba->outstanding_tasks);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
@@ -6876,7 +6850,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
                        err = ufshcd_clear_cmd(hba, pos);
                        if (err)
                                break;
-                       __ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true);
+                       __ufshcd_transfer_req_compl(hba, 1U << pos, false);
                }
        }
 
@@ -7048,17 +7022,15 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
         * will be to send LU reset which, again, is a spec violation.
         * To avoid these unnecessary/illegal steps, first we clean up
         * the lrb taken by this cmd and re-set it in outstanding_reqs,
-        * then queue the error handler and bail.
+        * then queue the eh_work and bail.
         */
        if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
                ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
 
                spin_lock_irqsave(host->host_lock, flags);
                hba->force_reset = true;
+               ufshcd_schedule_eh_work(hba);
                spin_unlock_irqrestore(host->host_lock, flags);
-
-               ufshcd_schedule_eh(hba);
-
                goto release;
        }
 
@@ -7191,10 +7163,11 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->force_reset = true;
+       ufshcd_schedule_eh_work(hba);
        dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-       ufshcd_err_handler(hba->host);
+       flush_work(&hba->eh_work);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
@@ -8604,6 +8577,8 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
        if (hba->is_powered) {
                ufshcd_exit_clk_scaling(hba);
                ufshcd_exit_clk_gating(hba);
+               if (hba->eh_wq)
+                       destroy_workqueue(hba->eh_wq);
                ufs_debugfs_hba_exit(hba);
                ufshcd_variant_hba_exit(hba);
                ufshcd_setup_vreg(hba, false);
@@ -9448,10 +9423,6 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
        return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
 }
 
-static struct scsi_transport_template ufshcd_transport_template = {
-       .eh_strategy_handler = ufshcd_err_handler,
-};
-
 /**
  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
  * @dev: pointer to device handle
@@ -9478,11 +9449,11 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
                err = -ENOMEM;
                goto out_error;
        }
-       host->transportt = &ufshcd_transport_template;
        hba = shost_priv(host);
        hba->host = host;
        hba->dev = dev;
        hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
+       hba->nop_out_timeout = NOP_OUT_TIMEOUT;
        INIT_LIST_HEAD(&hba->clk_list_head);
        spin_lock_init(&hba->outstanding_lock);
 
@@ -9517,6 +9488,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        int err;
        struct Scsi_Host *host = hba->host;
        struct device *dev = hba->dev;
+       char eh_wq_name[sizeof("ufs_eh_wq_00")];
 
        if (!mmio_base) {
                dev_err(hba->dev,
@@ -9570,6 +9542,17 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
        hba->max_pwr_info.is_valid = false;
 
+       /* Initialize work queues */
+       snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
+                hba->host->host_no);
+       hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+       if (!hba->eh_wq) {
+               dev_err(hba->dev, "%s: failed to create eh workqueue\n",
+                       __func__);
+               err = -ENOMEM;
+               goto out_disable;
+       }
+       INIT_WORK(&hba->eh_work, ufshcd_err_handler);
        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 
        sema_init(&hba->host_sem, 1);
@@ -9638,6 +9621,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
                err = PTR_ERR(hba->tmf_queue);
                goto free_tmf_tag_set;
        }
+       hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
+                                   sizeof(*hba->tmf_rqs), GFP_KERNEL);
+       if (!hba->tmf_rqs) {
+               err = -ENOMEM;
+               goto free_tmf_queue;
+       }
 
        /* Reset the attached device */
        ufshcd_device_reset(hba);
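
Several ufshcd.c hunks above replace the blk_mq tag iterator with a direct walk of the task-management doorbell: the completed tags are the ones the driver issued that are no longer pending in the register. A small userspace sketch of that bitmask arithmetic, with made-up doorbell values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long outstanding = 0x2f;   /* tags the driver issued */
        unsigned long pending     = 0x0a;   /* tags still set in the doorbell */
        unsigned long issued = outstanding & ~pending;   /* completed tags */

        /* simple stand-in for the kernel's for_each_set_bit() */
        for (int tag = 0; tag < (int)(8 * sizeof(issued)); tag++)
            if (issued & (1UL << tag))
                printf("complete tag %d\n", tag);
        return 0;
    }
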
index 52ea6f350b181b8b7ef4af93547e03dd70b3a337..41f6e06f91856469113e2e14f5e4639c935114df 100644 (file)
@@ -741,6 +741,8 @@ struct ufs_hba_monitor {
  * @is_powered: flag to check if HBA is powered
  * @shutting_down: flag to check if shutdown has been invoked
  * @host_sem: semaphore used to serialize concurrent contexts
+ * @eh_wq: Workqueue that eh_work works on
+ * @eh_work: Worker to handle UFS errors that require s/w attention
  * @eeh_work: Worker to handle exception events
  * @errors: HBA errors
  * @uic_error: UFS interconnect layer error status
@@ -826,6 +828,7 @@ struct ufs_hba {
 
        struct blk_mq_tag_set tmf_tag_set;
        struct request_queue *tmf_queue;
+       struct request **tmf_rqs;
 
        struct uic_command *active_uic_cmd;
        struct mutex uic_cmd_mutex;
@@ -843,6 +846,8 @@ struct ufs_hba {
        struct semaphore host_sem;
 
        /* Work Queues */
+       struct workqueue_struct *eh_wq;
+       struct work_struct eh_work;
        struct work_struct eeh_work;
 
        /* HBA Errors */
@@ -858,6 +863,7 @@ struct ufs_hba {
        /* Device management request data */
        struct ufs_dev_cmd dev_cmd;
        ktime_t last_dme_cmd_tstamp;
+       int nop_out_timeout;
 
        /* Keeps information of the UFS device connected to this host */
        struct ufs_dev_info dev_info;
index 02fb51ae8b250cecce39eb716c4238498964f011..589af5f6b940aaf87a639b6e70314c3035e42b9d 100644 (file)
@@ -333,9 +333,8 @@ ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
 }
 
 static void
-ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
-                           struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
-                           u8 transfer_len, int read_id)
+ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+                           __be64 ppn, u8 transfer_len, int read_id)
 {
        unsigned char *cdb = lrbp->cmd->cmnd;
        __be64 ppn_tmp = ppn;
@@ -703,8 +702,7 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                }
        }
 
-       ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
-                                   read_id);
+       ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id);
 
        hpb->stats.hit_cnt++;
        return 0;
index c25ce8f0e0afc68341315ac04899f69b30c9db91..07d0250f17c3a4a16a28ea5d930ac1df058bc6bf 100644 (file)
@@ -300,7 +300,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
                }
                break;
        default:
-               pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
+               pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
        }
 }
 
@@ -392,7 +392,7 @@ static void virtscsi_handle_event(struct work_struct *work)
                virtscsi_handle_param_change(vscsi, event);
                break;
        default:
-               pr_err("Unsupport virtio scsi event %x\n", event->event);
+               pr_err("Unsupported virtio scsi event %x\n", event->event);
        }
        virtscsi_kick_event(vscsi, event_node);
 }
index 8179b69518b43904e79d64c16526ce480dfa3718..853096b7e84c39e6241250b12b28528e83d5804f 100644 (file)
@@ -5,7 +5,6 @@ config SOC_K210_SYSCTL
        depends on RISCV && SOC_CANAAN && OF
        default SOC_CANAAN
         select PM
-        select SIMPLE_PM_BUS
         select SYSCON
         select MFD_SYSCON
        help
index bda170d7b4a20ba40961e83c68ba3cde17b07a08..72fc2b539213500df590082d6d5973a8df8755ab 100644 (file)
@@ -98,7 +98,7 @@ void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len)
        if (ehdr->e_phnum < 2)
                return ERR_PTR(-EINVAL);
 
-       if (phdrs[0].p_type == PT_LOAD || phdrs[1].p_type == PT_LOAD)
+       if (phdrs[0].p_type == PT_LOAD)
                return ERR_PTR(-EINVAL);
 
        if ((phdrs[1].p_flags & QCOM_MDT_TYPE_MASK) != QCOM_MDT_TYPE_HASH)
index 9faf48302f4bc012fd3ea805ea309b71bff943d6..52e581167115599127c8b8b89395bb5a7699c66f 100644 (file)
@@ -628,7 +628,7 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
        /* Feed the soc specific unique data into entropy pool */
        add_device_randomness(info, item_size);
 
-       platform_set_drvdata(pdev, qs->soc_dev);
+       platform_set_drvdata(pdev, qs);
 
        return 0;
 }
index ea64e187854eb8579a5a1ff1e1eaf9a6aeeaedd8..f32e1cbbe8c52f729925cb05baff6ee8d99b80ce 100644 (file)
@@ -825,25 +825,28 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
        writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl);
        spin_unlock_irqrestore(&reset->lock, flags);
 
-       if (!has_rstst)
-               goto exit;
+       /* wait for the reset bit to clear */
+       ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+                                               reset->prm->data->rstctrl,
+                                               v, !(v & BIT(id)), 1,
+                                               OMAP_RESET_MAX_WAIT);
+       if (ret)
+               pr_err("%s: timedout waiting for %s:%lu\n", __func__,
+                      reset->prm->data->name, id);
 
        /* wait for the status to be set */
-       ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+       if (has_rstst) {
+               ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
                                                 reset->prm->data->rstst,
                                                 v, v & BIT(st_bit), 1,
                                                 OMAP_RESET_MAX_WAIT);
-       if (ret)
-               pr_err("%s: timedout waiting for %s:%lu\n", __func__,
-                      reset->prm->data->name, id);
+               if (ret)
+                       pr_err("%s: timedout waiting for %s:%lu\n", __func__,
+                              reset->prm->data->name, id);
+       }
 
-exit:
-       if (reset->clkdm) {
-               /* At least dra7 iva needs a delay before clkdm idle */
-               if (has_rstst)
-                       udelay(1);
+       if (reset->clkdm)
                pdata->clkdm_allow_idle(reset->clkdm);
-       }
 
        return ret;
 }
index 788dcdf25f003019cd76757b6435e97a94a434ff..f872cf196c2f324d20a4e27e61c48575b401b3dc 100644 (file)
@@ -1301,7 +1301,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
         * DMA map early, for performance (empties dcache ASAP) and
         * better fault reporting.
         */
-       if ((!master->cur_msg_mapped)
+       if ((!master->cur_msg->is_dma_mapped)
                && as->use_pdc) {
                if (atmel_spi_dma_map_xfer(as, xfer) < 0)
                        return -ENOMEM;
@@ -1381,7 +1381,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
                }
        }
 
-       if (!master->cur_msg_mapped
+       if (!master->cur_msg->is_dma_mapped
                && as->use_pdc)
                atmel_spi_dma_unmap_xfer(master, xfer);
 
index a78e56f566dd8933169fa6615bc3d6cb61a33305..3043677ba22268bd0f95d178c05e5b0c7a3b530c 100644 (file)
@@ -1250,10 +1250,14 @@ static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
 
 static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
 {
+       u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);
+
        bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
        if (has_bspi(qspi))
                bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
 
+       /* clear interrupt */
+       bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status & ~1);
 }
 
 static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
@@ -1397,6 +1401,47 @@ int bcm_qspi_probe(struct platform_device *pdev,
        if (!qspi->dev_ids)
                return -ENOMEM;
 
+       /*
+        * Some SoCs integrate spi controller (e.g., its interrupt bits)
+        * in specific ways
+        */
+       if (soc_intc) {
+               qspi->soc_intc = soc_intc;
+               soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
+       } else {
+               qspi->soc_intc = NULL;
+       }
+
+       if (qspi->clk) {
+               ret = clk_prepare_enable(qspi->clk);
+               if (ret) {
+                       dev_err(dev, "failed to prepare clock\n");
+                       goto qspi_probe_err;
+               }
+               qspi->base_clk = clk_get_rate(qspi->clk);
+       } else {
+               qspi->base_clk = MSPI_BASE_FREQ;
+       }
+
+       if (data->has_mspi_rev) {
+               rev = bcm_qspi_read(qspi, MSPI, MSPI_REV);
+               /* some older revs do not have a MSPI_REV register */
+               if ((rev & 0xff) == 0xff)
+                       rev = 0;
+       }
+
+       qspi->mspi_maj_rev = (rev >> 4) & 0xf;
+       qspi->mspi_min_rev = rev & 0xf;
+       qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk;
+
+       qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
+
+       /*
+        * On SW resets it is possible to have the mask still enabled
+        * Need to disable the mask and clear the status while we init
+        */
+       bcm_qspi_hw_uninit(qspi);
+
        for (val = 0; val < num_irqs; val++) {
                irq = -1;
                name = qspi_irq_tab[val].irq_name;
@@ -1433,38 +1478,6 @@ int bcm_qspi_probe(struct platform_device *pdev,
                goto qspi_probe_err;
        }
 
-       /*
-        * Some SoCs integrate spi controller (e.g., its interrupt bits)
-        * in specific ways
-        */
-       if (soc_intc) {
-               qspi->soc_intc = soc_intc;
-               soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
-       } else {
-               qspi->soc_intc = NULL;
-       }
-
-       ret = clk_prepare_enable(qspi->clk);
-       if (ret) {
-               dev_err(dev, "failed to prepare clock\n");
-               goto qspi_probe_err;
-       }
-
-       qspi->base_clk = clk_get_rate(qspi->clk);
-
-       if (data->has_mspi_rev) {
-               rev = bcm_qspi_read(qspi, MSPI, MSPI_REV);
-               /* some older revs do not have a MSPI_REV register */
-               if ((rev & 0xff) == 0xff)
-                       rev = 0;
-       }
-
-       qspi->mspi_maj_rev = (rev >> 4) & 0xf;
-       qspi->mspi_min_rev = rev & 0xf;
-       qspi->mspi_spcr3_sysclk = data->has_spcr3_sysclk;
-
-       qspi->max_speed_hz = qspi->base_clk / (bcm_qspi_spbr_min(qspi) * 2);
-
        bcm_qspi_hw_init(qspi);
        init_completion(&qspi->mspi_done);
        init_completion(&qspi->bspi_done);
index 386e8c84be0af24c951526554b37e24cfbc4ae79..a15de10ee286a9e4f7760295fc2420fd8b51c68c 100644 (file)
@@ -233,36 +233,44 @@ static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
                return delay;
        inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
 
-       setup    = setup ? setup : 1;
-       hold     = hold ? hold : 1;
-       inactive = inactive ? inactive : 1;
-
-       reg_val = readl(mdata->base + SPI_CFG0_REG);
-       if (mdata->dev_comp->enhance_timing) {
-               hold = min_t(u32, hold, 0x10000);
-               setup = min_t(u32, setup, 0x10000);
-               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
-               reg_val |= (((hold - 1) & 0xffff)
-                          << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
-               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
-               reg_val |= (((setup - 1) & 0xffff)
-                          << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
-       } else {
-               hold = min_t(u32, hold, 0x100);
-               setup = min_t(u32, setup, 0x100);
-               reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
-               reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
-               reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
-               reg_val |= (((setup - 1) & 0xff)
-                           << SPI_CFG0_CS_SETUP_OFFSET);
+       if (hold || setup) {
+               reg_val = readl(mdata->base + SPI_CFG0_REG);
+               if (mdata->dev_comp->enhance_timing) {
+                       if (hold) {
+                               hold = min_t(u32, hold, 0x10000);
+                               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+                               reg_val |= (((hold - 1) & 0xffff)
+                                       << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+                       }
+                       if (setup) {
+                               setup = min_t(u32, setup, 0x10000);
+                               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+                               reg_val |= (((setup - 1) & 0xffff)
+                                       << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+                       }
+               } else {
+                       if (hold) {
+                               hold = min_t(u32, hold, 0x100);
+                               reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
+                               reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
+                       }
+                       if (setup) {
+                               setup = min_t(u32, setup, 0x100);
+                               reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
+                               reg_val |= (((setup - 1) & 0xff)
+                                       << SPI_CFG0_CS_SETUP_OFFSET);
+                       }
+               }
+               writel(reg_val, mdata->base + SPI_CFG0_REG);
        }
-       writel(reg_val, mdata->base + SPI_CFG0_REG);
 
-       inactive = min_t(u32, inactive, 0x100);
-       reg_val = readl(mdata->base + SPI_CFG1_REG);
-       reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
-       reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
-       writel(reg_val, mdata->base + SPI_CFG1_REG);
+       if (inactive) {
+               inactive = min_t(u32, inactive, 0x100);
+               reg_val = readl(mdata->base + SPI_CFG1_REG);
+               reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
+               reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
+               writel(reg_val, mdata->base + SPI_CFG1_REG);
+       }
 
        return 0;
 }
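
The spi-mt65xx hunk above performs the read-modify-write of a chip-select timing field only when the corresponding parameter is non-zero, leaving the other fields untouched. A compact userspace sketch of that guarded update, with illustrative field offsets in place of the driver's SPI_CFG0 definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define CS_HOLD_SHIFT  0    /* illustrative offsets, not the driver's values */
    #define CS_SETUP_SHIFT 8

    /* fake register backing store standing in for SPI_CFG0_REG */
    static uint32_t cfg0;

    static void set_cs_timing(uint32_t setup, uint32_t hold)
    {
        uint32_t v = cfg0;   /* readl() */

        /* only touch a field when the caller actually asked for it */
        if (hold) {
            if (hold > 0x100)
                hold = 0x100;
            v &= ~(0xffu << CS_HOLD_SHIFT);
            v |= ((hold - 1) & 0xff) << CS_HOLD_SHIFT;
        }
        if (setup) {
            if (setup > 0x100)
                setup = 0x100;
            v &= ~(0xffu << CS_SETUP_SHIFT);
            v |= ((setup - 1) & 0xff) << CS_SETUP_SHIFT;
        }
        cfg0 = v;            /* writel() */
    }

    int main(void)
    {
        set_cs_timing(16, 0);   /* leaves the hold field untouched */
        printf("cfg0 = 0x%08x\n", cfg0);
        return 0;
    }
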
index 9708b7827ff7068ab484b4132675131247cc6232..f5d32ec4634e341d51b20446e041886b07dbb871 100644 (file)
@@ -137,6 +137,13 @@ static int spi_mux_probe(struct spi_device *spi)
        priv = spi_controller_get_devdata(ctlr);
        priv->spi = spi;
 
+       /*
+        * Increase lockdep class as these lock are taken while the parent bus
+        * already holds their instance's lock.
+        */
+       lockdep_set_subclass(&ctlr->io_mutex, 1);
+       lockdep_set_subclass(&ctlr->add_lock, 1);
+
        priv->mux = devm_mux_control_get(&spi->dev, NULL);
        if (IS_ERR(priv->mux)) {
                ret = dev_err_probe(&spi->dev, PTR_ERR(priv->mux),
index a66fa97046ee1d8f17f0d11ed5e3fb688e37a87b..2b0301fc971c6cb98d4140f2cca1d44955a695e8 100644 (file)
@@ -33,6 +33,7 @@
 
 #include <linux/acpi.h>
 #include <linux/bitops.h>
+#include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/delay.h>
 #define NXP_FSPI_MIN_IOMAP     SZ_4M
 
 #define DCFG_RCWSR1            0x100
+#define SYS_PLL_RAT            GENMASK(6, 2)
 
 /* Access flash memory using IP bus only */
 #define FSPI_QUIRK_USE_IP_ONLY BIT(0)
@@ -926,9 +928,8 @@ static void erratum_err050568(struct nxp_fspi *f)
                { .family = "QorIQ LS1028A" },
                { /* sentinel */ }
        };
-       struct device_node *np;
        struct regmap *map;
-       u32 val = 0, sysclk = 0;
+       u32 val, sys_pll_ratio;
        int ret;
 
        /* Check for LS1028A family */
@@ -937,7 +938,6 @@ static void erratum_err050568(struct nxp_fspi *f)
                return;
        }
 
-       /* Compute system clock frequency multiplier ratio */
        map = syscon_regmap_lookup_by_compatible("fsl,ls1028a-dcfg");
        if (IS_ERR(map)) {
                dev_err(f->dev, "No syscon regmap\n");
@@ -948,23 +948,11 @@ static void erratum_err050568(struct nxp_fspi *f)
        if (ret < 0)
                goto err;
 
-       /* Strap bits 6:2 define SYS_PLL_RAT i.e frequency multiplier ratio */
-       val = (val >> 2) & 0x1F;
-       WARN(val == 0, "Strapping is zero: Cannot determine ratio");
+       sys_pll_ratio = FIELD_GET(SYS_PLL_RAT, val);
+       dev_dbg(f->dev, "val: 0x%08x, sys_pll_ratio: %d\n", val, sys_pll_ratio);
 
-       /* Compute system clock frequency */
-       np = of_find_node_by_name(NULL, "clock-sysclk");
-       if (!np)
-               goto err;
-
-       if (of_property_read_u32(np, "clock-frequency", &sysclk))
-               goto err;
-
-       sysclk = (sysclk * val) / 1000000; /* Convert sysclk to Mhz */
-       dev_dbg(f->dev, "val: 0x%08x, sysclk: %dMhz\n", val, sysclk);
-
-       /* Use IP bus only if PLL is 300MHz */
-       if (sysclk == 300)
+       /* Use IP bus only if platform clock is 300MHz */
+       if (sys_pll_ratio == 3)
                f->devtype_data->quirks |= FSPI_QUIRK_USE_IP_ONLY;
 
        return;
index 540861ca2ba37d4ceffde5d974b0d3f760a3c412..553b6b9d02222ba835f468ec32acdd3ccd60155c 100644 (file)
@@ -600,6 +600,12 @@ static int rockchip_spi_transfer_one(
        int ret;
        bool use_dma;
 
+       /* Zero length transfers won't trigger an interrupt on completion */
+       if (!xfer->len) {
+               spi_finalize_current_transfer(ctlr);
+               return 1;
+       }
+
        WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
                (readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
 
index ebd27f88303315db415e077e75a14a34170337b6..713292b0c71ea74889882375e7eff373dffb353b 100644 (file)
@@ -204,9 +204,6 @@ struct tegra_slink_data {
        struct dma_async_tx_descriptor          *tx_dma_desc;
 };
 
-static int tegra_slink_runtime_suspend(struct device *dev);
-static int tegra_slink_runtime_resume(struct device *dev);
-
 static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
                unsigned long reg)
 {
@@ -1185,7 +1182,7 @@ static int tegra_slink_resume(struct device *dev)
 }
 #endif
 
-static int tegra_slink_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
 {
        struct spi_master *master = dev_get_drvdata(dev);
        struct tegra_slink_data *tspi = spi_master_get_devdata(master);
index 57e2499ec1ed9ad2bf08fd870e6aea09108fdcf7..926b68aa45d3ed2053bbc128a16d5935d8108be8 100644 (file)
@@ -58,10 +58,6 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
        const struct spi_device *spi = to_spi_device(dev);
        int len;
 
-       len = of_device_modalias(dev, buf, PAGE_SIZE);
-       if (len != -ENODEV)
-               return len;
-
        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;
@@ -367,10 +363,6 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
        const struct spi_device         *spi = to_spi_device(dev);
        int rc;
 
-       rc = of_device_uevent_modalias(dev, env);
-       if (rc != -ENODEV)
-               return rc;
-
        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;
@@ -486,12 +478,6 @@ static LIST_HEAD(spi_controller_list);
  */
 static DEFINE_MUTEX(board_lock);
 
-/*
- * Prevents addition of devices with same chip select and
- * addition of devices below an unregistering controller.
- */
-static DEFINE_MUTEX(spi_add_lock);
-
 /**
  * spi_alloc_device - Allocate a new SPI device
  * @ctlr: Controller to which device is connected
@@ -644,9 +630,9 @@ int spi_add_device(struct spi_device *spi)
         * chipselect **BEFORE** we call setup(), else we'll trash
         * its configuration.  Lock against concurrent add() calls.
         */
-       mutex_lock(&spi_add_lock);
+       mutex_lock(&ctlr->add_lock);
        status = __spi_add_device(spi);
-       mutex_unlock(&spi_add_lock);
+       mutex_unlock(&ctlr->add_lock);
        return status;
 }
 EXPORT_SYMBOL_GPL(spi_add_device);
@@ -666,7 +652,7 @@ static int spi_add_device_locked(struct spi_device *spi)
        /* Set the bus ID string */
        spi_dev_set_name(spi);
 
-       WARN_ON(!mutex_is_locked(&spi_add_lock));
+       WARN_ON(!mutex_is_locked(&ctlr->add_lock));
        return __spi_add_device(spi);
 }
 
@@ -2561,6 +2547,12 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
                return NULL;
 
        device_initialize(&ctlr->dev);
+       INIT_LIST_HEAD(&ctlr->queue);
+       spin_lock_init(&ctlr->queue_lock);
+       spin_lock_init(&ctlr->bus_lock_spinlock);
+       mutex_init(&ctlr->bus_lock_mutex);
+       mutex_init(&ctlr->io_mutex);
+       mutex_init(&ctlr->add_lock);
        ctlr->bus_num = -1;
        ctlr->num_chipselect = 1;
        ctlr->slave = slave;
@@ -2833,11 +2825,6 @@ int spi_register_controller(struct spi_controller *ctlr)
                        return id;
                ctlr->bus_num = id;
        }
-       INIT_LIST_HEAD(&ctlr->queue);
-       spin_lock_init(&ctlr->queue_lock);
-       spin_lock_init(&ctlr->bus_lock_spinlock);
-       mutex_init(&ctlr->bus_lock_mutex);
-       mutex_init(&ctlr->io_mutex);
        ctlr->bus_lock_flag = 0;
        init_completion(&ctlr->xfer_completion);
        if (!ctlr->max_dma_len)
@@ -2974,7 +2961,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
 
        /* Prevent addition of new devices, unregister existing ones */
        if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
-               mutex_lock(&spi_add_lock);
+               mutex_lock(&ctlr->add_lock);
 
        device_for_each_child(&ctlr->dev, NULL, __unregister);
 
@@ -3005,7 +2992,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
        mutex_unlock(&board_lock);
 
        if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
-               mutex_unlock(&spi_add_lock);
+               mutex_unlock(&ctlr->add_lock);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_controller);
 
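
The spi.c hunks above replace the single global spi_add_lock with a per-controller ctlr->add_lock and move the queue/lock initialization from spi_register_controller() into __spi_alloc_controller(), so every lock exists from the moment the controller is allocated. A minimal sketch of what a controller driver sees (hypothetical foo_* names, details elided):

    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    static int foo_spi_probe(struct platform_device *pdev)
    {
            struct spi_controller *ctlr;
            int ret;

            /* ctlr->add_lock, queue_lock, bus_lock_mutex and io_mutex are
             * initialized inside __spi_alloc_controller(), i.e. before the
             * controller is ever registered */
            ctlr = spi_alloc_master(&pdev->dev, 0);
            if (!ctlr)
                    return -ENOMEM;

            ctlr->num_chipselect = 1;
            /* ... set bus_num, transfer hooks, etc. ... */

            ret = devm_spi_register_controller(&pdev->dev, ctlr);
            if (ret)
                    spi_controller_put(ctlr);
            return ret;
    }
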
index 6dc29ce3b4bf8664e788fc73f195ad2b41029794..1bd73e322b7bbdcd2874d97668592791e639c4b4 100644 (file)
@@ -673,6 +673,19 @@ static const struct file_operations spidev_fops = {
 
 static struct class *spidev_class;
 
+static const struct spi_device_id spidev_spi_ids[] = {
+       { .name = "dh2228fv" },
+       { .name = "ltc2488" },
+       { .name = "sx1301" },
+       { .name = "bk4" },
+       { .name = "dhcom-board" },
+       { .name = "m53cpld" },
+       { .name = "spi-petra" },
+       { .name = "spi-authenta" },
+       {},
+};
+MODULE_DEVICE_TABLE(spi, spidev_spi_ids);
+
 #ifdef CONFIG_OF
 static const struct of_device_id spidev_dt_ids[] = {
        { .compatible = "rohm,dh2228fv" },
@@ -818,6 +831,7 @@ static struct spi_driver spidev_spi_driver = {
        },
        .probe =        spidev_probe,
        .remove =       spidev_remove,
+       .id_table =     spidev_spi_ids,
 
        /* NOTE:  suspend/resume methods are not necessary here.
         * We don't do anything except pass the requests to/from
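
The spidev hunk adds a plain spi_device_id table next to the existing OF table; together with the spi.c modalias/uevent hunks above (which stop reporting an OF modalias for SPI devices), this keeps driver matching and module autoloading working for devices instantiated by name. The usual both-tables shape for an SPI driver, sketched with a hypothetical "vendor,foo" part:

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>
    #include <linux/spi/spi.h>

    static int foo_probe(struct spi_device *spi)
    {
            return 0;
    }

    static const struct of_device_id foo_of_ids[] = {
            { .compatible = "vendor,foo" },
            { }
    };
    MODULE_DEVICE_TABLE(of, foo_of_ids);

    /* matched against the name without the vendor prefix, e.g. "foo" */
    static const struct spi_device_id foo_spi_ids[] = {
            { .name = "foo" },
            { }
    };
    MODULE_DEVICE_TABLE(spi, foo_spi_ids);

    static struct spi_driver foo_driver = {
            .driver = {
                    .name           = "foo",
                    .of_match_table = foo_of_ids,
            },
            .probe    = foo_probe,
            .id_table = foo_spi_ids,
    };
    module_spi_driver(foo_driver);
    MODULE_LICENSE("GPL");
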
index e6d860a9678ed089073a9643e516599544571440..dc4ed0ff1ae277fc0041276f3340aecc6ec0059e 100644 (file)
@@ -761,6 +761,17 @@ out:
        gbphy_runtime_put_autosuspend(gb_tty->gbphy_dev);
 }
 
+static void gb_tty_port_destruct(struct tty_port *port)
+{
+       struct gb_tty *gb_tty = container_of(port, struct gb_tty, port);
+
+       if (gb_tty->minor != GB_NUM_MINORS)
+               release_minor(gb_tty);
+       kfifo_free(&gb_tty->write_fifo);
+       kfree(gb_tty->buffer);
+       kfree(gb_tty);
+}
+
 static const struct tty_operations gb_ops = {
        .install =              gb_tty_install,
        .open =                 gb_tty_open,
@@ -786,6 +797,7 @@ static const struct tty_port_operations gb_port_ops = {
        .dtr_rts =              gb_tty_dtr_rts,
        .activate =             gb_tty_port_activate,
        .shutdown =             gb_tty_port_shutdown,
+       .destruct =             gb_tty_port_destruct,
 };
 
 static int gb_uart_probe(struct gbphy_device *gbphy_dev,
@@ -798,17 +810,11 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
        int retval;
        int minor;
 
-       gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
-       if (!gb_tty)
-               return -ENOMEM;
-
        connection = gb_connection_create(gbphy_dev->bundle,
                                          le16_to_cpu(gbphy_dev->cport_desc->id),
                                          gb_uart_request_handler);
-       if (IS_ERR(connection)) {
-               retval = PTR_ERR(connection);
-               goto exit_tty_free;
-       }
+       if (IS_ERR(connection))
+               return PTR_ERR(connection);
 
        max_payload = gb_operation_get_payload_size_max(connection);
        if (max_payload < sizeof(struct gb_uart_send_data_request)) {
@@ -816,13 +822,23 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
                goto exit_connection_destroy;
        }
 
+       gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
+       if (!gb_tty) {
+               retval = -ENOMEM;
+               goto exit_connection_destroy;
+       }
+
+       tty_port_init(&gb_tty->port);
+       gb_tty->port.ops = &gb_port_ops;
+       gb_tty->minor = GB_NUM_MINORS;
+
        gb_tty->buffer_payload_max = max_payload -
                        sizeof(struct gb_uart_send_data_request);
 
        gb_tty->buffer = kzalloc(gb_tty->buffer_payload_max, GFP_KERNEL);
        if (!gb_tty->buffer) {
                retval = -ENOMEM;
-               goto exit_connection_destroy;
+               goto exit_put_port;
        }
 
        INIT_WORK(&gb_tty->tx_work, gb_uart_tx_write_work);
@@ -830,7 +846,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
        retval = kfifo_alloc(&gb_tty->write_fifo, GB_UART_WRITE_FIFO_SIZE,
                             GFP_KERNEL);
        if (retval)
-               goto exit_buf_free;
+               goto exit_put_port;
 
        gb_tty->credits = GB_UART_FIRMWARE_CREDITS;
        init_completion(&gb_tty->credits_complete);
@@ -844,7 +860,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
                } else {
                        retval = minor;
                }
-               goto exit_kfifo_free;
+               goto exit_put_port;
        }
 
        gb_tty->minor = minor;
@@ -853,9 +869,6 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
        init_waitqueue_head(&gb_tty->wioctl);
        mutex_init(&gb_tty->mutex);
 
-       tty_port_init(&gb_tty->port);
-       gb_tty->port.ops = &gb_port_ops;
-
        gb_tty->connection = connection;
        gb_tty->gbphy_dev = gbphy_dev;
        gb_connection_set_data(connection, gb_tty);
@@ -863,7 +876,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
 
        retval = gb_connection_enable_tx(connection);
        if (retval)
-               goto exit_release_minor;
+               goto exit_put_port;
 
        send_control(gb_tty, gb_tty->ctrlout);
 
@@ -890,16 +903,10 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
 
 exit_connection_disable:
        gb_connection_disable(connection);
-exit_release_minor:
-       release_minor(gb_tty);
-exit_kfifo_free:
-       kfifo_free(&gb_tty->write_fifo);
-exit_buf_free:
-       kfree(gb_tty->buffer);
+exit_put_port:
+       tty_port_put(&gb_tty->port);
 exit_connection_destroy:
        gb_connection_destroy(connection);
-exit_tty_free:
-       kfree(gb_tty);
 
        return retval;
 }
@@ -930,15 +937,10 @@ static void gb_uart_remove(struct gbphy_device *gbphy_dev)
        gb_connection_disable_rx(connection);
        tty_unregister_device(gb_tty_driver, gb_tty->minor);
 
-       /* FIXME - free transmit / receive buffers */
-
        gb_connection_disable(connection);
-       tty_port_destroy(&gb_tty->port);
        gb_connection_destroy(connection);
-       release_minor(gb_tty);
-       kfifo_free(&gb_tty->write_fifo);
-       kfree(gb_tty->buffer);
-       kfree(gb_tty);
+
+       tty_port_put(&gb_tty->port);
 }
 
 static int gb_tty_init(void)
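
The gb_uart changes above move all teardown into tty_port refcounting: the port is initialized early in probe, the buffers, kfifo and minor are released from the port's .destruct callback, and every later failure path (as well as remove) simply drops its reference with tty_port_put(). A condensed sketch of that ownership pattern with hypothetical foo_* names:

    #include <linux/slab.h>
    #include <linux/tty.h>
    #include <linux/tty_port.h>

    struct foo_tty {
            struct tty_port port;
            void *buffer;
    };

    static void foo_port_destruct(struct tty_port *port)
    {
            struct foo_tty *foo = container_of(port, struct foo_tty, port);

            /* last reference dropped: free everything the port owns */
            kfree(foo->buffer);
            kfree(foo);
    }

    static const struct tty_port_operations foo_port_ops = {
            .destruct = foo_port_destruct,
    };

    static int foo_probe(struct device *dev)
    {
            struct foo_tty *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

            if (!foo)
                    return -ENOMEM;

            tty_port_init(&foo->port);
            foo->port.ops = &foo_port_ops;

            foo->buffer = kzalloc(64, GFP_KERNEL);
            if (!foo->buffer) {
                    /* drops the initial reference; .destruct does the freeing */
                    tty_port_put(&foo->port);
                    return -ENOMEM;
            }
            /* ... register the tty device ... */
            return 0;
    }
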
index 8e085dda0c18d29ca62093b0d65add503f008285..712e01c37870c2d2b76530be1c928cedac290ddc 100644 (file)
@@ -1646,6 +1646,8 @@ static input_system_err_t input_system_configure_channel_sensor(
        default:
                return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
        }
+
+       return INPUT_SYSTEM_ERR_NO_ERROR;
 }
 
 // Test flags and set structure.
index 8a2edd67f2c6fc8031dd0a61784bd7a378ae8601..20e5081588719bc6fad73d6873c33dd6f279315c 100644 (file)
@@ -919,7 +919,7 @@ static int hantro_probe(struct platform_device *pdev)
                if (!vpu->variant->irqs[i].handler)
                        continue;
 
-               if (vpu->variant->num_clocks > 1) {
+               if (vpu->variant->num_irqs > 1) {
                        irq_name = vpu->variant->irqs[i].name;
                        irq = platform_get_irq_byname(vpu->pdev, irq_name);
                } else {
index c589fe9dae701a2582e6a05f42d26edfe7db88a8..825af5fd35e0fc0bd7008a68ee9622cf65ef6c7b 100644 (file)
@@ -135,7 +135,7 @@ void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
                sizeimage = bytesperline * height;
 
                /* Chroma plane size. */
-               sizeimage += bytesperline * height / 2;
+               sizeimage += bytesperline * ALIGN(height, 64) / 2;
 
                break;
 
index a6d589e89aeb01f171829aa180f0a34f97052baf..f27eba72d646f116d8e6eaf364ed5b7fd8da5391 100644 (file)
@@ -248,7 +248,7 @@ void rtw_hal_update_ra_mask(struct adapter *adapt, u32 mac_id, u8 rssi_level)
 #ifdef CONFIG_88EU_AP_MODE
                struct sta_info *psta = NULL;
                struct sta_priv *pstapriv = &adapt->stapriv;
-               if ((mac_id - 1) > 0)
+               if (mac_id >= 2)
                        psta = pstapriv->sta_aid[(mac_id - 1) - 1];
                if (psta)
                        add_RATid(adapt, psta, 0);/* todo: based on rssi_level*/
index 81d4255d1785c6c1aeead854c9b7d4e69d315590..1fd37507600185e1e33e7da7df798bbaa8bf80cf 100644 (file)
@@ -5372,8 +5372,8 @@ static int rtw_mp_read_reg(struct net_device *dev,
 
                        pnext++;
                        if (*pnext != '\0') {
-                                 strtout = simple_strtoul(pnext, &ptmp, 16);
-                                 sprintf(extra, "%s %d", extra, strtout);
+                               strtout = simple_strtoul(pnext, &ptmp, 16);
+                               sprintf(extra + strlen(extra), " %d", strtout);
                        } else {
                                  break;
                        }
@@ -5405,7 +5405,7 @@ static int rtw_mp_read_reg(struct net_device *dev,
                        pnext++;
                        if (*pnext != '\0') {
                                strtout = simple_strtoul(pnext, &ptmp, 16);
-                               sprintf(extra, "%s %d", extra, strtout);
+                               sprintf(extra + strlen(extra), " %d", strtout);
                        } else {
                                break;
                        }
@@ -5512,7 +5512,7 @@ static int rtw_mp_read_rf(struct net_device *dev,
                pnext++;
                if (*pnext != '\0') {
                          strtou = simple_strtoul(pnext, &ptmp, 16);
-                         sprintf(extra, "%s %d", extra, strtou);
+                         sprintf(extra + strlen(extra), " %d", strtou);
                } else {
                          break;
                }
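
The three hunks above in the staging wifi ioctl code replace sprintf(extra, "%s %d", extra, ...) with an append at extra + strlen(extra): passing the destination buffer as one of its own source arguments is undefined behaviour. The same append written defensively with an explicit bound, as a standalone sketch:

    #include <linux/kernel.h>
    #include <linux/string.h>

    /* append " <val>" to buf without ever reading buf as a %s argument */
    static void append_value(char *buf, size_t size, unsigned long val)
    {
            size_t used = strlen(buf);

            if (used >= size)
                    return;
            scnprintf(buf + used, size - used, " %lu", val);
    }
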
index b25369a134525e0c1bf03c45a00fe3b82f964824..967f10b9582a826c8d0101e273f710a1e46bba71 100644 (file)
@@ -182,7 +182,7 @@ create_pagelist(char *buf, char __user *ubuf,
                offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
        num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
 
-       if (num_pages > (SIZE_MAX - sizeof(struct pagelist) -
+       if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
                         sizeof(struct vchiq_pagelist_info)) /
                        (sizeof(u32) + sizeof(pages[0]) +
                         sizeof(struct scatterlist)))
index 102ec644bc8a0f894c9a38721228ce2ff52963fc..023bd4516a681c1d53e90d5bc004faeda6ae7e37 100644 (file)
@@ -1110,20 +1110,24 @@ static ssize_t alua_support_store(struct config_item *item,
 {
        struct se_dev_attrib *da = to_attrib(item);
        struct se_device *dev = da->da_dev;
-       bool flag;
+       bool flag, oldflag;
        int ret;
 
+       ret = strtobool(page, &flag);
+       if (ret < 0)
+               return ret;
+
+       oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
+       if (flag == oldflag)
+               return count;
+
        if (!(dev->transport->transport_flags_changeable &
              TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
                pr_err("dev[%p]: Unable to change SE Device alua_support:"
                        " alua_support has fixed value\n", dev);
-               return -EINVAL;
+               return -ENOSYS;
        }
 
-       ret = strtobool(page, &flag);
-       if (ret < 0)
-               return ret;
-
        if (flag)
                dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
        else
@@ -1145,20 +1149,24 @@ static ssize_t pgr_support_store(struct config_item *item,
 {
        struct se_dev_attrib *da = to_attrib(item);
        struct se_device *dev = da->da_dev;
-       bool flag;
+       bool flag, oldflag;
        int ret;
 
+       ret = strtobool(page, &flag);
+       if (ret < 0)
+               return ret;
+
+       oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
+       if (flag == oldflag)
+               return count;
+
        if (!(dev->transport->transport_flags_changeable &
              TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
                pr_err("dev[%p]: Unable to change SE Device pgr_support:"
                        " pgr_support has fixed value\n", dev);
-               return -EINVAL;
+               return -ENOSYS;
        }
 
-       ret = strtobool(page, &flag);
-       if (ret < 0)
-               return ret;
-
        if (flag)
                dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
        else
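
Both configfs store hunks above reorder the checks: parse the user's boolean first, treat a write of the current value as a successful no-op, and only then fail with -ENOSYS when the backend cannot toggle the flag at all. The same ordering in isolation, with a hypothetical foo_dev and flag:

    #include <linux/bits.h>
    #include <linux/configfs.h>
    #include <linux/string.h>

    #define FOO_FLAG_PASSTHROUGH    BIT(0)

    struct foo_dev {
            struct config_item item;
            unsigned int flags;
            bool flags_changeable;
    };

    static ssize_t foo_support_store(struct config_item *item,
                                     const char *page, size_t count)
    {
            struct foo_dev *dev = container_of(item, struct foo_dev, item);
            bool flag, oldflag;
            int ret;

            ret = strtobool(page, &flag);
            if (ret < 0)
                    return ret;

            oldflag = !(dev->flags & FOO_FLAG_PASSTHROUGH);
            if (flag == oldflag)
                    return count;           /* no-op writes always succeed */

            if (!dev->flags_changeable)
                    return -ENOSYS;         /* value is fixed for this backend */

            if (flag)
                    dev->flags &= ~FOO_FLAG_PASSTHROUGH;
            else
                    dev->flags |= FOO_FLAG_PASSTHROUGH;

            return count;
    }
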
index 4b94b085625b40dad2b91e720afdadd2bd1a7449..3829b61b56c124076948e955d912eb38e6ce3d2c 100644 (file)
@@ -269,7 +269,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
        spin_lock(&dev->dev_reservation_lock);
        if (dev->reservation_holder &&
            dev->reservation_holder->se_node_acl != sess->se_node_acl) {
-               pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+               pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n",
                        tpg->se_tpg_tfo->fabric_name);
                pr_err("Original reserver LUN: %llu %s\n",
                        cmd->se_lun->unpacked_lun,
index 5ce13b099d7dc8fb7f665ff869f0c3f7cf023359..5363ebebfc357f480515a019e769091f095b69bd 100644 (file)
@@ -585,6 +585,9 @@ static int optee_remove(struct platform_device *pdev)
 {
        struct optee *optee = platform_get_drvdata(pdev);
 
+       /* Unregister OP-TEE specific client devices on TEE bus */
+       optee_unregister_devices();
+
        /*
         * Ask OP-TEE to free all cached shared memory objects to decrease
         * reference counters and also avoid wild pointers in secure world
index ec1d24693ebaae10d89874fc67ead117e961daa8..128a2d2a50a16c540d42bdc5dc1c5fdb84bd328a 100644 (file)
@@ -53,6 +53,13 @@ static int get_devices(struct tee_context *ctx, u32 session,
        return 0;
 }
 
+static void optee_release_device(struct device *dev)
+{
+       struct tee_client_device *optee_device = to_tee_client_device(dev);
+
+       kfree(optee_device);
+}
+
 static int optee_register_device(const uuid_t *device_uuid)
 {
        struct tee_client_device *optee_device = NULL;
@@ -63,6 +70,7 @@ static int optee_register_device(const uuid_t *device_uuid)
                return -ENOMEM;
 
        optee_device->dev.bus = &tee_bus_type;
+       optee_device->dev.release = optee_release_device;
        if (dev_set_name(&optee_device->dev, "optee-ta-%pUb", device_uuid)) {
                kfree(optee_device);
                return -ENOMEM;
@@ -154,3 +162,17 @@ int optee_enumerate_devices(u32 func)
 {
        return  __optee_enumerate_devices(func);
 }
+
+static int __optee_unregister_device(struct device *dev, void *data)
+{
+       if (!strncmp(dev_name(dev), "optee-ta", strlen("optee-ta")))
+               device_unregister(dev);
+
+       return 0;
+}
+
+void optee_unregister_devices(void)
+{
+       bus_for_each_dev(&tee_bus_type, NULL, NULL,
+                        __optee_unregister_device);
+}
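
The optee device.c hunks fix two related lifetime problems: a struct device needs a .release callback so its memory is freed when the last reference goes away (not at unregister time), and optee_remove() now walks the TEE bus to unregister every "optee-ta-*" client device it created. The release/put half of that pattern in isolation, with a hypothetical foo_client_device:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct foo_client_device {
            struct device dev;
    };

    static void foo_release_device(struct device *dev)
    {
            /* runs when the last reference is dropped, possibly long after
             * device_unregister(); this is the only safe place to kfree() */
            kfree(container_of(dev, struct foo_client_device, dev));
    }

    static int foo_register_device(struct device *parent, struct bus_type *bus)
    {
            struct foo_client_device *fdev;
            int ret;

            fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
            if (!fdev)
                    return -ENOMEM;

            fdev->dev.parent = parent;
            fdev->dev.bus = bus;
            fdev->dev.release = foo_release_device;
            ret = dev_set_name(&fdev->dev, "foo-client-0");
            if (ret) {
                    kfree(fdev);            /* not registered yet: plain kfree */
                    return ret;
            }

            ret = device_register(&fdev->dev);
            if (ret) {
                    put_device(&fdev->dev); /* after register: ->release frees it */
                    return ret;
            }
            return 0;
    }
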
index dbdd367be1568330aa5481f9924932196282d5d4..f6bb4a763ba94e80f0463cc42957702ef47f1b93 100644 (file)
@@ -184,6 +184,7 @@ void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
 #define PTA_CMD_GET_DEVICES            0x0
 #define PTA_CMD_GET_DEVICES_SUPP       0x1
 int optee_enumerate_devices(u32 func);
+void optee_unregister_devices(void);
 
 /*
  * Small helpers
index c41a9a501a6e9d8fcf5efce43259aca6466368b6..d167039af519eb904d9bc432e57e2e20b7fa9a57 100644 (file)
@@ -35,7 +35,7 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
                unsigned int nr_pages = 1 << order, i;
                struct page **pages;
 
-               pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL);
+               pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
                if (!pages) {
                        rc = -ENOMEM;
                        goto err;
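
The kcalloc one-liner above is harmless in practice here (pages is a struct page **, so sizeof(pages) and sizeof(*pages) are both pointer-sized), but sizing the allocation from the dereferenced destination is the robust idiom and is what static checkers expect:

    #include <linux/mm_types.h>
    #include <linux/slab.h>

    static struct page **alloc_page_array(unsigned int nr_pages)
    {
            struct page **pages;

            /* size each element as *pages (a struct page *), never as pages */
            pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
            return pages;
    }
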
index 0f0038af2ad483c0ffbcb2cfa4ba3d8f87b39250..fb64acfd5e07d6eb295816cd7efe481cfc9f4966 100644 (file)
@@ -107,7 +107,7 @@ static int tcc_offset_update(unsigned int tcc)
        return 0;
 }
 
-static unsigned int tcc_offset_save;
+static int tcc_offset_save = -1;
 
 static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
@@ -352,7 +352,8 @@ int proc_thermal_resume(struct device *dev)
        proc_dev = dev_get_drvdata(dev);
        proc_thermal_read_ppcc(proc_dev);
 
-       tcc_offset_update(tcc_offset_save);
+       if (tcc_offset_save >= 0)
+               tcc_offset_update(tcc_offset_save);
 
        return 0;
 }
index 4c7ebd1d3f9c9537f89722f25d5962e58155c6b6..b1162e566a707d6489431bba18abbb893e57c401 100644 (file)
@@ -417,7 +417,7 @@ static irqreturn_t tsens_critical_irq_thread(int irq, void *data)
                const struct tsens_sensor *s = &priv->sensor[i];
                u32 hw_id = s->hw_id;
 
-               if (IS_ERR(s->tzd))
+               if (!s->tzd)
                        continue;
                if (!tsens_threshold_violated(priv, hw_id, &d))
                        continue;
@@ -467,7 +467,7 @@ static irqreturn_t tsens_irq_thread(int irq, void *data)
                const struct tsens_sensor *s = &priv->sensor[i];
                u32 hw_id = s->hw_id;
 
-               if (IS_ERR(s->tzd))
+               if (!s->tzd)
                        continue;
                if (!tsens_threshold_violated(priv, hw_id, &d))
                        continue;
index 97ef9b040b84a95e82bd6c691f4bb87004c26ade..51374f4e1ccafadeb4ad4fd6ae7545092cd370d4 100644 (file)
@@ -222,15 +222,14 @@ int thermal_build_list_of_policies(char *buf)
 {
        struct thermal_governor *pos;
        ssize_t count = 0;
-       ssize_t size = PAGE_SIZE;
 
        mutex_lock(&thermal_governor_lock);
 
        list_for_each_entry(pos, &thermal_governor_list, governor_list) {
-               size = PAGE_SIZE - count;
-               count += scnprintf(buf + count, size, "%s ", pos->name);
+               count += scnprintf(buf + count, PAGE_SIZE - count, "%s ",
+                                  pos->name);
        }
-       count += scnprintf(buf + count, size, "\n");
+       count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
 
        mutex_unlock(&thermal_governor_lock);
 
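
The thermal_sysfs hunk closes a subtle overflow: size was captured as PAGE_SIZE - count before the loop body advanced count, so the trailing "\n" was printed with a bound that was too large by the length of the last governor name and could run past the page. Recomputing the remaining space on every call is the standard scnprintf accumulation pattern, roughly:

    #include <linux/kernel.h>

    /* append s to a sysfs buffer of the given size; returns the new offset */
    static ssize_t buf_append(char *buf, size_t size, ssize_t len, const char *s)
    {
            /* always recompute the remaining space from the current offset */
            return len + scnprintf(buf + len, size - len, "%s", s);
    }
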
index da19d7987d0057a83e7137c2eebaf317316a1108..78fd365893c13f284a7de60b399edb0da0a40aec 100644 (file)
@@ -7,6 +7,7 @@ thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o
 thunderbolt-${CONFIG_ACPI} += acpi.o
 thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
 thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o
+CFLAGS_test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
 
 thunderbolt_dma_test-${CONFIG_USB4_DMA_TEST} += dma_test.o
 obj-$(CONFIG_USB4_DMA_TEST) += thunderbolt_dma_test.o
index 8f143c09a1696eeed94e1d4788f06bce1d089d9a..f0bf01ea069ae0a217728987dd5237f8b4b6dd38 100644 (file)
@@ -618,10 +618,8 @@ static int __init xenboot_console_setup(struct console *console, char *string)
 {
        static struct xencons_info xenboot;
 
-       if (xen_initial_domain())
+       if (xen_initial_domain() || !xen_pv_domain())
                return 0;
-       if (!xen_pv_domain())
-               return -ENODEV;
 
        return xencons_info_pv_init(&xenboot, 0);
 }
@@ -632,17 +630,16 @@ static void xenboot_write_console(struct console *console, const char *string,
        unsigned int linelen, off = 0;
        const char *pos;
 
+       if (dom0_write_console(0, string, len) >= 0)
+               return;
+
        if (!xen_pv_domain()) {
                xen_hvm_early_write(0, string, len);
                return;
        }
 
-       dom0_write_console(0, string, len);
-
-       if (xen_initial_domain())
+       if (domU_write_console(0, "(early) ", 8) < 0)
                return;
-
-       domU_write_console(0, "(early) ", 8);
        while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
                linelen = pos-string+off;
                if (off + linelen > len)
index 891fd8345e2533dc4f6f5d53f4fe1c743b9165e3..73e5f1dbd075d844672bd4ad61946864b88460b9 100644 (file)
 #define UART_OMAP_EFR2_TIMEOUT_BEHAVE  BIT(6)
 
 /* RX FIFO occupancy indicator */
-#define UART_OMAP_RX_LVL               0x64
+#define UART_OMAP_RX_LVL               0x19
 
 struct omap8250_priv {
        int line;
index 71ae16de0f90e06ff6e6b9cb0a3de2b64ab6bd34..39fc96dc2531c1aedf93c63a7ccab68694bacdd1 100644 (file)
@@ -361,9 +361,13 @@ config SERIAL_8250_BCM2835AUX
          If unsure, say N.
 
 config SERIAL_8250_FSL
-       bool
+       bool "Freescale 16550 UART support" if COMPILE_TEST && !(PPC || ARM || ARM64)
        depends on SERIAL_8250_CONSOLE
-       default PPC || ARM || ARM64 || COMPILE_TEST
+       default PPC || ARM || ARM64
+       help
+         Selecting this option enables a workaround for a break-detection
+         erratum for Freescale 16550 UARTs in the 8250 driver. It also
+         enables support for ACPI enumeration.
 
 config SERIAL_8250_DW
        tristate "Support for Synopsys DesignWare 8250 quirks"
index 231de29a645214457b7656742fc090936d8efcf2..ab226da75f7bad12aec99ed097d6a630f37770a9 100644 (file)
@@ -163,7 +163,7 @@ static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
        st = readl(port->membase + UART_STAT);
        spin_unlock_irqrestore(&port->lock, flags);
 
-       return (st & STAT_TX_FIFO_EMP) ? TIOCSER_TEMT : 0;
+       return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
 }
 
 static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
index a9acd93e85b7c5faaf36e0bcce574f5529974dc5..25c558e65ece0851c73ff873d308ae57f70827ad 100644 (file)
@@ -438,8 +438,8 @@ static void reset_tbufs(struct slgt_info *info);
 static void tdma_reset(struct slgt_info *info);
 static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
 
-static void get_signals(struct slgt_info *info);
-static void set_signals(struct slgt_info *info);
+static void get_gtsignals(struct slgt_info *info);
+static void set_gtsignals(struct slgt_info *info);
 static void set_rate(struct slgt_info *info, u32 data_rate);
 
 static void bh_transmit(struct slgt_info *info);
@@ -720,7 +720,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
        if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
                info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
                spin_lock_irqsave(&info->lock,flags);
-               set_signals(info);
+               set_gtsignals(info);
                spin_unlock_irqrestore(&info->lock,flags);
        }
 
@@ -730,7 +730,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
                if (!C_CRTSCTS(tty) || !tty_throttled(tty))
                        info->signals |= SerialSignal_RTS;
                spin_lock_irqsave(&info->lock,flags);
-               set_signals(info);
+               set_gtsignals(info);
                spin_unlock_irqrestore(&info->lock,flags);
        }
 
@@ -1181,7 +1181,7 @@ static inline void line_info(struct seq_file *m, struct slgt_info *info)
 
        /* output current serial signal states */
        spin_lock_irqsave(&info->lock,flags);
-       get_signals(info);
+       get_gtsignals(info);
        spin_unlock_irqrestore(&info->lock,flags);
 
        stat_buf[0] = 0;
@@ -1281,7 +1281,7 @@ static void throttle(struct tty_struct * tty)
        if (C_CRTSCTS(tty)) {
                spin_lock_irqsave(&info->lock,flags);
                info->signals &= ~SerialSignal_RTS;
-               set_signals(info);
+               set_gtsignals(info);
                spin_unlock_irqrestore(&info->lock,flags);
        }
 }
@@ -1306,7 +1306,7 @@ static void unthrottle(struct tty_struct * tty)
        if (C_CRTSCTS(tty)) {
                spin_lock_irqsave(&info->lock,flags);
                info->signals |= SerialSignal_RTS;
-               set_signals(info);
+               set_gtsignals(info);
                spin_unlock_irqrestore(&info->lock,flags);
        }
 }
@@ -1477,7 +1477,7 @@ static int hdlcdev_open(struct net_device *dev)
 
        /* inform generic HDLC layer of current DCD status */
        spin_lock_irqsave(&info->lock, flags);
-       get_signals(info);
+       get_gtsignals(info);
        spin_unlock_irqrestore(&info->lock, flags);
        if (info->signals & SerialSignal_DCD)
                netif_carrier_on(dev);
@@ -2229,7 +2229,7 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
                if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
                        info->signals &= ~SerialSignal_RTS;
                        info->drop_rts_on_tx_done = false;
-                       set_signals(info);
+                       set_gtsignals(info);
                }
 
 #if SYNCLINK_GENERIC_HDLC
@@ -2394,7 +2394,7 @@ static void shutdown(struct slgt_info *info)
 
        if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
                info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
-               set_signals(info);
+               set_gtsignals(info);
        }
 
        flush_cond_wait(&info->gpio_wait_q);
@@ -2422,7 +2422,7 @@ static void program_hw(struct slgt_info *info)
        else
                async_mode(info);
 
-       set_signals(info);
+       set_gtsignals(info);
 
        info->dcd_chkcount = 0;
        info->cts_chkcount = 0;
@@ -2430,7 +2430,7 @@ static void program_hw(struct slgt_info *info)
        info->dsr_chkcount = 0;
 
        slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
-       get_signals(info);
+       get_gtsignals(info);
 
        if (info->netcount ||
            (info->port.tty && info->port.tty->termios.c_cflag & CREAD))
@@ -2667,7 +2667,7 @@ static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
        spin_lock_irqsave(&info->lock,flags);
 
        /* return immediately if state matches requested events */
-       get_signals(info);
+       get_gtsignals(info);
        s = info->signals;
 
        events = mask &
@@ -3085,7 +3085,7 @@ static int tiocmget(struct tty_struct *tty)
        unsigned long flags;
 
        spin_lock_irqsave(&info->lock,flags);
-       get_signals(info);
+       get_gtsignals(info);
        spin_unlock_irqrestore(&info->lock,flags);
 
        result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
@@ -3124,7 +3124,7 @@ static int tiocmset(struct tty_struct *tty,
                info->signals &= ~SerialSignal_DTR;
 
        spin_lock_irqsave(&info->lock,flags);
-       set_signals(info);
+       set_gtsignals(info);
        spin_unlock_irqrestore(&info->lock,flags);
        return 0;
 }
@@ -3135,7 +3135,7 @@ static int carrier_raised(struct tty_port *port)
        struct slgt_info *info = container_of(port, struct slgt_info, port);
 
        spin_lock_irqsave(&info->lock,flags);
-       get_signals(info);
+       get_gtsignals(info);
        spin_unlock_irqrestore(&info->lock,flags);
        return (info->signals & SerialSignal_DCD) ? 1 : 0;
 }
@@ -3150,7 +3150,7 @@ static void dtr_rts(struct tty_port *port, int on)
                info->signals |= SerialSignal_RTS | SerialSignal_DTR;
        else
                info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
-       set_signals(info);
+       set_gtsignals(info);
        spin_unlock_irqrestore(&info->lock,flags);
 }
 
@@ -3948,10 +3948,10 @@ static void tx_start(struct slgt_info *info)
 
                if (info->params.mode != MGSL_MODE_ASYNC) {
                        if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
-                               get_signals(info);
+                               get_gtsignals(info);
                                if (!(info->signals & SerialSignal_RTS)) {
                                        info->signals |= SerialSignal_RTS;
-                                       set_signals(info);
+                                       set_gtsignals(info);
                                        info->drop_rts_on_tx_done = true;
                                }
                        }
@@ -4005,7 +4005,7 @@ static void reset_port(struct slgt_info *info)
        rx_stop(info);
 
        info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
-       set_signals(info);
+       set_gtsignals(info);
 
        slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
 }
@@ -4427,7 +4427,7 @@ static void tx_set_idle(struct slgt_info *info)
 /*
  * get state of V24 status (input) signals
  */
-static void get_signals(struct slgt_info *info)
+static void get_gtsignals(struct slgt_info *info)
 {
        unsigned short status = rd_reg16(info, SSR);
 
@@ -4489,7 +4489,7 @@ static void msc_set_vcr(struct slgt_info *info)
 /*
  * set state of V24 control (output) signals
  */
-static void set_signals(struct slgt_info *info)
+static void set_gtsignals(struct slgt_info *info)
 {
        unsigned char val = rd_reg8(info, VCR);
        if (info->signals & SerialSignal_DTR)
index 756a4bfa6a693049c78697a74214c797b979832e..3e4e0b20b4bbbdfb6e725891b4b45d7359100ac9 100644 (file)
@@ -812,7 +812,6 @@ void tty_ldisc_release(struct tty_struct *tty)
 
        tty_ldisc_debug(tty, "released\n");
 }
-EXPORT_SYMBOL_GPL(tty_ldisc_release);
 
 /**
  *     tty_ldisc_init          -       ldisc setup for new tty
index 5d8c982019afc2e78db9cf8eb11984481445e52b..1f3b4a1422126bff26011c4ff7fd48bfa1f38e8c 100644 (file)
@@ -1100,6 +1100,19 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
        return 0;
 }
 
+static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
+{
+       struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+       if (priv_dev->dev_ver < DEV_VER_V3)
+               return;
+
+       if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
+               writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
+               writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+       }
+}
+
 /**
  * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware
  * @priv_ep: endpoint object
@@ -1351,6 +1364,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
                /*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/
                writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
                writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+               cdns3_rearm_drdy_if_needed(priv_ep);
                trace_cdns3_doorbell_epx(priv_ep->name,
                                         readl(&priv_dev->regs->ep_traddr));
        }
index 8b7bc10b6e8b44a6169d0027b9eb3693b862835b..f1d100671ee6a174ec3637c2d5f7bc7baec01d36 100644 (file)
@@ -420,11 +420,16 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
        data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
        if (IS_ERR(data->phy)) {
                ret = PTR_ERR(data->phy);
-               /* Return -EINVAL if no usbphy is available */
-               if (ret == -ENODEV)
-                       data->phy = NULL;
-               else
-                       goto err_clk;
+               if (ret == -ENODEV) {
+                       data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
+                       if (IS_ERR(data->phy)) {
+                               ret = PTR_ERR(data->phy);
+                               if (ret == -ENODEV)
+                                       data->phy = NULL;
+                               else
+                                       goto err_clk;
+                       }
+               }
        }
 
        pdata.usb_phy = data->phy;
index 8bbd8e29e60d1a65b8cf40b93cebc27d45448ddc..7b2e2420ecaea7496c29844c4048bd6d4706ab3c 100644 (file)
@@ -340,6 +340,9 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
                        acm->iocount.overrun++;
                spin_unlock_irqrestore(&acm->read_lock, flags);
 
+               if (newctrl & ACM_CTRL_BRK)
+                       tty_flip_buffer_push(&acm->port);
+
                if (difference)
                        wake_up_all(&acm->wioctl);
 
@@ -475,11 +478,16 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags)
 
 static void acm_process_read_urb(struct acm *acm, struct urb *urb)
 {
+       unsigned long flags;
+
        if (!urb->actual_length)
                return;
 
+       spin_lock_irqsave(&acm->read_lock, flags);
        tty_insert_flip_string(&acm->port, urb->transfer_buffer,
                        urb->actual_length);
+       spin_unlock_irqrestore(&acm->read_lock, flags);
+
        tty_flip_buffer_push(&acm->port);
 }
 
@@ -726,7 +734,8 @@ static void acm_port_destruct(struct tty_port *port)
 {
        struct acm *acm = container_of(port, struct acm, port);
 
-       acm_release_minor(acm);
+       if (acm->minor != ACM_MINOR_INVALID)
+               acm_release_minor(acm);
        usb_put_intf(acm->control);
        kfree(acm->country_codes);
        kfree(acm);
@@ -1323,8 +1332,10 @@ made_compressed_probe:
        usb_get_intf(acm->control); /* undone in destruct() */
 
        minor = acm_alloc_minor(acm);
-       if (minor < 0)
+       if (minor < 0) {
+               acm->minor = ACM_MINOR_INVALID;
                goto err_put_port;
+       }
 
        acm->minor = minor;
        acm->dev = usb_dev;
index 8aef5eb769a0de0e5a7bd1e853ea867fcdfffd42..3aa7f0a3ad71e8bcd3b17c7a6d2df3f8b52175bb 100644 (file)
@@ -22,6 +22,8 @@
 #define ACM_TTY_MAJOR          166
 #define ACM_TTY_MINORS         256
 
+#define ACM_MINOR_INVALID      ACM_TTY_MINORS
+
 /*
  * Requests.
  */
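
The cdc-acm pair of hunks introduces ACM_MINOR_INVALID (one past the last valid minor) so that the tty_port destructor only returns a minor that was actually allocated: a freshly kzalloc'd acm has minor 0, which is a valid minor, so without the sentinel an early probe failure would release minor 0 even though it may belong to another device. This is the same destructor-owned teardown shape as the greybus UART sketch earlier, with the sentinel guarding the one resource that is allocated conditionally.
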
index 35d5908b5478aad688d6ddae66b72c8d3377ab4d..fdf79bcf7eb09e50ae223d60df3348727375fa7e 100644 (file)
@@ -824,7 +824,7 @@ static struct usb_class_driver wdm_class = {
 };
 
 /* --- WWAN framework integration --- */
-#ifdef CONFIG_WWAN_CORE
+#ifdef CONFIG_WWAN
 static int wdm_wwan_port_start(struct wwan_port *port)
 {
        struct wdm_device *desc = wwan_port_get_drvdata(port);
@@ -963,11 +963,11 @@ static void wdm_wwan_rx(struct wdm_device *desc, int length)
        /* inbuf has been copied, it is safe to check for outstanding data */
        schedule_work(&desc->service_outs_intr);
 }
-#else /* CONFIG_WWAN_CORE */
+#else /* CONFIG_WWAN */
 static void wdm_wwan_init(struct wdm_device *desc) {}
 static void wdm_wwan_deinit(struct wdm_device *desc) {}
 static void wdm_wwan_rx(struct wdm_device *desc, int length) {}
-#endif /* CONFIG_WWAN_CORE */
+#endif /* CONFIG_WWAN */
 
 /* --- error handling --- */
 static void wdm_rxwork(struct work_struct *work)
index 5e8a04e3dd3c896ed3a9a4d309d8963202247e74..b856622431a73ef32286c581ab1137dc8bfa50dc 100644 (file)
@@ -6,8 +6,7 @@ config USB_COMMON
 
 config USB_LED_TRIG
        bool "USB LED Triggers"
-       depends on LEDS_CLASS && LEDS_TRIGGERS
-       select USB_COMMON
+       depends on LEDS_CLASS && USB_COMMON && LEDS_TRIGGERS
        help
          This option adds LED triggers for USB host and/or gadget activity.
 
index 0f8b7c93310ea2eeb7c5a084698989627280a447..7ee6e4cc0d89e8c4d024a6d4f682e6ea8185c070 100644 (file)
@@ -2760,6 +2760,26 @@ static void usb_put_invalidate_rhdev(struct usb_hcd *hcd)
        usb_put_dev(rhdev);
 }
 
+/**
+ * usb_stop_hcd - Halt the HCD
+ * @hcd: the usb_hcd that has to be halted
+ *
+ * Stop the root-hub polling timer and invoke the HCD's ->stop callback.
+ */
+static void usb_stop_hcd(struct usb_hcd *hcd)
+{
+       hcd->rh_pollable = 0;
+       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       del_timer_sync(&hcd->rh_timer);
+
+       hcd->driver->stop(hcd);
+       hcd->state = HC_STATE_HALT;
+
+       /* In case the HCD restarted the timer, stop it again. */
+       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       del_timer_sync(&hcd->rh_timer);
+}
+
 /**
  * usb_add_hcd - finish generic HCD structure initialization and register
  * @hcd: the usb_hcd structure to initialize
@@ -2775,6 +2795,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
 {
        int retval;
        struct usb_device *rhdev;
+       struct usb_hcd *shared_hcd;
 
        if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
                hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
@@ -2935,24 +2956,31 @@ int usb_add_hcd(struct usb_hcd *hcd,
                goto err_hcd_driver_start;
        }
 
+       /* starting here, usbcore will pay attention to the shared HCD roothub */
+       shared_hcd = hcd->shared_hcd;
+       if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
+               retval = register_root_hub(shared_hcd);
+               if (retval != 0)
+                       goto err_register_root_hub;
+
+               if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
+                       usb_hcd_poll_rh_status(shared_hcd);
+       }
+
        /* starting here, usbcore will pay attention to this root hub */
-       retval = register_root_hub(hcd);
-       if (retval != 0)
-               goto err_register_root_hub;
+       if (!HCD_DEFER_RH_REGISTER(hcd)) {
+               retval = register_root_hub(hcd);
+               if (retval != 0)
+                       goto err_register_root_hub;
 
-       if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
-               usb_hcd_poll_rh_status(hcd);
+               if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
+                       usb_hcd_poll_rh_status(hcd);
+       }
 
        return retval;
 
 err_register_root_hub:
-       hcd->rh_pollable = 0;
-       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-       del_timer_sync(&hcd->rh_timer);
-       hcd->driver->stop(hcd);
-       hcd->state = HC_STATE_HALT;
-       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-       del_timer_sync(&hcd->rh_timer);
+       usb_stop_hcd(hcd);
 err_hcd_driver_start:
        if (usb_hcd_is_primary_hcd(hcd) && hcd->irq > 0)
                free_irq(irqnum, hcd);
@@ -2985,6 +3013,7 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
 void usb_remove_hcd(struct usb_hcd *hcd)
 {
        struct usb_device *rhdev = hcd->self.root_hub;
+       bool rh_registered;
 
        dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
 
@@ -2995,6 +3024,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
 
        dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
        spin_lock_irq (&hcd_root_hub_lock);
+       rh_registered = hcd->rh_registered;
        hcd->rh_registered = 0;
        spin_unlock_irq (&hcd_root_hub_lock);
 
@@ -3004,7 +3034,8 @@ void usb_remove_hcd(struct usb_hcd *hcd)
        cancel_work_sync(&hcd->died_work);
 
        mutex_lock(&usb_bus_idr_lock);
-       usb_disconnect(&rhdev);         /* Sets rhdev to NULL */
+       if (rh_registered)
+               usb_disconnect(&rhdev);         /* Sets rhdev to NULL */
        mutex_unlock(&usb_bus_idr_lock);
 
        /*
@@ -3022,16 +3053,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
         * interrupt occurs), but usb_hcd_poll_rh_status() won't invoke
         * the hub_status_data() callback.
         */
-       hcd->rh_pollable = 0;
-       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-       del_timer_sync(&hcd->rh_timer);
-
-       hcd->driver->stop(hcd);
-       hcd->state = HC_STATE_HALT;
-
-       /* In case the HCD restarted the timer, stop it again. */
-       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-       del_timer_sync(&hcd->rh_timer);
+       usb_stop_hcd(hcd);
 
        if (usb_hcd_is_primary_hcd(hcd)) {
                if (hcd->irq > 0)
index 837237e4bc962e11a89ce3336ca7223f37c1f154..11d85a6e0b0dcade3c1bef40efa96924ce12e06a 100644 (file)
@@ -115,10 +115,16 @@ static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
  */
 static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
 {
+       struct dwc2_hsotg *hsotg = hs_ep->parent;
+       u16 limit = DSTS_SOFFN_LIMIT;
+
+       if (hsotg->gadget.speed != USB_SPEED_HIGH)
+               limit >>= 3;
+
        hs_ep->target_frame += hs_ep->interval;
-       if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
+       if (hs_ep->target_frame > limit) {
                hs_ep->frame_overrun = true;
-               hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
+               hs_ep->target_frame &= limit;
        } else {
                hs_ep->frame_overrun = false;
        }
@@ -136,10 +142,16 @@ static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
  */
 static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
 {
+       struct dwc2_hsotg *hsotg = hs_ep->parent;
+       u16 limit = DSTS_SOFFN_LIMIT;
+
+       if (hsotg->gadget.speed != USB_SPEED_HIGH)
+               limit >>= 3;
+
        if (hs_ep->target_frame)
                hs_ep->target_frame -= 1;
        else
-               hs_ep->target_frame = DSTS_SOFFN_LIMIT;
+               hs_ep->target_frame = limit;
 }
 
 /**
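
The two hunks above make the frame-number wrap limit speed-dependent: DSTS.SOFFN counts 125 us microframes in high-speed mode but whole 1 ms frames at full/low speed, so the full 14-bit DSTS_SOFFN_LIMIT only applies at high speed and is shifted right by three bits otherwise, turning a 14-bit microframe limit into an 11-bit frame limit (e.g. 0x3fff >> 3 = 0x7ff). dwc2_gadget_target_frame_elapsed() further down applies the same scaling so the elapsed-frame comparison wraps at the matching boundary.
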
@@ -1018,6 +1030,12 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
        dwc2_writel(hsotg, ctrl, depctl);
 }
 
+static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
+static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
+                                       struct dwc2_hsotg_ep *hs_ep,
+                                      struct dwc2_hsotg_req *hs_req,
+                                      int result);
+
 /**
  * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
  * @hsotg: The controller state.
@@ -1170,14 +1188,19 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
                }
        }
 
-       if (hs_ep->isochronous && hs_ep->interval == 1) {
-               hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
-               dwc2_gadget_incr_frame_num(hs_ep);
-
-               if (hs_ep->target_frame & 0x1)
-                       ctrl |= DXEPCTL_SETODDFR;
-               else
-                       ctrl |= DXEPCTL_SETEVENFR;
+       if (hs_ep->isochronous) {
+               if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
+                       if (hs_ep->interval == 1) {
+                               if (hs_ep->target_frame & 0x1)
+                                       ctrl |= DXEPCTL_SETODDFR;
+                               else
+                                       ctrl |= DXEPCTL_SETEVENFR;
+                       }
+                       ctrl |= DXEPCTL_CNAK;
+               } else {
+                       dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
+                       return;
+               }
        }
 
        ctrl |= DXEPCTL_EPENA;  /* ensure ep enabled */
@@ -1325,12 +1348,16 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
        u32 target_frame = hs_ep->target_frame;
        u32 current_frame = hsotg->frame_number;
        bool frame_overrun = hs_ep->frame_overrun;
+       u16 limit = DSTS_SOFFN_LIMIT;
+
+       if (hsotg->gadget.speed != USB_SPEED_HIGH)
+               limit >>= 3;
 
        if (!frame_overrun && current_frame >= target_frame)
                return true;
 
        if (frame_overrun && current_frame >= target_frame &&
-           ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
+           ((current_frame - target_frame) < limit / 2))
                return true;
 
        return false;
@@ -1713,11 +1740,9 @@ static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
  */
 static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
 {
-       u32 mask;
        struct dwc2_hsotg *hsotg = hs_ep->parent;
        int dir_in = hs_ep->dir_in;
        struct dwc2_hsotg_req *hs_req;
-       u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
 
        if (!list_empty(&hs_ep->queue)) {
                hs_req = get_ep_head(hs_ep);
@@ -1733,9 +1758,6 @@ static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
        } else {
                dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
                        __func__);
-               mask = dwc2_readl(hsotg, epmsk_reg);
-               mask |= DOEPMSK_OUTTKNEPDISMSK;
-               dwc2_writel(hsotg, mask, epmsk_reg);
        }
 }
 
@@ -2306,19 +2328,6 @@ static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
        dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
 }
 
-static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
-                                           u32 epctl_reg)
-{
-       u32 ctrl;
-
-       ctrl = dwc2_readl(hsotg, epctl_reg);
-       if (ctrl & DXEPCTL_EOFRNUM)
-               ctrl |= DXEPCTL_SETEVENFR;
-       else
-               ctrl |= DXEPCTL_SETODDFR;
-       dwc2_writel(hsotg, ctrl, epctl_reg);
-}
-
 /*
  * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
  * @hs_ep - The endpoint on which transfer went
@@ -2439,20 +2448,11 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
                        dwc2_hsotg_ep0_zlp(hsotg, true);
        }
 
-       /*
-        * Slave mode OUT transfers do not go through XferComplete so
-        * adjust the ISOC parity here.
-        */
-       if (!using_dma(hsotg)) {
-               if (hs_ep->isochronous && hs_ep->interval == 1)
-                       dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
-               else if (hs_ep->isochronous && hs_ep->interval > 1)
-                       dwc2_gadget_incr_frame_num(hs_ep);
-       }
-
        /* Set actual frame number for completed transfers */
-       if (!using_desc_dma(hsotg) && hs_ep->isochronous)
-               req->frame_number = hsotg->frame_number;
+       if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
+               req->frame_number = hs_ep->target_frame;
+               dwc2_gadget_incr_frame_num(hs_ep);
+       }
 
        dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
 }
@@ -2766,6 +2766,12 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
                return;
        }
 
+       /* Set actual frame number for completed transfers */
+       if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
+               hs_req->req.frame_number = hs_ep->target_frame;
+               dwc2_gadget_incr_frame_num(hs_ep);
+       }
+
        dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
 }
 
@@ -2826,23 +2832,18 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
 
                dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
 
-               if (hs_ep->isochronous) {
-                       dwc2_hsotg_complete_in(hsotg, hs_ep);
-                       return;
-               }
-
                if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
                        int dctl = dwc2_readl(hsotg, DCTL);
 
                        dctl |= DCTL_CGNPINNAK;
                        dwc2_writel(hsotg, dctl, DCTL);
                }
-               return;
-       }
+       } else {
 
-       if (dctl & DCTL_GOUTNAKSTS) {
-               dctl |= DCTL_CGOUTNAK;
-               dwc2_writel(hsotg, dctl, DCTL);
+               if (dctl & DCTL_GOUTNAKSTS) {
+                       dctl |= DCTL_CGOUTNAK;
+                       dwc2_writel(hsotg, dctl, DCTL);
+               }
        }
 
        if (!hs_ep->isochronous)
@@ -2863,8 +2864,6 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
                /* Update current frame number value. */
                hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
        } while (dwc2_gadget_target_frame_elapsed(hs_ep));
-
-       dwc2_gadget_start_next_request(hs_ep);
 }
 
 /**
@@ -2881,8 +2880,8 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
 static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
 {
        struct dwc2_hsotg *hsotg = ep->parent;
+       struct dwc2_hsotg_req *hs_req;
        int dir_in = ep->dir_in;
-       u32 doepmsk;
 
        if (dir_in || !ep->isochronous)
                return;
@@ -2896,28 +2895,39 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
                return;
        }
 
-       if (ep->interval > 1 &&
-           ep->target_frame == TARGET_FRAME_INITIAL) {
+       if (ep->target_frame == TARGET_FRAME_INITIAL) {
                u32 ctrl;
 
                ep->target_frame = hsotg->frame_number;
-               dwc2_gadget_incr_frame_num(ep);
+               if (ep->interval > 1) {
+                       ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
+                       if (ep->target_frame & 0x1)
+                               ctrl |= DXEPCTL_SETODDFR;
+                       else
+                               ctrl |= DXEPCTL_SETEVENFR;
 
-               ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
-               if (ep->target_frame & 0x1)
-                       ctrl |= DXEPCTL_SETODDFR;
-               else
-                       ctrl |= DXEPCTL_SETEVENFR;
+                       dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
+               }
+       }
+
+       while (dwc2_gadget_target_frame_elapsed(ep)) {
+               hs_req = get_ep_head(ep);
+               if (hs_req)
+                       dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
 
-               dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
+               dwc2_gadget_incr_frame_num(ep);
+               /* Update current frame number value. */
+               hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
        }
 
-       dwc2_gadget_start_next_request(ep);
-       doepmsk = dwc2_readl(hsotg, DOEPMSK);
-       doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
-       dwc2_writel(hsotg, doepmsk, DOEPMSK);
+       if (!ep->req)
+               dwc2_gadget_start_next_request(ep);
+
 }
 
+static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
+                                  struct dwc2_hsotg_ep *hs_ep);
+
 /**
  * dwc2_gadget_handle_nak - handle NAK interrupt
  * @hs_ep: The endpoint on which interrupt is asserted.
@@ -2935,7 +2945,9 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
 static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
 {
        struct dwc2_hsotg *hsotg = hs_ep->parent;
+       struct dwc2_hsotg_req *hs_req;
        int dir_in = hs_ep->dir_in;
+       u32 ctrl;
 
        if (!dir_in || !hs_ep->isochronous)
                return;
@@ -2977,13 +2989,29 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
 
                        dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
                }
-
-               dwc2_hsotg_complete_request(hsotg, hs_ep,
-                                           get_ep_head(hs_ep), 0);
        }
 
-       if (!using_desc_dma(hsotg))
+       if (using_desc_dma(hsotg))
+               return;
+
+       ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
+       if (ctrl & DXEPCTL_EPENA)
+               dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
+       else
+               dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
+
+       while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
+               hs_req = get_ep_head(hs_ep);
+               if (hs_req)
+                       dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
+
                dwc2_gadget_incr_frame_num(hs_ep);
+               /* Update current frame number value. */
+               hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
+       }
+
+       if (!hs_ep->req)
+               dwc2_gadget_start_next_request(hs_ep);
 }
 
 /**
@@ -3039,21 +3067,15 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
 
                /* In DDMA handle isochronous requests separately */
                if (using_desc_dma(hsotg) && hs_ep->isochronous) {
-                       /* XferCompl set along with BNA */
-                       if (!(ints & DXEPINT_BNAINTR))
-                               dwc2_gadget_complete_isoc_request_ddma(hs_ep);
+                       dwc2_gadget_complete_isoc_request_ddma(hs_ep);
                } else if (dir_in) {
                        /*
                         * We get OutDone from the FIFO, so we only
                         * need to look at completing IN requests here
                         * if operating slave mode
                         */
-                       if (hs_ep->isochronous && hs_ep->interval > 1)
-                               dwc2_gadget_incr_frame_num(hs_ep);
-
-                       dwc2_hsotg_complete_in(hsotg, hs_ep);
-                       if (ints & DXEPINT_NAKINTRPT)
-                               ints &= ~DXEPINT_NAKINTRPT;
+                       if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
+                               dwc2_hsotg_complete_in(hsotg, hs_ep);
 
                        if (idx == 0 && !hs_ep->req)
                                dwc2_hsotg_enqueue_setup(hsotg);
@@ -3062,10 +3084,8 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
                         * We're using DMA, we need to fire an OutDone here
                         * as we ignore the RXFIFO.
                         */
-                       if (hs_ep->isochronous && hs_ep->interval > 1)
-                               dwc2_gadget_incr_frame_num(hs_ep);
-
-                       dwc2_hsotg_handle_outdone(hsotg, idx);
+                       if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
+                               dwc2_hsotg_handle_outdone(hsotg, idx);
                }
        }
 
@@ -4085,6 +4105,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
                        mask |= DIEPMSK_NAKMSK;
                        dwc2_writel(hsotg, mask, DIEPMSK);
                } else {
+                       epctrl |= DXEPCTL_SNAK;
                        mask = dwc2_readl(hsotg, DOEPMSK);
                        mask |= DOEPMSK_OUTTKNEPDISMSK;
                        dwc2_writel(hsotg, mask, DOEPMSK);
index 2a7828971d0564743e7caa933ad485c8eeb5b299..a215ec9e172e69747e5d7c099def5a363b172431 100644 (file)
@@ -5191,6 +5191,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
        hcd->has_tt = 1;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               retval = -EINVAL;
+               goto error1;
+       }
        hcd->rsrc_start = res->start;
        hcd->rsrc_len = resource_size(res);
 
index 01866dcb953bac535c6eda6b03d2f899c1044292..0104a80b185e17a2d4ef126d75991ed2562bec59 100644 (file)
@@ -264,19 +264,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
 {
        u32             reg;
        int             retries = 1000;
-       int             ret;
-
-       usb_phy_init(dwc->usb2_phy);
-       usb_phy_init(dwc->usb3_phy);
-       ret = phy_init(dwc->usb2_generic_phy);
-       if (ret < 0)
-               return ret;
-
-       ret = phy_init(dwc->usb3_generic_phy);
-       if (ret < 0) {
-               phy_exit(dwc->usb2_generic_phy);
-               return ret;
-       }
 
        /*
         * We're resetting only the device side because, if we're in host mode,
@@ -310,9 +297,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
                        udelay(1);
        } while (--retries);
 
-       phy_exit(dwc->usb3_generic_phy);
-       phy_exit(dwc->usb2_generic_phy);
-
        return -ETIMEDOUT;
 
 done:
@@ -982,9 +966,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
                dwc->phys_ready = true;
        }
 
+       usb_phy_init(dwc->usb2_phy);
+       usb_phy_init(dwc->usb3_phy);
+       ret = phy_init(dwc->usb2_generic_phy);
+       if (ret < 0)
+               goto err0a;
+
+       ret = phy_init(dwc->usb3_generic_phy);
+       if (ret < 0) {
+               phy_exit(dwc->usb2_generic_phy);
+               goto err0a;
+       }
+
        ret = dwc3_core_soft_reset(dwc);
        if (ret)
-               goto err0a;
+               goto err1;
 
        if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
            !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
index 804b505481633fa1671dcfd486e369a5a6841e45..4519d06c9ca2b5cef2df29ba1da31ea652210d77 100644 (file)
@@ -4243,7 +4243,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
        }
 
 
-       usb_initialize_gadget(dwc->sysdev, dwc->gadget, dwc_gadget_release);
+       usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release);
        dev                             = &dwc->gadget->dev;
        dev->platform_data              = dwc;
        dwc->gadget->ops                = &dwc3_gadget_ops;
index 3c34995276e7751929b22000a794b37bb6c63786..ef55b8bb5870ac1b9e51ee4e61aaea5143bc4286 100644 (file)
@@ -406,6 +406,14 @@ static struct usb_endpoint_descriptor ss_epin_fback_desc = {
        .bInterval = 4,
 };
 
+static struct usb_ss_ep_comp_descriptor ss_epin_fback_desc_comp = {
+       .bLength                = sizeof(ss_epin_fback_desc_comp),
+       .bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+       .bMaxBurst              = 0,
+       .bmAttributes           = 0,
+       .wBytesPerInterval      = cpu_to_le16(4),
+};
+
 
 /* Audio Streaming IN Interface - Alt0 */
 static struct usb_interface_descriptor std_as_in_if0_desc = {
@@ -597,6 +605,7 @@ static struct usb_descriptor_header *ss_audio_desc[] = {
        (struct usb_descriptor_header *)&ss_epout_desc_comp,
        (struct usb_descriptor_header *)&as_iso_out_desc,
        (struct usb_descriptor_header *)&ss_epin_fback_desc,
+       (struct usb_descriptor_header *)&ss_epin_fback_desc_comp,
 
        (struct usb_descriptor_header *)&std_as_in_if0_desc,
        (struct usb_descriptor_header *)&std_as_in_if1_desc,
@@ -665,11 +674,17 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
                ssize = uac2_opts->c_ssize;
        }
 
-       if (!is_playback && (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC))
+       if (!is_playback && (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC)) {
+         // Win10 requires max packet size + 1 frame
                srate = srate * (1000 + uac2_opts->fb_max) / 1000;
-
-       max_size_bw = num_channels(chmask) * ssize *
-               DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
+               // updated srate is always bigger, therefore DIV_ROUND_UP always yields +1
+               max_size_bw = num_channels(chmask) * ssize *
+                       (DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))));
+       } else {
+               // adding 1 frame provision for Win10
+               max_size_bw = num_channels(chmask) * ssize *
+                       (DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))) + 1);
+       }
        ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
                                                    max_size_ep));
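
As a worked example of the wMaxPacketSize sizing above (not part of the patch), the standalone sketch below reproduces the two bandwidth paths with assumed parameters: stereo, 16-bit samples, 48 kHz, high speed, bInterval 4 and fb_max 5 (i.e. +0.5% feedback headroom).

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int channels = 2, ssize = 2;   /* stereo, 16-bit samples */
        unsigned long srate = 48000;            /* Hz */
        unsigned int factor = 8000;             /* HS/SS: microframes per second */
        unsigned int bInterval = 4;             /* one packet every 8 microframes */
        unsigned int fb_max = 5;                /* assumed +0.5% feedback headroom */
        unsigned int pkts_per_sec = factor / (1 << (bInterval - 1));
        unsigned long srate_fb;
        unsigned int async_bw, other_bw;

        /* async capture path: inflate srate first, DIV_ROUND_UP then rounds up */
        srate_fb = srate * (1000 + fb_max) / 1000;
        async_bw = channels * ssize * DIV_ROUND_UP(srate_fb, pkts_per_sec);

        /* all other paths: add one full frame of provision explicitly */
        other_bw = channels * ssize * (DIV_ROUND_UP(srate, pkts_per_sec) + 1);

        printf("async capture: %u bytes/packet, other paths: %u bytes/packet\n",
               async_bw, other_bw);
        return 0;
}

Both paths land on the same 196-byte packet size for these example numbers, which is the point of the rework: the feedback-inflated rate already buys the extra frame of headroom that the explicit +1 provides elsewhere.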
 
@@ -705,6 +720,7 @@ static void setup_headers(struct f_uac2_opts *opts,
 {
        struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL;
        struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
+       struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL;
        struct usb_endpoint_descriptor *epout_desc;
        struct usb_endpoint_descriptor *epin_desc;
        struct usb_endpoint_descriptor *epin_fback_desc;
@@ -730,6 +746,7 @@ static void setup_headers(struct f_uac2_opts *opts,
                epout_desc_comp = &ss_epout_desc_comp;
                epin_desc_comp = &ss_epin_desc_comp;
                epin_fback_desc = &ss_epin_fback_desc;
+               epin_fback_desc_comp = &ss_epin_fback_desc_comp;
                ep_int_desc = &ss_ep_int_desc;
        }
 
@@ -773,8 +790,11 @@ static void setup_headers(struct f_uac2_opts *opts,
 
                headers[i++] = USBDHDR(&as_iso_out_desc);
 
-               if (EPOUT_FBACK_IN_EN(opts))
+               if (EPOUT_FBACK_IN_EN(opts)) {
                        headers[i++] = USBDHDR(epin_fback_desc);
+                       if (epin_fback_desc_comp)
+                               headers[i++] = USBDHDR(epin_fback_desc_comp);
+               }
        }
 
        if (EPIN_EN(opts)) {
@@ -1164,6 +1184,9 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
        agdev->out_ep_maxpsize = max_t(u16, agdev->out_ep_maxpsize,
                                le16_to_cpu(ss_epout_desc.wMaxPacketSize));
 
+       ss_epin_desc_comp.wBytesPerInterval = ss_epin_desc.wMaxPacketSize;
+       ss_epout_desc_comp.wBytesPerInterval = ss_epout_desc.wMaxPacketSize;
+
        // HS and SS endpoint addresses are copied from autoconfigured FS descriptors
        hs_ep_int_desc.bEndpointAddress = fs_ep_int_desc.bEndpointAddress;
        hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
index 32ef228570835375b685c90ca7526066e5abf56e..ad16163b5ff803634d4245ccd6a97a00bf931b07 100644 (file)
@@ -96,11 +96,13 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
 };
 
 static void u_audio_set_fback_frequency(enum usb_device_speed speed,
+                                       struct usb_ep *out_ep,
                                        unsigned long long freq,
                                        unsigned int pitch,
                                        void *buf)
 {
        u32 ff = 0;
+       const struct usb_endpoint_descriptor *ep_desc;
 
        /*
         * Because the pitch base is 1000000, the final divider here
@@ -128,8 +130,13 @@ static void u_audio_set_fback_frequency(enum usb_device_speed speed,
                 * byte format (that is Q16.16)
                 *
                 * ff = (freq << 16) / 8000
+                *
+                * Win10 and OSX UAC2 drivers require number of samples per packet
+                * in order to honor the feedback value.
+                * Linux snd-usb-audio detects the applied bit-shift automatically.
                 */
-               freq <<= 4;
+               ep_desc = out_ep->desc;
+               freq <<= 4 + (ep_desc->bInterval - 1);
        }
 
        ff = DIV_ROUND_CLOSEST_ULL((freq * pitch), 1953125);
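
As a rough illustration of the Q16.16 feedback value described in the comment above (not part of the patch; the 48 kHz rate and pitch of 1000000 are assumed example inputs), the standalone sketch below shows how the extra bInterval-dependent shift turns samples-per-microframe into samples-per-packet.

#include <stdio.h>
#include <stdint.h>

static uint32_t fback_q16(uint64_t freq, unsigned int pitch, unsigned int bInterval)
{
        /* high/super speed: the base is (freq << 16) / 8000, i.e. samples per
         * microframe in Q16.16; the extra (bInterval - 1) shift multiplies by
         * the number of microframes per packet, giving samples per packet. */
        freq <<= 4 + (bInterval - 1);
        return (uint32_t)((freq * pitch + 1953125 / 2) / 1953125);
}

int main(void)
{
        uint32_t ff1 = fback_q16(48000, 1000000, 1);
        uint32_t ff4 = fback_q16(48000, 1000000, 4);

        printf("bInterval 1: 0x%08x = %u.%04u samples/packet\n",
               ff1, ff1 >> 16, (ff1 & 0xffff) * 10000 / 65536);
        printf("bInterval 4: 0x%08x = %u.%04u samples/packet\n",
               ff4, ff4 >> 16, (ff4 & 0xffff) * 10000 / 65536);
        return 0;
}

For these inputs the value goes from 6.0 samples per microframe (bInterval 1) to 48.0 samples per 1 ms packet (bInterval 4), which is the per-packet figure the Windows and macOS class drivers expect.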
@@ -267,7 +274,7 @@ static void u_audio_iso_fback_complete(struct usb_ep *ep,
                pr_debug("%s: iso_complete status(%d) %d/%d\n",
                        __func__, status, req->actual, req->length);
 
-       u_audio_set_fback_frequency(audio_dev->gadget->speed,
+       u_audio_set_fback_frequency(audio_dev->gadget->speed, audio_dev->out_ep,
                                    params->c_srate, prm->pitch,
                                    req->buf);
 
@@ -526,7 +533,7 @@ int u_audio_start_capture(struct g_audio *audio_dev)
         * be measured at start of playback
         */
        prm->pitch = 1000000;
-       u_audio_set_fback_frequency(audio_dev->gadget->speed,
+       u_audio_set_fback_frequency(audio_dev->gadget->speed, ep,
                                    params->c_srate, prm->pitch,
                                    req_fback->buf);
 
index 65cae4883454504aff68079a06b9dcd022bee397..38e4d6b505a056a5193a6bfa3ba8eaf3183e93ed 100644 (file)
@@ -1250,7 +1250,7 @@ static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
                        do {
                                tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
                                udelay(1);
-                       } while (tmp != CS_IDST || timeout-- > 0);
+                       } while (tmp != CS_IDST && timeout-- > 0);
 
                        if (tmp == CS_IDST)
                                r8a66597_bset(r8a66597,
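
The one-character change above is easy to misread, so here is a standalone sketch (not driver code; the stand-in poll() helper and the 3000-iteration budget are assumptions) showing how the '||' form keeps spinning through the whole timeout even after the idle state is reached, and could never terminate on its own if idle were never reached, while '&&' exits as soon as either condition is met.

#include <stdio.h>

#define CS_IDST 1

static int poll_state(int *reads)
{
        return ++(*reads) >= 3 ? CS_IDST : 0;   /* reports idle after 3 reads */
}

int main(void)
{
        int tmp, timeout, reads;

        reads = 0; timeout = 3000;
        do { tmp = poll_state(&reads); } while (tmp != CS_IDST || timeout-- > 0);
        printf("'||' condition: %d register reads\n", reads);

        reads = 0; timeout = 3000;
        do { tmp = poll_state(&reads); } while (tmp != CS_IDST && timeout-- > 0);
        printf("'&&' condition: %d register reads\n", reads);
        return 0;
}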
index 337b425dd4b0446c767f53cfde32f695fc12bb8a..2df52f75f6b3c7e0b947025660a051cda475ae48 100644 (file)
@@ -406,12 +406,9 @@ static int bcma_hcd_probe(struct bcma_device *core)
                return -ENOMEM;
        usb_dev->core = core;
 
-       if (core->dev.of_node) {
+       if (core->dev.of_node)
                usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
                                                    GPIOD_OUT_HIGH);
-               if (IS_ERR(usb_dev->gpio_desc))
-                       return PTR_ERR(usb_dev->gpio_desc);
-       }
 
        switch (core->id.id) {
        case BCMA_CORE_USB20_HOST:
index 6bdc6d6bf74de28bab8feab4a34dbf07f49484dc..1776c05d0a4865e5371e378a52fca60f4b6bbb6a 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/moduleparam.h>
 #include <linux/dma-mapping.h>
 #include <linux/debugfs.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 
 #include <asm/byteorder.h>
@@ -1278,29 +1279,39 @@ MODULE_LICENSE ("GPL");
 
 #ifdef CONFIG_USB_EHCI_SH
 #include "ehci-sh.c"
-#define PLATFORM_DRIVER                ehci_hcd_sh_driver
 #endif
 
 #ifdef CONFIG_PPC_PS3
 #include "ehci-ps3.c"
-#define        PS3_SYSTEM_BUS_DRIVER   ps3_ehci_driver
 #endif
 
 #ifdef CONFIG_USB_EHCI_HCD_PPC_OF
 #include "ehci-ppc-of.c"
-#define OF_PLATFORM_DRIVER     ehci_hcd_ppc_of_driver
 #endif
 
 #ifdef CONFIG_XPS_USB_HCD_XILINX
 #include "ehci-xilinx-of.c"
-#define XILINX_OF_PLATFORM_DRIVER      ehci_hcd_xilinx_of_driver
 #endif
 
 #ifdef CONFIG_SPARC_LEON
 #include "ehci-grlib.c"
-#define PLATFORM_DRIVER                ehci_grlib_driver
 #endif
 
+static struct platform_driver * const platform_drivers[] = {
+#ifdef CONFIG_USB_EHCI_SH
+       &ehci_hcd_sh_driver,
+#endif
+#ifdef CONFIG_USB_EHCI_HCD_PPC_OF
+       &ehci_hcd_ppc_of_driver,
+#endif
+#ifdef CONFIG_XPS_USB_HCD_XILINX
+       &ehci_hcd_xilinx_of_driver,
+#endif
+#ifdef CONFIG_SPARC_LEON
+       &ehci_grlib_driver,
+#endif
+};
+
 static int __init ehci_hcd_init(void)
 {
        int retval = 0;
@@ -1324,47 +1335,23 @@ static int __init ehci_hcd_init(void)
        ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
 #endif
 
-#ifdef PLATFORM_DRIVER
-       retval = platform_driver_register(&PLATFORM_DRIVER);
+       retval = platform_register_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
        if (retval < 0)
                goto clean0;
-#endif
-
-#ifdef PS3_SYSTEM_BUS_DRIVER
-       retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
-       if (retval < 0)
-               goto clean2;
-#endif
 
-#ifdef OF_PLATFORM_DRIVER
-       retval = platform_driver_register(&OF_PLATFORM_DRIVER);
+#ifdef CONFIG_PPC_PS3
+       retval = ps3_ehci_driver_register(&ps3_ehci_driver);
        if (retval < 0)
-               goto clean3;
+               goto clean1;
 #endif
 
-#ifdef XILINX_OF_PLATFORM_DRIVER
-       retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER);
-       if (retval < 0)
-               goto clean4;
-#endif
-       return retval;
+       return 0;
 
-#ifdef XILINX_OF_PLATFORM_DRIVER
-       /* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */
-clean4:
-#endif
-#ifdef OF_PLATFORM_DRIVER
-       platform_driver_unregister(&OF_PLATFORM_DRIVER);
-clean3:
-#endif
-#ifdef PS3_SYSTEM_BUS_DRIVER
-       ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
-clean2:
+#ifdef CONFIG_PPC_PS3
+clean1:
 #endif
-#ifdef PLATFORM_DRIVER
-       platform_driver_unregister(&PLATFORM_DRIVER);
+       platform_unregister_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
 clean0:
-#endif
 #ifdef CONFIG_DYNAMIC_DEBUG
        debugfs_remove(ehci_debug_root);
        ehci_debug_root = NULL;
@@ -1376,18 +1363,10 @@ module_init(ehci_hcd_init);
 
 static void __exit ehci_hcd_cleanup(void)
 {
-#ifdef XILINX_OF_PLATFORM_DRIVER
-       platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER);
-#endif
-#ifdef OF_PLATFORM_DRIVER
-       platform_driver_unregister(&OF_PLATFORM_DRIVER);
-#endif
-#ifdef PLATFORM_DRIVER
-       platform_driver_unregister(&PLATFORM_DRIVER);
-#endif
-#ifdef PS3_SYSTEM_BUS_DRIVER
-       ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+#ifdef CONFIG_PPC_PS3
+       ps3_ehci_driver_unregister(&ps3_ehci_driver);
 #endif
+       platform_unregister_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
 #ifdef CONFIG_DYNAMIC_DEBUG
        debugfs_remove(ehci_debug_root);
 #endif
index 0b3722770760ca4c2a59ad1a0ef183148e27bcd5..ded9738392e4c6cdabcc9be8e7c794554ba27342 100644 (file)
 #include <mach/usb.h>
 
 
-/* OMAP-1510 OHCI has its own MMU for DMA */
-#define OMAP1510_LB_MEMSIZE    32      /* Should be same as SDRAM size */
-#define OMAP1510_LB_CLOCK_DIV  0xfffec10c
-#define OMAP1510_LB_MMU_CTL    0xfffec208
-#define OMAP1510_LB_MMU_LCK    0xfffec224
-#define OMAP1510_LB_MMU_LD_TLB 0xfffec228
-#define OMAP1510_LB_MMU_CAM_H  0xfffec22c
-#define OMAP1510_LB_MMU_CAM_L  0xfffec230
-#define OMAP1510_LB_MMU_RAM_H  0xfffec234
-#define OMAP1510_LB_MMU_RAM_L  0xfffec238
-
 #define DRIVER_DESC "OHCI OMAP driver"
 
 struct ohci_omap_priv {
@@ -104,61 +93,6 @@ static int omap_ohci_transceiver_power(struct ohci_omap_priv *priv, int on)
        return 0;
 }
 
-#ifdef CONFIG_ARCH_OMAP15XX
-/*
- * OMAP-1510 specific Local Bus clock on/off
- */
-static int omap_1510_local_bus_power(int on)
-{
-       if (on) {
-               omap_writel((1 << 1) | (1 << 0), OMAP1510_LB_MMU_CTL);
-               udelay(200);
-       } else {
-               omap_writel(0, OMAP1510_LB_MMU_CTL);
-       }
-
-       return 0;
-}
-
-/*
- * OMAP-1510 specific Local Bus initialization
- * NOTE: This assumes 32MB memory size in OMAP1510LB_MEMSIZE.
- *       See also arch/mach-omap/memory.h for __virt_to_dma() and
- *       __dma_to_virt() which need to match with the physical
- *       Local Bus address below.
- */
-static int omap_1510_local_bus_init(void)
-{
-       unsigned int tlb;
-       unsigned long lbaddr, physaddr;
-
-       omap_writel((omap_readl(OMAP1510_LB_CLOCK_DIV) & 0xfffffff8) | 0x4,
-              OMAP1510_LB_CLOCK_DIV);
-
-       /* Configure the Local Bus MMU table */
-       for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) {
-               lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET;
-               physaddr = tlb * 0x00100000 + PHYS_OFFSET;
-               omap_writel((lbaddr & 0x0fffffff) >> 22, OMAP1510_LB_MMU_CAM_H);
-               omap_writel(((lbaddr & 0x003ffc00) >> 6) | 0xc,
-                      OMAP1510_LB_MMU_CAM_L);
-               omap_writel(physaddr >> 16, OMAP1510_LB_MMU_RAM_H);
-               omap_writel((physaddr & 0x0000fc00) | 0x300, OMAP1510_LB_MMU_RAM_L);
-               omap_writel(tlb << 4, OMAP1510_LB_MMU_LCK);
-               omap_writel(0x1, OMAP1510_LB_MMU_LD_TLB);
-       }
-
-       /* Enable the walking table */
-       omap_writel(omap_readl(OMAP1510_LB_MMU_CTL) | (1 << 3), OMAP1510_LB_MMU_CTL);
-       udelay(200);
-
-       return 0;
-}
-#else
-#define omap_1510_local_bus_power(x)   {}
-#define omap_1510_local_bus_init()     {}
-#endif
-
 #ifdef CONFIG_USB_OTG
 
 static void start_hnp(struct ohci_hcd *ohci)
@@ -229,10 +163,8 @@ static int ohci_omap_reset(struct usb_hcd *hcd)
 
        omap_ohci_clock_power(priv, 1);
 
-       if (cpu_is_omap15xx()) {
-               omap_1510_local_bus_power(1);
-               omap_1510_local_bus_init();
-       }
+       if (config->lb_reset)
+               config->lb_reset();
 
        ret = ohci_setup(hcd);
        if (ret < 0)
index 6e784f2fc26d14f2cbcd02903172335b9d02c809..eb46e642e87aa5fee70a912d970eb1b7264dcbb5 100644 (file)
@@ -408,40 +408,38 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
                return -EBUSY;
 
        xhci_dbc_tty_init_port(dbc, port);
-       tty_dev = tty_port_register_device(&port->port,
-                                          dbc_tty_driver, 0, NULL);
-       if (IS_ERR(tty_dev)) {
-               ret = PTR_ERR(tty_dev);
-               goto register_fail;
-       }
 
        ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
        if (ret)
-               goto buf_alloc_fail;
+               goto err_exit_port;
 
        ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
                                      dbc_read_complete);
        if (ret)
-               goto request_fail;
+               goto err_free_fifo;
 
        ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
                                      dbc_write_complete);
        if (ret)
-               goto request_fail;
+               goto err_free_requests;
+
+       tty_dev = tty_port_register_device(&port->port,
+                                          dbc_tty_driver, 0, NULL);
+       if (IS_ERR(tty_dev)) {
+               ret = PTR_ERR(tty_dev);
+               goto err_free_requests;
+       }
 
        port->registered = true;
 
        return 0;
 
-request_fail:
+err_free_requests:
        xhci_dbc_free_requests(&port->read_pool);
        xhci_dbc_free_requests(&port->write_pool);
+err_free_fifo:
        kfifo_free(&port->write_fifo);
-
-buf_alloc_fail:
-       tty_unregister_device(dbc_tty_driver, 0);
-
-register_fail:
+err_exit_port:
        xhci_dbc_tty_exit_port(port);
 
        dev_err(dbc->dev, "can't register tty port, err %d\n", ret);
index 2c9f25ca8eddf324282cbff84a554b3887f8ee09..2484a9d38ce2b808234d0f4ffafbaa6f59d163ee 100644 (file)
@@ -30,6 +30,7 @@
 #define PCI_VENDOR_ID_FRESCO_LOGIC     0x1b73
 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009      0x1009
+#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1100      0x1100
 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400      0x1400
 
 #define PCI_VENDOR_ID_ETRON            0x1b6f
@@ -113,6 +114,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        /* Look for vendor-specific quirks */
        if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
                        (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
+                        pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 ||
                         pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
                if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
                                pdev->revision == 0x0) {
@@ -279,8 +281,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                        pdev->device == 0x3432)
                xhci->quirks |= XHCI_BROKEN_STREAMS;
 
-       if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
+       if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
                xhci->quirks |= XHCI_LPM_SUPPORT;
+               xhci->quirks |= XHCI_EP_CTX_BROKEN_DCS;
+       }
 
        if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
                pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)
index e676749f543baf02287979d014307d8ae323a2a0..311597bba80e2a4d469277042cbde721e93af335 100644 (file)
@@ -366,16 +366,22 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
 /* Must be called with xhci->lock held, releases and acquires lock back */
 static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
 {
-       u64 temp_64;
+       u32 temp_32;
        int ret;
 
        xhci_dbg(xhci, "Abort command ring\n");
 
        reinit_completion(&xhci->cmd_ring_stop_completion);
 
-       temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
-       xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
-                       &xhci->op_regs->cmd_ring);
+       /*
+        * The control bits like command stop, abort are located in lower
+        * dword of the command ring control register. Limit the write
+        * to the lower dword to avoid corrupting the command ring pointer
+        * in case the command ring is stopped by the time the upper dword
+        * is written.
+        */
+       temp_32 = readl(&xhci->op_regs->cmd_ring);
+       writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
 
        /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
         * completion of the Command Abort operation. If CRR is not negated in 5
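
To illustrate the register-width concern in the comment above, the following standalone sketch (not xhci code; the bit value and register contents are made up for illustration) contrasts a 64-bit read-modify-write of a combined control/pointer register with a 32-bit write that touches only the control dword.

#include <stdio.h>
#include <stdint.h>

#define CMD_RING_ABORT (1u << 2)        /* illustrative control bit */

int main(void)
{
        /* reg[0] stands in for the control dword, reg[1] for the upper
         * dword holding ring pointer bits, as seen through 32-bit MMIO. */
        uint32_t reg[2] = { 0x00000008u, 0x0000abcdu };

        /* 64-bit read-modify-write: snapshot both halves... */
        uint64_t snap = (uint64_t)reg[1] << 32 | reg[0];
        /* ...hardware updates the pointer while we hold the snapshot... */
        reg[1] = 0x0000abceu;
        /* ...and writing the snapshot back clobbers it with the stale value */
        reg[0] = (uint32_t)snap | CMD_RING_ABORT;
        reg[1] = (uint32_t)(snap >> 32);
        printf("64-bit RMW:   upper dword = 0x%08x (stale pointer written back)\n", reg[1]);

        /* 32-bit write touches only the control dword, pointer is untouched */
        reg[1] = 0x0000abceu;
        reg[0] |= CMD_RING_ABORT;
        printf("32-bit write: upper dword = 0x%08x (new pointer preserved)\n", reg[1]);
        return 0;
}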
@@ -559,8 +565,11 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
        struct xhci_ring *ep_ring;
        struct xhci_command *cmd;
        struct xhci_segment *new_seg;
+       struct xhci_segment *halted_seg = NULL;
        union xhci_trb *new_deq;
        int new_cycle;
+       union xhci_trb *halted_trb;
+       int index = 0;
        dma_addr_t addr;
        u64 hw_dequeue;
        bool cycle_found = false;
@@ -598,7 +607,27 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
        hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
        new_seg = ep_ring->deq_seg;
        new_deq = ep_ring->dequeue;
-       new_cycle = hw_dequeue & 0x1;
+
+       /*
+        * Quirk: xHC write-back of the DCS field in the hardware dequeue
+        * pointer is wrong - use the cycle state of the TRB pointed to by
+        * the dequeue pointer.
+        */
+       if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS &&
+           !(ep->ep_state & EP_HAS_STREAMS))
+               halted_seg = trb_in_td(xhci, td->start_seg,
+                                      td->first_trb, td->last_trb,
+                                      hw_dequeue & ~0xf, false);
+       if (halted_seg) {
+               index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) /
+                        sizeof(*halted_trb);
+               halted_trb = &halted_seg->trbs[index];
+               new_cycle = halted_trb->generic.field[3] & 0x1;
+               xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n",
+                        (u8)(hw_dequeue & 0x1), index, new_cycle);
+       } else {
+               new_cycle = hw_dequeue & 0x1;
+       }
 
        /*
         * We want to find the pointer, segment and cycle state of the new trb
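
A standalone sketch of the index and cycle-bit recovery performed by the quirk above (not xhci code; the segment base address, dequeue value and TRB contents are made-up example numbers, with a TRB modelled as four 32-bit words whose last word carries the cycle bit in bit 0):

#include <stdio.h>
#include <stdint.h>

struct trb { uint32_t field[4]; };

int main(void)
{
        static struct trb trbs[256];            /* mock ring segment */
        uint64_t seg_dma = 0x1000;              /* DMA address of trbs[0] */
        uint64_t hw_dequeue = 0x1034;           /* low nibble carries DCS/flags */
        unsigned int index, cycle;

        trbs[3].field[3] = 0x00000c01;          /* type bits plus cycle = 1 */

        index = (unsigned int)(((hw_dequeue & ~0xfull) - seg_dma) /
                               sizeof(struct trb));
        cycle = trbs[index].field[3] & 0x1;

        printf("index = %u, cycle = %u (hw-reported DCS would have been %u)\n",
               index, cycle, (unsigned int)(hw_dequeue & 0x1));
        return 0;
}

Here the cycle state read from the TRB itself (1) differs from the DCS bit the controller reported in the dequeue pointer (0), which is exactly the mismatch the XHCI_EP_CTX_BROKEN_DCS quirk works around.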
index 575fa89a783f951a5b1e2b4a904348735b983c3f..1bf494b649bd24c834c26acaf984704e91203974 100644 (file)
@@ -1787,7 +1787,6 @@ static int tegra_xusb_remove(struct platform_device *pdev)
        return 0;
 }
 
-#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)
 static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
 {
        struct device *dev = hub->hcd->self.controller;
@@ -2102,7 +2101,7 @@ out:
        return err;
 }
 
-static int tegra_xusb_suspend(struct device *dev)
+static __maybe_unused int tegra_xusb_suspend(struct device *dev)
 {
        struct tegra_xusb *tegra = dev_get_drvdata(dev);
        int err;
@@ -2144,7 +2143,7 @@ out:
        return err;
 }
 
-static int tegra_xusb_resume(struct device *dev)
+static __maybe_unused int tegra_xusb_resume(struct device *dev)
 {
        struct tegra_xusb *tegra = dev_get_drvdata(dev);
        int err;
@@ -2174,10 +2173,8 @@ static int tegra_xusb_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
-#ifdef CONFIG_PM
-static int tegra_xusb_runtime_suspend(struct device *dev)
+static __maybe_unused int tegra_xusb_runtime_suspend(struct device *dev)
 {
        struct tegra_xusb *tegra = dev_get_drvdata(dev);
        int ret;
@@ -2190,7 +2187,7 @@ static int tegra_xusb_runtime_suspend(struct device *dev)
        return ret;
 }
 
-static int tegra_xusb_runtime_resume(struct device *dev)
+static __maybe_unused int tegra_xusb_runtime_resume(struct device *dev)
 {
        struct tegra_xusb *tegra = dev_get_drvdata(dev);
        int err;
@@ -2201,7 +2198,6 @@ static int tegra_xusb_runtime_resume(struct device *dev)
 
        return err;
 }
-#endif
 
 static const struct dev_pm_ops tegra_xusb_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra_xusb_runtime_suspend,
index f3dabd02382c2aecf78631d83693970744a74818..541fe4dcc43a22d51704c145f9291dd91545b973 100644 (file)
@@ -692,6 +692,7 @@ int xhci_run(struct usb_hcd *hcd)
                if (ret)
                        xhci_free_command(xhci, command);
        }
+       set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Finished xhci_run for USB2 roothub");
 
@@ -3213,10 +3214,13 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
                return;
 
        /* Bail out if toggle is already being cleared by an endpoint reset */
+       spin_lock_irqsave(&xhci->lock, flags);
        if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
                ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
+               spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }
+       spin_unlock_irqrestore(&xhci->lock, flags);
        /* Only interrupt and bulk ep's use data toggle, USB2 spec 5.5.4-> */
        if (usb_endpoint_xfer_control(&host_ep->desc) ||
            usb_endpoint_xfer_isoc(&host_ep->desc))
@@ -3302,8 +3306,10 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
        xhci_free_command(xhci, cfg_cmd);
 cleanup:
        xhci_free_command(xhci, stop_cmd);
+       spin_lock_irqsave(&xhci->lock, flags);
        if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
                ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
+       spin_unlock_irqrestore(&xhci->lock, flags);
 }
 
 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
index dca6181c33fdbb9661b455a9811caf4d40c21761..5a75fe5631238467b853ec53e7bfd01cdf5d5b88 100644 (file)
@@ -1899,6 +1899,7 @@ struct xhci_hcd {
 #define XHCI_SG_TRB_CACHE_SIZE_QUIRK   BIT_ULL(39)
 #define XHCI_NO_SOFT_RETRY     BIT_ULL(40)
 #define XHCI_BROKEN_D3COLD     BIT_ULL(41)
+#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
 
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
index ce9fc46c92661cb259dc701ddcbe78e8c730efb0..b5935834f9d2414709d4b50bc8e9465fcb8e9f8b 100644 (file)
@@ -899,11 +899,13 @@ static int dsps_probe(struct platform_device *pdev)
        if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
                ret = dsps_setup_optional_vbus_irq(pdev, glue);
                if (ret)
-                       goto err;
+                       goto unregister_pdev;
        }
 
        return 0;
 
+unregister_pdev:
+       platform_device_unregister(glue->musb);
 err:
        pm_runtime_disable(&pdev->dev);
        iounmap(glue->usbss_base);
index c4293769220793bc8ddab886bb8d82bdd0c854fe..c968ecda42aa87d5e83ca58ea1ad6820fd7c75cd 100644 (file)
@@ -190,6 +190,7 @@ tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
        }
        if (len > 0) {
                /* Write the rest 1 - 3 bytes to FIFO */
+               val = 0;
                memcpy(&val, buf, len);
                musb_writel(fifo, 0, val);
        }
index 66a6ac50a4cdb8b9a1d1b7d1ec389a5d59fc3e89..189279869a8b061680e6e448565e5ced2eab0bcd 100644 (file)
@@ -233,6 +233,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
        { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
        { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
+       { USB_DEVICE(0x2184, 0x0030) }, /* GW Instek GDM-834x Digital Multimeter */
        { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
        { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
        { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
@@ -258,6 +259,7 @@ struct cp210x_serial_private {
        speed_t                 max_speed;
        bool                    use_actual_rate;
        bool                    no_flow_control;
+       bool                    no_event_mode;
 };
 
 enum cp210x_event_state {
@@ -1113,12 +1115,16 @@ static void cp210x_change_speed(struct tty_struct *tty,
 
 static void cp210x_enable_event_mode(struct usb_serial_port *port)
 {
+       struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
        struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
        int ret;
 
        if (port_priv->event_mode)
                return;
 
+       if (priv->no_event_mode)
+               return;
+
        port_priv->event_state = ES_DATA;
        port_priv->event_mode = true;
 
@@ -2074,6 +2080,33 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
        priv->use_actual_rate = use_actual_rate;
 }
 
+static void cp2102_determine_quirks(struct usb_serial *serial)
+{
+       struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+       u8 *buf;
+       int ret;
+
+       buf = kmalloc(2, GFP_KERNEL);
+       if (!buf)
+               return;
+       /*
+        * Some (possibly counterfeit) CP2102 do not support event-insertion
+        * mode and respond differently to malformed vendor requests.
+        * Specifically, they return one instead of two bytes when sent a
+        * two-byte part-number request.
+        */
+       ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+                       CP210X_VENDOR_SPECIFIC, REQTYPE_DEVICE_TO_HOST,
+                       CP210X_GET_PARTNUM, 0, buf, 2, USB_CTRL_GET_TIMEOUT);
+       if (ret == 1) {
+               dev_dbg(&serial->interface->dev,
+                               "device does not support event-insertion mode\n");
+               priv->no_event_mode = true;
+       }
+
+       kfree(buf);
+}
+
 static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
 {
        struct cp210x_serial_private *priv = usb_get_serial_data(serial);
@@ -2108,7 +2141,12 @@ static void cp210x_determine_type(struct usb_serial *serial)
                return;
        }
 
+       dev_dbg(&serial->interface->dev, "partnum = 0x%02x\n", priv->partnum);
+
        switch (priv->partnum) {
+       case CP210X_PARTNUM_CP2102:
+               cp2102_determine_quirks(serial);
+               break;
        case CP210X_PARTNUM_CP2105:
        case CP210X_PARTNUM_CP2108:
                cp210x_get_fw_version(serial, CP210X_GET_FW_VER);
index d7fe33ca73e4c4db71b62aed383121bbde560765..925067a7978d4ad395f644be59b205b8b685643c 100644 (file)
 #define BANDB_DEVICE_ID_USOPTL4_2P       0xBC02
 #define BANDB_DEVICE_ID_USOPTL4_4        0xAC44
 #define BANDB_DEVICE_ID_USOPTL4_4P       0xBC03
-#define BANDB_DEVICE_ID_USOPTL2_4        0xAC24
 
 /* Interrupt Routine Defines    */
 
@@ -186,7 +185,6 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P) },
        { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4) },
        { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P) },
-       { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4) },
        {}                      /* terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, id_table);
index 29c765cc849573eac29b2c071eb0479c63fee7c9..a484ff5e4ebf83761964213eeb33d79f0ef28f6a 100644 (file)
@@ -246,11 +246,13 @@ static void option_instat_callback(struct urb *urb);
 /* These Quectel products use Quectel's vendor ID */
 #define QUECTEL_PRODUCT_EC21                   0x0121
 #define QUECTEL_PRODUCT_EC25                   0x0125
+#define QUECTEL_PRODUCT_EG91                   0x0191
 #define QUECTEL_PRODUCT_EG95                   0x0195
 #define QUECTEL_PRODUCT_BG96                   0x0296
 #define QUECTEL_PRODUCT_EP06                   0x0306
 #define QUECTEL_PRODUCT_EM12                   0x0512
 #define QUECTEL_PRODUCT_RM500Q                 0x0800
+#define QUECTEL_PRODUCT_EC200S_CN              0x6002
 #define QUECTEL_PRODUCT_EC200T                 0x6026
 
 #define CMOTECH_VENDOR_ID                      0x16d8
@@ -1111,6 +1113,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff),
          .driver_info = NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG91, 0xff, 0xff, 0xff),
+         .driver_info = NUMEP2 },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG91, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
          .driver_info = NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
@@ -1128,6 +1133,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
          .driver_info = ZLP },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
 
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
@@ -1205,6 +1211,14 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff),    /* Telit FD980 */
          .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff),    /* Telit LN920 (rmnet) */
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff),    /* Telit LN920 (MBIM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1062, 0xff),    /* Telit LN920 (RNDIS) */
+         .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff),    /* Telit LN920 (ECM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1219,6 +1233,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff),    /* Telit LE910Cx (RNDIS) */
          .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1204, 0xff),    /* Telit LE910Cx (MBIM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1650,7 +1666,6 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
@@ -2068,6 +2083,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
        { USB_DEVICE(0x0489, 0xe0b5),                                           /* Foxconn T77W968 ESIM */
          .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+       { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff),                     /* Foxconn T99W265 MBIM */
+         .driver_info = RSVD(3) },
        { USB_DEVICE(0x1508, 0x1001),                                           /* Fibocom NL668 (IOT version) */
          .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
        { USB_DEVICE(0x2cb7, 0x0104),                                           /* Fibocom NL678 series */
index 83da8236e3c8bc5bfce602eaabea50169a44827e..c18bf8164bc2e9676247f36e672f2c9cea489e30 100644 (file)
@@ -165,6 +165,7 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x1199, 0x907b)},   /* Sierra Wireless EM74xx */
        {DEVICE_SWI(0x1199, 0x9090)},   /* Sierra Wireless EM7565 QDL */
        {DEVICE_SWI(0x1199, 0x9091)},   /* Sierra Wireless EM7565 */
+       {DEVICE_SWI(0x1199, 0x90d2)},   /* Sierra Wireless EM9191 QDL */
        {DEVICE_SWI(0x413c, 0x81a2)},   /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a3)},   /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a4)},   /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
index efa972be2ee346fec9c00ec252a85e233f7decae..c6b3fcf9018052a4e09ed4b71d7cac32725a4646 100644 (file)
@@ -416,9 +416,16 @@ UNUSUAL_DEV(  0x04cb, 0x0100, 0x0000, 0x2210,
                USB_SC_UFI, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),
 
 /*
- * Reported by Ondrej Zary <linux@rainbow-software.org>
+ * Reported by Ondrej Zary <linux@zary.sk>
  * The device reports one sector more and breaks when that sector is accessed
+ * Firmwares older than 2.6c (the latest one, and the only one that claims
+ * Linux support) also have broken tag handling
  */
+UNUSUAL_DEV(  0x04ce, 0x0002, 0x0000, 0x026b,
+               "ScanLogic",
+               "SL11R-IDE",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_FIX_CAPACITY | US_FL_BULK_IGNORE_TAG),
 UNUSUAL_DEV(  0x04ce, 0x0002, 0x026c, 0x026c,
                "ScanLogic",
                "SL11R-IDE",
index c35a6db993f1b62813b7194846ce44b5078f1490..4051c8cd0cd8ad20623f26f236e12cc58b4a1bad 100644 (file)
@@ -50,7 +50,7 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
                "LaCie",
                "Rugged USB3-FW",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-               US_FL_IGNORE_UAS),
+               US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
 
 /*
  * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
index 9858716698dfe71d1f74e6fc6fd3b02388e20a26..c15eec9cc460a01a96553c00c52dda166767b028 100644 (file)
@@ -696,7 +696,7 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
                tcpm_pd_receive(tcpci->port, &msg);
        }
 
-       if (status & TCPC_ALERT_EXTENDED_STATUS) {
+       if (tcpci->data->vbus_vsafe0v && (status & TCPC_ALERT_EXTENDED_STATUS)) {
                ret = regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, &raw);
                if (!ret && (raw & TCPC_EXTENDED_STATUS_VSAFE0V))
                        tcpm_vbus_change(tcpci->port);
index a4d37205df5491bde61c14294398c89081153741..7f2f3ff1b39112e72b683873c2a031fdb155c644 100644 (file)
@@ -4876,6 +4876,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
                        tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
                break;
        case SRC_ATTACHED:
+       case SRC_STARTUP:
        case SRC_SEND_CAPABILITIES:
        case SRC_READY:
                if (tcpm_port_is_disconnected(port) ||
index 21b3ae25c76d2c210396af4d9f6dc6eeabf25a33..ea4cc0a6e40cc2412195b009069b46a7a5ac7343 100644 (file)
@@ -625,10 +625,6 @@ static int tps6598x_probe(struct i2c_client *client)
        if (ret < 0)
                return ret;
 
-       fwnode = device_get_named_child_node(&client->dev, "connector");
-       if (!fwnode)
-               return -ENODEV;
-
        /*
         * This fwnode has a "compatible" property, but is never populated as a
         * struct device. Instead we simply parse it to read the properties.
@@ -636,7 +632,9 @@ static int tps6598x_probe(struct i2c_client *client)
         * with existing DT files, we work around this by deleting any
         * fwnode_links to/from this fwnode.
         */
-       fw_devlink_purge_absent_suppliers(fwnode);
+       fwnode = device_get_named_child_node(&client->dev, "connector");
+       if (fwnode)
+               fw_devlink_purge_absent_suppliers(fwnode);
 
        tps->role_sw = fwnode_usb_role_switch_get(fwnode);
        if (IS_ERR(tps->role_sw)) {
index 294ba05e6fc978fa06741c93db121126fa8cf992..bd56de7484dcb1d72d2b99cbeef7932adfb81888 100644 (file)
@@ -1714,6 +1714,9 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        struct mlx5_vdpa_virtqueue *mvq;
 
+       if (!mvdev->actual_features)
+               return;
+
        if (!is_index_valid(mvdev, idx))
                return;
 
@@ -2145,6 +2148,8 @@ static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
 
        for (i = 0; i < ndev->mvdev.max_vqs; i++)
                ndev->vqs[i].ready = false;
+
+       ndev->mvdev.cvq.ready = false;
 }
 
 static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
index 29a38ecba19e43ab9a54906e28bff49d960ec15f..26e3d90d1e7c9425f2a3a24ac31001a2d6d1a0dc 100644 (file)
@@ -665,13 +665,11 @@ static void vduse_vdpa_set_config(struct vdpa_device *vdpa, unsigned int offset,
 static int vduse_vdpa_reset(struct vdpa_device *vdpa)
 {
        struct vduse_dev *dev = vdpa_to_vduse(vdpa);
-
-       if (vduse_dev_set_status(dev, 0))
-               return -EIO;
+       int ret = vduse_dev_set_status(dev, 0);
 
        vduse_dev_reset(dev);
 
-       return 0;
+       return ret;
 }
 
 static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
@@ -1593,8 +1591,10 @@ static int vduse_init(void)
 
        vduse_irq_wq = alloc_workqueue("vduse-irq",
                                WQ_HIGHPRI | WQ_SYSFS | WQ_UNBOUND, 0);
-       if (!vduse_irq_wq)
+       if (!vduse_irq_wq) {
+               ret = -ENOMEM;
                goto err_wq;
+       }
 
        ret = vduse_domain_init();
        if (ret)
index 68198e0f2a63106bd345dc8e1ffb8d27bbe49555..a03b5a99c2dac7ec6e6d52838fb752802fc51fbd 100644 (file)
@@ -565,7 +565,7 @@ static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
 }
 
 struct vfio_pci_walk_info {
-       int (*fn)(struct pci_dev *, void *data);
+       int (*fn)(struct pci_dev *pdev, void *data);
        void *data;
        struct pci_dev *pdev;
        bool slot;
index 3a249ee7e14401d0f7459f8c401e2f9aebdce201..28ef323882fb29c66c04b6128021041e70b1f49a 100644 (file)
@@ -467,7 +467,7 @@ static void vhost_tx_batch(struct vhost_net *net,
                .num = nvq->batched_xdp,
                .ptr = nvq->xdp,
        };
-       int err;
+       int i, err;
 
        if (nvq->batched_xdp == 0)
                goto signal_used;
@@ -476,6 +476,15 @@ static void vhost_tx_batch(struct vhost_net *net,
        err = sock->ops->sendmsg(sock, msghdr, 0);
        if (unlikely(err < 0)) {
                vq_err(&nvq->vq, "Fail to batch sending packets\n");
+
+               /* free pages owned by XDP; since this is an unlikely error path,
+                * keep it simple and avoid more complex bulk update for the
+                * used pages
+                */
+               for (i = 0; i < nvq->batched_xdp; ++i)
+                       put_page(virt_to_head_page(nvq->xdp[i].data));
+               nvq->batched_xdp = 0;
+               nvq->done_idx = 0;
                return;
        }
 
index f41d081777f58b0f8f71b5d4b2a88a1d3a9abb02..35927ceb26ff32bd00476889b2c3e53fa309a8ed 100644 (file)
@@ -640,7 +640,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
        u64 offset, map_size, map_iova = iova;
        struct vdpa_map_file *map_file;
        struct vm_area_struct *vma;
-       int ret;
+       int ret = 0;
 
        mmap_read_lock(dev->mm);
 
index d33c5cd684c0b8d0780426b7310286cc94d5ffaf..6ed5e608dd04129fc7933789ecb7a74f3583cacc 100644 (file)
@@ -582,7 +582,9 @@ config FB_HP300
 
 config FB_TGA
        tristate "TGA/SFB+ framebuffer support"
-       depends on FB && (ALPHA || TC)
+       depends on FB
+       depends on PCI || TC
+       depends on ALPHA || TC
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
@@ -2191,8 +2193,9 @@ config FB_HYPERV
          This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
 
 config FB_SIMPLE
-       bool "Simple framebuffer support"
-       depends on (FB = y) && !DRM_SIMPLEDRM
+       tristate "Simple framebuffer support"
+       depends on FB
+       depends on !DRM_SIMPLEDRM
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
index c5b99a4861e8709db4e96e7d683dc1731a945491..6b4d5a7f3e152b57cef93bb3f5f78966478d0741 100644 (file)
@@ -1267,7 +1267,7 @@ static struct platform_device *gbefb_device;
 static int __init gbefb_init(void)
 {
        int ret = platform_driver_register(&gbefb_driver);
-       if (!ret) {
+       if (IS_ENABLED(CONFIG_SGI_IP32) && !ret) {
                gbefb_device = platform_device_alloc("gbefb", 0);
                if (gbefb_device) {
                        ret = platform_device_add(gbefb_device);
index 588e02fb91d378aebf222d9f7d39da4d0348e1f9..0a5b54034d4b05a5d0c523f349e1f13cfed369ff 100644 (file)
@@ -345,8 +345,13 @@ static int virtio_device_of_init(struct virtio_device *dev)
        ret = snprintf(compat, sizeof(compat), "virtio,device%x", dev->id.device);
        BUG_ON(ret >= sizeof(compat));
 
+       /*
+        * On powerpc/pseries virtio devices are PCI devices so PCI
+        * vendor/device ids play the role of the "compatible" property.
+        * Simply don't init of_node in this case.
+        */
        if (!of_device_is_compatible(np, compat)) {
-               ret = -EINVAL;
+               ret = 0;
                goto out;
        }
 
index b81fe4f7d4341b122fc5e706221f80f63e04a6af..bf59faeb3de1baea554ca0e9d4bd597bf86a673a 100644 (file)
@@ -1666,7 +1666,7 @@ config WDT_MTX1
 
 config SIBYTE_WDOG
        tristate "Sibyte SoC hardware watchdog"
-       depends on CPU_SB1 || (MIPS && COMPILE_TEST)
+       depends on CPU_SB1
        help
          Watchdog driver for the built in watchdog hardware in Sibyte
          SoC processors.  There are apparently two watchdog timers
index 5f1ce59b44b95c5bdd4aad5a04dcd61559585eca..1b2c3aca6887c848678c822261af8662b100b506 100644 (file)
@@ -177,6 +177,7 @@ config XEN_GRANT_DMA_ALLOC
 
 config SWIOTLB_XEN
        def_bool y
+       depends on XEN_PV || ARM || ARM64
        select DMA_OPS
        select SWIOTLB
 
@@ -214,7 +215,7 @@ config XEN_PVCALLS_FRONTEND
          implements them.
 
 config XEN_PVCALLS_BACKEND
-       bool "XEN PV Calls backend driver"
+       tristate "XEN PV Calls backend driver"
        depends on INET && XEN && XEN_BACKEND
        help
          Experimental backend for the Xen PV Calls protocol
@@ -240,7 +241,7 @@ config XEN_PRIVCMD
 
 config XEN_ACPI_PROCESSOR
        tristate "Xen ACPI processor"
-       depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
+       depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
        default m
        help
          This ACPI processor uploads Power Management information to the Xen
@@ -258,7 +259,7 @@ config XEN_ACPI_PROCESSOR
 
 config XEN_MCE_LOG
        bool "Xen platform mcelog"
-       depends on XEN_DOM0 && X86_MCE
+       depends on XEN_PV_DOM0 && X86_MCE
        help
          Allow kernel fetching MCE error from Xen platform and
          converting it into Linux mcelog format for mcelog tools
index 671c71245a7b2f6c2e985c81b8c5c9b5bd1e9bcf..3a50f097ed3edb9ea9d8c712cc84fc2493032457 100644 (file)
@@ -43,6 +43,8 @@
 #include <linux/sched.h>
 #include <linux/cred.h>
 #include <linux/errno.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <linux/mm.h>
 #include <linux/memblock.h>
 #include <linux/pagemap.h>
@@ -115,7 +117,7 @@ static struct ctl_table xen_root[] = {
 #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
 
 /*
- * balloon_process() state:
+ * balloon_thread() state:
  *
  * BP_DONE: done or nothing to do,
  * BP_WAIT: wait to be rescheduled,
@@ -130,6 +132,8 @@ enum bp_state {
        BP_ECANCELED
 };
 
+/* Main waiting point for xen-balloon thread. */
+static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
 
 static DEFINE_MUTEX(balloon_mutex);
 
@@ -144,10 +148,6 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
 static LIST_HEAD(ballooned_pages);
 static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 
-/* Main work function, always executed in process context. */
-static void balloon_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
-
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -366,7 +366,7 @@ static void xen_online_page(struct page *page, unsigned int order)
 static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
 {
        if (val == MEM_ONLINE)
-               schedule_delayed_work(&balloon_worker, 0);
+               wake_up(&balloon_thread_wq);
 
        return NOTIFY_OK;
 }
@@ -491,18 +491,52 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 }
 
 /*
- * As this is a work item it is guaranteed to run as a single instance only.
+ * Stop waiting if either state is BP_DONE and ballooning action is
+ * needed, or if the credit has changed while state is not BP_DONE.
+ */
+static bool balloon_thread_cond(enum bp_state state, long credit)
+{
+       if (state == BP_DONE)
+               credit = 0;
+
+       return current_credit() != credit || kthread_should_stop();
+}
+
+/*
+ * As this is a kthread it is guaranteed to run as a single instance only.
  * We may of course race updates of the target counts (which are protected
  * by the balloon lock), or with changes to the Xen hard limit, but we will
  * recover from these in time.
  */
-static void balloon_process(struct work_struct *work)
+static int balloon_thread(void *unused)
 {
        enum bp_state state = BP_DONE;
        long credit;
+       unsigned long timeout;
+
+       set_freezable();
+       for (;;) {
+               switch (state) {
+               case BP_DONE:
+               case BP_ECANCELED:
+                       timeout = 3600 * HZ;
+                       break;
+               case BP_EAGAIN:
+                       timeout = balloon_stats.schedule_delay * HZ;
+                       break;
+               case BP_WAIT:
+                       timeout = HZ;
+                       break;
+               }
+
+               credit = current_credit();
 
+               wait_event_freezable_timeout(balloon_thread_wq,
+                       balloon_thread_cond(state, credit), timeout);
+
+               if (kthread_should_stop())
+                       return 0;
 
-       do {
                mutex_lock(&balloon_mutex);
 
                credit = current_credit();
@@ -529,12 +563,7 @@ static void balloon_process(struct work_struct *work)
                mutex_unlock(&balloon_mutex);
 
                cond_resched();
-
-       } while (credit && state == BP_DONE);
-
-       /* Schedule more work if there is some still to be done. */
-       if (state == BP_EAGAIN)
-               schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
+       }
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
@@ -542,7 +571,7 @@ void balloon_set_new_target(unsigned long target)
 {
        /* No need for lock. Not read-modify-write updates. */
        balloon_stats.target_pages = target;
-       schedule_delayed_work(&balloon_worker, 0);
+       wake_up(&balloon_thread_wq);
 }
 EXPORT_SYMBOL_GPL(balloon_set_new_target);
 
@@ -647,7 +676,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
 
        /* The balloon may be too large now. Shrink it if needed. */
        if (current_credit())
-               schedule_delayed_work(&balloon_worker, 0);
+               wake_up(&balloon_thread_wq);
 
        mutex_unlock(&balloon_mutex);
 }
@@ -679,6 +708,8 @@ static void __init balloon_add_region(unsigned long start_pfn,
 
 static int __init balloon_init(void)
 {
+       struct task_struct *task;
+
        if (!xen_domain())
                return -ENODEV;
 
@@ -722,6 +753,12 @@ static int __init balloon_init(void)
        }
 #endif
 
+       task = kthread_run(balloon_thread, NULL, "xen-balloon");
+       if (IS_ERR(task)) {
+               pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
+               return PTR_ERR(task);
+       }
+
        /* Init the xen-balloon driver. */
        xen_balloon_init();
 
index 1e7f6b1c0c971a927cd3a6eec487b3fa48000982..fec1b6537166502e5b6dfedf058f83ffcd4e2e63 100644 (file)
@@ -381,6 +381,14 @@ static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
                        map->unmap_ops[offset+i].handle,
                        map->unmap_ops[offset+i].status);
                map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
+               if (use_ptemod) {
+                       if (map->kunmap_ops[offset+i].status)
+                               err = -EINVAL;
+                       pr_debug("kunmap handle=%u st=%d\n",
+                                map->kunmap_ops[offset+i].handle,
+                                map->kunmap_ops[offset+i].status);
+                       map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
+               }
        }
        return err;
 }
index 720a7b7abd46d690f2ed86f544064403ab4f1647..3369734108af23724e728531ce4024ea752b7fbc 100644 (file)
@@ -257,7 +257,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
        LIST_HEAD(pagelist);
        struct mmap_gfn_state state;
 
-       /* We only support privcmd_ioctl_mmap_batch for auto translated. */
+       /* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;
 
@@ -420,7 +420,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
        int rc;
        struct page **pages;
 
-       pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
+       pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
        if (pages == NULL)
                return -ENOMEM;
 
@@ -428,7 +428,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
        if (rc != 0) {
                pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
                        numpgs, rc);
-               kfree(pages);
+               kvfree(pages);
                return -ENOMEM;
        }
        BUG_ON(vma->vm_private_data != NULL);
@@ -803,21 +803,21 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
                unsigned int domid =
                        (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
                        DOMID_SELF : kdata.dom;
-               int num;
+               int num, *errs = (int *)pfns;
 
+               BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
                num = xen_remap_domain_mfn_array(vma,
                                                 kdata.addr & PAGE_MASK,
-                                                pfns, kdata.num, (int *)pfns,
+                                                pfns, kdata.num, errs,
                                                 vma->vm_page_prot,
-                                                domid,
-                                                vma->vm_private_data);
+                                                domid);
                if (num < 0)
                        rc = num;
                else if (num != kdata.num) {
                        unsigned int i;
 
                        for (i = 0; i < num; i++) {
-                               rc = pfns[i];
+                               rc = errs[i];
                                if (rc < 0)
                                        break;
                        }
@@ -912,7 +912,7 @@ static void privcmd_close(struct vm_area_struct *vma)
        else
                pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
                        numpgs, rc);
-       kfree(pages);
+       kvfree(pages);
 }
 
 static vm_fault_t privcmd_fault(struct vm_fault *vmf)
index 643fe440c46e5ebb4ba0042da6506d3910475699..e56a5faac395c6684413d7d0047d2a2ae9714a6d 100644 (file)
@@ -106,27 +106,26 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 
 static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
-       int i, rc;
-       int dma_bits;
+       int rc;
+       unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+       unsigned int i, dma_bits = order + PAGE_SHIFT;
        dma_addr_t dma_handle;
        phys_addr_t p = virt_to_phys(buf);
 
-       dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
+       BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
+       BUG_ON(nslabs % IO_TLB_SEGSIZE);
 
        i = 0;
        do {
-               int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
-
                do {
                        rc = xen_create_contiguous_region(
-                               p + (i << IO_TLB_SHIFT),
-                               get_order(slabs << IO_TLB_SHIFT),
+                               p + (i << IO_TLB_SHIFT), order,
                                dma_bits, &dma_handle);
                } while (rc && dma_bits++ < MAX_DMA_BITS);
                if (rc)
                        return rc;
 
-               i += slabs;
+               i += IO_TLB_SEGSIZE;
        } while (i < nslabs);
        return 0;
 }
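The rewritten loop hands Xen exactly one IO-TLB segment per xen_create_contiguous_region() call. Assuming the usual constants (IO_TLB_SHIFT of 11, i.e. 2 KiB slabs, IO_TLB_SEGSIZE of 128, 4 KiB pages), the derived values work out as below; this is an illustration, not code from the patch:

/*
 * segment size = IO_TLB_SEGSIZE << IO_TLB_SHIFT = 128 * 2 KiB = 256 KiB
 * order        = get_order(256 KiB)             = 6  (64 pages)
 * dma_bits     = order + PAGE_SHIFT             = 6 + 12 = 18
 *
 * Each iteration therefore requests one physically contiguous 256 KiB
 * region, and "i += IO_TLB_SEGSIZE" advances a whole segment at a time,
 * which is what the new BUG_ON(nslabs % IO_TLB_SEGSIZE) enforces.
 */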
@@ -153,9 +152,7 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
        return "";
 }
 
-#define DEFAULT_NSLABS         ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
-
-int __ref xen_swiotlb_init(void)
+int xen_swiotlb_init(void)
 {
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned long bytes = swiotlb_size_or_default();
@@ -185,7 +182,7 @@ retry:
                order--;
        }
        if (!start)
-               goto error;
+               goto exit;
        if (order != get_order(bytes)) {
                pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                        (PAGE_SIZE << order) >> 20);
@@ -208,15 +205,15 @@ retry:
        swiotlb_set_max_segment(PAGE_SIZE);
        return 0;
 error:
-       if (repeat--) {
+       if (nslabs > 1024 && repeat--) {
                /* Min is 2MB */
-               nslabs = max(1024UL, (nslabs >> 1));
-               pr_info("Lowering to %luMB\n",
-                       (nslabs << IO_TLB_SHIFT) >> 20);
+               nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
+               bytes = nslabs << IO_TLB_SHIFT;
+               pr_info("Lowering to %luMB\n", bytes >> 20);
                goto retry;
        }
+exit:
        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
-       free_pages((unsigned long)start, order);
        return rc;
 }
 
@@ -233,10 +230,11 @@ retry:
        /*
         * Get IO TLB memory from any location.
         */
-       start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
+       start = memblock_alloc(PAGE_ALIGN(bytes),
+                              IO_TLB_SEGSIZE << IO_TLB_SHIFT);
        if (!start)
-               panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-                     __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
+               panic("%s: Failed to allocate %lu bytes\n",
+                     __func__, PAGE_ALIGN(bytes));
 
        /*
         * And replace that memory with pages under 4GB.
@@ -244,9 +242,9 @@ retry:
        rc = xen_swiotlb_fixup(start, nslabs);
        if (rc) {
                memblock_free(__pa(start), PAGE_ALIGN(bytes));
-               if (repeat--) {
+               if (nslabs > 1024 && repeat--) {
                        /* Min is 2MB */
-                       nslabs = max(1024UL, (nslabs >> 1));
+                       nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
                        bytes = nslabs << IO_TLB_SHIFT;
                        pr_info("Lowering to %luMB\n", bytes >> 20);
                        goto retry;
@@ -254,7 +252,7 @@ retry:
                panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
        }
 
-       if (swiotlb_init_with_tbl(start, nslabs, false))
+       if (swiotlb_init_with_tbl(start, nslabs, true))
                panic("Cannot allocate SWIOTLB buffer");
        swiotlb_set_max_segment(PAGE_SIZE);
 }
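Both retry paths above now shrink the buffer in segment-sized steps and stop at 1024 slabs instead of retrying again at the same minimum size. Assuming the default 64 MB table and 2 KiB slabs, the shrink sequence looks like this (illustration only):

/*
 * 32768 slabs = 64 MiB -> 16384 slabs = 32 MiB -> 8192 slabs = 16 MiB
 *   -> ... -> 1024 slabs = 2 MiB ("Min is 2MB"), where "nslabs > 1024"
 * stops further retries.  ALIGN(nslabs >> 1, IO_TLB_SEGSIZE) keeps each
 * retry size a whole number of 128-slab segments, matching the new
 * assumption in xen_swiotlb_fixup().
 */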
index eb2151fb60494c9330b29dcd136a577f9fdf2286..1769a44f4819275130de594a45731a9c0af9f6c4 100644 (file)
@@ -23,7 +23,7 @@ struct fscache_netfs v9fs_cache_netfs = {
        .version        = 0,
 };
 
-/**
+/*
  * v9fs_random_cachetag - Generate a random tag to be associated
  *                       with a new cache session.
  *
@@ -233,7 +233,7 @@ static void v9fs_vfs_readpage_complete(struct page *page, void *data,
        unlock_page(page);
 }
 
-/**
+/*
  * __v9fs_readpage_from_fscache - read a page from cache
  *
  * Returns 0 if the pages are in cache and a BIO is submitted,
@@ -268,7 +268,7 @@ int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
        }
 }
 
-/**
+/*
  * __v9fs_readpages_from_fscache - read multiple pages from cache
  *
  * Returns 0 if the pages are in cache and a BIO is submitted,
@@ -308,7 +308,7 @@ int __v9fs_readpages_from_fscache(struct inode *inode,
        }
 }
 
-/**
+/*
  * __v9fs_readpage_to_fscache - write a page to the cache
  *
  */
index 9d9de62592be22e3a331eafc3556a190708fc698..b8863dd0de5cca569da550414f6c6fb446c6dda8 100644 (file)
 #include "v9fs_vfs.h"
 #include "fid.h"
 
+static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid)
+{
+       hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata);
+}
+
+
 /**
  * v9fs_fid_add - add a fid to a dentry
  * @dentry: dentry that the fid is being added to
  * @fid: fid to add
  *
  */
-
-static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid)
-{
-       hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata);
-}
-
 void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
 {
        spin_lock(&dentry->d_lock);
@@ -67,7 +67,7 @@ static struct p9_fid *v9fs_fid_find_inode(struct inode *inode, kuid_t uid)
 
 /**
  * v9fs_open_fid_add - add an open fid to an inode
- * @dentry: inode that the fid is being added to
+ * @inode: inode that the fid is being added to
  * @fid: fid to add
  *
  */
index cdb99507ef33d6e2c6e358d28ce2859260684506..2e0fa7c932db0e8552233e69268678bd42a52089 100644 (file)
@@ -155,6 +155,7 @@ int v9fs_show_options(struct seq_file *m, struct dentry *root)
 /**
  * v9fs_parse_options - parse mount options into session structure
  * @v9ses: existing v9fs session information
+ * @opts: The mount option string
  *
  * Return 0 upon success, -ERRNO upon failure.
  */
@@ -542,12 +543,9 @@ extern int v9fs_error_init(void);
 static struct kobject *v9fs_kobj;
 
 #ifdef CONFIG_9P_FSCACHE
-/**
- * caches_show - list caches associated with a session
- *
- * Returns the size of buffer written.
+/*
+ * List caches associated with a session
  */
-
 static ssize_t caches_show(struct kobject *kobj,
                           struct kobj_attribute *attr,
                           char *buf)
index cce9ace651a2dbce718bf0d344cd6eae095fffd5..1c4f1b39cc9505e4d29ed0ce3fac52be8036217f 100644 (file)
@@ -30,8 +30,7 @@
 
 /**
  * v9fs_fid_readpage - read an entire page in from 9P
- *
- * @fid: fid being read
+ * @data: Opaque pointer to the fid being read
  * @page: structure to page
  *
  */
@@ -116,6 +115,8 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
 
 /**
  * v9fs_release_page - release the private state associated with a page
+ * @page: The page to be released
+ * @gfp: The caller's allocation restrictions
  *
  * Returns 1 if the page can be released, false otherwise.
  */
@@ -129,9 +130,9 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
 
 /**
  * v9fs_invalidate_page - Invalidate a page completely or partially
- *
- * @page: structure to page
- * @offset: offset in the page
+ * @page: The page to be invalidated
+ * @offset: offset of the invalidated region
+ * @length: length of the invalidated region
  */
 
 static void v9fs_invalidate_page(struct page *page, unsigned int offset,
@@ -199,6 +200,8 @@ static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
 
 /**
  * v9fs_launder_page - Writeback a dirty page
+ * @page: The page to be cleaned up
+ *
  * Returns 0 on success.
  */
 
@@ -219,6 +222,7 @@ static int v9fs_launder_page(struct page *page)
 /**
  * v9fs_direct_IO - 9P address space operation for direct I/O
  * @iocb: target I/O control block
+ * @iter: The data/buffer to use
  *
  * The presence of v9fs_direct_IO() in the address space ops vector
  * allowes open() O_DIRECT flags which would have failed otherwise.
index aab5e653866040bc337781f05937190fd1426100..246235ebdb70a4b1f4b5bf6afb60db056e4426a5 100644 (file)
@@ -359,14 +359,11 @@ out_err:
 }
 
 /**
- * v9fs_file_read - read from a file
- * @filp: file pointer to read
- * @udata: user data buffer to read data into
- * @count: size of buffer
- * @offset: offset at which to read data
+ * v9fs_file_read_iter - read from a file
+ * @iocb: The operation parameters
+ * @to: The buffer to read into
  *
  */
-
 static ssize_t
 v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
@@ -388,11 +385,9 @@ v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 }
 
 /**
- * v9fs_file_write - write to a file
- * @filp: file pointer to write
- * @data: data buffer to write data from
- * @count: size of buffer
- * @offset: offset at which to write data
+ * v9fs_file_write_iter - write to a file
+ * @iocb: The operation parameters
+ * @from: The data to write
  *
  */
 static ssize_t
@@ -561,11 +556,9 @@ out_unlock:
 }
 
 /**
- * v9fs_mmap_file_read - read from a file
- * @filp: file pointer to read
- * @data: user data buffer to read data into
- * @count: size of buffer
- * @offset: offset at which to read data
+ * v9fs_mmap_file_read_iter - read from a file
+ * @iocb: The operation parameters
+ * @to: The buffer to read into
  *
  */
 static ssize_t
@@ -576,11 +569,9 @@ v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 }
 
 /**
- * v9fs_mmap_file_write - write to a file
- * @filp: file pointer to write
- * @data: data buffer to write data from
- * @count: size of buffer
- * @offset: offset at which to write data
+ * v9fs_mmap_file_write_iter - write to a file
+ * @iocb: The operation parameters
+ * @from: The data to write
  *
  */
 static ssize_t
index 795706520b5e716783148c029248d84cd9a6eef6..08f48b70a741456c978ee2475d8a94d69560eb1f 100644 (file)
@@ -218,7 +218,7 @@ v9fs_blank_wstat(struct p9_wstat *wstat)
 
 /**
  * v9fs_alloc_inode - helper function to allocate an inode
- *
+ * @sb: The superblock to allocate the inode from
  */
 struct inode *v9fs_alloc_inode(struct super_block *sb)
 {
@@ -238,7 +238,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
 
 /**
  * v9fs_free_inode - destroy an inode
- *
+ * @inode: The inode to be freed
  */
 
 void v9fs_free_inode(struct inode *inode)
@@ -343,7 +343,7 @@ error:
  * v9fs_get_inode - helper function to setup an inode
  * @sb: superblock
  * @mode: mode to setup inode with
- *
+ * @rdev: The device numbers to set
  */
 
 struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
@@ -369,7 +369,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
 }
 
 /**
- * v9fs_clear_inode - release an inode
+ * v9fs_evict_inode - Remove an inode from the inode cache
  * @inode: inode to release
  *
  */
@@ -665,14 +665,15 @@ error:
 
 /**
  * v9fs_vfs_create - VFS hook to create a regular file
+ * @mnt_userns: The user namespace of the mount
+ * @dir: The parent directory
+ * @dentry: The name of file to be created
+ * @mode: The UNIX file mode to set
+ * @excl: True if the file must not yet exist
  *
  * open(.., O_CREAT) is handled in v9fs_vfs_atomic_open().  This is only called
  * for mknod(2).
  *
- * @dir: directory inode that is being created
- * @dentry:  dentry that is being deleted
- * @mode: create permissions
- *
  */
 
 static int
@@ -696,6 +697,7 @@ v9fs_vfs_create(struct user_namespace *mnt_userns, struct inode *dir,
 
 /**
  * v9fs_vfs_mkdir - VFS mkdir hook to create a directory
+ * @mnt_userns: The user namespace of the mount
  * @dir:  inode that is being unlinked
  * @dentry: dentry that is being unlinked
  * @mode: mode for new directory
@@ -900,10 +902,12 @@ int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
 
 /**
  * v9fs_vfs_rename - VFS hook to rename an inode
+ * @mnt_userns: The user namespace of the mount
  * @old_dir:  old dir inode
  * @old_dentry: old dentry
  * @new_dir: new dir inode
  * @new_dentry: new dentry
+ * @flags: RENAME_* flags
  *
  */
 
@@ -1009,6 +1013,7 @@ done:
 
 /**
  * v9fs_vfs_getattr - retrieve file metadata
+ * @mnt_userns: The user namespace of the mount
  * @path: Object to query
  * @stat: metadata structure to populate
  * @request_mask: Mask of STATX_xxx flags indicating the caller's interests
@@ -1050,6 +1055,7 @@ v9fs_vfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
 
 /**
  * v9fs_vfs_setattr - set file metadata
+ * @mnt_userns: The user namespace of the mount
  * @dentry: file whose metadata to set
  * @iattr: metadata assignment structure
  *
@@ -1285,6 +1291,7 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
 
 /**
  * v9fs_vfs_symlink - helper function to create symlinks
+ * @mnt_userns: The user namespace of the mount
  * @dir: directory inode containing symlink
  * @dentry: dentry for symlink
  * @symname: symlink data
@@ -1340,6 +1347,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
 
 /**
  * v9fs_vfs_mknod - create a special file
+ * @mnt_userns: The user namespace of the mount
  * @dir: inode destination for new link
  * @dentry: dentry for file
  * @mode: mode for creation
index e1c0240b51c0350c99605eb502f109f2949337f3..01b9e1281a297968e13cdab4349f7df11fd6e263 100644 (file)
@@ -37,7 +37,10 @@ v9fs_vfs_mknod_dotl(struct user_namespace *mnt_userns, struct inode *dir,
                    struct dentry *dentry, umode_t omode, dev_t rdev);
 
 /**
- * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
+ * v9fs_get_fsgid_for_create - Helper function to get the gid for a new object
+ * @dir_inode: The directory inode
+ *
+ * Helper function to get the gid for creating a
  * new file system object. This checks the S_ISGID to determine the owning
  * group of the new file system object.
  */
@@ -211,12 +214,13 @@ int v9fs_open_to_dotl_flags(int flags)
 
 /**
  * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
+ * @mnt_userns: The user namespace of the mount
  * @dir: directory inode that is being created
  * @dentry:  dentry that is being deleted
  * @omode: create permissions
+ * @excl: True if the file must not yet exist
  *
  */
-
 static int
 v9fs_vfs_create_dotl(struct user_namespace *mnt_userns, struct inode *dir,
                     struct dentry *dentry, umode_t omode, bool excl)
@@ -361,6 +365,7 @@ err_clunk_old_fid:
 
 /**
  * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
+ * @mnt_userns: The user namespace of the mount
  * @dir:  inode that is being unlinked
  * @dentry: dentry that is being unlinked
  * @omode: mode for new directory
@@ -537,6 +542,7 @@ static int v9fs_mapped_iattr_valid(int iattr_valid)
 
 /**
  * v9fs_vfs_setattr_dotl - set file metadata
+ * @mnt_userns: The user namespace of the mount
  * @dentry: file whose metadata to set
  * @iattr: metadata assignment structure
  *
@@ -816,6 +822,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
 
 /**
  * v9fs_vfs_mknod_dotl - create a special file
+ * @mnt_userns: The user namespace of the mount
  * @dir: inode destination for new link
  * @dentry: dentry for file
  * @omode: mode for creation
index 7d9b23d981bf1aaac48478b3843e39a5c7f1f9bc..1b4d5809808d0d232d6a66d1c4a2b74d8bd980f7 100644 (file)
 #include <linux/sched.h>
 #include "internal.h"
 
+/*
+ * Handle invalidation of an mmap'd file.  We invalidate all the PTEs referring
+ * to the pages in this file's pagecache, forcing the kernel to go through
+ * ->fault() or ->page_mkwrite() - at which point we can handle invalidation
+ * more fully.
+ */
+void afs_invalidate_mmap_work(struct work_struct *work)
+{
+       struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);
+
+       unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false);
+}
+
+void afs_server_init_callback_work(struct work_struct *work)
+{
+       struct afs_server *server = container_of(work, struct afs_server, initcb_work);
+       struct afs_vnode *vnode;
+       struct afs_cell *cell = server->cell;
+
+       down_read(&cell->fs_open_mmaps_lock);
+
+       list_for_each_entry(vnode, &cell->fs_open_mmaps, cb_mmap_link) {
+               if (vnode->cb_server == server) {
+                       clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+                       queue_work(system_unbound_wq, &vnode->cb_work);
+               }
+       }
+
+       up_read(&cell->fs_open_mmaps_lock);
+}
+
 /*
  * Allow the fileserver to request callback state (re-)initialisation.
  * Unfortunately, UUIDs are not guaranteed unique.
@@ -29,8 +60,11 @@ void afs_init_callback_state(struct afs_server *server)
        rcu_read_lock();
        do {
                server->cb_s_break++;
-               server = rcu_dereference(server->uuid_next);
-       } while (0);
+               atomic_inc(&server->cell->fs_s_break);
+               if (!list_empty(&server->cell->fs_open_mmaps))
+                       queue_work(system_unbound_wq, &server->initcb_work);
+
+       } while ((server = rcu_dereference(server->uuid_next)));
        rcu_read_unlock();
 }
 
@@ -44,11 +78,17 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
        clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
        if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
                vnode->cb_break++;
+               vnode->cb_v_break = vnode->volume->cb_v_break;
                afs_clear_permits(vnode);
 
                if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
                        afs_lock_may_be_available(vnode);
 
+               if (reason != afs_cb_break_for_deleted &&
+                   vnode->status.type == AFS_FTYPE_FILE &&
+                   atomic_read(&vnode->cb_nr_mmap))
+                       queue_work(system_unbound_wq, &vnode->cb_work);
+
                trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
        } else {
                trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false);
index 887b673f6223022f2b9699503ef80796736f52be..d88407fb9bc09a8bb0e60ad7c9db320a028cab51 100644 (file)
@@ -166,6 +166,8 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
        seqlock_init(&cell->volume_lock);
        cell->fs_servers = RB_ROOT;
        seqlock_init(&cell->fs_lock);
+       INIT_LIST_HEAD(&cell->fs_open_mmaps);
+       init_rwsem(&cell->fs_open_mmaps_lock);
        rwlock_init(&cell->vl_servers_lock);
        cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
 
index ac829e63c5704cdf4cf421393728fb803a1ad927..4579bbda46346ce4f464a8e6cbeba6329ef1e261 100644 (file)
@@ -1077,9 +1077,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
  */
 static int afs_d_revalidate_rcu(struct dentry *dentry)
 {
-       struct afs_vnode *dvnode, *vnode;
+       struct afs_vnode *dvnode;
        struct dentry *parent;
-       struct inode *dir, *inode;
+       struct inode *dir;
        long dir_version, de_version;
 
        _enter("%p", dentry);
@@ -1109,18 +1109,6 @@ static int afs_d_revalidate_rcu(struct dentry *dentry)
                        return -ECHILD;
        }
 
-       /* Check to see if the vnode referred to by the dentry still
-        * has a callback.
-        */
-       if (d_really_is_positive(dentry)) {
-               inode = d_inode_rcu(dentry);
-               if (inode) {
-                       vnode = AFS_FS_I(inode);
-                       if (!afs_check_validity(vnode))
-                               return -ECHILD;
-               }
-       }
-
        return 1; /* Still valid */
 }
 
@@ -1156,17 +1144,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        if (IS_ERR(key))
                key = NULL;
 
-       if (d_really_is_positive(dentry)) {
-               inode = d_inode(dentry);
-               if (inode) {
-                       vnode = AFS_FS_I(inode);
-                       afs_validate(vnode, key);
-                       if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
-                               goto out_bad;
-               }
-       }
-
-       /* lock down the parent dentry so we can peer at it */
+       /* Hold the parent dentry so we can peer at it */
        parent = dget_parent(dentry);
        dir = AFS_FS_I(d_inode(parent));
 
@@ -1175,7 +1153,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 
        if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
                _debug("%pd: parent dir deleted", dentry);
-               goto out_bad_parent;
+               goto not_found;
        }
 
        /* We only need to invalidate a dentry if the server's copy changed
@@ -1201,12 +1179,12 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        case 0:
                /* the filename maps to something */
                if (d_really_is_negative(dentry))
-                       goto out_bad_parent;
+                       goto not_found;
                inode = d_inode(dentry);
                if (is_bad_inode(inode)) {
                        printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n",
                               dentry);
-                       goto out_bad_parent;
+                       goto not_found;
                }
 
                vnode = AFS_FS_I(inode);
@@ -1228,9 +1206,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
                               dentry, fid.unique,
                               vnode->fid.unique,
                               vnode->vfs_inode.i_generation);
-                       write_seqlock(&vnode->cb_lock);
-                       set_bit(AFS_VNODE_DELETED, &vnode->flags);
-                       write_sequnlock(&vnode->cb_lock);
                        goto not_found;
                }
                goto out_valid;
@@ -1245,7 +1220,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        default:
                _debug("failed to iterate dir %pd: %d",
                       parent, ret);
-               goto out_bad_parent;
+               goto not_found;
        }
 
 out_valid:
@@ -1256,16 +1231,9 @@ out_valid_noupdate:
        _leave(" = 1 [valid]");
        return 1;
 
-       /* the dirent, if it exists, now points to a different vnode */
 not_found:
-       spin_lock(&dentry->d_lock);
-       dentry->d_flags |= DCACHE_NFSFS_RENAMED;
-       spin_unlock(&dentry->d_lock);
-
-out_bad_parent:
        _debug("dropping dentry %pd2", dentry);
        dput(parent);
-out_bad:
        key_put(key);
 
        _leave(" = 0 [bad]");
@@ -1792,6 +1760,10 @@ static int afs_link(struct dentry *from, struct inode *dir,
                goto error;
        }
 
+       ret = afs_validate(vnode, op->key);
+       if (ret < 0)
+               goto error_op;
+
        afs_op_set_vnode(op, 0, dvnode);
        afs_op_set_vnode(op, 1, vnode);
        op->file[0].dv_delta = 1;
@@ -1805,6 +1777,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
        op->create.reason       = afs_edit_dir_for_link;
        return afs_do_sync_operation(op);
 
+error_op:
+       afs_put_operation(op);
 error:
        d_drop(dentry);
        _leave(" = %d", ret);
@@ -1989,6 +1963,11 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
        if (IS_ERR(op))
                return PTR_ERR(op);
 
+       ret = afs_validate(vnode, op->key);
+       op->error = ret;
+       if (ret < 0)
+               goto error;
+
        afs_op_set_vnode(op, 0, orig_dvnode);
        afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
        op->file[0].dv_delta = 1;
index f4600c1353adf79fa002c8864ae574bee9c694ec..540b9fc96824adaa16d2d318d709ca67c75ef30c 100644 (file)
@@ -263,7 +263,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
                if (b == nr_blocks) {
                        _debug("init %u", b);
                        afs_edit_init_block(meta, block, b);
-                       i_size_write(&vnode->vfs_inode, (b + 1) * AFS_DIR_BLOCK_SIZE);
+                       afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
                }
 
                /* Only lower dir pages have a counter in the header. */
@@ -296,7 +296,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
 new_directory:
        afs_edit_init_block(meta, meta, 0);
        i_size = AFS_DIR_BLOCK_SIZE;
-       i_size_write(&vnode->vfs_inode, i_size);
+       afs_set_i_size(vnode, i_size);
        slot = AFS_DIR_RESV_BLOCKS0;
        page = page0;
        block = meta;
index dae9a57d7ec0c11b105ffa7fc26c3da40d9a7158..45cfd50a95210b95375f1df9a44db34aabc3bb0c 100644 (file)
@@ -86,8 +86,8 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
        return afs_do_sync_operation(op);
 }
 
-/**
- * afs_sillyrename - Perform a silly-rename of a dentry
+/*
+ * Perform silly-rename of a dentry.
  *
  * AFS is stateless and the server doesn't know when the client is holding a
  * file open.  To prevent application problems when a file is unlinked while
index db035ae2a13451dc97fc10e97a6aeb50088898a4..e6c447ae91f38ab82a0dab2edc876a6afdb0ae76 100644 (file)
@@ -24,12 +24,16 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
 
 static void afs_readahead(struct readahead_control *ractl);
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+static void afs_vm_open(struct vm_area_struct *area);
+static void afs_vm_close(struct vm_area_struct *area);
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);
 
 const struct file_operations afs_file_operations = {
        .open           = afs_open,
        .release        = afs_release,
        .llseek         = generic_file_llseek,
-       .read_iter      = generic_file_read_iter,
+       .read_iter      = afs_file_read_iter,
        .write_iter     = afs_file_write,
        .mmap           = afs_file_mmap,
        .splice_read    = generic_file_splice_read,
@@ -59,8 +63,10 @@ const struct address_space_operations afs_fs_aops = {
 };
 
 static const struct vm_operations_struct afs_vm_ops = {
+       .open           = afs_vm_open,
+       .close          = afs_vm_close,
        .fault          = filemap_fault,
-       .map_pages      = filemap_map_pages,
+       .map_pages      = afs_vm_map_pages,
        .page_mkwrite   = afs_page_mkwrite,
 };
 
@@ -295,7 +301,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
        fsreq->subreq   = subreq;
        fsreq->pos      = subreq->start + subreq->transferred;
        fsreq->len      = subreq->len   - subreq->transferred;
-       fsreq->key      = subreq->rreq->netfs_priv;
+       fsreq->key      = key_get(subreq->rreq->netfs_priv);
        fsreq->vnode    = vnode;
        fsreq->iter     = &fsreq->def_iter;
 
@@ -304,6 +310,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
                        fsreq->pos, fsreq->len);
 
        afs_fetch_data(fsreq->vnode, fsreq);
+       afs_put_read(fsreq);
 }
 
 static int afs_symlink_readpage(struct page *page)
@@ -490,15 +497,88 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
        return 1;
 }
 
+static void afs_add_open_mmap(struct afs_vnode *vnode)
+{
+       if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
+               down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+               list_add_tail(&vnode->cb_mmap_link,
+                             &vnode->volume->cell->fs_open_mmaps);
+
+               up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+       }
+}
+
+static void afs_drop_open_mmap(struct afs_vnode *vnode)
+{
+       if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
+               return;
+
+       down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+       if (atomic_read(&vnode->cb_nr_mmap) == 0)
+               list_del_init(&vnode->cb_mmap_link);
+
+       up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+       flush_work(&vnode->cb_work);
+}
+
 /*
  * Handle setting up a memory mapping on an AFS file.
  */
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        int ret;
 
+       afs_add_open_mmap(vnode);
+
        ret = generic_file_mmap(file, vma);
        if (ret == 0)
                vma->vm_ops = &afs_vm_ops;
+       else
+               afs_drop_open_mmap(vnode);
        return ret;
 }
+
+static void afs_vm_open(struct vm_area_struct *vma)
+{
+       afs_add_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static void afs_vm_close(struct vm_area_struct *vma)
+{
+       afs_drop_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff)
+{
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file));
+       struct afs_file *af = vmf->vma->vm_file->private_data;
+
+       switch (afs_validate(vnode, af->key)) {
+       case 0:
+               return filemap_map_pages(vmf, start_pgoff, end_pgoff);
+       case -ENOMEM:
+               return VM_FAULT_OOM;
+       case -EINTR:
+       case -ERESTARTSYS:
+               return VM_FAULT_RETRY;
+       case -ESTALE:
+       default:
+               return VM_FAULT_SIGBUS;
+       }
+}
+
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+       struct afs_file *af = iocb->ki_filp->private_data;
+       int ret;
+
+       ret = afs_validate(vnode, af->key);
+       if (ret < 0)
+               return ret;
+
+       return generic_file_read_iter(iocb, iter);
+}
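The reason afs_vm_ops gains its own open/close hooks is that VMAs are duplicated and torn down without going back through ->mmap() (fork(), VMA splits, munmap()), so the per-vnode cb_nr_mmap count has to follow VMA lifetime rather than file lifetime. A summary sketch of the pairing added above, in comment form only:

/*
 * mmap()                  -> afs_file_mmap() -> afs_add_open_mmap()
 * fork() / VMA split      -> afs_vm_open()   -> afs_add_open_mmap()
 * munmap() / VMA teardown -> afs_vm_close()  -> afs_drop_open_mmap()
 *
 * Only the 0 -> 1 transition adds the vnode to cell->fs_open_mmaps, and
 * only the 1 -> 0 transition removes it and flushes vnode->cb_work, so
 * callback breaks can cheaply skip files that nobody has mapped.
 */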
index e7e98ad63a91ae8ea126a2ee646814a7ec20ab41..c0031a3ab42f5a921fe94feb7440d4bc1030fac4 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/slab.h>
 #include "afs_fs.h"
 #include "internal.h"
+#include "protocol_afs.h"
 #include "protocol_yfs.h"
 
 static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ;
@@ -102,7 +103,7 @@ void afs_fileserver_probe_result(struct afs_call *call)
        struct afs_addr_list *alist = call->alist;
        struct afs_server *server = call->server;
        unsigned int index = call->addr_ix;
-       unsigned int rtt_us = 0;
+       unsigned int rtt_us = 0, cap0;
        int ret = call->error;
 
        _enter("%pU,%u", &server->uuid, index);
@@ -159,6 +160,11 @@ responded:
                        clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
                        alist->addrs[index].srx_service = call->service_id;
                }
+               cap0 = ntohl(call->tmp);
+               if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES)
+                       set_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
+               else
+                       clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
        }
 
        if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
index dd3f45d906d23cf9ea8926b876a51099fd525cae..4943413d9c5f7ff3011ea2fad230e5a84a6e48e5 100644 (file)
@@ -456,9 +456,7 @@ void afs_fs_fetch_data(struct afs_operation *op)
        struct afs_read *req = op->fetch.req;
        __be32 *bp;
 
-       if (upper_32_bits(req->pos) ||
-           upper_32_bits(req->len) ||
-           upper_32_bits(req->pos + req->len))
+       if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
                return afs_fs_fetch_data64(op);
 
        _enter("");
@@ -1113,9 +1111,7 @@ void afs_fs_store_data(struct afs_operation *op)
               (unsigned long long)op->store.pos,
               (unsigned long long)op->store.i_size);
 
-       if (upper_32_bits(op->store.pos) ||
-           upper_32_bits(op->store.size) ||
-           upper_32_bits(op->store.i_size))
+       if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
                return afs_fs_store_data64(op);
 
        call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData,
@@ -1229,7 +1225,7 @@ static void afs_fs_setattr_size(struct afs_operation *op)
               key_serial(op->key), vp->fid.vid, vp->fid.vnode);
 
        ASSERT(attr->ia_valid & ATTR_SIZE);
-       if (upper_32_bits(attr->ia_size))
+       if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
                return afs_fs_setattr_size64(op);
 
        call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status,
@@ -1657,20 +1653,33 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
                        return ret;
 
                count = ntohl(call->tmp);
-
                call->count = count;
                call->count2 = count;
-               afs_extract_discard(call, count * sizeof(__be32));
+               if (count == 0) {
+                       call->unmarshall = 4;
+                       call->tmp = 0;
+                       break;
+               }
+
+               /* Extract the first word of the capabilities to call->tmp */
+               afs_extract_to_tmp(call);
                call->unmarshall++;
                fallthrough;
 
-               /* Extract capabilities words */
        case 2:
                ret = afs_extract_data(call, false);
                if (ret < 0)
                        return ret;
 
-               /* TODO: Examine capabilities */
+               afs_extract_discard(call, (count - 1) * sizeof(__be32));
+               call->unmarshall++;
+               fallthrough;
+
+               /* Extract remaining capabilities words */
+       case 3:
+               ret = afs_extract_data(call, false);
+               if (ret < 0)
+                       return ret;
 
                call->unmarshall++;
                break;
index 80b6c8d967d5cf1f34f7f6202294decf109c9165..8fcffea2daf50b2d8ac8a72f013c2159480f2407 100644 (file)
@@ -53,16 +53,6 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
                dump_stack();
 }
 
-/*
- * Set the file size and block count.  Estimate the number of 512 bytes blocks
- * used, rounded up to nearest 1K for consistency with other AFS clients.
- */
-static void afs_set_i_size(struct afs_vnode *vnode, u64 size)
-{
-       i_size_write(&vnode->vfs_inode, size);
-       vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
-}
-
 /*
  * Initialise an inode from the vnode status.
  */
@@ -587,22 +577,32 @@ static void afs_zap_data(struct afs_vnode *vnode)
 }
 
 /*
- * Get the server reinit counter for a vnode's current server.
+ * Check to see if we have a server currently serving this volume and that it
+ * hasn't been reinitialised or dropped from the list.
  */
-static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
+static bool afs_check_server_good(struct afs_vnode *vnode)
 {
-       struct afs_server_list *slist = rcu_dereference(vnode->volume->servers);
+       struct afs_server_list *slist;
        struct afs_server *server;
+       bool good;
        int i;
 
+       if (vnode->cb_fs_s_break == atomic_read(&vnode->volume->cell->fs_s_break))
+               return true;
+
+       rcu_read_lock();
+
+       slist = rcu_dereference(vnode->volume->servers);
        for (i = 0; i < slist->nr_servers; i++) {
                server = slist->servers[i].server;
                if (server == vnode->cb_server) {
-                       *_s_break = READ_ONCE(server->cb_s_break);
-                       return true;
+                       good = (vnode->cb_s_break == server->cb_s_break);
+                       rcu_read_unlock();
+                       return good;
                }
        }
 
+       rcu_read_unlock();
        return false;
 }
 
@@ -611,57 +611,46 @@ static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
  */
 bool afs_check_validity(struct afs_vnode *vnode)
 {
-       struct afs_volume *volume = vnode->volume;
        enum afs_cb_break_reason need_clear = afs_cb_break_no_break;
        time64_t now = ktime_get_real_seconds();
-       bool valid;
-       unsigned int cb_break, cb_s_break, cb_v_break;
+       unsigned int cb_break;
        int seq = 0;
 
        do {
                read_seqbegin_or_lock(&vnode->cb_lock, &seq);
-               cb_v_break = READ_ONCE(volume->cb_v_break);
                cb_break = vnode->cb_break;
 
-               if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
-                   afs_get_s_break_rcu(vnode, &cb_s_break)) {
-                       if (vnode->cb_s_break != cb_s_break ||
-                           vnode->cb_v_break != cb_v_break) {
-                               vnode->cb_s_break = cb_s_break;
-                               vnode->cb_v_break = cb_v_break;
-                               need_clear = afs_cb_break_for_vsbreak;
-                               valid = false;
-                       } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+               if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
+                       if (vnode->cb_v_break != vnode->volume->cb_v_break)
+                               need_clear = afs_cb_break_for_v_break;
+                       else if (!afs_check_server_good(vnode))
+                               need_clear = afs_cb_break_for_s_reinit;
+                       else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
                                need_clear = afs_cb_break_for_zap;
-                               valid = false;
-                       } else if (vnode->cb_expires_at - 10 <= now) {
+                       else if (vnode->cb_expires_at - 10 <= now)
                                need_clear = afs_cb_break_for_lapsed;
-                               valid = false;
-                       } else {
-                               valid = true;
-                       }
                } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
-                       valid = true;
+                       ;
                } else {
-                       vnode->cb_v_break = cb_v_break;
-                       valid = false;
+                       need_clear = afs_cb_break_no_promise;
                }
 
        } while (need_seqretry(&vnode->cb_lock, seq));
 
        done_seqretry(&vnode->cb_lock, seq);
 
-       if (need_clear != afs_cb_break_no_break) {
-               write_seqlock(&vnode->cb_lock);
-               if (cb_break == vnode->cb_break)
-                       __afs_break_callback(vnode, need_clear);
-               else
-                       trace_afs_cb_miss(&vnode->fid, need_clear);
-               write_sequnlock(&vnode->cb_lock);
-               valid = false;
-       }
+       if (need_clear == afs_cb_break_no_break)
+               return true;
 
-       return valid;
+       write_seqlock(&vnode->cb_lock);
+       if (need_clear == afs_cb_break_no_promise)
+               vnode->cb_v_break = vnode->volume->cb_v_break;
+       else if (cb_break == vnode->cb_break)
+               __afs_break_callback(vnode, need_clear);
+       else
+               trace_afs_cb_miss(&vnode->fid, need_clear);
+       write_sequnlock(&vnode->cb_lock);
+       return false;
 }
 
 /*
@@ -675,21 +664,20 @@ bool afs_check_validity(struct afs_vnode *vnode)
  */
 int afs_validate(struct afs_vnode *vnode, struct key *key)
 {
-       bool valid;
        int ret;
 
        _enter("{v={%llx:%llu} fl=%lx},%x",
               vnode->fid.vid, vnode->fid.vnode, vnode->flags,
               key_serial(key));
 
-       rcu_read_lock();
-       valid = afs_check_validity(vnode);
-       rcu_read_unlock();
-
-       if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
-               clear_nlink(&vnode->vfs_inode);
+       if (unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) {
+               if (vnode->vfs_inode.i_nlink)
+                       clear_nlink(&vnode->vfs_inode);
+               goto valid;
+       }
 
-       if (valid)
+       if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
+           afs_check_validity(vnode))
                goto valid;
 
        down_write(&vnode->validate_lock);
index 5ed416f4ff335baec6471086032b146b09c5399f..0ad97a8fc0d494d9fb40d02b4efaf7d7d8ba0ff8 100644 (file)
@@ -390,6 +390,9 @@ struct afs_cell {
        /* Active fileserver interaction state. */
        struct rb_root          fs_servers;     /* afs_server (by server UUID) */
        seqlock_t               fs_lock;        /* For fs_servers  */
+       struct rw_semaphore     fs_open_mmaps_lock;
+       struct list_head        fs_open_mmaps;  /* List of vnodes that are mmapped */
+       atomic_t                fs_s_break;     /* Counter of CB.InitCallBackState messages */
 
        /* VL server list. */
        rwlock_t                vl_servers_lock; /* Lock on vl_servers */
@@ -503,6 +506,7 @@ struct afs_server {
        struct hlist_node       addr4_link;     /* Link in net->fs_addresses4 */
        struct hlist_node       addr6_link;     /* Link in net->fs_addresses6 */
        struct hlist_node       proc_link;      /* Link in net->fs_proc */
+       struct work_struct      initcb_work;    /* Work for CB.InitCallBackState* */
        struct afs_server       *gc_next;       /* Next server in manager's list */
        time64_t                unuse_time;     /* Time at which last unused */
        unsigned long           flags;
@@ -516,6 +520,7 @@ struct afs_server {
 #define AFS_SERVER_FL_IS_YFS   16              /* Server is YFS not AFS */
 #define AFS_SERVER_FL_NO_IBULK 17              /* Fileserver doesn't support FS.InlineBulkStatus */
 #define AFS_SERVER_FL_NO_RM2   18              /* Fileserver doesn't support YFS.RemoveFile2 */
+#define AFS_SERVER_FL_HAS_FS64 19              /* Fileserver supports FS.{Fetch,Store}Data64 */
        atomic_t                ref;            /* Object refcount */
        atomic_t                active;         /* Active user count */
        u32                     addr_version;   /* Address list version */
@@ -657,7 +662,11 @@ struct afs_vnode {
        afs_lock_type_t         lock_type : 8;
 
        /* outstanding callback notification on this file */
+       struct work_struct      cb_work;        /* Work for mmap'd files */
+       struct list_head        cb_mmap_link;   /* Link in cell->fs_open_mmaps */
        void                    *cb_server;     /* Server with callback/filelock */
+       atomic_t                cb_nr_mmap;     /* Number of mmaps */
+       unsigned int            cb_fs_s_break;  /* Mass server break counter (cell->fs_s_break) */
        unsigned int            cb_s_break;     /* Mass break counter on ->server */
        unsigned int            cb_v_break;     /* Mass break counter on ->volume */
        unsigned int            cb_break;       /* Break counter on vnode */
@@ -965,6 +974,8 @@ extern struct fscache_cookie_def afs_vnode_cache_index_def;
 /*
  * callback.c
  */
+extern void afs_invalidate_mmap_work(struct work_struct *);
+extern void afs_server_init_callback_work(struct work_struct *work);
 extern void afs_init_callback_state(struct afs_server *);
 extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
 extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
@@ -1585,6 +1596,16 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
                        (void *)(unsigned long)dir_vp->scb.status.data_version;
 }
 
+/*
+ * Set the file size and block count.  Estimate the number of 512 bytes blocks
+ * used, rounded up to nearest 1K for consistency with other AFS clients.
+ */
+static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size)
+{
+       i_size_write(&vnode->vfs_inode, size);
+       vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
+}
+
 /*
  * Check for a conflicting operation on a directory that we just unlinked from.
  * If someone managed to sneak a link or an unlink in on the file we just
diff --git a/fs/afs/protocol_afs.h b/fs/afs/protocol_afs.h
new file mode 100644 (file)
index 0000000..0c39358
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* AFS protocol bits
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+
+#define AFSCAPABILITIESMAX 196 /* Maximum number of words in a capability set */
+
+/* AFS3 Fileserver capabilities word 0 */
+#define AFS3_VICED_CAPABILITY_ERRORTRANS       0x0001 /* Uses UAE errors */
+#define AFS3_VICED_CAPABILITY_64BITFILES       0x0002 /* FetchData64 & StoreData64 supported */
+#define AFS3_VICED_CAPABILITY_WRITELOCKACL     0x0004 /* Can lock a file even without lock perm */
+#define AFS3_VICED_CAPABILITY_SANEACLS         0x0008 /* ACLs reviewed for sanity - don't use */
index b5bd03b1d3c7f0d271671a134f1cafb697825f42..e4cd89c44c465484f1de243cb9caaf120a0d05e8 100644 (file)
@@ -168,3 +168,9 @@ enum yfs_lock_type {
        yfs_LockMandatoryWrite  = 0x101,
        yfs_LockMandatoryExtend = 0x102,
 };
+
+/* RXYFS Viced Capability Flags */
+#define YFS_VICED_CAPABILITY_ERRORTRANS                0x0001 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_64BITFILES                0x0002 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_WRITELOCKACL      0x0004 /* Can lock a file even without lock perm */
+#define YFS_VICED_CAPABILITY_SANEACLS          0x0008 /* Deprecated v0.195 */
index d83f13c44b9213878ddb20ad4c914152994565d5..79e1a5f6701bed9822bb17e324740b6da13b94da 100644 (file)
@@ -374,6 +374,7 @@ selected_server:
        if (vnode->cb_server != server) {
                vnode->cb_server = server;
                vnode->cb_s_break = server->cb_s_break;
+               vnode->cb_fs_s_break = atomic_read(&server->cell->fs_s_break);
                vnode->cb_v_break = vnode->volume->cb_v_break;
                clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
        }
index 684a2b02b9ff70ae056aab6a3807f624bb26c47a..6e5b9a19b234e0db75d3980086251c2825398daf 100644 (file)
@@ -235,6 +235,7 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
        server->addr_version = alist->version;
        server->uuid = *uuid;
        rwlock_init(&server->fs_lock);
+       INIT_WORK(&server->initcb_work, afs_server_init_callback_work);
        init_waitqueue_head(&server->probe_wq);
        INIT_LIST_HEAD(&server->probe_link);
        spin_lock_init(&server->probe_lock);
@@ -467,6 +468,7 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
        if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
                afs_give_up_callbacks(net, server);
 
+       flush_work(&server->initcb_work);
        afs_put_server(net, server, afs_server_trace_destroy);
 }
 
index e38bb1e7a4d227bed688b396efa138a1dec9d7f4..d110def8aa8eb993212c00ed161089eba89a7545 100644 (file)
@@ -698,6 +698,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
        vnode->lock_state       = AFS_VNODE_LOCK_NONE;
 
        init_rwsem(&vnode->rmdir_lock);
+       INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work);
 
        _leave(" = %p", &vnode->vfs_inode);
        return &vnode->vfs_inode;
index c0534697268ef808e42e6a5762c44888e3148a3b..f24370f5c7744a953549614fa8d966df0018ed28 100644 (file)
@@ -137,7 +137,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
                write_seqlock(&vnode->cb_lock);
                i_size = i_size_read(&vnode->vfs_inode);
                if (maybe_i_size > i_size)
-                       i_size_write(&vnode->vfs_inode, maybe_i_size);
+                       afs_set_i_size(vnode, maybe_i_size);
                write_sequnlock(&vnode->cb_lock);
        }
 
@@ -471,13 +471,18 @@ static void afs_extend_writeback(struct address_space *mapping,
                        }
 
                        /* Has the page moved or been split? */
-                       if (unlikely(page != xas_reload(&xas)))
+                       if (unlikely(page != xas_reload(&xas))) {
+                               put_page(page);
                                break;
+                       }
 
-                       if (!trylock_page(page))
+                       if (!trylock_page(page)) {
+                               put_page(page);
                                break;
+                       }
                        if (!PageDirty(page) || PageWriteback(page)) {
                                unlock_page(page);
+                               put_page(page);
                                break;
                        }
 
@@ -487,6 +492,7 @@ static void afs_extend_writeback(struct address_space *mapping,
                        t = afs_page_dirty_to(page, priv);
                        if (f != 0 && !new_content) {
                                unlock_page(page);
+                               put_page(page);
                                break;
                        }
 
@@ -801,6 +807,7 @@ int afs_writepages(struct address_space *mapping,
 ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
 {
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+       struct afs_file *af = iocb->ki_filp->private_data;
        ssize_t result;
        size_t count = iov_iter_count(from);
 
@@ -816,6 +823,10 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
        if (!count)
                return 0;
 
+       result = afs_validate(vnode, af->key);
+       if (result < 0)
+               return result;
+
        result = generic_file_write_iter(iocb, from);
 
        _leave(" = %zd", result);
@@ -829,13 +840,18 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
  */
 int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
-       struct inode *inode = file_inode(file);
-       struct afs_vnode *vnode = AFS_FS_I(inode);
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+       struct afs_file *af = file->private_data;
+       int ret;
 
        _enter("{%llx:%llu},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);
 
+       ret = afs_validate(vnode, af->key);
+       if (ret < 0)
+               return ret;
+
        return file_write_and_wait_range(file, start, end);
 }
 
@@ -849,11 +865,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
        struct file *file = vmf->vma->vm_file;
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
+       struct afs_file *af = file->private_data;
        unsigned long priv;
        vm_fault_t ret = VM_FAULT_RETRY;
 
        _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
 
+       afs_validate(vnode, af->key);
+
        sb_start_pagefault(inode->i_sb);
 
        /* Wait for the page to be written to the cache before we allow it to
@@ -955,8 +974,7 @@ int afs_launder_page(struct page *page)
                iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);
 
                trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
-               ret = afs_store_data(vnode, &iter, (loff_t)page->index * PAGE_SIZE,
-                                    true);
+               ret = afs_store_data(vnode, &iter, page_offset(page) + f, true);
        }
 
        trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
index 69d900a8473d4e39080635697796e9ec340b7b5e..a813b70f594e69ee043781a61de130a636ef9c6a 100644 (file)
@@ -630,7 +630,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 
                        vaddr = eppnt->p_vaddr;
                        if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
-                               elf_type |= MAP_FIXED_NOREPLACE;
+                               elf_type |= MAP_FIXED;
                        else if (no_base && interp_elf_ex->e_type == ET_DYN)
                                load_addr = -vaddr;
 
index dff2c8a3e059f92379b50da3909f831eef20a51a..c0cebcf745cefdbe94ac83f3605acc380ef18a5c 100644 (file)
@@ -3030,7 +3030,7 @@ struct btrfs_dir_item *
 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            struct btrfs_path *path, u64 dir,
-                           u64 objectid, const char *name, int name_len,
+                           u64 index, const char *name, int name_len,
                            int mod);
 struct btrfs_dir_item *
 btrfs_search_dir_index_item(struct btrfs_root *root,
index f1274d5c3805eb6a8871ad2c51980977132c5b1e..7721ce0c060483256d1d529449753d152b7ffe1d 100644 (file)
@@ -190,9 +190,20 @@ static struct btrfs_dir_item *btrfs_lookup_match_dir(
 }
 
 /*
- * lookup a directory item based on name.  'dir' is the objectid
- * we're searching in, and 'mod' tells us if you plan on deleting the
- * item (use mod < 0) or changing the options (use mod > 0)
+ * Lookup for a directory item by name.
+ *
+ * @trans:     The transaction handle to use. Can be NULL if @mod is 0.
+ * @root:      The root of the target tree.
+ * @path:      Path to use for the search.
+ * @dir:       The inode number (objectid) of the directory.
+ * @name:      The name associated to the directory entry we are looking for.
+ * @name_len:  The length of the name.
+ * @mod:       Used to indicate if the tree search is meant for a read only
+ *             lookup, for a modification lookup or for a deletion lookup, so
+ *             its value should be 0, 1 or -1, respectively.
+ *
+ * Returns: NULL if the dir item does not exist, an error pointer if an error
+ * happened, or a pointer to a dir item if a dir item exists for the given name.
  */
 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
@@ -273,27 +284,42 @@ out:
 }
 
 /*
- * lookup a directory item based on index.  'dir' is the objectid
- * we're searching in, and 'mod' tells us if you plan on deleting the
- * item (use mod < 0) or changing the options (use mod > 0)
+ * Lookup for a directory index item by name and index number.
  *
- * The name is used to make sure the index really points to the name you were
- * looking for.
+ * @trans:     The transaction handle to use. Can be NULL if @mod is 0.
+ * @root:      The root of the target tree.
+ * @path:      Path to use for the search.
+ * @dir:       The inode number (objectid) of the directory.
+ * @index:     The index number.
+ * @name:      The name associated to the directory entry we are looking for.
+ * @name_len:  The length of the name.
+ * @mod:       Used to indicate if the tree search is meant for a read only
+ *             lookup, for a modification lookup or for a deletion lookup, so
+ *             its value should be 0, 1 or -1, respectively.
+ *
+ * Returns: NULL if the dir index item does not exist, an error pointer if an
+ * error happened, or a pointer to a dir item if the dir index item exists and
+ * matches the criteria (name and index number).
  */
 struct btrfs_dir_item *
 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            struct btrfs_path *path, u64 dir,
-                           u64 objectid, const char *name, int name_len,
+                           u64 index, const char *name, int name_len,
                            int mod)
 {
+       struct btrfs_dir_item *di;
        struct btrfs_key key;
 
        key.objectid = dir;
        key.type = BTRFS_DIR_INDEX_KEY;
-       key.offset = objectid;
+       key.offset = index;
 
-       return btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+       di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+       if (di == ERR_PTR(-ENOENT))
+               return NULL;
+
+       return di;
 }
 
 struct btrfs_dir_item *
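With the conversion above, btrfs_lookup_dir_index_item() follows the convention spelled out in its new kernel-doc: NULL for a missing item, an error pointer for a real failure, a valid pointer on success. A caller-side sketch of that convention; the wrapper below is hypothetical, not part of the patch, and relies on the in-tree fs/btrfs/ctree.h declarations:

/* Return 1 if the dir index entry exists, 0 if not, negative errno on error. */
static int example_dir_index_exists(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path, u64 dir, u64 index,
				    const char *name, int name_len)
{
	struct btrfs_dir_item *di;

	/* mod == 0: read-only lookup, so @trans may be NULL per the kernel-doc. */
	di = btrfs_lookup_dir_index_item(trans, root, path, dir, index,
					 name, name_len, 0);
	if (IS_ERR(di))
		return PTR_ERR(di);	/* the tree search itself failed */
	if (!di)
		return 0;		/* no matching dir index item */
	return 1;			/* found; @path points at the item */
}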
index fc3da7585fb786be71f0308541a4d21b4b96305f..0ab456cb4bf801fc9f6f89623100ea182964b8f4 100644 (file)
@@ -4859,6 +4859,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 out_free_delayed:
        btrfs_free_delayed_extent_op(extent_op);
 out_free_buf:
+       btrfs_tree_unlock(buf);
        free_extent_buffer(buf);
 out_free_reserved:
        btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
index 2673c6ba7a4e30eb70a4feae2c7e86feae2c16f4..0b9401a5afd3335e8cd108f5006284e74a81a340 100644 (file)
@@ -665,7 +665,18 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
 
                if (!ordered) {
                        ordered = btrfs_lookup_ordered_extent(inode, offset);
-                       BUG_ON(!ordered); /* Logic error */
+                       /*
+                        * The bio range is not covered by any ordered extent,
+                        * must be a code logic error.
+                        */
+                       if (unlikely(!ordered)) {
+                               WARN(1, KERN_WARNING
+                       "no ordered extent for root %llu ino %llu offset %llu\n",
+                                    inode->root->root_key.objectid,
+                                    btrfs_ino(inode), offset);
+                               kvfree(sums);
+                               return BLK_STS_IOERR;
+                       }
                }
 
                nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
index 7ff577005d0febbf6be14c21a2d1c81f3e600c50..a1762363f61faff8248c9fc4e7cda9503c775da5 100644 (file)
@@ -734,8 +734,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
        if (args->start >= inode->disk_i_size && !args->replace_extent)
                modify_tree = 0;
 
-       update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
-                      root == fs_info->tree_root);
+       update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
@@ -2704,14 +2703,16 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
                                                 drop_args.bytes_found);
                if (ret != -ENOSPC) {
                        /*
-                        * When cloning we want to avoid transaction aborts when
-                        * nothing was done and we are attempting to clone parts
-                        * of inline extents, in such cases -EOPNOTSUPP is
-                        * returned by __btrfs_drop_extents() without having
-                        * changed anything in the file.
+                        * The only time we don't want to abort is if we are
+                        * attempting to clone a partial inline extent, in which
+                        * case we'll get EOPNOTSUPP.  However if we aren't
+                        * cloning we need to abort no matter what, because if we
+                        * got EOPNOTSUPP via prealloc then we messed up and
+                        * need to abort.
                         */
-                       if (extent_info && !extent_info->is_new_extent &&
-                           ret && ret != -EOPNOTSUPP)
+                       if (ret &&
+                           (ret != -EOPNOTSUPP ||
+                            (extent_info && extent_info->is_new_extent)))
                                btrfs_abort_transaction(trans, ret);
                        break;
                }
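
The rewritten condition can be read as: abort on any error except an EOPNOTSUPP that came from cloning an existing extent. A standalone sketch of just that predicate (hypothetical names, plain userspace C) makes the cases explicit:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the new abort condition from the hunk above. */
static bool should_abort(int ret, bool have_extent_info, bool is_new_extent)
{
        return ret &&
               (ret != -EOPNOTSUPP ||
                (have_extent_info && is_new_extent));
}

int main(void)
{
        /* Cloning a partial inline extent: EOPNOTSUPP is tolerated. */
        printf("%d\n", should_abort(-EOPNOTSUPP, true, false));  /* 0 */
        /* Same error from a newly inserted (prealloc) extent: abort. */
        printf("%d\n", should_abort(-EOPNOTSUPP, true, true));   /* 1 */
        /* Any other error always aborts. */
        printf("%d\n", should_abort(-EIO, true, false));         /* 1 */
        return 0;
}
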
index 5ada02e0e629fe8b43437b6296b27a8ffa167b7f..aa5be0b24987ad4366854d6d4ad59c2d096321a0 100644 (file)
@@ -414,9 +414,10 @@ static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
 {
        lockdep_assert_held(&info->lock);
 
-       btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
+       /* The free space could be negative in case of overcommit */
+       btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
                   info->flags,
-                  info->total_bytes - btrfs_space_info_used(info, true),
+                  (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
                   info->full ? "" : "not ");
        btrfs_info(fs_info,
                "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
index f7efc26aa82a114e13e1816e40f881e840790108..b415c5ec03ea0642c4053c07fe857a2540f10a09 100644 (file)
@@ -939,9 +939,11 @@ out:
 }
 
 /*
- * helper function to see if a given name and sequence number found
- * in an inode back reference are already in a directory and correctly
- * point to this inode
+ * See if a given name and sequence number found in an inode back reference are
+ * already in a directory and correctly point to this inode.
+ *
+ * Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it
+ * exists.
  */
 static noinline int inode_in_dir(struct btrfs_root *root,
                                 struct btrfs_path *path,
@@ -950,29 +952,34 @@ static noinline int inode_in_dir(struct btrfs_root *root,
 {
        struct btrfs_dir_item *di;
        struct btrfs_key location;
-       int match = 0;
+       int ret = 0;
 
        di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
                                         index, name, name_len, 0);
-       if (di && !IS_ERR(di)) {
+       if (IS_ERR(di)) {
+               ret = PTR_ERR(di);
+               goto out;
+       } else if (di) {
                btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
                if (location.objectid != objectid)
                        goto out;
-       } else
+       } else {
                goto out;
-       btrfs_release_path(path);
+       }
 
+       btrfs_release_path(path);
        di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
-       if (di && !IS_ERR(di)) {
-               btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
-               if (location.objectid != objectid)
-                       goto out;
-       } else
+       if (IS_ERR(di)) {
+               ret = PTR_ERR(di);
                goto out;
-       match = 1;
+       } else if (di) {
+               btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
+               if (location.objectid == objectid)
+                       ret = 1;
+       }
 out:
        btrfs_release_path(path);
-       return match;
+       return ret;
 }
 
 /*
@@ -1182,7 +1189,9 @@ next:
        /* look for a conflicting sequence number */
        di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
                                         ref_index, name, namelen, 0);
-       if (di && !IS_ERR(di)) {
+       if (IS_ERR(di)) {
+               return PTR_ERR(di);
+       } else if (di) {
                ret = drop_one_dir_item(trans, root, path, dir, di);
                if (ret)
                        return ret;
@@ -1192,7 +1201,9 @@ next:
        /* look for a conflicting name */
        di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
                                   name, namelen, 0);
-       if (di && !IS_ERR(di)) {
+       if (IS_ERR(di)) {
+               return PTR_ERR(di);
+       } else if (di) {
                ret = drop_one_dir_item(trans, root, path, dir, di);
                if (ret)
                        return ret;
@@ -1517,10 +1528,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                if (ret)
                        goto out;
 
-               /* if we already have a perfect match, we're done */
-               if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
-                                       btrfs_ino(BTRFS_I(inode)), ref_index,
-                                       name, namelen)) {
+               ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
+                                  btrfs_ino(BTRFS_I(inode)), ref_index,
+                                  name, namelen);
+               if (ret < 0) {
+                       goto out;
+               } else if (ret == 0) {
                        /*
                         * look for a conflicting back reference in the
                         * metadata. if we find one we have to unlink that name
@@ -1580,6 +1593,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                        if (ret)
                                goto out;
                }
+               /* Else, ret == 1, we already have a perfect match, we're done. */
 
                ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
                kfree(name);
@@ -1936,8 +1950,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
        struct btrfs_key log_key;
        struct inode *dir;
        u8 log_type;
-       int exists;
-       int ret = 0;
+       bool exists;
+       int ret;
        bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
        bool name_added = false;
 
@@ -1957,12 +1971,12 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
                   name_len);
 
        btrfs_dir_item_key_to_cpu(eb, di, &log_key);
-       exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
-       if (exists == 0)
-               exists = 1;
-       else
-               exists = 0;
+       ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
        btrfs_release_path(path);
+       if (ret < 0)
+               goto out;
+       exists = (ret == 0);
+       ret = 0;
 
        if (key->type == BTRFS_DIR_ITEM_KEY) {
                dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
@@ -1977,7 +1991,11 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
                ret = -EINVAL;
                goto out;
        }
-       if (IS_ERR_OR_NULL(dst_di)) {
+
+       if (IS_ERR(dst_di)) {
+               ret = PTR_ERR(dst_di);
+               goto out;
+       } else if (!dst_di) {
                /* we need a sequence number to insert, so we only
                 * do inserts for the BTRFS_DIR_INDEX_KEY types
                 */
@@ -2281,7 +2299,7 @@ again:
                                                     dir_key->offset,
                                                     name, name_len, 0);
                }
-               if (!log_di || log_di == ERR_PTR(-ENOENT)) {
+               if (!log_di) {
                        btrfs_dir_item_key_to_cpu(eb, di, &location);
                        btrfs_release_path(path);
                        btrfs_release_path(log_path);
@@ -3540,8 +3558,7 @@ out_unlock:
        if (err == -ENOSPC) {
                btrfs_set_log_full_commit(trans);
                err = 0;
-       } else if (err < 0 && err != -ENOENT) {
-               /* ENOENT can be returned if the entry hasn't been fsynced yet */
+       } else if (err < 0) {
                btrfs_abort_transaction(trans, err);
        }
 
index 28d443d3ef93ad710fe887fa97040bb8b5998f15..4968535dfff0aebb0801d440d2417b69dffeb34f 100644 (file)
@@ -451,7 +451,7 @@ static int del_orphan(struct btrfs_trans_handle *trans, struct btrfs_inode *inod
  */
 static int rollback_verity(struct btrfs_inode *inode)
 {
-       struct btrfs_trans_handle *trans;
+       struct btrfs_trans_handle *trans = NULL;
        struct btrfs_root *root = inode->root;
        int ret;
 
@@ -473,6 +473,7 @@ static int rollback_verity(struct btrfs_inode *inode)
        trans = btrfs_start_transaction(root, 2);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
+               trans = NULL;
                btrfs_handle_fs_error(root->fs_info, ret,
                        "failed to start transaction in verity rollback %llu",
                        (u64)inode->vfs_inode.i_ino);
@@ -490,8 +491,9 @@ static int rollback_verity(struct btrfs_inode *inode)
                btrfs_abort_transaction(trans, ret);
                goto out;
        }
-       btrfs_end_transaction(trans);
 out:
+       if (trans)
+               btrfs_end_transaction(trans);
        return ret;
 }
 
index 464485aa7318e6da914412a6908d3e571475ee97..2ec3b8ac8fa357772ce59892b6b1d9d3e1d3cf1d 100644 (file)
@@ -1137,6 +1137,19 @@ static void btrfs_close_one_device(struct btrfs_device *device)
        atomic_set(&device->dev_stats_ccnt, 0);
        extent_io_tree_release(&device->alloc_state);
 
+       /*
+        * Reset the flush error record. We might have a transient flush error
+        * in this mount, and if so we aborted the current transaction and set
+        * the fs to an error state, guaranteeing no super blocks can be further
+        * committed. However that error might be transient and if we unmount the
+        * filesystem and mount it again, we should allow the mount to succeed
+        * (btrfs_check_rw_degradable() should not fail) - if after mounting the
+        * filesystem again we still get flush errors, then we will again abort
+        * any transaction and set the error state, guaranteeing no commits of
+        * unsafe super blocks.
+        */
+       device->last_flush_error = 0;
+
        /* Verify the device is back in a pristine state  */
        ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
        ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
index ab7573d72dd7a2b8b97c18ef00d31f51b98051e1..c615387aedcae8bd9e94fbc3faef44a2bbb2496b 100644 (file)
@@ -1425,12 +1425,16 @@ void invalidate_bh_lrus(void)
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
-void invalidate_bh_lrus_cpu(int cpu)
+/*
+ * It's called from workqueue context so we need a bh_lru_lock to close
+ * the race with preemption/irq.
+ */
+void invalidate_bh_lrus_cpu(void)
 {
        struct bh_lru *b;
 
        bh_lru_lock();
-       b = per_cpu_ptr(&bh_lrus, cpu);
+       b = this_cpu_ptr(&bh_lrus);
        __invalidate_bh_lrus(b);
        bh_lru_unlock();
 }
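
Dropping the cpu argument assumes the function now always runs on the CPU whose LRU it drains, for example from a per-CPU work item, so the explicit index is redundant. A generic sketch of that kind of dispatch (illustrative only; not the actual mm/ call site, all names here are made up):

#include <linux/buffer_head.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(struct work_struct, example_drain_work);

static void example_drain_fn(struct work_struct *work)
{
        /* Runs on the CPU the work was queued on, so the per-CPU LRU can
         * be reached with this_cpu_ptr() under bh_lru_lock(). */
        invalidate_bh_lrus_cpu();
}

static void example_drain_all_cpus(void)
{
        int cpu;

        cpus_read_lock();
        for_each_online_cpu(cpu) {
                struct work_struct *w = &per_cpu(example_drain_work, cpu);

                INIT_WORK(w, example_drain_fn);
                queue_work_on(cpu, system_wq, w);
        }
        for_each_online_cpu(cpu)
                flush_work(&per_cpu(example_drain_work, cpu));
        cpus_read_unlock();
}
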
index 6c0e52fd0743efda7852e89c52c35201f38bfed6..3e42d0466521fee94b01cf3273e4354257db6db4 100644 (file)
@@ -2263,7 +2263,7 @@ retry:
                        list_for_each_entry(req, &ci->i_unsafe_dirops,
                                            r_unsafe_dir_item) {
                                s = req->r_session;
-                               if (unlikely(s->s_mds > max)) {
+                               if (unlikely(s->s_mds >= max)) {
                                        spin_unlock(&ci->i_unsafe_lock);
                                        goto retry;
                                }
@@ -2277,7 +2277,7 @@ retry:
                        list_for_each_entry(req, &ci->i_unsafe_iops,
                                            r_unsafe_target_item) {
                                s = req->r_session;
-                               if (unlikely(s->s_mds > max)) {
+                               if (unlikely(s->s_mds >= max)) {
                                        spin_unlock(&ci->i_unsafe_lock);
                                        goto retry;
                                }
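
The change from > to >= fixes an off-by-one: for a sessions array sized max, valid indices are 0..max-1, so an index equal to max is already out of range. A trivial standalone check with made-up numbers:

#include <stdio.h>

int main(void)
{
        int max = 4;    /* array holds indices 0..3 */
        int mds = 4;    /* stale session index */

        printf("old guard (> ) trips: %d\n", mds > max);   /* 0, missed */
        printf("new guard (>=) trips: %d\n", mds >= max);  /* 1, caught */
        return 0;
}
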
index 8a3b30ec860ca92b3e8b784370b414eabccfe532..8be57aaedab60b7cb211aaec7dd2a3cb1b3dd1e8 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cache.c - CIFS filesystem cache index structure definitions
+ *   CIFS filesystem cache index structure definitions
  *
  *   Copyright (c) 2010 Novell, Inc.
  *   Authors(s): Suresh Jayaraman (sjayaraman@suse.de>
index 51a824fc926ad0b4dcca7ac9dca7f2d3ca8d1b4f..de2c12bcfa4bc62b715065a7a5d4bd9a397f9bb1 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- *   fs/cifs_debug.c
  *
  *   Copyright (C) International Business Machines  Corp., 2000,2005
  *
index 4fd788586399c00758b3d0725db1f497af890bfb..f97407520ea157d00ef1ee1c7c34a6f01ec92f81 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifs_fs_sb.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2004
  *   Author(s): Steve French (sfrench@us.ibm.com)
index ef723be358afdf68f850417fce7655d5564bf100..b87cbbe6d2d4bfdf1a2c932b5ff1adcfb4985678 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifs_ioctl.h
  *
  *   Structure definitions for io control for cifs/smb3
  *
index 8fa26a8530f8e060b5696cd2133cd6631b73f496..353bd0dd702606114b8ced486c0212235cf82018 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifs_spnego.c -- SPNEGO upcall management for CIFS
+ *   SPNEGO upcall management for CIFS
  *
  *   Copyright (c) 2007 Red Hat, Inc.
  *   Author(s): Jeff Layton (jlayton@redhat.com)
index 31387d0ea32eae6b575fff59f35890f266f09641..e6a0451877d4790a2bab3f5e50786df285c6fecf 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifs_spnego.h -- SPNEGO upcall management for CIFS
+ *   SPNEGO upcall management for CIFS
  *
  *   Copyright (c) 2007 Red Hat, Inc.
  *   Author(s): Jeff Layton (jlayton@redhat.com)
index 171ad8b42107e255c01498354a1e05c8f4a13c3d..e7582dd791794cb3aa31b499785063a45eef8fa8 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- *   fs/cifs/cifs_unicode.c
  *
  *   Copyright (c) International Business Machines  Corp., 2000,2009
  *   Modified by Steve French (sfrench@us.ibm.com)
index 388eb536cff15967ba64c12ab017a68ae9cdd72d..ee3aab3dd4ac6f2f748b6c58e47dc423b4edb2f3 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifsacl.c
  *
  *   Copyright (C) International Business Machines  Corp., 2007,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index f8292bcf8594fb47dfd4ce0933bc70aa7591c76d..ccbfc754bd3c7fb0ae0e32b10b28318027a9ae06 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifsacl.h
  *
  *   Copyright (c) International Business Machines  Corp., 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 2e6f4034403767ca621c65fa394337a0416007af..d118282071b371e5384ec7e9dc5db3545c3d63be 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifsencrypt.c
  *
  *   Encryption and hashing operations relating to NTLM, NTLMv2.  See MS-NLMP
  *   for more detailed information
index 8c20bfa187acd5198842205cd9b05aa35d43d57b..9fa930dfd78d60acb2f384124eb973657b016f78 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifsfs.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index d25a4099b32e7713bf8e054f0e7cf0806c960f19..b50da1901ebd2ddaaa507c171fab8f773d7db849 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifsfs.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002, 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index c068f7d8d879d47c9440b469bfda4ed605f2a342..e916470468ea93f9a62804a1b8252c361b2a7e66 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifsglob.h
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -1400,6 +1399,7 @@ struct cifsInodeInfo {
 #define CIFS_INO_INVALID_MAPPING         (4) /* pagecache is invalid */
 #define CIFS_INO_LOCK                    (5) /* lock bit for synchronization */
 #define CIFS_INO_MODIFIED_ATTR            (6) /* Indicate change in mtime/ctime */
+#define CIFS_INO_CLOSE_ON_LOCK            (7) /* Do not defer the close when a lock is set */
        unsigned long flags;
        spinlock_t writers_lock;
        unsigned int writers;           /* Number of writers on this inode */
index 98e8e5aa061376e99a1c6ed934ce439dcadc9506..d2ff438fd31f8ad6c92ef8a82721b572e3d7de08 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifspdu.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2009
  *   Author(s): Steve French (sfrench@us.ibm.com)
index f9740c21ca3d655541ddf867904ae61e3f4cec63..d0f85b666662db178b4bc64ff25ee78a4ab73ea2 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifsproto.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -268,6 +267,9 @@ extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode);
 
 extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
 
+extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
+                               const char *path);
+
 extern struct TCP_Server_Info *cifs_get_tcp_session(struct smb3_fs_context *ctx);
 extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
                                 int from_reconnect);
index a8e41c1e80cad52e9ecff37eefb6c21e192b01c6..243d17696f06199d5488b97bfdccdea218838ccc 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifssmb.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2010
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 0db344807ef13e55350da853c4e9974cb07348b3..c3b94c1e4591338b58304b088b2f07541dcfd741 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/connect.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2011
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -1090,7 +1089,7 @@ next_pdu:
        module_put_and_exit(0);
 }
 
-/**
+/*
  * Returns true if srcaddr isn't specified and rhs isn't specified, or
  * if srcaddr is specified and matches the IP address of the rhs argument
  */
@@ -1550,6 +1549,9 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
 
 /**
  * cifs_setup_ipc - helper to setup the IPC tcon for the session
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the
+ *       new tree connection for the IPC (interprocess communication RPC)
  *
  * A new IPC connection is made and stored in the session
  * tcon_ipc. The IPC tcon has the same lifetime as the session.
@@ -1605,6 +1607,7 @@ out:
 
 /**
  * cifs_free_ipc - helper to release the session IPC tcon
+ * @ses: smb session to unmount the IPC from
  *
  * Needs to be called everytime a session is destroyed.
  *
@@ -1855,6 +1858,8 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
 
 /**
  * cifs_get_smb_ses - get a session matching @ctx data from @server
+ * @server: server to setup the session to
+ * @ctx: superblock configuration context to use to setup the session
  *
  * This function assumes it is being called from cifs_mount() where we
  * already got a server reference (server refcount +1). See
@@ -2065,6 +2070,8 @@ cifs_put_tcon(struct cifs_tcon *tcon)
 
 /**
  * cifs_get_tcon - get a tcon matching @ctx data from @ses
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the
  *
  * - tcon refcount is the number of mount points using the tcon.
  * - ses refcount is the number of tcon using the session.
@@ -2382,9 +2389,10 @@ cifs_match_super(struct super_block *sb, void *data)
        spin_lock(&cifs_tcp_ses_lock);
        cifs_sb = CIFS_SB(sb);
        tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
-       if (IS_ERR(tlink)) {
+       if (tlink == NULL) {
+               /* cannot match the superblock if tlink is NULL */
                spin_unlock(&cifs_tcp_ses_lock);
-               return rc;
+               return 0;
        }
        tcon = tlink_tcon(tlink);
        ses = tcon->ses;
@@ -3030,7 +3038,7 @@ build_unc_path_to_root(const struct smb3_fs_context *ctx,
        return full_path;
 }
 
-/**
+/*
  * expand_dfs_referral - Perform a dfs referral query and update the cifs_sb
  *
  * If a referral is found, cifs_sb->ctx->mount_options will be (re-)allocated
index 5f8a302ffcb2fb3fcba6cd1b6ef814fc62be1806..6e8e7cc26ae24926164a9ceed85dc676ddcc53a2 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/dir.c
  *
  *   vfs operations that deal with dentries
  *
index 8c616aaeb7c4178305ad00c32c29f63c5bd1ee62..0458d28d71aa6590376f94200f79c3d3e1c38915 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *  fs/cifs/dns_resolve.c
  *
  *   Copyright (c) 2007 Igor Mammedov
  *   Author(s): Igor Mammedov (niallain@gmail.com)
index 9fa2807ef79e0461dd67ba2497df2fd2ed3bbdfd..afc0df381246bea7ab231b446c1f855fb24abe87 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS
- *                            Handles host name to IP address resolution
+ *   DNS Resolver upcall management for CIFS DFS
+ *   Handles host name to IP address resolution
  *
  *   Copyright (c) International Business Machines  Corp., 2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 747a540db9544d87dea397f26b606295a0c7e0f1..37c28415df1e0227d52956a173e12cd6d9f6b9e5 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/export.c
  *
  *   Copyright (C) International Business Machines  Corp., 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index d0216472f1c6cf036b16016cc3c7bbdb98e726c4..13f3182cf7969a207fcb158ce387ed30e7350a88 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/file.c
  *
  *   vfs operations that deal with files
  *
@@ -883,8 +882,9 @@ int cifs_close(struct inode *inode, struct file *file)
                dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
                if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
                    cinode->lease_granted &&
+                   !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
                    dclose) {
-                       if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
+                       if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
                                inode->i_ctime = inode->i_mtime = current_time(inode);
                                cifs_fscache_update_inode_cookie(inode);
                        }
@@ -1865,6 +1865,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
        cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
                        tcon->ses->server);
        cifs_sb = CIFS_FILE_SB(file);
+       set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
 
        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
@@ -3112,7 +3113,7 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
        struct cifs_tcon *tcon;
        struct cifs_sb_info *cifs_sb;
        struct dentry *dentry = ctx->cfile->dentry;
-       int rc;
+       ssize_t rc;
 
        tcon = tlink_tcon(ctx->cfile->tlink);
        cifs_sb = CIFS_SB(dentry->d_sb);
index fab47fa7df745c24981e4a176697a1c95678e0cd..8eedd20c44abdefa0f9155a8713469030800198e 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/fscache.c - CIFS filesystem cache interface
+ *   CIFS filesystem cache interface
  *
  *   Copyright (c) 2010 Novell, Inc.
  *   Author(s): Suresh Jayaraman <sjayaraman@suse.de>
index 82e856b9cf894a2da27e25cdb20af95c21c93ccc..9baa1d0f22bde504bbc5debf15ad4296b7972cdd 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/fscache.h - CIFS filesystem cache interface definitions
+ *   CIFS filesystem cache interface definitions
  *
  *   Copyright (c) 2010 Novell, Inc.
  *   Authors(s): Suresh Jayaraman (sjayaraman@suse.de>
index 50c01cff4c84fef93ef3eb096d0dd0ff45f58c83..82848412ad85208f08d1fad12bc2871297c8f252 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/inode.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2010
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -1625,7 +1624,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
                goto unlink_out;
        }
 
-       cifs_close_deferred_file(CIFS_I(inode));
+       cifs_close_deferred_file_under_dentry(tcon, full_path);
        if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                rc = CIFSPOSIXDelFile(xid, tcon, full_path,
@@ -2114,9 +2113,9 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
                goto cifs_rename_exit;
        }
 
-       cifs_close_deferred_file(CIFS_I(d_inode(source_dentry)));
+       cifs_close_deferred_file_under_dentry(tcon, from_name);
        if (d_inode(target_dentry) != NULL)
-               cifs_close_deferred_file(CIFS_I(d_inode(target_dentry)));
+               cifs_close_deferred_file_under_dentry(tcon, to_name);
 
        rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
                            to_name);
index 42c6a0bac6c8257094f0b7b7a54f5577e326ad76..0359b604bdbc0181944dda9ee4865ff667605fd0 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/ioctl.c
  *
  *   vfs operations that deal with io control
  *
@@ -359,7 +358,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                        if (pSMBFile == NULL)
                                break;
                        tcon = tlink_tcon(pSMBFile->tlink);
-                       caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+                       /* caps = le64_to_cpu(tcon->fsUnixInfo.Capability); */
 
                        if (get_user(ExtAttrBits, (int __user *)arg)) {
                                rc = -EFAULT;
index f0a6d63bc08cbdf121c3d910fb0f84527dad0b82..852e54ee82c28279c00e6013b19916f3815a8b70 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/link.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 9469f1cf0b46a0eec11e21e1ca9cbec32b3f6b2b..bb1185fff8cc4d6b760cf01c22a72e50f575c591 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/misc.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -265,7 +264,8 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
 
                        /* Uid is not converted */
                        buffer->Uid = treeCon->ses->Suid;
-                       buffer->Mid = get_next_mid(treeCon->ses->server);
+                       if (treeCon->ses->server)
+                               buffer->Mid = get_next_mid(treeCon->ses->server);
                }
                if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
                        buffer->Flags2 |= SMBFLG2_DFS;
@@ -591,6 +591,7 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
 
 /**
  * cifs_queue_oplock_break - queue the oplock break handler for cfile
+ * @cfile: The file to break the oplock on
  *
  * This function is called from the demultiplex thread when it
  * receives an oplock break for @cfile.
@@ -736,7 +737,7 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
                        if (cancel_delayed_work(&cfile->deferred)) {
                                tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
                                if (tmp_list == NULL)
-                                       continue;
+                                       break;
                                tmp_list->cfile = cfile;
                                list_add_tail(&tmp_list->list, &file_head);
                        }
@@ -767,7 +768,7 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
                        if (cancel_delayed_work(&cfile->deferred)) {
                                tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
                                if (tmp_list == NULL)
-                                       continue;
+                                       break;
                                tmp_list->cfile = cfile;
                                list_add_tail(&tmp_list->list, &file_head);
                        }
@@ -781,6 +782,43 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
                kfree(tmp_list);
        }
 }
+void
+cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
+{
+       struct cifsFileInfo *cfile;
+       struct list_head *tmp;
+       struct file_list *tmp_list, *tmp_next_list;
+       struct list_head file_head;
+       void *page;
+       const char *full_path;
+
+       INIT_LIST_HEAD(&file_head);
+       page = alloc_dentry_path();
+       spin_lock(&tcon->open_file_lock);
+       list_for_each(tmp, &tcon->openFileList) {
+               cfile = list_entry(tmp, struct cifsFileInfo, tlist);
+               full_path = build_path_from_dentry(cfile->dentry, page);
+               if (strstr(full_path, path)) {
+                       if (delayed_work_pending(&cfile->deferred)) {
+                               if (cancel_delayed_work(&cfile->deferred)) {
+                                       tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+                                       if (tmp_list == NULL)
+                                               break;
+                                       tmp_list->cfile = cfile;
+                                       list_add_tail(&tmp_list->list, &file_head);
+                               }
+                       }
+               }
+       }
+       spin_unlock(&tcon->open_file_lock);
+
+       list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+               _cifsFileInfo_put(tmp_list->cfile, true, false);
+               list_del(&tmp_list->list);
+               kfree(tmp_list);
+       }
+       free_dentry_path(page);
+}
 
 /* parses DFS refferal V3 structure
  * caller is responsible for freeing target_nodes
@@ -1029,6 +1067,9 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 
 /**
  * cifs_alloc_hash - allocate hash and hash context together
+ * @name: The name of the crypto hash algo
+ * @shash: Where to put the pointer to the hash algo
+ * @sdesc: Where to put the pointer to the hash descriptor
  *
  * The caller has to make sure @sdesc is initialized to either NULL or
  * a valid context. Both can be freed via cifs_free_hash().
@@ -1067,6 +1108,8 @@ cifs_alloc_hash(const char *name,
 
 /**
  * cifs_free_hash - free hash and hash context together
+ * @shash: Where to find the pointer to the hash algo
+ * @sdesc: Where to find the pointer to the hash descriptor
  *
  * Freeing a NULL hash or context is safe.
  */
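
A minimal usage sketch for the pair documented above (hedged: it assumes the int return convention and the struct sdesc::shash member used elsewhere in fs/cifs, and the helper name and algorithm are arbitrary):

#include <crypto/hash.h>
#include <linux/err.h>
/* Assumes the fs/cifs build context for struct sdesc and the prototypes. */

static int example_md5_of_buffer(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_shash *shash = NULL;
        struct sdesc *sdesc = NULL;
        int rc;

        rc = cifs_alloc_hash("md5", &shash, &sdesc);
        if (rc)
                return rc;

        rc = crypto_shash_digest(&sdesc->shash, data, len, out);

        /* Freeing a NULL hash or context is safe, so one exit path is
         * enough whether or not the digest succeeded. */
        cifs_free_hash(&shash, &sdesc);
        return rc;
}
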
@@ -1082,8 +1125,10 @@ cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
 
 /**
  * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
- * Input: rqst - a smb_rqst, page - a page index for rqst
- * Output: *len - the length for this page, *offset - the offset for this page
+ * @rqst: The request descriptor
+ * @page: The index of the page to query
+ * @len: Where to store the length for this page
+ * @offset: Where to store the offset for this page
  */
 void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
                                unsigned int *len, unsigned int *offset)
@@ -1116,6 +1161,8 @@ void extract_unc_hostname(const char *unc, const char **h, size_t *len)
 
 /**
  * copy_path_name - copy src path to dst, possibly truncating
+ * @dst: The destination buffer
+ * @src: The source name
  *
  * returns number of bytes written (including trailing nul)
  */
index 0e728aac67e9f26c1294a95279d2d7a92139cc19..fa9fbd6a819cb8560527bcdfbebdc66a6a0c9e0b 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- *   fs/cifs/netmisc.c
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 378133ce8869c396d7e6c65c88b97aa943faec2d..25a2b8ef88b99fa2c0c4e2b9ae9ec374cb640124 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/ntlmssp.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 54d77c99e21c09f9adf00c789320c78e1ca502a2..1929e80c09ee1685e31835ad94d0f3f37a498847 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/readdir.c
  *
  *   Directory search handling
  *
index 137f7c95afd66bd1f83728e7e92ae54c5401ccbb..ae1d025da294a81c67d17f86cb5c1ca390f92056 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/rfc1002pdu.h
  *
  *   Protocol Data Unit definitions for RFC 1001/1002 support
  *
index 118403fbeda243391baac3ab0637fed7b550762f..23e02db7923f6b6d0625411daf047affd4a09d20 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/sess.c
  *
  *   SMB/CIFS session setup handling routines
  *
index c9d8a50062b81c106eaca1e908a8e93f9ccf287e..f5dcc4940b6da6da1b192ac3cdb4dc7ddcacd5a7 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2file.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002, 2011
  *   Author(s): Steve French (sfrench@us.ibm.com),
index d0e9f3782bd9ca77d1a054993214d54a39da1cdb..ca692b2283cd37b879c6d8fbb38b592dca42e608 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smb2glob.h
  *
  *   Definitions for various global variables and structures
  *
index 957b2594f02ed17b8581193b7ddf55690c62f897..8297703492eea65a9ca35318ba3eb78a1535ad0c 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2inode.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002, 2011
  *                 Etersoft, 2012
index 668f7710883184129816e955274309fc3d818d18..29b5554f6263fd72c291413ccfaa57b97fc71a1c 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2misc.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2011
  *                 Etersoft, 2012
index b6d2e3591927825014a4fb1b6530f2e34dbd3285..7829c590eeac64478a7855537e5c3fef90f92133 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2pdu.c
  *
  *   Copyright (C) International Business Machines  Corp., 2009, 2013
  *                 Etersoft, 2012
@@ -2398,7 +2397,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
        buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
        /* Ship the ACL for now. we will copy it into buf later. */
        aclptr = ptr;
-       ptr += sizeof(struct cifs_acl);
+       ptr += sizeof(struct smb3_acl);
 
        /* create one ACE to hold the mode embedded in reserved special SID */
        acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
@@ -2423,7 +2422,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
        acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
        acl.AclSize = cpu_to_le16(acl_size);
        acl.AceCount = cpu_to_le16(ace_count);
-       memcpy(aclptr, &acl, sizeof(struct cifs_acl));
+       memcpy(aclptr, &acl, sizeof(struct smb3_acl));
 
        buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
        *len = roundup(ptr - (__u8 *)buf, 8);
index e9cac7970b66bd56ea460f005aacc106cd80e34f..f32c99c9ba13179bd40b232bb696fc51495e0c47 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smb2pdu.h
  *
  *   Copyright (c) International Business Machines  Corp., 2009, 2013
  *                 Etersoft, 2012
index 263767f644f8226ab8e6a3219a2266e5235ed5e5..547945443fa7d0658d52e75f29663b4b02cc72b7 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smb2proto.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002, 2011
  *                 Etersoft, 2012
index 0215ef36e2401adf1cc7017a680a4908a5c6f617..a9e958166fc53a3c4b5d7a23efb8d326deec3543 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smb2status.h
  *
  *   SMB2 Status code (network error) definitions
  *   Definitions are from MS-ERREF
index 6f7952ea494101e2ad139b8e3c9258087ee9d516..f59b956f9d2502cdaea6bcb9e89319d24c4d4100 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2transport.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002, 2011
  *                 Etersoft, 2012
index 60189efb3236242a7f25515506c7242f060ac7da..aeffdad829e2e4289f97328e0d6cc1c22dc30a85 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smberr.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2004
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 75a95de320cfe2353848906378604cf8230a38e0..b7379329b741c92dee666011d4d42d0b79992a9e 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/transport.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 59b6c577aa0a165766d37aa71e94a488bd5825de..2f075b5b50df04744bdb60b0be0cd38afe0f0da8 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * fs/cifs/winucase.c
  *
  * Copyright (c) Jeffrey Layton <jlayton@redhat.com>, 2013
  *
index 9ed481e79ce0c275109deab3b7828be13a668d8a..7d8b72d67c80345066ece20fea04094d3afa8dbc 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/xattr.c
  *
  *   Copyright (c) International Business Machines  Corp., 2003, 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 8129a430d789d92865a5ad71c0d5d9af43a87509..2f117c57160dc01c8f1cd09620eabf31ad84de45 100644 (file)
@@ -528,7 +528,7 @@ void debugfs_create_file_size(const char *name, umode_t mode,
 {
        struct dentry *de = debugfs_create_file(name, mode, parent, data, fops);
 
-       if (de)
+       if (!IS_ERR(de))
                d_inode(de)->i_size = file_size;
 }
 EXPORT_SYMBOL_GPL(debugfs_create_file_size);
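
The check works because debugfs_create_file() reports failure with an error pointer rather than NULL, so callers that touch the returned dentry must test IS_ERR(). A hedged sketch of the same pattern in a made-up caller:

#include <linux/debugfs.h>
#include <linux/err.h>

static void example_create_sized_file(struct dentry *parent, void *data,
                                      const struct file_operations *fops,
                                      loff_t size)
{
        struct dentry *de = debugfs_create_file("example", 0444, parent,
                                                data, fops);

        /* On failure de is an ERR_PTR, never NULL, so only dereference
         * it when the dentry is valid. */
        if (!IS_ERR(de))
                d_inode(de)->i_size = size;
}
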
index 31ac3a73b390ce9e1072b27a6d1917717a4eec08..a552399e211d833d9fbe3b02098475beb3fbbec3 100644 (file)
@@ -176,7 +176,7 @@ static struct page *erofs_read_inode(struct inode *inode,
        }
 
        if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
-               if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_ALL)) {
+               if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
                        erofs_err(inode->i_sb,
                                  "unsupported chunk format %x of nid %llu",
                                  vi->chunkformat, vi->nid);
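
The new expression inverts the sense of the check: instead of requiring at least one known format bit, it rejects any bit outside the supported mask, so unknown chunk formats are refused even when a known bit is also set. A tiny standalone illustration with a stand-in mask value:

#include <stdio.h>

#define SUPPORTED_MASK 0x3      /* stand-in for EROFS_CHUNK_FORMAT_ALL */

int main(void)
{
        unsigned int fmt = 0x5; /* known bit 0x1 plus unknown bit 0x4 */

        /* Old check: passes, because some supported bit is set. */
        printf("old rejects: %d\n", !(fmt & SUPPORTED_MASK));
        /* New check: fails, because an unsupported bit is present. */
        printf("new rejects: %d\n", (fmt & ~SUPPORTED_MASK) != 0);
        return 0;
}
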
index 9fb98d85a3ceaaaf201bbb05a7ef142a1a7f8fc5..7a6df35fdc915baaacfee640f469895e2086fa74 100644 (file)
@@ -369,7 +369,8 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
        if (compacted_4b_initial == 32 / 4)
                compacted_4b_initial = 0;
 
-       if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B)
+       if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
+           compacted_4b_initial < totalidx)
                compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
        else
                compacted_2b = 0;
index 1f3f4326bf3ce82d1a2fb350479ce59a13128fdb..c17ccc19b938e22d996476bc8567c5df22716daa 100644 (file)
@@ -48,10 +48,9 @@ struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
        struct ext2_sb_info *sbi = EXT2_SB(sb);
 
        if (block_group >= sbi->s_groups_count) {
-               ext2_error (sb, "ext2_get_group_desc",
-                           "block_group >= groups_count - "
-                           "block_group = %d, groups_count = %lu",
-                           block_group, sbi->s_groups_count);
+               WARN(1, "block_group >= groups_count - "
+                    "block_group = %d, groups_count = %lu",
+                    block_group, sbi->s_groups_count);
 
                return NULL;
        }
@@ -59,10 +58,9 @@ struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
        group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT2_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
-               ext2_error (sb, "ext2_get_group_desc",
-                           "Group descriptor not loaded - "
-                           "block_group = %d, group_desc = %lu, desc = %lu",
-                            block_group, group_desc, offset);
+               WARN(1, "Group descriptor not loaded - "
+                    "block_group = %d, group_desc = %lu, desc = %lu",
+                     block_group, group_desc, offset);
                return NULL;
        }
 
index ffb295aa891c03e00e938046363b6d7c46f0b4d5..74b172a4adda3a60a80c6a3c6300b57f0e7b37f4 100644 (file)
@@ -551,7 +551,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
        struct dir_private_info *info = file->private_data;
        struct inode *inode = file_inode(file);
        struct fname *fname;
-       int     ret;
+       int ret = 0;
 
        if (!info) {
                info = ext4_htree_create_dir_info(file, ctx->pos);
@@ -599,7 +599,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
                                                   info->curr_minor_hash,
                                                   &info->next_hash);
                        if (ret < 0)
-                               return ret;
+                               goto finished;
                        if (ret == 0) {
                                ctx->pos = ext4_get_htree_eof(file);
                                break;
@@ -630,7 +630,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
        }
 finished:
        info->last_pos = ctx->pos;
-       return 0;
+       return ret < 0 ? ret : 0;
 }
 
 static int ext4_release_dir(struct inode *inode, struct file *filp)
index 90ff5acaf11ffc442c0f0d4db545d287e51b272d..3825195539d7438a2db2b112b75e3af50b4ccccd 100644 (file)
@@ -3593,9 +3593,6 @@ extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
                                           unsigned flags,
                                           struct page **pagep,
                                           void **fsdata);
-extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
-                                        unsigned len, unsigned copied,
-                                        struct page *page);
 extern int ext4_try_add_inline_entry(handle_t *handle,
                                     struct ext4_filename *fname,
                                     struct inode *dir, struct inode *inode);
index c0de30f25185060a083aef4199086fea38819375..0e02571f2f828052111f36b066024ff7d98573a6 100644 (file)
@@ -5916,7 +5916,7 @@ void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
 }
 
 /* Check if *cur is a hole and if it is, skip it */
-static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
+static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
 {
        int ret;
        struct ext4_map_blocks map;
@@ -5925,9 +5925,12 @@ static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
        map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
 
        ret = ext4_map_blocks(NULL, inode, &map, 0);
+       if (ret < 0)
+               return ret;
        if (ret != 0)
-               return;
+               return 0;
        *cur = *cur + map.m_len;
+       return 0;
 }
 
 /* Count number of blocks used by this inode and update i_blocks */
@@ -5976,7 +5979,9 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
         * iblocks by total number of differences found.
         */
        cur = 0;
-       skip_hole(inode, &cur);
+       ret = skip_hole(inode, &cur);
+       if (ret < 0)
+               goto out;
        path = ext4_find_extent(inode, cur, NULL, 0);
        if (IS_ERR(path))
                goto out;
@@ -5995,8 +6000,12 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
                }
                cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
                                        ext4_ext_get_actual_len(ex));
-               skip_hole(inode, &cur);
-
+               ret = skip_hole(inode, &cur);
+               if (ret < 0) {
+                       ext4_ext_drop_refs(path);
+                       kfree(path);
+                       break;
+               }
                path2 = ext4_find_extent(inode, cur, NULL, 0);
                if (IS_ERR(path2)) {
                        ext4_ext_drop_refs(path);
index 8e610a381862f8db5029858f45dcdb5277b76622..8ea5a81e655489e48a1adbcdc27b7f653a88b3b4 100644 (file)
@@ -892,6 +892,12 @@ static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
                                            sizeof(lrange), (u8 *)&lrange, crc))
                                return -ENOSPC;
                } else {
+                       unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ?
+                               EXT_UNWRITTEN_MAX_LEN : EXT_INIT_MAX_LEN;
+
+                       /* Limit the number of blocks in one extent */
+                       map.m_len = min(max, map.m_len);
+
                        fc_ext.fc_ino = cpu_to_le32(inode->i_ino);
                        ex = (struct ext4_extent *)&fc_ext.fc_ex;
                        ex->ee_block = cpu_to_le32(map.m_lblk);
index 82bf4ff6be28e2850ebc310b657cae1397f13e37..39a1ab129fdc94b2caa18a7febe8a5b0f8ffa7b4 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/iomap.h>
 #include <linux/fiemap.h>
 #include <linux/iversion.h>
+#include <linux/backing-dev.h>
 
 #include "ext4_jbd2.h"
 #include "ext4.h"
@@ -733,45 +734,83 @@ convert:
 int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
                               unsigned copied, struct page *page)
 {
-       int ret, no_expand;
+       handle_t *handle = ext4_journal_current_handle();
+       int no_expand;
        void *kaddr;
        struct ext4_iloc iloc;
+       int ret = 0, ret2;
+
+       if (unlikely(copied < len) && !PageUptodate(page))
+               copied = 0;
 
-       if (unlikely(copied < len)) {
-               if (!PageUptodate(page)) {
-                       copied = 0;
+       if (likely(copied)) {
+               ret = ext4_get_inode_loc(inode, &iloc);
+               if (ret) {
+                       unlock_page(page);
+                       put_page(page);
+                       ext4_std_error(inode->i_sb, ret);
                        goto out;
                }
-       }
+               ext4_write_lock_xattr(inode, &no_expand);
+               BUG_ON(!ext4_has_inline_data(inode));
 
-       ret = ext4_get_inode_loc(inode, &iloc);
-       if (ret) {
-               ext4_std_error(inode->i_sb, ret);
-               copied = 0;
-               goto out;
-       }
+               /*
+                * ei->i_inline_off may have changed since
+                * ext4_write_begin() called
+                * ext4_try_to_write_inline_data()
+                */
+               (void) ext4_find_inline_data_nolock(inode);
 
-       ext4_write_lock_xattr(inode, &no_expand);
-       BUG_ON(!ext4_has_inline_data(inode));
+               kaddr = kmap_atomic(page);
+               ext4_write_inline_data(inode, &iloc, kaddr, pos, copied);
+               kunmap_atomic(kaddr);
+               SetPageUptodate(page);
+               /* clear page dirty so that writepages wouldn't work for us. */
+               ClearPageDirty(page);
 
-       /*
-        * ei->i_inline_off may have changed since ext4_write_begin()
-        * called ext4_try_to_write_inline_data()
-        */
-       (void) ext4_find_inline_data_nolock(inode);
+               ext4_write_unlock_xattr(inode, &no_expand);
+               brelse(iloc.bh);
 
-       kaddr = kmap_atomic(page);
-       ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
-       kunmap_atomic(kaddr);
-       SetPageUptodate(page);
-       /* clear page dirty so that writepages wouldn't work for us. */
-       ClearPageDirty(page);
+               /*
+                * It's important to update i_size while still holding page
+                * lock: page writeout could otherwise come in and zero
+                * beyond i_size.
+                */
+               ext4_update_inode_size(inode, pos + copied);
+       }
+       unlock_page(page);
+       put_page(page);
 
-       ext4_write_unlock_xattr(inode, &no_expand);
-       brelse(iloc.bh);
-       mark_inode_dirty(inode);
+       /*
+        * Don't mark the inode dirty under page lock. First, it unnecessarily
+        * makes the holding time of page lock longer. Second, it forces lock
+        * ordering of page lock and transaction start for journaling
+        * filesystems.
+        */
+       if (likely(copied))
+               mark_inode_dirty(inode);
 out:
-       return copied;
+       /*
+        * If we didn't copy as much data as expected, we need to trim back
+        * size of xattr containing inline data.
+        */
+       if (pos + len > inode->i_size && ext4_can_truncate(inode))
+               ext4_orphan_add(handle, inode);
+
+       ret2 = ext4_journal_stop(handle);
+       if (!ret)
+               ret = ret2;
+       if (pos + len > inode->i_size) {
+               ext4_truncate_failed_write(inode);
+               /*
+                * If truncate failed early the inode might still be
+                * on the orphan list; we need to make sure the inode
+                * is removed from the orphan list in that case.
+                */
+               if (inode->i_nlink)
+                       ext4_orphan_del(NULL, inode);
+       }
+       return ret ? ret : copied;
 }
 
 struct buffer_head *
@@ -953,43 +992,6 @@ out:
        return ret;
 }
 
-int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
-                                 unsigned len, unsigned copied,
-                                 struct page *page)
-{
-       int ret;
-
-       ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
-       if (ret < 0) {
-               unlock_page(page);
-               put_page(page);
-               return ret;
-       }
-       copied = ret;
-
-       /*
-        * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hold i_mutex.
-        *
-        * But it's important to update i_size while still holding page lock:
-        * page writeout could otherwise come in and zero beyond i_size.
-        */
-       if (pos+copied > inode->i_size)
-               i_size_write(inode, pos+copied);
-       unlock_page(page);
-       put_page(page);
-
-       /*
-        * Don't mark the inode dirty under page lock. First, it unnecessarily
-        * makes the holding time of page lock longer. Second, it forces lock
-        * ordering of page lock and transaction start for journaling
-        * filesystems.
-        */
-       mark_inode_dirty(inode);
-
-       return copied;
-}
-
 #ifdef INLINE_DIR_DEBUG
 void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh,
                          void *inline_start, int inline_size)
@@ -1917,6 +1919,24 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
        EXT4_I(inode)->i_disksize = i_size;
 
        if (i_size < inline_size) {
+               /*
+                * if there's inline data to truncate and this file was
+                * converted to extents after that inline data was written,
+                * the extent status cache must be cleared to avoid leaving
+                * behind stale delayed allocated extent entries
+                */
+               if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+retry:
+                       err = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
+                       if (err == -ENOMEM) {
+                               cond_resched();
+                               congestion_wait(BLK_RW_ASYNC, HZ/50);
+                               goto retry;
+                       }
+                       if (err)
+                               goto out_error;
+               }
+
                /* Clear the content in the xattr space. */
                if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) {
                        if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0)
index d18852d6029c194ae29e55a84353c11d5cb92365..0f06305167d5a301dd1ef4f707e9653fc6e75fbd 100644 (file)
@@ -1284,22 +1284,14 @@ static int ext4_write_end(struct file *file,
        loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int i_size_changed = 0;
-       int inline_data = ext4_has_inline_data(inode);
        bool verity = ext4_verity_in_progress(inode);
 
        trace_ext4_write_end(inode, pos, len, copied);
-       if (inline_data) {
-               ret = ext4_write_inline_data_end(inode, pos, len,
-                                                copied, page);
-               if (ret < 0) {
-                       unlock_page(page);
-                       put_page(page);
-                       goto errout;
-               }
-               copied = ret;
-       } else
-               copied = block_write_end(file, mapping, pos,
-                                        len, copied, page, fsdata);
+
+       if (ext4_has_inline_data(inode))
+               return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
+       copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
        /*
         * it's important to update i_size while still holding page lock:
         * page writeout could otherwise come in and zero beyond i_size.
@@ -1320,7 +1312,7 @@ static int ext4_write_end(struct file *file,
         * ordering of page lock and transaction start for journaling
         * filesystems.
         */
-       if (i_size_changed || inline_data)
+       if (i_size_changed)
                ret = ext4_mark_inode_dirty(handle, inode);
 
        if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
@@ -1329,7 +1321,7 @@ static int ext4_write_end(struct file *file,
                 * inode->i_size. So truncate them
                 */
                ext4_orphan_add(handle, inode);
-errout:
+
        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;
@@ -1395,7 +1387,6 @@ static int ext4_journalled_write_end(struct file *file,
        int partial = 0;
        unsigned from, to;
        int size_changed = 0;
-       int inline_data = ext4_has_inline_data(inode);
        bool verity = ext4_verity_in_progress(inode);
 
        trace_ext4_journalled_write_end(inode, pos, len, copied);
@@ -1404,16 +1395,10 @@ static int ext4_journalled_write_end(struct file *file,
 
        BUG_ON(!ext4_handle_valid(handle));
 
-       if (inline_data) {
-               ret = ext4_write_inline_data_end(inode, pos, len,
-                                                copied, page);
-               if (ret < 0) {
-                       unlock_page(page);
-                       put_page(page);
-                       goto errout;
-               }
-               copied = ret;
-       } else if (unlikely(copied < len) && !PageUptodate(page)) {
+       if (ext4_has_inline_data(inode))
+               return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
+       if (unlikely(copied < len) && !PageUptodate(page)) {
                copied = 0;
                ext4_journalled_zero_new_buffers(handle, inode, page, from, to);
        } else {
@@ -1436,7 +1421,7 @@ static int ext4_journalled_write_end(struct file *file,
        if (old_size < pos && !verity)
                pagecache_isize_extended(inode, old_size, pos);
 
-       if (size_changed || inline_data) {
+       if (size_changed) {
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
@@ -1449,7 +1434,6 @@ static int ext4_journalled_write_end(struct file *file,
                 */
                ext4_orphan_add(handle, inode);
 
-errout:
        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;
@@ -1644,6 +1628,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        int ret;
        bool allocated = false;
+       bool reserved = false;
 
        /*
         * If the cluster containing lblk is shared with a delayed,
@@ -1660,6 +1645,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
                ret = ext4_da_reserve_space(inode);
                if (ret != 0)   /* ENOSPC */
                        goto errout;
+               reserved = true;
        } else {   /* bigalloc */
                if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
                        if (!ext4_es_scan_clu(inode,
@@ -1672,6 +1658,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
                                        ret = ext4_da_reserve_space(inode);
                                        if (ret != 0)   /* ENOSPC */
                                                goto errout;
+                                       reserved = true;
                                } else {
                                        allocated = true;
                                }
@@ -1682,6 +1669,8 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
        }
 
        ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
+       if (ret && reserved)
+               ext4_da_release_space(inode, 1);
 
 errout:
        return ret;
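The ext4_insert_delayed_block() change above records whether this call path actually took a space reservation and releases exactly that reservation if the later insert fails, so the error path no longer leaks pending reserved blocks. A minimal sketch of that "remember what you reserved, undo it on failure" pattern, with hypothetical reserve/release/insert helpers rather than the ext4 ones:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int reserved_blocks;

static int reserve_space(void)  { reserved_blocks++; return 0; }
static void release_space(void) { reserved_blocks--; }

/* Stand-in for the step that can fail after the reservation was taken. */
static int insert_entry(bool fail) { return fail ? -ENOMEM : 0; }

static int insert_delayed(bool already_allocated, bool fail_insert)
{
	bool reserved = false;
	int ret = 0;

	if (!already_allocated) {
		ret = reserve_space();
		if (ret)
			goto out;
		reserved = true;   /* remember that *we* took the reservation */
	}

	ret = insert_entry(fail_insert);
	if (ret && reserved)
		release_space();   /* undo only what this call reserved */
out:
	return ret;
}

int main(void)
{
	insert_delayed(false, true);   /* failing path */
	printf("leaked reservations: %d\n", reserved_blocks);  /* prints 0 */
	return 0;
}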
@@ -1722,13 +1711,16 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
                }
 
                /*
-                * Delayed extent could be allocated by fallocate.
-                * So we need to check it.
+                * the buffer head associated with a delayed and not unwritten
+                * block found in the extent status cache must contain an
+                * invalid block number and have its BH_New and BH_Delay bits
+                * set, reflecting the state assigned when the block was
+                * initially delayed allocated
                 */
-               if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
-                       map_bh(bh, inode->i_sb, invalid_block);
-                       set_buffer_new(bh);
-                       set_buffer_delay(bh);
+               if (ext4_es_is_delonly(&es)) {
+                       BUG_ON(bh->b_blocknr != invalid_block);
+                       BUG_ON(!buffer_new(bh));
+                       BUG_ON(!buffer_delay(bh));
                        return 0;
                }
 
@@ -2932,19 +2924,6 @@ static int ext4_nonda_switch(struct super_block *sb)
        return 0;
 }
 
-/* We always reserve for an inode update; the superblock could be there too */
-static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
-{
-       if (likely(ext4_has_feature_large_file(inode->i_sb)))
-               return 1;
-
-       if (pos + len <= 0x7fffffffULL)
-               return 1;
-
-       /* We might need to update the superblock to set LARGE_FILE */
-       return 2;
-}
-
 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
@@ -2953,7 +2932,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
        struct page *page;
        pgoff_t index;
        struct inode *inode = mapping->host;
-       handle_t *handle;
 
        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;
@@ -2979,41 +2957,11 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                        return 0;
        }
 
-       /*
-        * grab_cache_page_write_begin() can take a long time if the
-        * system is thrashing due to memory pressure, or if the page
-        * is being written back.  So grab it first before we start
-        * the transaction handle.  This also allows us to allocate
-        * the page (if needed) without using GFP_NOFS.
-        */
-retry_grab:
+retry:
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
-       unlock_page(page);
 
-       /*
-        * With delayed allocation, we don't log the i_disksize update
-        * if there is delayed block allocation. But we still need
-        * to journalling the i_disksize update if writes to the end
-        * of file which has an already mapped buffer.
-        */
-retry_journal:
-       handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
-                               ext4_da_write_credits(inode, pos, len));
-       if (IS_ERR(handle)) {
-               put_page(page);
-               return PTR_ERR(handle);
-       }
-
-       lock_page(page);
-       if (page->mapping != mapping) {
-               /* The page got truncated from under us */
-               unlock_page(page);
-               put_page(page);
-               ext4_journal_stop(handle);
-               goto retry_grab;
-       }
        /* In case writeback began while the page was unlocked */
        wait_for_stable_page(page);
 
@@ -3025,20 +2973,18 @@ retry_journal:
 #endif
        if (ret < 0) {
                unlock_page(page);
-               ext4_journal_stop(handle);
+               put_page(page);
                /*
                 * block_write_begin may have instantiated a few blocks
                 * outside i_size.  Trim these off again. Don't need
-                * i_size_read because we hold i_mutex.
+                * i_size_read because we hold inode lock.
                 */
                if (pos + len > inode->i_size)
                        ext4_truncate_failed_write(inode);
 
                if (ret == -ENOSPC &&
                    ext4_should_retry_alloc(inode->i_sb, &retries))
-                       goto retry_journal;
-
-               put_page(page);
+                       goto retry;
                return ret;
        }
 
@@ -3075,8 +3021,6 @@ static int ext4_da_write_end(struct file *file,
                             struct page *page, void *fsdata)
 {
        struct inode *inode = mapping->host;
-       int ret = 0, ret2;
-       handle_t *handle = ext4_journal_current_handle();
        loff_t new_i_size;
        unsigned long start, end;
        int write_mode = (int)(unsigned long)fsdata;
@@ -3086,44 +3030,36 @@ static int ext4_da_write_end(struct file *file,
                                      len, copied, page, fsdata);
 
        trace_ext4_da_write_end(inode, pos, len, copied);
+
+       if (write_mode != CONVERT_INLINE_DATA &&
+           ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
+           ext4_has_inline_data(inode))
+               return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
        start = pos & (PAGE_SIZE - 1);
        end = start + copied - 1;
 
        /*
-        * generic_write_end() will run mark_inode_dirty() if i_size
-        * changes.  So let's piggyback the i_disksize mark_inode_dirty
-        * into that.
+        * Since we are holding inode lock, we are sure i_disksize <=
+        * i_size. We also know that if i_disksize < i_size, there are
+        * delalloc writes pending in the range up to i_size. If the end of
+        * the current write is <= i_size, there's no need to touch
+        * i_disksize since writeback will push i_disksize up to i_size
+        * eventually. If the end of the current write is > i_size and
+        * inside an allocated block (ext4_da_should_update_i_disksize()
+        * check), we need to update i_disksize here, as neither
+        * ext4_writepage() nor the ext4_writepages() paths that do not
+        * allocate blocks update i_disksize.
+        *
+        * Note that we defer inode dirtying to generic_write_end() /
+        * ext4_da_write_inline_data_end().
         */
        new_i_size = pos + copied;
-       if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
-               if (ext4_has_inline_data(inode) ||
-                   ext4_da_should_update_i_disksize(page, end)) {
-                       ext4_update_i_disksize(inode, new_i_size);
-                       /* We need to mark inode dirty even if
-                        * new_i_size is less that inode->i_size
-                        * bu greater than i_disksize.(hint delalloc)
-                        */
-                       ret = ext4_mark_inode_dirty(handle, inode);
-               }
-       }
+       if (copied && new_i_size > inode->i_size &&
+           ext4_da_should_update_i_disksize(page, end))
+               ext4_update_i_disksize(inode, new_i_size);
 
-       if (write_mode != CONVERT_INLINE_DATA &&
-           ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
-           ext4_has_inline_data(inode))
-               ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
-                                                    page);
-       else
-               ret2 = generic_write_end(file, mapping, pos, len, copied,
-                                                       page, fsdata);
-
-       copied = ret2;
-       if (ret2 < 0)
-               ret = ret2;
-       ret2 = ext4_journal_stop(handle);
-       if (unlikely(ret2 && !ret))
-               ret = ret2;
-
-       return ret ? ret : copied;
+       return generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 }
 
 /*
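The comment added to ext4_da_write_end() explains when the on-disk size needs to be pushed forward: only when the write extends past the in-core i_size and its end lands in an already-allocated block; otherwise writeback catches up on its own. A toy model of that decision, with invented names and a stubbed predicate in place of ext4_da_should_update_i_disksize():

#include <stdbool.h>
#include <stdio.h>

struct toy_inode {
	long long i_size;      /* in-core file size */
	long long i_disksize;  /* size recorded on disk so far */
};

/* Stand-in: does the end of the write land in an allocated block? */
static bool end_in_allocated_block(bool allocated) { return allocated; }

static void da_write_end(struct toy_inode *inode, long long pos,
			 long long copied, bool end_allocated)
{
	long long new_i_size = pos + copied;

	/*
	 * Only bump i_disksize when the write goes beyond i_size and its
	 * end sits in an allocated block; for ends <= i_size, writeback
	 * will push i_disksize up to i_size eventually.
	 */
	if (copied && new_i_size > inode->i_size &&
	    end_in_allocated_block(end_allocated))
		inode->i_disksize = new_i_size;

	if (new_i_size > inode->i_size)
		inode->i_size = new_i_size;   /* the generic_write_end() part */
}

int main(void)
{
	struct toy_inode ino = { .i_size = 4096, .i_disksize = 4096 };

	da_write_end(&ino, 4096, 512, true);
	printf("i_size=%lld i_disksize=%lld\n", ino.i_size, ino.i_disksize);
	return 0;
}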
@@ -4340,6 +4276,12 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
                goto has_buffer;
 
        lock_buffer(bh);
+       if (ext4_buffer_uptodate(bh)) {
+               /* Someone brought it uptodate while we waited */
+               unlock_buffer(bh);
+               goto has_buffer;
+       }
+
        /*
         * If we have all information of the inode in memory and this
         * is the only valid inode in the block, we need not read the
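The __ext4_get_inode_loc() hunk above re-checks the buffer after lock_buffer(): another task may have read it in while we slept on the lock, in which case the I/O can be skipped. The same "check, sleep on the lock, check again" shape, sketched with pthreads as a generic illustration (not the buffer-head API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static bool buf_uptodate;

static void slow_read_from_disk(void) { buf_uptodate = true; }

static void *get_block(void *arg)
{
	(void)arg;

	if (buf_uptodate)              /* cheap unlocked fast path */
		return NULL;

	pthread_mutex_lock(&buf_lock); /* may sleep while someone else reads */
	if (buf_uptodate) {
		/* Someone brought it uptodate while we waited. */
		pthread_mutex_unlock(&buf_lock);
		return NULL;
	}
	slow_read_from_disk();         /* only one thread does the I/O */
	pthread_mutex_unlock(&buf_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, get_block, NULL);
	pthread_create(&b, NULL, get_block, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("uptodate: %d\n", buf_uptodate);
	return 0;
}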
index 0775950ee84e311980d29fb8fca95385cb8e10ff..88d5d274a86843293005590709eb5e77d958a179 100644 (file)
@@ -658,7 +658,7 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
                 * constraints, it may not be safe to do it right here so we
                 * defer superblock flushing to a workqueue.
                 */
-               if (continue_fs)
+               if (continue_fs && journal)
                        schedule_work(&EXT4_SB(sb)->s_error_work);
                else
                        ext4_commit_super(sb);
@@ -1350,6 +1350,12 @@ static void ext4_destroy_inode(struct inode *inode)
                                true);
                dump_stack();
        }
+
+       if (EXT4_I(inode)->i_reserved_data_blocks)
+               ext4_msg(inode->i_sb, KERN_ERR,
+                        "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
+                        inode->i_ino, EXT4_I(inode),
+                        EXT4_I(inode)->i_reserved_data_blocks);
 }
 
 static void init_once(void *foo)
@@ -3021,17 +3027,17 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
  */
 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
 {
-       loff_t res = EXT4_NDIR_BLOCKS;
+       unsigned long long upper_limit, res = EXT4_NDIR_BLOCKS;
        int meta_blocks;
-       loff_t upper_limit;
-       /* This is calculated to be the largest file size for a dense, block
+
+       /*
+        * This is calculated to be the largest file size for a dense, block
         * mapped file such that the file's total number of 512-byte sectors,
         * including data and all indirect blocks, does not exceed (2^48 - 1).
         *
         * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
         * number of 512-byte sectors of the file.
         */
-
        if (!has_huge_files) {
                /*
                 * !has_huge_files implies that the inode i_block field
@@ -3074,7 +3080,7 @@ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
        if (res > MAX_LFS_FILESIZE)
                res = MAX_LFS_FILESIZE;
 
-       return res;
+       return (loff_t)res;
 }
 
 static ext4_fsblk_t descriptor_loc(struct super_block *sb,
@@ -5042,12 +5048,15 @@ failed_mount_wq:
        sbi->s_ea_block_cache = NULL;
 
        if (sbi->s_journal) {
+               /* flush s_error_work before journal destroy. */
+               flush_work(&sbi->s_error_work);
                jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
        }
 failed_mount3a:
        ext4_es_unregister_shrinker(sbi);
 failed_mount3:
+       /* flush s_error_work before sbi destroy */
        flush_work(&sbi->s_error_work);
        del_timer_sync(&sbi->s_err_report);
        ext4_stop_mmpd(sbi);
index f346a78f4bd67a1b6243aae4da7e1f22bb78bc7b..6a675652129b2193235c234dd4d09ab2a095e93a 100644 (file)
@@ -77,7 +77,6 @@ static WORK_STATE(INIT_OBJECT,                "INIT", fscache_initialise_object);
 static WORK_STATE(PARENT_READY,                "PRDY", fscache_parent_ready);
 static WORK_STATE(ABORT_INIT,          "ABRT", fscache_abort_initialisation);
 static WORK_STATE(LOOK_UP_OBJECT,      "LOOK", fscache_look_up_object);
-static WORK_STATE(CREATE_OBJECT,       "CRTO", fscache_look_up_object);
 static WORK_STATE(OBJECT_AVAILABLE,    "AVBL", fscache_object_available);
 static WORK_STATE(JUMPSTART_DEPS,      "JUMP", fscache_jumpstart_dependents);
 
@@ -907,6 +906,7 @@ static void fscache_dequeue_object(struct fscache_object *object)
  * @object: The object to ask about
  * @data: The auxiliary data for the object
  * @datalen: The size of the auxiliary data
+ * @object_size: The size of the object according to the server.
  *
  * This function consults the netfs about the coherency state of an object.
  * The caller must be holding a ref on cookie->n_active (held by
index 4338771077008fd947e9f9e4b368f263bba42895..e002cdfaf3cc77d91c21a197a62e6eafa0e4f265 100644 (file)
@@ -22,7 +22,10 @@ static void fscache_operation_dummy_cancel(struct fscache_operation *op)
 
 /**
  * fscache_operation_init - Do basic initialisation of an operation
+ * @cookie: The cookie to operate on
  * @op: The operation to initialise
+ * @processor: The function to perform the operation
+ * @cancel: A function to handle operation cancellation
  * @release: The release function to assign
  *
  * Do basic initialisation of an operation.  The caller must still set flags,
index 37710ca863b57be1326785bad9621d55d6bb2283..ed0cab8a32db109c6851ddcf567d8de83d1c5c22 100644 (file)
@@ -190,8 +190,10 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->private_data = NULL;
        mapping->writeback_index = 0;
-       __init_rwsem(&mapping->invalidate_lock, "mapping.invalidate_lock",
-                    &sb->s_type->invalidate_lock_key);
+       init_rwsem(&mapping->invalidate_lock);
+       lockdep_set_class_and_name(&mapping->invalidate_lock,
+                                  &sb->s_type->invalidate_lock_key,
+                                  "mapping.invalidate_lock");
        inode->i_private = NULL;
        inode->i_mapping = mapping;
        INIT_HLIST_HEAD(&inode->i_dentry);      /* buggered by rcu freeing */
index 6c55362c1f99a52e010f9c62a65d18670dc24184..5bf8aa81715e125dfaa29e7b2c5e0609b05f2a7c 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/rculist_nulls.h>
 #include <linux/cpu.h>
 #include <linux/tracehook.h>
+#include <uapi/linux/io_uring.h>
 
 #include "io-wq.h"
 
@@ -176,7 +177,6 @@ static void io_worker_ref_put(struct io_wq *wq)
 static void io_worker_exit(struct io_worker *worker)
 {
        struct io_wqe *wqe = worker->wqe;
-       struct io_wqe_acct *acct = io_wqe_get_acct(worker);
 
        if (refcount_dec_and_test(&worker->ref))
                complete(&worker->ref_done);
@@ -186,7 +186,6 @@ static void io_worker_exit(struct io_worker *worker)
        if (worker->flags & IO_WORKER_F_FREE)
                hlist_nulls_del_rcu(&worker->nulls_node);
        list_del_rcu(&worker->all_list);
-       acct->nr_workers--;
        preempt_disable();
        io_wqe_dec_running(worker);
        worker->flags = 0;
@@ -246,8 +245,6 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
  */
 static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
 {
-       bool do_create = false;
-
        /*
         * Most likely an attempt to queue unbounded work on an io_wq that
         * wasn't setup with any unbounded workers.
@@ -256,18 +253,15 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
                pr_warn_once("io-wq is not configured for unbound workers");
 
        raw_spin_lock(&wqe->lock);
-       if (acct->nr_workers < acct->max_workers) {
-               acct->nr_workers++;
-               do_create = true;
+       if (acct->nr_workers == acct->max_workers) {
+               raw_spin_unlock(&wqe->lock);
+               return true;
        }
+       acct->nr_workers++;
        raw_spin_unlock(&wqe->lock);
-       if (do_create) {
-               atomic_inc(&acct->nr_running);
-               atomic_inc(&wqe->wq->worker_refs);
-               return create_io_worker(wqe->wq, wqe, acct->index);
-       }
-
-       return true;
+       atomic_inc(&acct->nr_running);
+       atomic_inc(&wqe->wq->worker_refs);
+       return create_io_worker(wqe->wq, wqe, acct->index);
 }
 
 static void io_wqe_inc_running(struct io_worker *worker)
@@ -574,6 +568,7 @@ loop:
                }
                /* timed out, exit unless we're the last worker */
                if (last_timeout && acct->nr_workers > 1) {
+                       acct->nr_workers--;
                        raw_spin_unlock(&wqe->lock);
                        __set_current_state(TASK_RUNNING);
                        break;
@@ -589,9 +584,7 @@ loop:
 
                        if (!get_signal(&ksig))
                                continue;
-                       if (fatal_signal_pending(current))
-                               break;
-                       continue;
+                       break;
                }
                last_timeout = !ret;
        }
@@ -1287,6 +1280,10 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
 {
        int i, node, prev = 0;
 
+       BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
+       BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
+       BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);
+
        for (i = 0; i < 2; i++) {
                if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
                        new_count[i] = task_rlimit(current, RLIMIT_NPROC);
index 16fb7436043c27653d9e1b8533c767a6a4683c74..6b9e702087824b274e734069c0bfe068a86e3483 100644 (file)
@@ -403,7 +403,6 @@ struct io_ring_ctx {
                struct wait_queue_head  cq_wait;
                unsigned                cq_extra;
                atomic_t                cq_timeouts;
-               struct fasync_struct    *cq_fasync;
                unsigned                cq_last_tm_flush;
        } ____cacheline_aligned_in_smp;
 
@@ -502,6 +501,7 @@ struct io_poll_update {
 struct io_close {
        struct file                     *file;
        int                             fd;
+       u32                             file_slot;
 };
 
 struct io_timeout_data {
@@ -712,6 +712,7 @@ struct io_async_rw {
        struct iovec                    fast_iov[UIO_FASTIOV];
        const struct iovec              *free_iovec;
        struct iov_iter                 iter;
+       struct iov_iter_state           iter_state;
        size_t                          bytes_done;
        struct wait_page_queue          wpq;
 };
@@ -735,7 +736,6 @@ enum {
        REQ_F_BUFFER_SELECTED_BIT,
        REQ_F_COMPLETE_INLINE_BIT,
        REQ_F_REISSUE_BIT,
-       REQ_F_DONT_REISSUE_BIT,
        REQ_F_CREDS_BIT,
        REQ_F_REFCOUNT_BIT,
        REQ_F_ARM_LTIMEOUT_BIT,
@@ -782,8 +782,6 @@ enum {
        REQ_F_COMPLETE_INLINE   = BIT(REQ_F_COMPLETE_INLINE_BIT),
        /* caller should reissue async */
        REQ_F_REISSUE           = BIT(REQ_F_REISSUE_BIT),
-       /* don't attempt request reissue, see io_rw_reissue() */
-       REQ_F_DONT_REISSUE      = BIT(REQ_F_DONT_REISSUE_BIT),
        /* supports async reads */
        REQ_F_NOWAIT_READ       = BIT(REQ_F_NOWAIT_READ_BIT),
        /* supports async writes */
@@ -1100,6 +1098,8 @@ static int io_req_prep_async(struct io_kiocb *req);
 
 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
                                 unsigned int issue_flags, u32 slot_index);
+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
+
 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
 
 static struct kmem_cache *req_cachep;
@@ -1613,10 +1613,8 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
                wake_up(&ctx->sq_data->wait);
        if (io_should_trigger_evfd(ctx))
                eventfd_signal(ctx->cq_ev_fd, 1);
-       if (waitqueue_active(&ctx->poll_wait)) {
+       if (waitqueue_active(&ctx->poll_wait))
                wake_up_interruptible(&ctx->poll_wait);
-               kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
-       }
 }
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1630,10 +1628,8 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
        }
        if (io_should_trigger_evfd(ctx))
                eventfd_signal(ctx->cq_ev_fd, 1);
-       if (waitqueue_active(&ctx->poll_wait)) {
+       if (waitqueue_active(&ctx->poll_wait))
                wake_up_interruptible(&ctx->poll_wait);
-               kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
-       }
 }
 
 /* Returns true if there are no backlogged entries after the flush */
@@ -2444,13 +2440,6 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                req = list_first_entry(done, struct io_kiocb, inflight_entry);
                list_del(&req->inflight_entry);
 
-               if (READ_ONCE(req->result) == -EAGAIN &&
-                   !(req->flags & REQ_F_DONT_REISSUE)) {
-                       req->iopoll_completed = 0;
-                       io_req_task_queue_reissue(req);
-                       continue;
-               }
-
                __io_cqring_fill_event(ctx, req->user_data, req->result,
                                        io_put_rw_kbuf(req));
                (*nr_events)++;
@@ -2613,8 +2602,7 @@ static bool io_resubmit_prep(struct io_kiocb *req)
 
        if (!rw)
                return !io_req_prep_async(req);
-       /* may have left rw->iter inconsistent on -EIOCBQUEUED */
-       iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
+       iov_iter_restore(&rw->iter, &rw->iter_state);
        return true;
 }
 
@@ -2714,10 +2702,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
        if (unlikely(res != req->result)) {
-               if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
-                   io_resubmit_prep(req))) {
-                       req_set_fail(req);
-                       req->flags |= REQ_F_DONT_REISSUE;
+               if (res == -EAGAIN && io_rw_should_reissue(req)) {
+                       req->flags |= REQ_F_REISSUE;
+                       return;
                }
        }
 
@@ -2843,7 +2830,8 @@ static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
        return __io_file_supports_nowait(req->file, rw);
 }
 
-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                     int rw)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct kiocb *kiocb = &req->rw.kiocb;
@@ -2865,8 +2853,13 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(ret))
                return ret;
 
-       /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
-       if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
+       /*
+        * If the file is marked O_NONBLOCK, still allow retry for it if it
+        * supports async. Otherwise it's impossible to use O_NONBLOCK files
+        * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
+        */
+       if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+           ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
                req->flags |= REQ_F_NOWAIT;
 
        ioprio = READ_ONCE(sqe->ioprio);
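The io_prep_rw() change makes the REQ_F_NOWAIT decision depend on whether the file can actually service non-blocking I/O, instead of forbidding retries for every O_NONBLOCK file. A small truth-table style sketch of that decision; the parameter names below are local stand-ins, not the io_uring flags:

#include <stdbool.h>
#include <stdio.h>

/*
 * Decide whether a request must never be punted to a blocking retry:
 * explicit IOCB_NOWAIT always means "no retry"; O_NONBLOCK only does if
 * the file cannot support async/nowait I/O at all.
 */
static bool force_nowait(bool iocb_nowait, bool o_nonblock,
			 bool file_supports_nowait)
{
	return iocb_nowait || (o_nonblock && !file_supports_nowait);
}

int main(void)
{
	/* O_NONBLOCK file that supports async: retry is still allowed. */
	printf("%d\n", force_nowait(false, true, true));   /* 0 */
	/* O_NONBLOCK file that cannot do nowait I/O: never retry. */
	printf("%d\n", force_nowait(false, true, false));  /* 1 */
	/* IOCB_NOWAIT (e.g. RWF_NOWAIT) always wins. */
	printf("%d\n", force_nowait(true, false, true));   /* 1 */
	return 0;
}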
@@ -2931,7 +2924,6 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
        struct io_async_rw *io = req->async_data;
-       bool check_reissue = kiocb->ki_complete == io_complete_rw;
 
        /* add previously done IO, if any */
        if (io && io->bytes_done > 0) {
@@ -2943,19 +2935,27 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 
        if (req->flags & REQ_F_CUR_POS)
                req->file->f_pos = kiocb->ki_pos;
-       if (ret >= 0 && check_reissue)
+       if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
                __io_complete_rw(req, ret, 0, issue_flags);
        else
                io_rw_done(kiocb, ret);
 
-       if (check_reissue && (req->flags & REQ_F_REISSUE)) {
+       if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req)) {
                        io_req_task_queue_reissue(req);
                } else {
+                       unsigned int cflags = io_put_rw_kbuf(req);
+                       struct io_ring_ctx *ctx = req->ctx;
+
                        req_set_fail(req);
-                       __io_req_complete(req, issue_flags, ret,
-                                         io_put_rw_kbuf(req));
+                       if (issue_flags & IO_URING_F_NONBLOCK) {
+                               mutex_lock(&ctx->uring_lock);
+                               __io_req_complete(req, issue_flags, ret, cflags);
+                               mutex_unlock(&ctx->uring_lock);
+                       } else {
+                               __io_req_complete(req, issue_flags, ret, cflags);
+                       }
                }
        }
 }
@@ -3263,12 +3263,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
                                ret = nr;
                        break;
                }
+               if (!iov_iter_is_bvec(iter)) {
+                       iov_iter_advance(iter, nr);
+               } else {
+                       req->rw.len -= nr;
+                       req->rw.addr += nr;
+               }
                ret += nr;
                if (nr != iovec.iov_len)
                        break;
-               req->rw.len -= nr;
-               req->rw.addr += nr;
-               iov_iter_advance(iter, nr);
        }
 
        return ret;
@@ -3315,12 +3318,17 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
        if (!force && !io_op_defs[req->opcode].needs_async_setup)
                return 0;
        if (!req->async_data) {
+               struct io_async_rw *iorw;
+
                if (io_alloc_async_data(req)) {
                        kfree(iovec);
                        return -ENOMEM;
                }
 
                io_req_map_rw(req, iovec, fast_iov, iter);
+               iorw = req->async_data;
+               /* we've copied and mapped the iter, ensure state is saved */
+               iov_iter_save_state(&iorw->iter, &iorw->iter_state);
        }
        return 0;
 }
@@ -3339,6 +3347,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
        iorw->free_iovec = iov;
        if (iov)
                req->flags |= REQ_F_NEED_CLEANUP;
+       iov_iter_save_state(&iorw->iter, &iorw->iter_state);
        return 0;
 }
 
@@ -3346,7 +3355,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        if (unlikely(!(req->file->f_mode & FMODE_READ)))
                return -EBADF;
-       return io_prep_rw(req, sqe);
+       return io_prep_rw(req, sqe, READ);
 }
 
 /*
@@ -3442,19 +3451,28 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
        struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter __iter, *iter = &__iter;
        struct io_async_rw *rw = req->async_data;
-       ssize_t io_size, ret, ret2;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       struct iov_iter_state __state, *state;
+       ssize_t ret, ret2;
 
        if (rw) {
                iter = &rw->iter;
+               state = &rw->iter_state;
+               /*
+                * We come here from an earlier attempt; restore our state so
+                * it matches in case it no longer does. It's cheap enough
+                * that we don't need to make this conditional.
+                */
+               iov_iter_restore(iter, state);
                iovec = NULL;
        } else {
                ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
                if (ret < 0)
                        return ret;
+               state = &__state;
+               iov_iter_save_state(iter, state);
        }
-       io_size = iov_iter_count(iter);
-       req->result = io_size;
+       req->result = iov_iter_count(iter);
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
@@ -3468,7 +3486,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                return ret ?: -EAGAIN;
        }
 
-       ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
+       ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
@@ -3484,30 +3502,49 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                /* no retry on NONBLOCK nor RWF_NOWAIT */
                if (req->flags & REQ_F_NOWAIT)
                        goto done;
-               /* some cases will consume bytes even on error returns */
-               iov_iter_reexpand(iter, iter->count + iter->truncated);
-               iov_iter_revert(iter, io_size - iov_iter_count(iter));
                ret = 0;
        } else if (ret == -EIOCBQUEUED) {
                goto out_free;
-       } else if (ret <= 0 || ret == io_size || !force_nonblock ||
+       } else if (ret <= 0 || ret == req->result || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
                /* read all, failed, already did sync or don't want to retry */
                goto done;
        }
 
+       /*
+        * Don't depend on the iter state matching what was consumed, or being
+        * untouched in case of error. Restore it and we'll advance it
+        * manually if we need to.
+        */
+       iov_iter_restore(iter, state);
+
        ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
        if (ret2)
                return ret2;
 
        iovec = NULL;
        rw = req->async_data;
-       /* now use our persistent iterator, if we aren't already */
-       iter = &rw->iter;
+       /*
+        * Now use our persistent iterator and state, if we aren't already.
+        * We've restored and mapped the iter to match.
+        */
+       if (iter != &rw->iter) {
+               iter = &rw->iter;
+               state = &rw->iter_state;
+       }
 
        do {
-               io_size -= ret;
+               /*
+                * We end up here because of a partial read, either from
+                * above or inside this loop. Advance the iter by the bytes
+                * that were consumed.
+                */
+               iov_iter_advance(iter, ret);
+               if (!iov_iter_count(iter))
+                       break;
                rw->bytes_done += ret;
+               iov_iter_save_state(iter, state);
+
                /* if we can retry, do so with the callbacks armed */
                if (!io_rw_should_retry(req)) {
                        kiocb->ki_flags &= ~IOCB_WAITQ;
@@ -3525,7 +3562,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                        return 0;
                /* we got some bytes, but not all. retry. */
                kiocb->ki_flags &= ~IOCB_WAITQ;
-       } while (ret > 0 && ret < io_size);
+               iov_iter_restore(iter, state);
+       } while (ret > 0);
 done:
        kiocb_done(kiocb, ret, issue_flags);
 out_free:
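The io_read() rework above stops guessing how far the iterator moved after a short or failed read: it saves the iov_iter state up front, restores it before retrying, and then advances by exactly the bytes known to have completed. A cut-down cursor analogue of that save/restore/advance bookkeeping, using toy types in place of iov_iter_save_state()/iov_iter_restore():

#include <stdio.h>

struct cursor { size_t off, len; };
struct cursor_state { size_t off, len; };

static void save_state(const struct cursor *c, struct cursor_state *s)
{ s->off = c->off; s->len = c->len; }

static void restore(struct cursor *c, const struct cursor_state *s)
{ c->off = s->off; c->len = s->len; }

static void advance(struct cursor *c, size_t n)
{ c->off += n; c->len -= n; }

/* Stand-in for a read that may consume an unknown amount internally but
 * only *reports* 'reported' bytes as completed. */
static size_t short_read(struct cursor *c, size_t reported)
{
	advance(c, c->len);           /* clobbers the cursor arbitrarily */
	return reported;
}

int main(void)
{
	struct cursor c = { .off = 0, .len = 4096 };
	struct cursor_state st;
	size_t done = 0;

	save_state(&c, &st);
	while (c.len) {
		size_t ret = short_read(&c, 1024);

		/* Don't trust where the read left the cursor: rewind to the
		 * saved state, then advance by what actually completed. */
		restore(&c, &st);
		advance(&c, ret);
		done += ret;
		save_state(&c, &st);  /* new baseline for the next retry */
	}
	printf("completed %zu bytes, remaining %zu\n", done, c.len);
	return 0;
}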
@@ -3539,7 +3577,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
                return -EBADF;
-       return io_prep_rw(req, sqe);
+       return io_prep_rw(req, sqe, WRITE);
 }
 
 static int io_write(struct io_kiocb *req, unsigned int issue_flags)
@@ -3548,19 +3586,23 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter __iter, *iter = &__iter;
        struct io_async_rw *rw = req->async_data;
-       ssize_t ret, ret2, io_size;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       struct iov_iter_state __state, *state;
+       ssize_t ret, ret2;
 
        if (rw) {
                iter = &rw->iter;
+               state = &rw->iter_state;
+               iov_iter_restore(iter, state);
                iovec = NULL;
        } else {
                ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
                if (ret < 0)
                        return ret;
+               state = &__state;
+               iov_iter_save_state(iter, state);
        }
-       io_size = iov_iter_count(iter);
-       req->result = io_size;
+       req->result = iov_iter_count(iter);
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
@@ -3577,7 +3619,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
            (req->flags & REQ_F_ISREG))
                goto copy_iov;
 
-       ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
+       ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
        if (unlikely(ret))
                goto out_free;
 
@@ -3624,9 +3666,7 @@ done:
                kiocb_done(kiocb, ret2, issue_flags);
        } else {
 copy_iov:
-               /* some cases will consume bytes even on error returns */
-               iov_iter_reexpand(iter, iter->count + iter->truncated);
-               iov_iter_revert(iter, io_size - iov_iter_count(iter));
+               iov_iter_restore(iter, state);
                ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
                return ret ?: -EAGAIN;
        }
@@ -4342,7 +4382,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
        int i, bid = pbuf->bid;
 
        for (i = 0; i < pbuf->nbufs; i++) {
-               buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+               buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
                if (!buf)
                        break;
 
@@ -4549,12 +4589,16 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
-           sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+           sqe->rw_flags || sqe->buf_index)
                return -EINVAL;
        if (req->flags & REQ_F_FIXED_FILE)
                return -EBADF;
 
        req->close.fd = READ_ONCE(sqe->fd);
+       req->close.file_slot = READ_ONCE(sqe->file_index);
+       if (req->close.file_slot && req->close.fd)
+               return -EINVAL;
+
        return 0;
 }
 
@@ -4566,6 +4610,11 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)
        struct file *file = NULL;
        int ret = -EBADF;
 
+       if (req->close.file_slot) {
+               ret = io_close_fixed(req, issue_flags);
+               goto err;
+       }
+
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (close->fd >= fdt->max_fds) {
@@ -5293,7 +5342,7 @@ static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
        if (req->poll.events & EPOLLONESHOT)
                flags = 0;
        if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
-               req->poll.done = true;
+               req->poll.events |= EPOLLONESHOT;
                flags = 0;
        }
        if (flags & IORING_CQE_F_MORE)
@@ -5322,10 +5371,15 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
        } else {
                bool done;
 
+               if (req->poll.done) {
+                       spin_unlock(&ctx->completion_lock);
+                       return;
+               }
                done = __io_poll_complete(req, req->result);
                if (done) {
                        io_poll_remove_double(req);
                        hash_del(&req->hash_node);
+                       req->poll.done = true;
                } else {
                        req->result = 0;
                        add_wait_queue(req->poll.head, &req->poll.wait);
@@ -5463,6 +5517,7 @@ static void io_async_task_func(struct io_kiocb *req, bool *locked)
 
        hash_del(&req->hash_node);
        io_poll_remove_double(req);
+       apoll->poll.done = true;
        spin_unlock(&ctx->completion_lock);
 
        if (!READ_ONCE(apoll->poll.canceled))
@@ -5783,6 +5838,7 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
        struct io_ring_ctx *ctx = req->ctx;
        struct io_poll_table ipt;
        __poll_t mask;
+       bool done;
 
        ipt.pt._qproc = io_poll_queue_proc;
 
@@ -5791,13 +5847,13 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 
        if (mask) { /* no async, we'd stolen it */
                ipt.error = 0;
-               io_poll_complete(req, mask);
+               done = io_poll_complete(req, mask);
        }
        spin_unlock(&ctx->completion_lock);
 
        if (mask) {
                io_cqring_ev_posted(ctx);
-               if (poll->events & EPOLLONESHOT)
+               if (done)
                        io_put_req(req);
        }
        return ipt.error;
@@ -6288,19 +6344,16 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
        struct io_uring_rsrc_update2 up;
        int ret;
 
-       if (issue_flags & IO_URING_F_NONBLOCK)
-               return -EAGAIN;
-
        up.offset = req->rsrc_update.offset;
        up.data = req->rsrc_update.arg;
        up.nr = 0;
        up.tags = 0;
        up.resv = 0;
 
-       mutex_lock(&ctx->uring_lock);
+       io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
        ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
                                        &up, req->rsrc_update.nr_args);
-       mutex_unlock(&ctx->uring_lock);
+       io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
 
        if (ret < 0)
                req_set_fail(req);
@@ -7515,6 +7568,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        break;
        } while (1);
 
+       if (uts) {
+               struct timespec64 ts;
+
+               if (get_timespec64(&ts, uts))
+                       return -EFAULT;
+               timeout = timespec64_to_jiffies(&ts);
+       }
+
        if (sig) {
 #ifdef CONFIG_COMPAT
                if (in_compat_syscall())
@@ -7528,14 +7589,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        return ret;
        }
 
-       if (uts) {
-               struct timespec64 ts;
-
-               if (get_timespec64(&ts, uts))
-                       return -EFAULT;
-               timeout = timespec64_to_jiffies(&ts);
-       }
-
        init_waitqueue_func_entry(&iowq.wq, io_wake_function);
        iowq.wq.private = current;
        INIT_LIST_HEAD(&iowq.wq.entry);
@@ -8284,11 +8337,27 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
 #endif
 }
 
+static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
+                                struct io_rsrc_node *node, void *rsrc)
+{
+       struct io_rsrc_put *prsrc;
+
+       prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
+       if (!prsrc)
+               return -ENOMEM;
+
+       prsrc->tag = *io_get_tag_slot(data, idx);
+       prsrc->rsrc = rsrc;
+       list_add(&prsrc->list, &node->rsrc_list);
+       return 0;
+}
+
 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
                                 unsigned int issue_flags, u32 slot_index)
 {
        struct io_ring_ctx *ctx = req->ctx;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       bool needs_switch = false;
        struct io_fixed_file *file_slot;
        int ret = -EBADF;
 
@@ -8304,9 +8373,22 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 
        slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
        file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
-       ret = -EBADF;
-       if (file_slot->file_ptr)
-               goto err;
+
+       if (file_slot->file_ptr) {
+               struct file *old_file;
+
+               ret = io_rsrc_node_switch_start(ctx);
+               if (ret)
+                       goto err;
+
+               old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+               ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
+                                           ctx->rsrc_node, old_file);
+               if (ret)
+                       goto err;
+               file_slot->file_ptr = 0;
+               needs_switch = true;
+       }
 
        *io_get_tag_slot(ctx->file_data, slot_index) = 0;
        io_fixed_file_set(file_slot, file);
@@ -8318,25 +8400,50 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 
        ret = 0;
 err:
+       if (needs_switch)
+               io_rsrc_node_switch(ctx, ctx->file_data);
        io_ring_submit_unlock(ctx, !force_nonblock);
        if (ret)
                fput(file);
        return ret;
 }
 
-static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
-                                struct io_rsrc_node *node, void *rsrc)
+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
 {
-       struct io_rsrc_put *prsrc;
+       unsigned int offset = req->close.file_slot - 1;
+       struct io_ring_ctx *ctx = req->ctx;
+       struct io_fixed_file *file_slot;
+       struct file *file;
+       int ret, i;
 
-       prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
-       if (!prsrc)
-               return -ENOMEM;
+       io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+       ret = -ENXIO;
+       if (unlikely(!ctx->file_data))
+               goto out;
+       ret = -EINVAL;
+       if (offset >= ctx->nr_user_files)
+               goto out;
+       ret = io_rsrc_node_switch_start(ctx);
+       if (ret)
+               goto out;
 
-       prsrc->tag = *io_get_tag_slot(data, idx);
-       prsrc->rsrc = rsrc;
-       list_add(&prsrc->list, &node->rsrc_list);
-       return 0;
+       i = array_index_nospec(offset, ctx->nr_user_files);
+       file_slot = io_fixed_file_slot(&ctx->file_table, i);
+       ret = -EBADF;
+       if (!file_slot->file_ptr)
+               goto out;
+
+       file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+       ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
+       if (ret)
+               goto out;
+
+       file_slot->file_ptr = 0;
+       io_rsrc_node_switch(ctx, ctx->file_data);
+       ret = 0;
+out:
+       io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+       return ret;
 }
 
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
@@ -9105,8 +9212,10 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
        struct io_buffer *buf;
        unsigned long index;
 
-       xa_for_each(&ctx->io_buffers, index, buf)
+       xa_for_each(&ctx->io_buffers, index, buf) {
                __io_remove_buffers(ctx, buf, index, -1U);
+               cond_resched();
+       }
 }
 
 static void io_req_cache_free(struct list_head *list)
@@ -9231,13 +9340,6 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
        return mask;
 }
 
-static int io_uring_fasync(int fd, struct file *file, int on)
-{
-       struct io_ring_ctx *ctx = file->private_data;
-
-       return fasync_helper(fd, file, on, &ctx->cq_fasync);
-}
-
 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
 {
        const struct cred *creds;
@@ -9604,8 +9706,10 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
        struct io_tctx_node *node;
        unsigned long index;
 
-       xa_for_each(&tctx->xa, index, node)
+       xa_for_each(&tctx->xa, index, node) {
                io_uring_del_tctx_node(index);
+               cond_resched();
+       }
        if (wq) {
                /*
                 * Must be after io_uring_del_task_file() (removes nodes under
@@ -10029,7 +10133,6 @@ static const struct file_operations io_uring_fops = {
        .mmap_capabilities = io_uring_nommu_mmap_capabilities,
 #endif
        .poll           = io_uring_poll,
-       .fasync         = io_uring_fasync,
 #ifdef CONFIG_PROC_FS
        .show_fdinfo    = io_uring_show_fdinfo,
 #endif
@@ -10560,10 +10663,12 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                         * ordering. Fine to drop uring_lock here, we hold
                         * a ref to the ctx.
                         */
+                       refcount_inc(&sqd->refs);
                        mutex_unlock(&ctx->uring_lock);
                        mutex_lock(&sqd->lock);
                        mutex_lock(&ctx->uring_lock);
-                       tctx = sqd->thread->io_uring;
+                       if (sqd->thread)
+                               tctx = sqd->thread->io_uring;
                }
        } else {
                tctx = current->io_uring;
@@ -10577,16 +10682,20 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
        if (ret)
                goto err;
 
-       if (sqd)
+       if (sqd) {
                mutex_unlock(&sqd->lock);
+               io_put_sq_data(sqd);
+       }
 
        if (copy_to_user(arg, new_count, sizeof(new_count)))
                return -EFAULT;
 
        return 0;
 err:
-       if (sqd)
+       if (sqd) {
                mutex_unlock(&sqd->lock);
+               io_put_sq_data(sqd);
+       }
        return ret;
 }
 
index ba581429bf7b28b6e83e30429e253faa81c27a21..8e0a1378a4b1fe1a19aa4d100661639392fe1557 100644 (file)
@@ -1111,13 +1111,25 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
 
        kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
        /* attach dentry and inode */
-       if (kn && kernfs_active(kn)) {
+       if (kn) {
+               /* Inactive nodes are invisible to the VFS so don't
+                * create a negative dentry.
+                */
+               if (!kernfs_active(kn)) {
+                       up_read(&kernfs_rwsem);
+                       return NULL;
+               }
                inode = kernfs_get_inode(dir->i_sb, kn);
                if (!inode)
                        inode = ERR_PTR(-ENOMEM);
        }
-       /* Needed only for negative dentry validation */
-       if (!inode)
+       /*
+        * Needed for negative dentry validation.
+        * The negative dentry can be created in kernfs_iop_lookup()
+        * or result from a positive dentry turned negative in
+        * dentry_unlink_inode(), called from vfs_rmdir().
+        */
+       if (!IS_ERR(inode))
                kernfs_set_rev(parent, dentry);
        up_read(&kernfs_rwsem);
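In the kernfs_iop_lookup() hunk, a node that exists but is not yet active now returns NULL without caching a negative dentry, and the revision used for negative-dentry validation is only recorded when the lookup did not fail with an error. A toy lookup illustrating why a "present but not yet visible" entry must not be cached as "absent" (invented types, not the kernfs/VFS API):

#include <stdio.h>
#include <string.h>

enum state { ABSENT, INACTIVE, ACTIVE };

struct entry { const char *name; enum state st; };

static struct entry table[] = {
	{ "ready",    ACTIVE },
	{ "creating", INACTIVE },   /* exists, but not visible yet */
};

enum lookup_result { FOUND, NOT_FOUND, DONT_CACHE };

/*
 * NOT_FOUND may be cached as a negative entry; DONT_CACHE must not be,
 * because the node exists and may become visible at any moment.
 */
static enum lookup_result lookup(const char *name)
{
	for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (strcmp(table[i].name, name))
			continue;
		return table[i].st == ACTIVE ? FOUND : DONT_CACHE;
	}
	return NOT_FOUND;
}

int main(void)
{
	printf("ready:    %d\n", lookup("ready"));    /* FOUND */
	printf("creating: %d\n", lookup("creating")); /* DONT_CACHE */
	printf("missing:  %d\n", lookup("missing"));  /* NOT_FOUND */
	return 0;
}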
 
index de36f12070bf03d304c59b916cfdb2de14d18765..71c989f1568dc0a1c991aa4cfbdde7a0767fdd11 100644 (file)
@@ -68,125 +68,6 @@ void ksmbd_copy_gss_neg_header(void *buf)
        memcpy(buf, NEGOTIATE_GSS_HEADER, AUTH_GSS_LENGTH);
 }
 
-static void
-str_to_key(unsigned char *str, unsigned char *key)
-{
-       int i;
-
-       key[0] = str[0] >> 1;
-       key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2);
-       key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3);
-       key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4);
-       key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5);
-       key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6);
-       key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7);
-       key[7] = str[6] & 0x7F;
-       for (i = 0; i < 8; i++)
-               key[i] = (key[i] << 1);
-}
-
-static int
-smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
-{
-       unsigned char key2[8];
-       struct des_ctx ctx;
-
-       if (fips_enabled) {
-               ksmbd_debug(AUTH, "FIPS compliance enabled: DES not permitted\n");
-               return -ENOENT;
-       }
-
-       str_to_key(key, key2);
-       des_expand_key(&ctx, key2, DES_KEY_SIZE);
-       des_encrypt(&ctx, out, in);
-       memzero_explicit(&ctx, sizeof(ctx));
-       return 0;
-}
-
-static int ksmbd_enc_p24(unsigned char *p21, const unsigned char *c8, unsigned char *p24)
-{
-       int rc;
-
-       rc = smbhash(p24, c8, p21);
-       if (rc)
-               return rc;
-       rc = smbhash(p24 + 8, c8, p21 + 7);
-       if (rc)
-               return rc;
-       return smbhash(p24 + 16, c8, p21 + 14);
-}
-
-/* produce a md4 message digest from data of length n bytes */
-static int ksmbd_enc_md4(unsigned char *md4_hash, unsigned char *link_str,
-                        int link_len)
-{
-       int rc;
-       struct ksmbd_crypto_ctx *ctx;
-
-       ctx = ksmbd_crypto_ctx_find_md4();
-       if (!ctx) {
-               ksmbd_debug(AUTH, "Crypto md4 allocation error\n");
-               return -ENOMEM;
-       }
-
-       rc = crypto_shash_init(CRYPTO_MD4(ctx));
-       if (rc) {
-               ksmbd_debug(AUTH, "Could not init md4 shash\n");
-               goto out;
-       }
-
-       rc = crypto_shash_update(CRYPTO_MD4(ctx), link_str, link_len);
-       if (rc) {
-               ksmbd_debug(AUTH, "Could not update with link_str\n");
-               goto out;
-       }
-
-       rc = crypto_shash_final(CRYPTO_MD4(ctx), md4_hash);
-       if (rc)
-               ksmbd_debug(AUTH, "Could not generate md4 hash\n");
-out:
-       ksmbd_release_crypto_ctx(ctx);
-       return rc;
-}
-
-static int ksmbd_enc_update_sess_key(unsigned char *md5_hash, char *nonce,
-                                    char *server_challenge, int len)
-{
-       int rc;
-       struct ksmbd_crypto_ctx *ctx;
-
-       ctx = ksmbd_crypto_ctx_find_md5();
-       if (!ctx) {
-               ksmbd_debug(AUTH, "Crypto md5 allocation error\n");
-               return -ENOMEM;
-       }
-
-       rc = crypto_shash_init(CRYPTO_MD5(ctx));
-       if (rc) {
-               ksmbd_debug(AUTH, "Could not init md5 shash\n");
-               goto out;
-       }
-
-       rc = crypto_shash_update(CRYPTO_MD5(ctx), server_challenge, len);
-       if (rc) {
-               ksmbd_debug(AUTH, "Could not update with challenge\n");
-               goto out;
-       }
-
-       rc = crypto_shash_update(CRYPTO_MD5(ctx), nonce, len);
-       if (rc) {
-               ksmbd_debug(AUTH, "Could not update with nonce\n");
-               goto out;
-       }
-
-       rc = crypto_shash_final(CRYPTO_MD5(ctx), md5_hash);
-       if (rc)
-               ksmbd_debug(AUTH, "Could not generate md5 hash\n");
-out:
-       ksmbd_release_crypto_ctx(ctx);
-       return rc;
-}
-
 /**
  * ksmbd_gen_sess_key() - function to generate session key
  * @sess:      session of connection
@@ -324,43 +205,6 @@ out:
        return ret;
 }
 
-/**
- * ksmbd_auth_ntlm() - NTLM authentication handler
- * @sess:      session of connection
- * @pw_buf:    NTLM challenge response
- * @passkey:   user password
- *
- * Return:     0 on success, error number on error
- */
-int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf)
-{
-       int rc;
-       unsigned char p21[21];
-       char key[CIFS_AUTH_RESP_SIZE];
-
-       memset(p21, '\0', 21);
-       memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
-       rc = ksmbd_enc_p24(p21, sess->ntlmssp.cryptkey, key);
-       if (rc) {
-               pr_err("password processing failed\n");
-               return rc;
-       }
-
-       ksmbd_enc_md4(sess->sess_key, user_passkey(sess->user),
-                     CIFS_SMB1_SESSKEY_SIZE);
-       memcpy(sess->sess_key + CIFS_SMB1_SESSKEY_SIZE, key,
-              CIFS_AUTH_RESP_SIZE);
-       sess->sequence_number = 1;
-
-       if (strncmp(pw_buf, key, CIFS_AUTH_RESP_SIZE) != 0) {
-               ksmbd_debug(AUTH, "ntlmv1 authentication failed\n");
-               return -EINVAL;
-       }
-
-       ksmbd_debug(AUTH, "ntlmv1 authentication pass\n");
-       return 0;
-}
-
 /**
  * ksmbd_auth_ntlmv2() - NTLMv2 authentication handler
  * @sess:      session of connection
@@ -441,44 +285,6 @@ out:
        return rc;
 }
 
-/**
- * __ksmbd_auth_ntlmv2() - NTLM2(extended security) authentication handler
- * @sess:      session of connection
- * @client_nonce:      client nonce from LM response.
- * @ntlm_resp:         ntlm response data from client.
- *
- * Return:     0 on success, error number on error
- */
-static int __ksmbd_auth_ntlmv2(struct ksmbd_session *sess, char *client_nonce,
-                              char *ntlm_resp)
-{
-       char sess_key[CIFS_SMB1_SESSKEY_SIZE] = {0};
-       int rc;
-       unsigned char p21[21];
-       char key[CIFS_AUTH_RESP_SIZE];
-
-       rc = ksmbd_enc_update_sess_key(sess_key,
-                                      client_nonce,
-                                      (char *)sess->ntlmssp.cryptkey, 8);
-       if (rc) {
-               pr_err("password processing failed\n");
-               goto out;
-       }
-
-       memset(p21, '\0', 21);
-       memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
-       rc = ksmbd_enc_p24(p21, sess_key, key);
-       if (rc) {
-               pr_err("password processing failed\n");
-               goto out;
-       }
-
-       if (memcmp(ntlm_resp, key, CIFS_AUTH_RESP_SIZE) != 0)
-               rc = -EINVAL;
-out:
-       return rc;
-}
-
 /**
  * ksmbd_decode_ntlmssp_auth_blob() - helper function to construct
  * authenticate blob
@@ -512,17 +318,6 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
        nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset);
        nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length);
 
-       /* process NTLM authentication */
-       if (nt_len == CIFS_AUTH_RESP_SIZE) {
-               if (le32_to_cpu(authblob->NegotiateFlags) &
-                   NTLMSSP_NEGOTIATE_EXTENDED_SEC)
-                       return __ksmbd_auth_ntlmv2(sess, (char *)authblob +
-                               lm_off, (char *)authblob + nt_off);
-               else
-                       return ksmbd_auth_ntlm(sess, (char *)authblob +
-                               nt_off);
-       }
-
        /* TODO : use domain name that imported from configuration file */
        domain_name = smb_strndup_from_utf16((const char *)authblob +
                        le32_to_cpu(authblob->DomainName.BufferOffset),
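
The hunks above remove the NTLMv1 and NTLM2 (extended session security) paths together with their MD4/MD5 helpers, leaving NTLMv2 as the only challenge/response scheme the server accepts. The deleted dispatch in ksmbd_decode_ntlmssp_auth_blob() keyed off the NtChallengeResponse length: a 24-byte response marks the legacy schemes, and the extended-session-security negotiate flag picks between them. A standalone sketch of that classification, with an assumed flag value of 0x00080000 (NTLMSSP extended session security) and hypothetical names:

#include <stdint.h>
#include <stdio.h>

#define AUTH_RESP_SIZE          24          /* NTLMv1/NTLM2 responses are exactly 24 bytes */
#define NEGOTIATE_EXTENDED_SEC  0x00080000  /* assumed NTLMSSP extended-session-security flag */

/* Illustrative only: which path a given NtChallengeResponse length selects. */
static const char *classify_nt_response(uint16_t nt_len, uint32_t neg_flags)
{
        if (nt_len == AUTH_RESP_SIZE)
                return (neg_flags & NEGOTIATE_EXTENDED_SEC) ?
                        "NTLM2 (extended security)" : "NTLMv1";
        return "NTLMv2";
}

int main(void)
{
        printf("%s\n", classify_nt_response(24, 0));
        printf("%s\n", classify_nt_response(24, NEGOTIATE_EXTENDED_SEC));
        printf("%s\n", classify_nt_response(300, 0));
        return 0;
}
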
index af086d35398aea60fb7e795df94807689f721a33..48b18b4ec117e85393ffc72023d359a7ca45c19e 100644 (file)
@@ -296,10 +296,12 @@ int ksmbd_conn_handler_loop(void *p)
                pdu_size = get_rfc1002_len(hdr_buf);
                ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
 
-               /* make sure we have enough to get to SMB header end */
-               if (!ksmbd_pdu_size_has_room(pdu_size)) {
-                       ksmbd_debug(CONN, "SMB request too short (%u bytes)\n",
-                                   pdu_size);
+               /*
+                * Check if pdu size is valid (min : smb header size,
+                * max : 0x00FFFFFF).
+                */
+               if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
+                   pdu_size > MAX_STREAM_PROT_LEN) {
                        continue;
                }
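
The connection-loop change above replaces the old ksmbd_pdu_size_has_room() test with explicit bounds on the RFC1002-advertised PDU size: at least one SMB2 header, at most what the 3-byte length field can carry. A minimal userspace sketch of the same test; the constants are assumptions, not taken from the ksmbd headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SMB2_HDR_SIZE        64          /* fixed SMB2 header size */
#define MAX_STREAM_PROT_LEN  0x00FFFFFF  /* largest value a 3-byte RFC1002 length can hold */

/* Accept only PDUs whose advertised size can hold a header and fits the frame. */
static bool pdu_size_valid(uint32_t pdu_size)
{
        return pdu_size >= SMB2_HDR_SIZE && pdu_size <= MAX_STREAM_PROT_LEN;
}

int main(void)
{
        printf("%d %d %d\n", pdu_size_valid(16),          /* 0: shorter than a header */
               pdu_size_valid(4096),                      /* 1 */
               pdu_size_valid(0x1000000));                /* 0: exceeds the RFC1002 limit */
        return 0;
}
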
 
index 5f4b1008d17e0bc51888797e3ea9516d74e0e4c3..81488d04199da8da6a83d638a44a431fdfa629f6 100644 (file)
@@ -81,12 +81,6 @@ static struct shash_desc *alloc_shash_desc(int id)
        case CRYPTO_SHASH_SHA512:
                tfm = crypto_alloc_shash("sha512", 0, 0);
                break;
-       case CRYPTO_SHASH_MD4:
-               tfm = crypto_alloc_shash("md4", 0, 0);
-               break;
-       case CRYPTO_SHASH_MD5:
-               tfm = crypto_alloc_shash("md5", 0, 0);
-               break;
        default:
                return NULL;
        }
@@ -214,16 +208,6 @@ struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void)
        return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA512);
 }
 
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void)
-{
-       return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD4);
-}
-
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void)
-{
-       return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD5);
-}
-
 static struct ksmbd_crypto_ctx *____crypto_aead_ctx_find(int id)
 {
        struct ksmbd_crypto_ctx *ctx;
index ef11154b43df3652335f8c73ee504f95df0250a1..4a367c62f6536662979a3cb4caf8ad2a253fcb9a 100644 (file)
@@ -15,8 +15,6 @@ enum {
        CRYPTO_SHASH_CMACAES,
        CRYPTO_SHASH_SHA256,
        CRYPTO_SHASH_SHA512,
-       CRYPTO_SHASH_MD4,
-       CRYPTO_SHASH_MD5,
        CRYPTO_SHASH_MAX,
 };
 
@@ -43,8 +41,6 @@ struct ksmbd_crypto_ctx {
 #define CRYPTO_CMACAES(c)      ((c)->desc[CRYPTO_SHASH_CMACAES])
 #define CRYPTO_SHA256(c)       ((c)->desc[CRYPTO_SHASH_SHA256])
 #define CRYPTO_SHA512(c)       ((c)->desc[CRYPTO_SHASH_SHA512])
-#define CRYPTO_MD4(c)          ((c)->desc[CRYPTO_SHASH_MD4])
-#define CRYPTO_MD5(c)          ((c)->desc[CRYPTO_SHASH_MD5])
 
 #define CRYPTO_HMACMD5_TFM(c)  ((c)->desc[CRYPTO_SHASH_HMACMD5]->tfm)
 #define CRYPTO_HMACSHA256_TFM(c)\
@@ -52,8 +48,6 @@ struct ksmbd_crypto_ctx {
 #define CRYPTO_CMACAES_TFM(c)  ((c)->desc[CRYPTO_SHASH_CMACAES]->tfm)
 #define CRYPTO_SHA256_TFM(c)   ((c)->desc[CRYPTO_SHASH_SHA256]->tfm)
 #define CRYPTO_SHA512_TFM(c)   ((c)->desc[CRYPTO_SHASH_SHA512]->tfm)
-#define CRYPTO_MD4_TFM(c)      ((c)->desc[CRYPTO_SHASH_MD4]->tfm)
-#define CRYPTO_MD5_TFM(c)      ((c)->desc[CRYPTO_SHASH_MD5]->tfm)
 
 #define CRYPTO_GCM(c)          ((c)->ccmaes[CRYPTO_AEAD_AES_GCM])
 #define CRYPTO_CCM(c)          ((c)->ccmaes[CRYPTO_AEAD_AES_CCM])
@@ -64,8 +58,6 @@ struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacsha256(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void);
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void);
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void);
 void ksmbd_crypto_destroy(void);
index 49a5a3afa118162e70c49fe4e3048483d0c97d23..5b8f3e0ebdb36aef86f79c1ca475e646fa300f87 100644 (file)
@@ -12,7 +12,7 @@
 #include "unicode.h"
 #include "vfs_cache.h"
 
-#define KSMBD_VERSION  "3.1.9"
+#define KSMBD_VERSION  "3.4.2"
 
 extern int ksmbd_debug_types;
 
index 0b307ca28a19d99ef8371bb71cd200495f3a4c96..60e7ac62c9172f5cac831b677f184f045a4a0245 100644 (file)
@@ -158,25 +158,18 @@ out:
  * Return : windows path string or error
  */
 
-char *convert_to_nt_pathname(char *filename, char *sharepath)
+char *convert_to_nt_pathname(char *filename)
 {
        char *ab_pathname;
-       int len, name_len;
 
-       name_len = strlen(filename);
-       ab_pathname = kmalloc(name_len, GFP_KERNEL);
+       if (strlen(filename) == 0)
+               filename = "\\";
+
+       ab_pathname = kstrdup(filename, GFP_KERNEL);
        if (!ab_pathname)
                return NULL;
 
-       ab_pathname[0] = '\\';
-       ab_pathname[1] = '\0';
-
-       len = strlen(sharepath);
-       if (!strncmp(filename, sharepath, len) && name_len != len) {
-               strscpy(ab_pathname, &filename[len], name_len);
-               ksmbd_conv_path_to_windows(ab_pathname);
-       }
-
+       ksmbd_conv_path_to_windows(ab_pathname);
        return ab_pathname;
 }
 
@@ -240,7 +233,7 @@ char *ksmbd_extract_sharename(char *treename)
  *
  * Return:     converted name on success, otherwise NULL
  */
-char *convert_to_unix_name(struct ksmbd_share_config *share, char *name)
+char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name)
 {
        int no_slash = 0, name_len, path_len;
        char *new_name;
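
convert_to_nt_pathname() above no longer prepends the share path: it duplicates the client-relative name (an empty name becomes "\") and flips the separators to backslashes. A userspace sketch of the equivalent conversion; the helper name is hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate a share-relative name and convert it to Windows separators. */
static char *to_nt_pathname(const char *filename)
{
        char *out, *p;

        if (*filename == '\0')
                filename = "\\";
        out = strdup(filename);
        if (!out)
                return NULL;
        for (p = out; *p; p++)
                if (*p == '/')
                        *p = '\\';
        return out;
}

int main(void)
{
        char *s = to_nt_pathname("dir/sub/file.txt");

        printf("%s\n", s ? s : "(alloc failed)");   /* dir\sub\file.txt */
        free(s);
        return 0;
}
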
index af8717d4d85b32d725a6b7496be397afe32d2a7c..253366bd0951aa987d91c69143acc9a5d34efaa3 100644 (file)
@@ -14,13 +14,13 @@ struct ksmbd_file;
 int match_pattern(const char *str, size_t len, const char *pattern);
 int ksmbd_validate_filename(char *filename);
 int parse_stream_name(char *filename, char **stream_name, int *s_type);
-char *convert_to_nt_pathname(char *filename, char *sharepath);
+char *convert_to_nt_pathname(char *filename);
 int get_nlink(struct kstat *st);
 void ksmbd_conv_path_to_unix(char *path);
 void ksmbd_strip_last_slash(char *path);
 void ksmbd_conv_path_to_windows(char *path);
 char *ksmbd_extract_sharename(char *treename);
-char *convert_to_unix_name(struct ksmbd_share_config *share, char *name);
+char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name);
 
 #define KSMBD_DIR_INFO_ALIGNMENT       8
 struct ksmbd_dir_info;
index 16b6236d1bd20a557874e54a31304bc9c49d4de6..f9dae6ef21150ae9f26129001bb3c90e35195afc 100644 (file)
@@ -1451,26 +1451,47 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
  */
 struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
 {
-       char *data_offset;
        struct create_context *cc;
        unsigned int next = 0;
        char *name;
        struct smb2_create_req *req = (struct smb2_create_req *)open_req;
+       unsigned int remain_len, name_off, name_len, value_off, value_len,
+                    cc_len;
 
-       data_offset = (char *)req + 4 + le32_to_cpu(req->CreateContextsOffset);
-       cc = (struct create_context *)data_offset;
+       /*
+        * CreateContextsOffset and CreateContextsLength are guaranteed to
+        * be valid because of ksmbd_smb2_check_message().
+        */
+       cc = (struct create_context *)((char *)req + 4 +
+                                      le32_to_cpu(req->CreateContextsOffset));
+       remain_len = le32_to_cpu(req->CreateContextsLength);
        do {
-               int val;
-
                cc = (struct create_context *)((char *)cc + next);
-               name = le16_to_cpu(cc->NameOffset) + (char *)cc;
-               val = le16_to_cpu(cc->NameLength);
-               if (val < 4)
+               if (remain_len < offsetof(struct create_context, Buffer))
                        return ERR_PTR(-EINVAL);
 
-               if (memcmp(name, tag, val) == 0)
-                       return cc;
                next = le32_to_cpu(cc->Next);
+               name_off = le16_to_cpu(cc->NameOffset);
+               name_len = le16_to_cpu(cc->NameLength);
+               value_off = le16_to_cpu(cc->DataOffset);
+               value_len = le32_to_cpu(cc->DataLength);
+               cc_len = next ? next : remain_len;
+
+               if ((next & 0x7) != 0 ||
+                   next > remain_len ||
+                   name_off != offsetof(struct create_context, Buffer) ||
+                   name_len < 4 ||
+                   name_off + name_len > cc_len ||
+                   (value_off & 0x7) != 0 ||
+                   (value_off && (value_off < name_off + name_len)) ||
+                   ((u64)value_off + value_len > cc_len))
+                       return ERR_PTR(-EINVAL);
+
+               name = (char *)cc + name_off;
+               if (memcmp(name, tag, name_len) == 0)
+                       return cc;
+
+               remain_len -= next;
        } while (next != 0);
 
        return NULL;
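
The reworked smb2_find_context_vals() above validates every chained create context before using it: Next and DataOffset must be 8-byte aligned, the tag name must start right after the fixed header and be at least four bytes, and both name and value must stay inside the current context. A standalone sketch of the same walk over a hypothetical on-wire layout (not the kernel struct create_context itself):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical little-endian layout of one chained create context:
 * 16 fixed bytes, then the tag name and the value. */
struct cc_hdr {
        uint32_t next;       /* offset of the next context, 0 for the last one */
        uint16_t name_off;   /* offset of the tag name, from the context start */
        uint16_t name_len;
        uint16_t reserved;
        uint16_t data_off;   /* offset of the value, from the context start */
        uint32_t data_len;
};

/* Validate one context against the bytes remaining in the request. */
static bool cc_valid(const struct cc_hdr *cc, uint32_t remain)
{
        uint32_t cc_len;

        if (remain < sizeof(*cc))
                return false;
        cc_len = cc->next ? cc->next : remain;
        if ((cc->next & 0x7) || cc->next > remain)
                return false;
        if (cc->name_off != sizeof(*cc) || cc->name_len < 4 ||
            (uint32_t)cc->name_off + cc->name_len > cc_len)
                return false;
        if ((cc->data_off & 0x7) ||
            (cc->data_off && cc->data_off < (uint32_t)cc->name_off + cc->name_len) ||
            (uint64_t)cc->data_off + cc->data_len > cc_len)
                return false;
        return true;
}

int main(void)
{
        struct cc_hdr cc = { .next = 0, .name_off = 16, .name_len = 4,
                             .data_off = 24, .data_len = 8 };

        printf("%s\n", cc_valid(&cc, 32) ? "valid" : "invalid");
        return 0;
}
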
index e6a9f6aa47ebbe1ca5eef91b4157e63c9d01b64f..2a2b2135bfdede572220ab0a84d16e9427989863 100644 (file)
@@ -584,6 +584,9 @@ static int __init ksmbd_server_init(void)
        ret = ksmbd_workqueue_init();
        if (ret)
                goto err_crypto_destroy;
+
+       pr_warn_once("The ksmbd server is experimental, use at your own risk.\n");
+
        return 0;
 
 err_crypto_destroy:
index 9aa46bb3e10d45d700af6eeb1a80106637883e82..9edd9c161b275e194f45045de6bef2f442e3820f 100644 (file)
@@ -80,18 +80,21 @@ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
 };
 
 /*
- * Returns the pointer to the beginning of the data area. Length of the data
- * area and the offset to it (from the beginning of the smb are also returned.
+ * Set the length of the data area and the offset to it in the output
+ * arguments. If either is invalid, return an error.
  */
-static char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
+static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+                                 struct smb2_hdr *hdr)
 {
+       int ret = 0;
+
        *off = 0;
        *len = 0;
 
        /* error requests do not have a data area */
        if (hdr->Status && hdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
            (((struct smb2_err_rsp *)hdr)->StructureSize) == SMB2_ERROR_STRUCTURE_SIZE2_LE)
-               return NULL;
+               return ret;
 
        /*
         * Following commands have data areas so we have to get the location
@@ -165,69 +168,60 @@ static char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
        case SMB2_IOCTL:
                *off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset);
                *len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount);
-
                break;
        default:
                ksmbd_debug(SMB, "no length check for command\n");
                break;
        }
 
-       /*
-        * Invalid length or offset probably means data area is invalid, but
-        * we have little choice but to ignore the data area in this case.
-        */
        if (*off > 4096) {
-               ksmbd_debug(SMB, "offset %d too large, data area ignored\n",
-                           *off);
-               *len = 0;
-               *off = 0;
-       } else if (*off < 0) {
-               ksmbd_debug(SMB,
-                           "negative offset %d to data invalid ignore data area\n",
-                           *off);
-               *off = 0;
-               *len = 0;
-       } else if (*len < 0) {
-               ksmbd_debug(SMB,
-                           "negative data length %d invalid, data area ignored\n",
-                           *len);
-               *len = 0;
-       } else if (*len > 128 * 1024) {
-               ksmbd_debug(SMB, "data area larger than 128K: %d\n", *len);
-               *len = 0;
+               ksmbd_debug(SMB, "offset %d too large\n", *off);
+               ret = -EINVAL;
+       } else if ((u64)*off + *len > MAX_STREAM_PROT_LEN) {
+               ksmbd_debug(SMB, "Request is larger than maximum stream protocol length(%u): %llu\n",
+                           MAX_STREAM_PROT_LEN, (u64)*off + *len);
+               ret = -EINVAL;
        }
 
-       /* return pointer to beginning of data area, ie offset from SMB start */
-       if ((*off != 0) && (*len != 0))
-               return (char *)hdr + *off;
-       else
-               return NULL;
+       return ret;
 }
 
 /*
  * Calculate the size of the SMB message based on the fixed header
  * portion, the number of word parameters and the data portion of the message.
  */
-static unsigned int smb2_calc_size(void *buf)
+static int smb2_calc_size(void *buf, unsigned int *len)
 {
        struct smb2_pdu *pdu = (struct smb2_pdu *)buf;
        struct smb2_hdr *hdr = &pdu->hdr;
-       int offset; /* the offset from the beginning of SMB to data area */
-       int data_length; /* the length of the variable length data area */
+       unsigned int offset; /* the offset from the beginning of SMB to data area */
+       unsigned int data_length; /* the length of the variable length data area */
+       int ret;
+
        /* Structure Size has already been checked to make sure it is 64 */
-       int len = le16_to_cpu(hdr->StructureSize);
+       *len = le16_to_cpu(hdr->StructureSize);
 
        /*
         * StructureSize2, ie length of fixed parameter area has already
         * been checked to make sure it is the correct length.
         */
-       len += le16_to_cpu(pdu->StructureSize2);
+       *len += le16_to_cpu(pdu->StructureSize2);
+       /*
+        * StructureSize2 of an smb2_lock PDU is set to 48, i.e. the size
+        * of an smb2 lock request carrying a single smb2_lock_element,
+        * regardless of the number of locks. Subtract one smb2_lock_element
+        * so the buffer size check stays correct.
+        */
+       if (hdr->Command == SMB2_LOCK)
+               *len -= sizeof(struct smb2_lock_element);
 
        if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false)
                goto calc_size_exit;
 
-       smb2_get_data_area_len(&offset, &data_length, hdr);
-       ksmbd_debug(SMB, "SMB2 data length %d offset %d\n", data_length,
+       ret = smb2_get_data_area_len(&offset, &data_length, hdr);
+       if (ret)
+               return ret;
+       ksmbd_debug(SMB, "SMB2 data length %u offset %u\n", data_length,
                    offset);
 
        if (data_length > 0) {
@@ -237,16 +231,19 @@ static unsigned int smb2_calc_size(void *buf)
                 * for some commands, typically those with odd StructureSize,
                 * so we must add one to the calculation.
                 */
-               if (offset + 1 < len)
+               if (offset + 1 < *len) {
                        ksmbd_debug(SMB,
-                                   "data area offset %d overlaps SMB2 header %d\n",
-                                   offset + 1, len);
-               else
-                       len = offset + data_length;
+                                   "data area offset %d overlaps SMB2 header %u\n",
+                                   offset + 1, *len);
+                       return -EINVAL;
+               }
+
+               *len = offset + data_length;
        }
+
 calc_size_exit:
-       ksmbd_debug(SMB, "SMB2 len %d\n", len);
-       return len;
+       ksmbd_debug(SMB, "SMB2 len %u\n", *len);
+       return 0;
 }
 
 static inline int smb2_query_info_req_len(struct smb2_query_info_req *h)
@@ -391,9 +388,11 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
                return 1;
        }
 
-       clc_len = smb2_calc_size(hdr);
+       if (smb2_calc_size(hdr, &clc_len))
+               return 1;
+
        if (len != clc_len) {
-               /* server can return one byte more due to implied bcc[0] */
+               /* client can return one byte more due to implied bcc[0] */
                if (clc_len == len + 1)
                        return 0;
 
@@ -418,9 +417,6 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
                        return 0;
                }
 
-               if (command == SMB2_LOCK_HE && len == 88)
-                       return 0;
-
                ksmbd_debug(SMB,
                            "cli req too short, len %d not %d. cmd:%d mid:%llu\n",
                            len, clc_len, command,
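
With the changes above, smb2_calc_size() reports malformed PDUs instead of silently clamping them: the expected length is the 64-byte header plus StructureSize2 (minus one smb2_lock_element for SMB2_LOCK) plus the data area, and a data offset that would land inside the fixed part is rejected. A sketch of that arithmetic; the sample values in main() are assumptions for illustration (an SMB2 WRITE-like request with StructureSize2 = 49):

#include <stdio.h>

/* Expected SMB2 PDU length from the 64-byte header, the command's
 * StructureSize2, and a (data_off, data_len) pair read from the fixed part. */
static int calc_expected_len(unsigned int structure_size2,
                             unsigned int data_off, unsigned int data_len,
                             unsigned int *out_len)
{
        unsigned int len = 64 + structure_size2;

        if (data_len) {
                /* "+ 1" accounts for the implied pad byte on odd sizes */
                if (data_off + 1 < len)
                        return -1;      /* data area overlaps the fixed part */
                len = data_off + data_len;
        }
        *out_len = len;
        return 0;
}

int main(void)
{
        unsigned int len;

        if (!calc_expected_len(49, 112, 4096, &len))
                printf("expected PDU length: %u\n", len);   /* 4208 */
        return 0;
}
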
index 197473871aa47b6718b75dce17933a2bbb23709e..b06456eb587b41f2e203cd67f666bc0f1c3afe83 100644 (file)
@@ -187,11 +187,6 @@ static struct smb_version_cmds smb2_0_server_cmds[NUMBER_OF_SMB2_COMMANDS] = {
        [SMB2_CHANGE_NOTIFY_HE] =       { .proc = smb2_notify},
 };
 
-int init_smb2_0_server(struct ksmbd_conn *conn)
-{
-       return -EOPNOTSUPP;
-}
-
 /**
  * init_smb2_1_server() - initialize a smb server connection with smb2.1
  *                     command dispatcher
index c86164dc70bb776732f6781b45bd50b691898787..005aa93a49d692d2ffe173970b9ba4ff8b728c7f 100644 (file)
@@ -236,9 +236,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
 
        if (conn->need_neg == false)
                return -EINVAL;
-       if (!(conn->dialect >= SMB20_PROT_ID &&
-             conn->dialect <= SMB311_PROT_ID))
-               return -EINVAL;
 
        rsp_hdr = work->response_buf;
 
@@ -433,7 +430,7 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
                work->compound_pfid = KSMBD_NO_FID;
        }
        memset((char *)rsp_hdr + 4, 0, sizeof(struct smb2_hdr) + 2);
-       rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;
+       rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
        rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
        rsp_hdr->Command = rcv_hdr->Command;
 
@@ -459,13 +456,22 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
 bool is_chained_smb2_message(struct ksmbd_work *work)
 {
        struct smb2_hdr *hdr = work->request_buf;
-       unsigned int len;
+       unsigned int len, next_cmd;
 
        if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
                return false;
 
        hdr = ksmbd_req_buf_next(work);
-       if (le32_to_cpu(hdr->NextCommand) > 0) {
+       next_cmd = le32_to_cpu(hdr->NextCommand);
+       if (next_cmd > 0) {
+               if ((u64)work->next_smb2_rcv_hdr_off + next_cmd +
+                       __SMB2_HEADER_STRUCTURE_SIZE >
+                   get_rfc1002_len(work->request_buf)) {
+                       pr_err("next command(%u) offset exceeds smb msg size\n",
+                              next_cmd);
+                       return false;
+               }
+
                ksmbd_debug(SMB, "got SMB2 chained command\n");
                init_chained_smb2_rsp(work);
                return true;
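
The added check above makes sure a compound request's NextCommand offset still leaves room for a full SMB2 header inside the length advertised by the RFC1002 header. A minimal sketch of the same test, assuming a 64-byte header:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SMB2_HDR_SIZE 64

/* Does the next chained command, next_cmd bytes past the current header,
 * still leave room for a full SMB2 header inside the received message? */
static bool next_command_fits(uint64_t cur_hdr_off, uint32_t next_cmd,
                              uint32_t msg_len)
{
        return cur_hdr_off + next_cmd + SMB2_HDR_SIZE <= msg_len;
}

int main(void)
{
        printf("%d %d\n", next_command_fits(0, 128, 4096),   /* 1: fits */
               next_command_fits(0, 4090, 4096));            /* 0: would overrun */
        return 0;
}
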
@@ -634,7 +640,7 @@ static char *
 smb2_get_name(struct ksmbd_share_config *share, const char *src,
              const int maxlen, struct nls_table *local_nls)
 {
-       char *name, *unixname;
+       char *name;
 
        name = smb_strndup_from_utf16(src, maxlen, 1, local_nls);
        if (IS_ERR(name)) {
@@ -642,19 +648,9 @@ smb2_get_name(struct ksmbd_share_config *share, const char *src,
                return name;
        }
 
-       /* change it to absolute unix name */
        ksmbd_conv_path_to_unix(name);
        ksmbd_strip_last_slash(name);
-
-       unixname = convert_to_unix_name(share, name);
-       kfree(name);
-       if (!unixname) {
-               pr_err("can not convert absolute name\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       ksmbd_debug(SMB, "absolute name = %s\n", unixname);
-       return unixname;
+       return name;
 }
 
 int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
@@ -1068,6 +1064,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
        struct smb2_negotiate_req *req = work->request_buf;
        struct smb2_negotiate_rsp *rsp = work->response_buf;
        int rc = 0;
+       unsigned int smb2_buf_len, smb2_neg_size;
        __le32 status;
 
        ksmbd_debug(SMB, "Received negotiate request\n");
@@ -1085,6 +1082,44 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
                goto err_out;
        }
 
+       smb2_buf_len = get_rfc1002_len(work->request_buf);
+       smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects) - 4;
+       if (smb2_neg_size > smb2_buf_len) {
+               rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+               rc = -EINVAL;
+               goto err_out;
+       }
+
+       if (conn->dialect == SMB311_PROT_ID) {
+               unsigned int nego_ctxt_off = le32_to_cpu(req->NegotiateContextOffset);
+
+               if (smb2_buf_len < nego_ctxt_off) {
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+                       rc = -EINVAL;
+                       goto err_out;
+               }
+
+               if (smb2_neg_size > nego_ctxt_off) {
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+                       rc = -EINVAL;
+                       goto err_out;
+               }
+
+               if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+                   nego_ctxt_off) {
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+                       rc = -EINVAL;
+                       goto err_out;
+               }
+       } else {
+               if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+                   smb2_buf_len) {
+                       rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+                       rc = -EINVAL;
+                       goto err_out;
+               }
+       }
+
        conn->cli_cap = le32_to_cpu(req->Capabilities);
        switch (conn->dialect) {
        case SMB311_PROT_ID:
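
The new negotiate checks above verify that the fixed request body plus DialectCount 16-bit dialect codes fit below NegotiateContextOffset for SMB 3.1.1, or inside the whole message otherwise, before the dialect array is trusted. A sketch of the bound; the sizes used in main() are assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* neg_fixed: size of the fixed NEGOTIATE body up to Dialects[];
 * limit: NegotiateContextOffset for SMB 3.1.1, else the message length. */
static bool dialects_fit(uint32_t neg_fixed, uint16_t dialect_count,
                         uint32_t limit)
{
        return neg_fixed + (uint32_t)dialect_count * sizeof(uint16_t) <= limit;
}

int main(void)
{
        /* e.g. a 36-byte fixed body and 4 dialect codes need 44 bytes */
        printf("%d %d\n", dialects_fit(36, 4, 64), dialects_fit(36, 4, 40));  /* 1 0 */
        return 0;
}
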
@@ -1128,13 +1163,6 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
        case SMB21_PROT_ID:
                init_smb2_1_server(conn);
                break;
-       case SMB20_PROT_ID:
-               rc = init_smb2_0_server(conn);
-               if (rc) {
-                       rsp->hdr.Status = STATUS_NOT_SUPPORTED;
-                       goto err_out;
-               }
-               break;
        case SMB2X_PROT_ID:
        case BAD_PROT_ID:
        default:
@@ -1153,11 +1181,9 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
        rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size);
        rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size);
 
-       if (conn->dialect > SMB20_PROT_ID) {
-               memcpy(conn->ClientGUID, req->ClientGUID,
-                      SMB2_CLIENT_GUID_SIZE);
-               conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
-       }
+       memcpy(conn->ClientGUID, req->ClientGUID,
+                       SMB2_CLIENT_GUID_SIZE);
+       conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
 
        rsp->StructureSize = cpu_to_le16(65);
        rsp->DialectRevision = cpu_to_le16(conn->dialect);
@@ -1499,11 +1525,9 @@ binding_session:
                }
        }
 
-       if (conn->dialect > SMB20_PROT_ID) {
-               if (!ksmbd_conn_lookup_dialect(conn)) {
-                       pr_err("fail to verify the dialect\n");
-                       return -ENOENT;
-               }
+       if (!ksmbd_conn_lookup_dialect(conn)) {
+               pr_err("fail to verify the dialect\n");
+               return -ENOENT;
        }
        return 0;
 }
@@ -1585,11 +1609,9 @@ static int krb5_authenticate(struct ksmbd_work *work)
                }
        }
 
-       if (conn->dialect > SMB20_PROT_ID) {
-               if (!ksmbd_conn_lookup_dialect(conn)) {
-                       pr_err("fail to verify the dialect\n");
-                       return -ENOENT;
-               }
+       if (!ksmbd_conn_lookup_dialect(conn)) {
+               pr_err("fail to verify the dialect\n");
+               return -ENOENT;
        }
        return 0;
 }
@@ -2103,16 +2125,22 @@ out:
  * smb2_set_ea() - handler for setting extended attributes using set
  *             info command
  * @eabuf:     set info command buffer
+ * @buf_len:   set info command buffer length
  * @path:      dentry path for get ea
  *
  * Return:     0 on success, otherwise error
  */
-static int smb2_set_ea(struct smb2_ea_info *eabuf, struct path *path)
+static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+                      struct path *path)
 {
        struct user_namespace *user_ns = mnt_user_ns(path->mnt);
        char *attr_name = NULL, *value;
        int rc = 0;
-       int next = 0;
+       unsigned int next = 0;
+
+       if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
+                       le16_to_cpu(eabuf->EaValueLength))
+               return -EINVAL;
 
        attr_name = kmalloc(XATTR_NAME_MAX + 1, GFP_KERNEL);
        if (!attr_name)
@@ -2177,7 +2205,13 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, struct path *path)
 
 next:
                next = le32_to_cpu(eabuf->NextEntryOffset);
+               if (next == 0 || buf_len < next)
+                       break;
+               buf_len -= next;
                eabuf = (struct smb2_ea_info *)((char *)eabuf + next);
+               if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength))
+                       break;
+
        } while (next != 0);
 
        kfree(attr_name);
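
smb2_set_ea() above now carries the remaining buffer length along the NextEntryOffset chain and stops as soon as an entry's header, name or value would run past it. A userspace sketch of the same walk over a simplified entry layout (the real smb2_ea_info also carries a flags field):

#include <stdint.h>
#include <stdio.h>

/* Simplified fixed part of one EA entry; name and value bytes follow. */
struct ea_entry {
        uint32_t next_entry_offset;
        uint8_t  name_len;
        uint16_t value_len;
} __attribute__((packed));

/* Count how many chained entries fit entirely inside buf_len bytes. */
static unsigned int count_valid_eas(const uint8_t *buf, uint32_t buf_len)
{
        unsigned int n = 0;

        for (;;) {
                const struct ea_entry *ea = (const struct ea_entry *)buf;
                uint32_t next;

                if (buf_len < sizeof(*ea) ||
                    buf_len < sizeof(*ea) + ea->name_len + ea->value_len)
                        break;
                n++;
                next = ea->next_entry_offset;
                if (next == 0 || buf_len < next)
                        break;
                buf += next;
                buf_len -= next;
        }
        return n;
}

int main(void)
{
        /* one minimal entry: header only, zero-length name and value */
        uint8_t buf[sizeof(struct ea_entry)] = { 0 };

        printf("%u\n", count_valid_eas(buf, sizeof(buf)));   /* 1 */
        return 0;
}
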
@@ -2348,7 +2382,7 @@ static int smb2_creat(struct ksmbd_work *work, struct path *path, char *name,
                        return rc;
        }
 
-       rc = ksmbd_vfs_kern_path(name, 0, path, 0);
+       rc = ksmbd_vfs_kern_path(work, name, 0, path, 0);
        if (rc) {
                pr_err("cannot get linux path (%s), err = %d\n",
                       name, rc);
@@ -2377,6 +2411,10 @@ static int smb2_create_sd_buffer(struct ksmbd_work *work,
        ksmbd_debug(SMB,
                    "Set ACLs using SMB2_CREATE_SD_BUFFER context\n");
        sd_buf = (struct create_sd_buf_req *)context;
+       if (le16_to_cpu(context->DataOffset) +
+           le32_to_cpu(context->DataLength) <
+           sizeof(struct create_sd_buf_req))
+               return -EINVAL;
        return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
                            le32_to_cpu(sd_buf->ccontext.DataLength), true);
 }
@@ -2423,7 +2461,7 @@ int smb2_open(struct ksmbd_work *work)
        struct oplock_info *opinfo;
        __le32 *next_ptr = NULL;
        int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0;
-       int rc = 0, len = 0;
+       int rc = 0;
        int contxt_cnt = 0, query_disk_id = 0;
        int maximal_access_ctxt = 0, posix_ctxt = 0;
        int s_type = 0;
@@ -2495,17 +2533,11 @@ int smb2_open(struct ksmbd_work *work)
                        goto err_out1;
                }
        } else {
-               len = strlen(share->path);
-               ksmbd_debug(SMB, "share path len %d\n", len);
-               name = kmalloc(len + 1, GFP_KERNEL);
+               name = kstrdup("", GFP_KERNEL);
                if (!name) {
-                       rsp->hdr.Status = STATUS_NO_MEMORY;
                        rc = -ENOMEM;
                        goto err_out1;
                }
-
-               memcpy(name, share->path, len);
-               *(name + len) = '\0';
        }
 
        req_op_level = req->RequestedOplockLevel;
@@ -2577,6 +2609,12 @@ int smb2_open(struct ksmbd_work *work)
                        goto err_out1;
                } else if (context) {
                        ea_buf = (struct create_ea_buf_req *)context;
+                       if (le16_to_cpu(context->DataOffset) +
+                           le32_to_cpu(context->DataLength) <
+                           sizeof(struct create_ea_buf_req)) {
+                               rc = -EINVAL;
+                               goto err_out1;
+                       }
                        if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) {
                                rsp->hdr.Status = STATUS_ACCESS_DENIED;
                                rc = -EACCES;
@@ -2615,6 +2653,12 @@ int smb2_open(struct ksmbd_work *work)
                        } else if (context) {
                                struct create_posix *posix =
                                        (struct create_posix *)context;
+                               if (le16_to_cpu(context->DataOffset) +
+                                   le32_to_cpu(context->DataLength) <
+                                   sizeof(struct create_posix)) {
+                                       rc = -EINVAL;
+                                       goto err_out1;
+                               }
                                ksmbd_debug(SMB, "get posix context\n");
 
                                posix_mode = le32_to_cpu(posix->Mode);
@@ -2628,13 +2672,9 @@ int smb2_open(struct ksmbd_work *work)
                goto err_out1;
        }
 
-       if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
-               /*
-                * On delete request, instead of following up, need to
-                * look the current entity
-                */
-               rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
-               if (!rc) {
+       rc = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, 1);
+       if (!rc) {
+               if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
                        /*
                         * If file exists with under flags, return access
                         * denied error.
@@ -2653,34 +2693,16 @@ int smb2_open(struct ksmbd_work *work)
                                path_put(&path);
                                goto err_out;
                        }
-               }
-       } else {
-               if (test_share_config_flag(work->tcon->share_conf,
-                                          KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS)) {
-                       /*
-                        * Use LOOKUP_FOLLOW to follow the path of
-                        * symlink in path buildup
-                        */
-                       rc = ksmbd_vfs_kern_path(name, LOOKUP_FOLLOW, &path, 1);
-                       if (rc) { /* Case for broken link ?*/
-                               rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
-                       }
-               } else {
-                       rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
-                       if (!rc && d_is_symlink(path.dentry)) {
-                               rc = -EACCES;
-                               path_put(&path);
-                               goto err_out;
-                       }
+               } else if (d_is_symlink(path.dentry)) {
+                       rc = -EACCES;
+                       path_put(&path);
+                       goto err_out;
                }
        }
 
        if (rc) {
-               if (rc == -EACCES) {
-                       ksmbd_debug(SMB,
-                                   "User does not have right permission\n");
+               if (rc != -ENOENT)
                        goto err_out;
-               }
                ksmbd_debug(SMB, "can not get linux path for %s, rc = %d\n",
                            name, rc);
                rc = 0;
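
smb2_open() now resolves the client path with LOOKUP_NO_SYMLINKS and refuses symlinks outright, replacing the per-share KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS special cases. The closest userspace analogue is openat2(2) with RESOLVE_NO_SYMLINKS (Linux 5.6+); the sketch below is that analogue, not the kernel path walk itself:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/openat2.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Userspace analogue of a LOOKUP_NO_SYMLINKS lookup: openat2() with
 * RESOLVE_NO_SYMLINKS fails with ELOOP if any component is a symlink. */
int main(int argc, char **argv)
{
        struct open_how how = {
                .flags   = O_PATH,
                .resolve = RESOLVE_NO_SYMLINKS,
        };
        const char *path = argc > 1 ? argv[1] : "/tmp";
        long fd = syscall(SYS_openat2, AT_FDCWD, path, &how, sizeof(how));

        if (fd < 0) {
                perror("openat2");
                return 1;
        }
        printf("opened %s without following symlinks (fd %ld)\n", path, fd);
        close((int)fd);
        return 0;
}
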
@@ -2786,7 +2808,15 @@ int smb2_open(struct ksmbd_work *work)
                created = true;
                user_ns = mnt_user_ns(path.mnt);
                if (ea_buf) {
-                       rc = smb2_set_ea(&ea_buf->ea, &path);
+                       if (le32_to_cpu(ea_buf->ccontext.DataLength) <
+                           sizeof(struct smb2_ea_info)) {
+                               rc = -EINVAL;
+                               goto err_out;
+                       }
+
+                       rc = smb2_set_ea(&ea_buf->ea,
+                                        le32_to_cpu(ea_buf->ccontext.DataLength),
+                                        &path);
                        if (rc == -EOPNOTSUPP)
                                rc = 0;
                        else if (rc)
@@ -3019,9 +3049,16 @@ int smb2_open(struct ksmbd_work *work)
                        rc = PTR_ERR(az_req);
                        goto err_out;
                } else if (az_req) {
-                       loff_t alloc_size = le64_to_cpu(az_req->AllocationSize);
+                       loff_t alloc_size;
                        int err;
 
+                       if (le16_to_cpu(az_req->ccontext.DataOffset) +
+                           le32_to_cpu(az_req->ccontext.DataLength) <
+                           sizeof(struct create_alloc_size_req)) {
+                               rc = -EINVAL;
+                               goto err_out;
+                       }
+                       alloc_size = le64_to_cpu(az_req->AllocationSize);
                        ksmbd_debug(SMB,
                                    "request smb2 create allocate size : %llu\n",
                                    alloc_size);
@@ -3176,7 +3213,7 @@ err_out1:
                        rsp->hdr.Status = STATUS_INVALID_PARAMETER;
                else if (rc == -EOPNOTSUPP)
                        rsp->hdr.Status = STATUS_NOT_SUPPORTED;
-               else if (rc == -EACCES || rc == -ESTALE)
+               else if (rc == -EACCES || rc == -ESTALE || rc == -EXDEV)
                        rsp->hdr.Status = STATUS_ACCESS_DENIED;
                else if (rc == -ENOENT)
                        rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID;
@@ -4041,6 +4078,10 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
        path = &fp->filp->f_path;
        /* single EA entry is requested with given user.* name */
        if (req->InputBufferLength) {
+               if (le32_to_cpu(req->InputBufferLength) <
+                   sizeof(struct smb2_ea_info_req))
+                       return -EINVAL;
+
                ea_req = (struct smb2_ea_info_req *)req->Buffer;
        } else {
                /* need to send all EAs, if no specific EA is requested*/
@@ -4186,7 +4227,7 @@ static void get_file_access_info(struct smb2_query_info_rsp *rsp,
 static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
                               struct ksmbd_file *fp, void *rsp_org)
 {
-       struct smb2_file_all_info *basic_info;
+       struct smb2_file_basic_info *basic_info;
        struct kstat stat;
        u64 time;
 
@@ -4196,7 +4237,7 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
                return -EACCES;
        }
 
-       basic_info = (struct smb2_file_all_info *)rsp->Buffer;
+       basic_info = (struct smb2_file_basic_info *)rsp->Buffer;
        generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
                         &stat);
        basic_info->CreationTime = cpu_to_le64(fp->create_time);
@@ -4209,9 +4250,8 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
        basic_info->Attributes = fp->f_ci->m_fattr;
        basic_info->Pad1 = 0;
        rsp->OutputBufferLength =
-               cpu_to_le32(offsetof(struct smb2_file_all_info, AllocationSize));
-       inc_rfc1001_len(rsp_org, offsetof(struct smb2_file_all_info,
-                                         AllocationSize));
+               cpu_to_le32(sizeof(struct smb2_file_basic_info));
+       inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_basic_info));
        return 0;
 }
 
@@ -4288,8 +4328,7 @@ static int get_file_all_info(struct ksmbd_work *work,
                return -EACCES;
        }
 
-       filename = convert_to_nt_pathname(fp->filename,
-                                         work->tcon->share_conf->path);
+       filename = convert_to_nt_pathname(fp->filename);
        if (!filename)
                return -ENOMEM;
 
@@ -4420,17 +4459,15 @@ static void get_file_stream_info(struct ksmbd_work *work,
                file_info->NextEntryOffset = cpu_to_le32(next);
        }
 
-       if (nbytes) {
+       if (!S_ISDIR(stat.mode)) {
                file_info = (struct smb2_file_stream_info *)
                        &rsp->Buffer[nbytes];
                streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
                                              "::$DATA", 7, conn->local_nls, 0);
                streamlen *= 2;
                file_info->StreamNameLength = cpu_to_le32(streamlen);
-               file_info->StreamSize = S_ISDIR(stat.mode) ? 0 :
-                       cpu_to_le64(stat.size);
-               file_info->StreamAllocationSize = S_ISDIR(stat.mode) ? 0 :
-                       cpu_to_le64(stat.size);
+               file_info->StreamSize = 0;
+               file_info->StreamAllocationSize = 0;
                nbytes += sizeof(struct smb2_file_stream_info) + streamlen;
        }
 
@@ -4745,12 +4782,8 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
        struct path path;
        int rc = 0, len;
        int fs_infoclass_size = 0;
-       int lookup_flags = 0;
-
-       if (test_share_config_flag(share, KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
-               lookup_flags = LOOKUP_FOLLOW;
 
-       rc = ksmbd_vfs_kern_path(share->path, lookup_flags, &path, 0);
+       rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
        if (rc) {
                pr_err("cannot create vfs path\n");
                return -EIO;
@@ -5299,7 +5332,7 @@ static int smb2_rename(struct ksmbd_work *work,
                        goto out;
 
                len = strlen(new_name);
-               if (new_name[len - 1] != '/') {
+               if (len > 0 && new_name[len - 1] != '/') {
                        pr_err("not allow base filename in rename\n");
                        rc = -ESHARE;
                        goto out;
@@ -5327,11 +5360,14 @@ static int smb2_rename(struct ksmbd_work *work,
        }
 
        ksmbd_debug(SMB, "new name %s\n", new_name);
-       rc = ksmbd_vfs_kern_path(new_name, 0, &path, 1);
-       if (rc)
+       rc = ksmbd_vfs_kern_path(work, new_name, LOOKUP_NO_SYMLINKS, &path, 1);
+       if (rc) {
+               if (rc != -ENOENT)
+                       goto out;
                file_present = false;
-       else
+       } else {
                path_put(&path);
+       }
 
        if (ksmbd_share_veto_filename(share, new_name)) {
                rc = -ENOENT;
@@ -5371,7 +5407,7 @@ out:
 static int smb2_create_link(struct ksmbd_work *work,
                            struct ksmbd_share_config *share,
                            struct smb2_file_link_info *file_info,
-                           struct file *filp,
+                           unsigned int buf_len, struct file *filp,
                            struct nls_table *local_nls)
 {
        char *link_name = NULL, *target_name = NULL, *pathname = NULL;
@@ -5379,6 +5415,10 @@ static int smb2_create_link(struct ksmbd_work *work,
        bool file_present = true;
        int rc;
 
+       if (buf_len < (u64)sizeof(struct smb2_file_link_info) +
+                       le32_to_cpu(file_info->FileNameLength))
+               return -EINVAL;
+
        ksmbd_debug(SMB, "setting FILE_LINK_INFORMATION\n");
        pathname = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!pathname)
@@ -5401,11 +5441,14 @@ static int smb2_create_link(struct ksmbd_work *work,
        }
 
        ksmbd_debug(SMB, "target name is %s\n", target_name);
-       rc = ksmbd_vfs_kern_path(link_name, 0, &path, 0);
-       if (rc)
+       rc = ksmbd_vfs_kern_path(work, link_name, LOOKUP_NO_SYMLINKS, &path, 0);
+       if (rc) {
+               if (rc != -ENOENT)
+                       goto out;
                file_present = false;
-       else
+       } else {
                path_put(&path);
+       }
 
        if (file_info->ReplaceIfExists) {
                if (file_present) {
@@ -5435,12 +5478,11 @@ out:
        return rc;
 }
 
-static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
+static int set_file_basic_info(struct ksmbd_file *fp,
+                              struct smb2_file_basic_info *file_info,
                               struct ksmbd_share_config *share)
 {
-       struct smb2_file_all_info *file_info;
        struct iattr attrs;
-       struct timespec64 ctime;
        struct file *filp;
        struct inode *inode;
        struct user_namespace *user_ns;
@@ -5449,7 +5491,6 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
        if (!(fp->daccess & FILE_WRITE_ATTRIBUTES_LE))
                return -EACCES;
 
-       file_info = (struct smb2_file_all_info *)buf;
        attrs.ia_valid = 0;
        filp = fp->filp;
        inode = file_inode(filp);
@@ -5463,13 +5504,11 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
                attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
        }
 
-       if (file_info->ChangeTime) {
+       attrs.ia_valid |= ATTR_CTIME;
+       if (file_info->ChangeTime)
                attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
-               ctime = attrs.ia_ctime;
-               attrs.ia_valid |= ATTR_CTIME;
-       } else {
-               ctime = inode->i_ctime;
-       }
+       else
+               attrs.ia_ctime = inode->i_ctime;
 
        if (file_info->LastWriteTime) {
                attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
@@ -5515,18 +5554,17 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
                        return -EACCES;
 
                inode_lock(inode);
+               inode->i_ctime = attrs.ia_ctime;
+               attrs.ia_valid &= ~ATTR_CTIME;
                rc = notify_change(user_ns, dentry, &attrs, NULL);
-               if (!rc) {
-                       inode->i_ctime = ctime;
-                       mark_inode_dirty(inode);
-               }
                inode_unlock(inode);
        }
        return rc;
 }
 
 static int set_file_allocation_info(struct ksmbd_work *work,
-                                   struct ksmbd_file *fp, char *buf)
+                                   struct ksmbd_file *fp,
+                                   struct smb2_file_alloc_info *file_alloc_info)
 {
        /*
         * TODO : It's working fine only when store dos attributes
@@ -5534,7 +5572,6 @@ static int set_file_allocation_info(struct ksmbd_work *work,
         * properly with any smb.conf option
         */
 
-       struct smb2_file_alloc_info *file_alloc_info;
        loff_t alloc_blks;
        struct inode *inode;
        int rc;
@@ -5542,7 +5579,6 @@ static int set_file_allocation_info(struct ksmbd_work *work,
        if (!(fp->daccess & FILE_WRITE_DATA_LE))
                return -EACCES;
 
-       file_alloc_info = (struct smb2_file_alloc_info *)buf;
        alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;
        inode = file_inode(fp->filp);
 
@@ -5565,7 +5601,7 @@ static int set_file_allocation_info(struct ksmbd_work *work,
                 * inode size is retained by backup inode size.
                 */
                size = i_size_read(inode);
-               rc = ksmbd_vfs_truncate(work, NULL, fp, alloc_blks * 512);
+               rc = ksmbd_vfs_truncate(work, fp, alloc_blks * 512);
                if (rc) {
                        pr_err("truncate failed! filename : %s, err %d\n",
                               fp->filename, rc);
@@ -5578,9 +5614,8 @@ static int set_file_allocation_info(struct ksmbd_work *work,
 }
 
 static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
-                               char *buf)
+                               struct smb2_file_eof_info *file_eof_info)
 {
-       struct smb2_file_eof_info *file_eof_info;
        loff_t newsize;
        struct inode *inode;
        int rc;
@@ -5588,7 +5623,6 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
        if (!(fp->daccess & FILE_WRITE_DATA_LE))
                return -EACCES;
 
-       file_eof_info = (struct smb2_file_eof_info *)buf;
        newsize = le64_to_cpu(file_eof_info->EndOfFile);
        inode = file_inode(fp->filp);
 
@@ -5602,7 +5636,7 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
        if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) {
                ksmbd_debug(SMB, "filename : %s truncated to newsize %lld\n",
                            fp->filename, newsize);
-               rc = ksmbd_vfs_truncate(work, NULL, fp, newsize);
+               rc = ksmbd_vfs_truncate(work, fp, newsize);
                if (rc) {
                        ksmbd_debug(SMB, "truncate failed! filename : %s err %d\n",
                                    fp->filename, rc);
@@ -5615,7 +5649,8 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
 }
 
 static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
-                          char *buf)
+                          struct smb2_file_rename_info *rename_info,
+                          unsigned int buf_len)
 {
        struct user_namespace *user_ns;
        struct ksmbd_file *parent_fp;
@@ -5628,6 +5663,10 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
                return -EACCES;
        }
 
+       if (buf_len < (u64)sizeof(struct smb2_file_rename_info) +
+                       le32_to_cpu(rename_info->FileNameLength))
+               return -EINVAL;
+
        user_ns = file_mnt_user_ns(fp->filp);
        if (ksmbd_stream_fd(fp))
                goto next;
@@ -5650,14 +5689,13 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
                }
        }
 next:
-       return smb2_rename(work, fp, user_ns,
-                          (struct smb2_file_rename_info *)buf,
+       return smb2_rename(work, fp, user_ns, rename_info,
                           work->sess->conn->local_nls);
 }
 
-static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)
+static int set_file_disposition_info(struct ksmbd_file *fp,
+                                    struct smb2_file_disposition_info *file_info)
 {
-       struct smb2_file_disposition_info *file_info;
        struct inode *inode;
 
        if (!(fp->daccess & FILE_DELETE_LE)) {
@@ -5666,7 +5704,6 @@ static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)
        }
 
        inode = file_inode(fp->filp);
-       file_info = (struct smb2_file_disposition_info *)buf;
        if (file_info->DeletePending) {
                if (S_ISDIR(inode->i_mode) &&
                    ksmbd_vfs_empty_dir(fp) == -ENOTEMPTY)
@@ -5678,15 +5715,14 @@ static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)
        return 0;
 }
 
-static int set_file_position_info(struct ksmbd_file *fp, char *buf)
+static int set_file_position_info(struct ksmbd_file *fp,
+                                 struct smb2_file_pos_info *file_info)
 {
-       struct smb2_file_pos_info *file_info;
        loff_t current_byte_offset;
        unsigned long sector_size;
        struct inode *inode;
 
        inode = file_inode(fp->filp);
-       file_info = (struct smb2_file_pos_info *)buf;
        current_byte_offset = le64_to_cpu(file_info->CurrentByteOffset);
        sector_size = inode->i_sb->s_blocksize;
 
@@ -5702,12 +5738,11 @@ static int set_file_position_info(struct ksmbd_file *fp, char *buf)
        return 0;
 }
 
-static int set_file_mode_info(struct ksmbd_file *fp, char *buf)
+static int set_file_mode_info(struct ksmbd_file *fp,
+                             struct smb2_file_mode_info *file_info)
 {
-       struct smb2_file_mode_info *file_info;
        __le32 mode;
 
-       file_info = (struct smb2_file_mode_info *)buf;
        mode = file_info->Mode;
 
        if ((mode & ~FILE_MODE_INFO_MASK) ||
@@ -5737,40 +5772,74 @@ static int set_file_mode_info(struct ksmbd_file *fp, char *buf)
  * TODO: need to implement error handling for STATUS_INFO_LENGTH_MISMATCH
  */
 static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
-                             int info_class, char *buf,
+                             struct smb2_set_info_req *req,
                              struct ksmbd_share_config *share)
 {
-       switch (info_class) {
+       unsigned int buf_len = le32_to_cpu(req->BufferLength);
+
+       switch (req->FileInfoClass) {
        case FILE_BASIC_INFORMATION:
-               return set_file_basic_info(fp, buf, share);
+       {
+               if (buf_len < sizeof(struct smb2_file_basic_info))
+                       return -EINVAL;
 
+               return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share);
+       }
        case FILE_ALLOCATION_INFORMATION:
-               return set_file_allocation_info(work, fp, buf);
+       {
+               if (buf_len < sizeof(struct smb2_file_alloc_info))
+                       return -EINVAL;
 
+               return set_file_allocation_info(work, fp,
+                                               (struct smb2_file_alloc_info *)req->Buffer);
+       }
        case FILE_END_OF_FILE_INFORMATION:
-               return set_end_of_file_info(work, fp, buf);
+       {
+               if (buf_len < sizeof(struct smb2_file_eof_info))
+                       return -EINVAL;
 
+               return set_end_of_file_info(work, fp,
+                                           (struct smb2_file_eof_info *)req->Buffer);
+       }
        case FILE_RENAME_INFORMATION:
+       {
                if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
                        ksmbd_debug(SMB,
                                    "User does not have write permission\n");
                        return -EACCES;
                }
-               return set_rename_info(work, fp, buf);
 
+               if (buf_len < sizeof(struct smb2_file_rename_info))
+                       return -EINVAL;
+
+               return set_rename_info(work, fp,
+                                      (struct smb2_file_rename_info *)req->Buffer,
+                                      buf_len);
+       }
        case FILE_LINK_INFORMATION:
+       {
+               if (buf_len < sizeof(struct smb2_file_link_info))
+                       return -EINVAL;
+
                return smb2_create_link(work, work->tcon->share_conf,
-                                       (struct smb2_file_link_info *)buf, fp->filp,
+                                       (struct smb2_file_link_info *)req->Buffer,
+                                       buf_len, fp->filp,
                                        work->sess->conn->local_nls);
-
+       }
        case FILE_DISPOSITION_INFORMATION:
+       {
                if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
                        ksmbd_debug(SMB,
                                    "User does not have write permission\n");
                        return -EACCES;
                }
-               return set_file_disposition_info(fp, buf);
 
+               if (buf_len < sizeof(struct smb2_file_disposition_info))
+                       return -EINVAL;
+
+               return set_file_disposition_info(fp,
+                                                (struct smb2_file_disposition_info *)req->Buffer);
+       }
        case FILE_FULL_EA_INFORMATION:
        {
                if (!(fp->daccess & FILE_WRITE_EA_LE)) {
@@ -5779,18 +5848,29 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
                        return -EACCES;
                }
 
-               return smb2_set_ea((struct smb2_ea_info *)buf,
-                                  &fp->filp->f_path);
-       }
+               if (buf_len < sizeof(struct smb2_ea_info))
+                       return -EINVAL;
 
+               return smb2_set_ea((struct smb2_ea_info *)req->Buffer,
+                                  buf_len, &fp->filp->f_path);
+       }
        case FILE_POSITION_INFORMATION:
-               return set_file_position_info(fp, buf);
+       {
+               if (buf_len < sizeof(struct smb2_file_pos_info))
+                       return -EINVAL;
 
+               return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer);
+       }
        case FILE_MODE_INFORMATION:
-               return set_file_mode_info(fp, buf);
+       {
+               if (buf_len < sizeof(struct smb2_file_mode_info))
+                       return -EINVAL;
+
+               return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer);
+       }
        }
 
-       pr_err("Unimplemented Fileinfoclass :%d\n", info_class);
+       pr_err("Unimplemented Fileinfoclass :%d\n", req->FileInfoClass);
        return -EOPNOTSUPP;
 }
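
smb2_set_info_file() above now receives the whole request, derives the payload length from BufferLength and enforces a per-class minimum before casting req->Buffer to the matching smb2_file_*_info structure. A table-driven sketch of that gate; the class numbers are the standard FileInformationClass values and the minimum sizes are assumed from the on-wire layouts:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Per-class minimum payload size; in the kernel these are sizeof() the
 * corresponding smb2_file_*_info structures (values assumed here). */
struct info_class_min {
        unsigned int info_class;
        size_t       min_len;
};

static const struct info_class_min mins[] = {
        {  4, 40 },  /* FILE_BASIC_INFORMATION */
        { 13,  1 },  /* FILE_DISPOSITION_INFORMATION */
        { 14,  8 },  /* FILE_POSITION_INFORMATION */
        { 16,  4 },  /* FILE_MODE_INFORMATION */
        { 19,  8 },  /* FILE_ALLOCATION_INFORMATION */
        { 20,  8 },  /* FILE_END_OF_FILE_INFORMATION */
};

/* True when buf_len is large enough to cast the payload safely. */
static bool buffer_len_ok(unsigned int info_class, size_t buf_len)
{
        size_t i;

        for (i = 0; i < sizeof(mins) / sizeof(mins[0]); i++)
                if (mins[i].info_class == info_class)
                        return buf_len >= mins[i].min_len;
        return false;   /* unknown class: reject, as the -EOPNOTSUPP fallback does */
}

int main(void)
{
        printf("%d %d\n", buffer_len_ok(4, 40), buffer_len_ok(4, 16));  /* 1 0 */
        return 0;
}
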
 
@@ -5851,8 +5931,7 @@ int smb2_set_info(struct ksmbd_work *work)
        switch (req->InfoType) {
        case SMB2_O_INFO_FILE:
                ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
-               rc = smb2_set_info_file(work, fp, req->FileInfoClass,
-                                       req->Buffer, work->tcon->share_conf);
+               rc = smb2_set_info_file(work, fp, req, work->tcon->share_conf);
                break;
        case SMB2_O_INFO_SECURITY:
                ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");
@@ -5879,7 +5958,7 @@ int smb2_set_info(struct ksmbd_work *work)
        return 0;
 
 err_out:
-       if (rc == -EACCES || rc == -EPERM)
+       if (rc == -EACCES || rc == -EPERM || rc == -EXDEV)
                rsp->hdr.Status = STATUS_ACCESS_DENIED;
        else if (rc == -EINVAL)
                rsp->hdr.Status = STATUS_INVALID_PARAMETER;
@@ -8206,7 +8285,8 @@ void smb3_preauth_hash_rsp(struct ksmbd_work *work)
 
        WORK_BUFFERS(work, req, rsp);
 
-       if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE)
+       if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE &&
+           conn->preauth_info)
                ksmbd_gen_preauth_integrity_hash(conn, (char *)rsp,
                                                 conn->preauth_info->Preauth_HashValue);
 
@@ -8310,31 +8390,29 @@ int smb3_decrypt_req(struct ksmbd_work *work)
        struct smb2_hdr *hdr;
        unsigned int pdu_length = get_rfc1002_len(buf);
        struct kvec iov[2];
-       unsigned int buf_data_size = pdu_length + 4 -
+       int buf_data_size = pdu_length + 4 -
                sizeof(struct smb2_transform_hdr);
        struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
-       unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
        int rc = 0;
 
-       sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId));
-       if (!sess) {
-               pr_err("invalid session id(%llx) in transform header\n",
-                      le64_to_cpu(tr_hdr->SessionId));
-               return -ECONNABORTED;
-       }
-
-       if (pdu_length + 4 <
-           sizeof(struct smb2_transform_hdr) + sizeof(struct smb2_hdr)) {
+       if (buf_data_size < sizeof(struct smb2_hdr)) {
                pr_err("Transform message is too small (%u)\n",
                       pdu_length);
                return -ECONNABORTED;
        }
 
-       if (pdu_length + 4 < orig_len + sizeof(struct smb2_transform_hdr)) {
+       if (buf_data_size < le32_to_cpu(tr_hdr->OriginalMessageSize)) {
                pr_err("Transform message is broken\n");
                return -ECONNABORTED;
        }
 
+       sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId));
+       if (!sess) {
+               pr_err("invalid session id(%llx) in transform header\n",
+                      le64_to_cpu(tr_hdr->SessionId));
+               return -ECONNABORTED;
+       }
+
        iov[0].iov_base = buf;
        iov[0].iov_len = sizeof(struct smb2_transform_hdr);
        iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
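
smb3_decrypt_req() above now performs its size checks before the session lookup: the message must be large enough to hold a transform header plus at least one SMB2 header, and must cover the OriginalMessageSize the header claims. A sketch of those two checks; the 52-byte transform header size is an assumption based on the on-wire SMB2 TRANSFORM_HEADER:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SMB2_HDR_SIZE       64
#define TRANSFORM_HDR_SIZE  52   /* assumed on-wire SMB2 TRANSFORM_HEADER size */

/* msg_len is the SMB2 message length, excluding the 4-byte RFC1002 prefix. */
static bool transform_pdu_ok(uint32_t msg_len, uint32_t original_msg_size)
{
        if (msg_len < TRANSFORM_HDR_SIZE + SMB2_HDR_SIZE)
                return false;           /* too small to hold an encrypted PDU */
        if (msg_len - TRANSFORM_HDR_SIZE < original_msg_size)
                return false;           /* header claims more than was received */
        return true;
}

int main(void)
{
        printf("%d %d\n", transform_pdu_ok(52 + 64, 64),   /* 1 */
               transform_pdu_ok(52 + 32, 32));             /* 0 */
        return 0;
}
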
index bcec845b03f344cec4838591143c3b0e6dbbf159..a6dec5ec6a5424c7075452c31dddf9d92aff7307 100644 (file)
@@ -1464,6 +1464,15 @@ struct smb2_file_all_info { /* data block encoding of response to level 18 */
        char   FileName[1];
 } __packed; /* level 18 Query */
 
+struct smb2_file_basic_info { /* data block encoding of request/response for level 4 */
+       __le64 CreationTime;    /* Beginning of FILE_BASIC_INFO equivalent */
+       __le64 LastAccessTime;
+       __le64 LastWriteTime;
+       __le64 ChangeTime;
+       __le32 Attributes;
+       __u32  Pad1;            /* End of FILE_BASIC_INFO equivalent */
+} __packed;
+
 struct smb2_file_alt_name_info {
        __le32 FileNameLength;
        char FileName[0];
@@ -1628,7 +1637,6 @@ struct smb2_posix_info {
 } __packed;
 
 /* functions */
-int init_smb2_0_server(struct ksmbd_conn *conn);
 void init_smb2_1_server(struct ksmbd_conn *conn);
 void init_smb3_0_server(struct ksmbd_conn *conn);
 void init_smb3_02_server(struct ksmbd_conn *conn);
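
The smb2_file_basic_info structure introduced above follows the FILE_BASIC_INFORMATION wire layout from MS-FSCC: four 64-bit timestamps, a 32-bit attributes word and 32 bits of padding, 40 bytes in total. A quick compile-time check of that size, written with packed standard C types instead of the kernel's __le64/__le32:

#include <stdint.h>
#include <assert.h>

struct file_basic_info {
        uint64_t creation_time;
        uint64_t last_access_time;
        uint64_t last_write_time;
        uint64_t change_time;
        uint32_t attributes;
        uint32_t pad1;
} __attribute__((packed));

static_assert(sizeof(struct file_basic_info) == 40,
              "FILE_BASIC_INFORMATION is 40 bytes on the wire");

int main(void)
{
        return 0;
}
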
index 43d3123d8b62ed8731b1b7e4ad3b07ce8e9eb2d5..707490ab1f4c42c29b7b47bda99288c866bab943 100644 (file)
@@ -21,7 +21,6 @@ static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
 #define MAGIC_CHAR '~'
 #define PERIOD '.'
 #define mangle(V) ((char)(basechars[(V) % MANGLE_BASE]))
-#define KSMBD_MIN_SUPPORTED_HEADER_SIZE        (sizeof(struct smb2_hdr))
 
 struct smb_protocol {
        int             index;
@@ -89,7 +88,7 @@ unsigned int ksmbd_server_side_copy_max_total_size(void)
 
 inline int ksmbd_min_protocol(void)
 {
-       return SMB2_PROT;
+       return SMB21_PROT;
 }
 
 inline int ksmbd_max_protocol(void)
@@ -129,16 +128,22 @@ int ksmbd_lookup_protocol_idx(char *str)
  *
  * check for valid smb signature and packet direction(request/response)
  *
- * Return:      0 on success, otherwise 1
+ * Return:      0 on success, otherwise -EINVAL
  */
 int ksmbd_verify_smb_message(struct ksmbd_work *work)
 {
-       struct smb2_hdr *smb2_hdr = work->request_buf;
+       struct smb2_hdr *smb2_hdr = work->request_buf + work->next_smb2_rcv_hdr_off;
+       struct smb_hdr *hdr;
 
        if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER)
                return ksmbd_smb2_check_message(work);
 
-       return 0;
+       hdr = work->request_buf;
+       if (*(__le32 *)hdr->Protocol == SMB1_PROTO_NUMBER &&
+           hdr->Command == SMB_COM_NEGOTIATE)
+               return 0;
+
+       return -EINVAL;
 }
 
 /**
@@ -149,20 +154,7 @@ int ksmbd_verify_smb_message(struct ksmbd_work *work)
  */
 bool ksmbd_smb_request(struct ksmbd_conn *conn)
 {
-       int type = *(char *)conn->request_buf;
-
-       switch (type) {
-       case RFC1002_SESSION_MESSAGE:
-               /* Regular SMB request */
-               return true;
-       case RFC1002_SESSION_KEEP_ALIVE:
-               ksmbd_debug(SMB, "RFC 1002 session keep alive\n");
-               break;
-       default:
-               ksmbd_debug(SMB, "RFC 1002 unknown request type 0x%x\n", type);
-       }
-
-       return false;
+       return conn->request_buf[0] == 0;
 }
 
 static bool supported_protocol(int idx)
@@ -176,10 +168,12 @@ static bool supported_protocol(int idx)
                idx <= server_conf.max_protocol);
 }
 
-static char *next_dialect(char *dialect, int *next_off)
+static char *next_dialect(char *dialect, int *next_off, int bcount)
 {
        dialect = dialect + *next_off;
-       *next_off = strlen(dialect);
+       *next_off = strnlen(dialect, bcount);
+       if (dialect[*next_off] != '\0')
+               return NULL;
        return dialect;
 }
 
@@ -194,7 +188,9 @@ static int ksmbd_lookup_dialect_by_name(char *cli_dialects, __le16 byte_count)
                dialect = cli_dialects;
                bcount = le16_to_cpu(byte_count);
                do {
-                       dialect = next_dialect(dialect, &next);
+                       dialect = next_dialect(dialect, &next, bcount);
+                       if (!dialect)
+                               break;
                        ksmbd_debug(SMB, "client requested dialect %s\n",
                                    dialect);
                        if (!strcmp(dialect, smb1_protos[i].name)) {
@@ -242,13 +238,22 @@ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count)
 
 static int ksmbd_negotiate_smb_dialect(void *buf)
 {
-       __le32 proto;
+       int smb_buf_length = get_rfc1002_len(buf);
+       __le32 proto = ((struct smb2_hdr *)buf)->ProtocolId;
 
-       proto = ((struct smb2_hdr *)buf)->ProtocolId;
        if (proto == SMB2_PROTO_NUMBER) {
                struct smb2_negotiate_req *req;
+               int smb2_neg_size =
+                       offsetof(struct smb2_negotiate_req, Dialects) - 4;
 
                req = (struct smb2_negotiate_req *)buf;
+               if (smb2_neg_size > smb_buf_length)
+                       goto err_out;
+
+               if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+                   smb_buf_length)
+                       goto err_out;
+
                return ksmbd_lookup_dialect_by_id(req->Dialects,
                                                  req->DialectCount);
        }
@@ -258,14 +263,22 @@ static int ksmbd_negotiate_smb_dialect(void *buf)
                struct smb_negotiate_req *req;
 
                req = (struct smb_negotiate_req *)buf;
+               if (le16_to_cpu(req->ByteCount) < 2)
+                       goto err_out;
+
+               if (offsetof(struct smb_negotiate_req, DialectsArray) - 4 +
+                       le16_to_cpu(req->ByteCount) > smb_buf_length) {
+                       goto err_out;
+               }
+
                return ksmbd_lookup_dialect_by_name(req->DialectsArray,
                                                    req->ByteCount);
        }
 
+err_out:
        return BAD_PROT_ID;
 }
 
-#define SMB_COM_NEGOTIATE      0x72
 int ksmbd_init_smb_server(struct ksmbd_work *work)
 {
        struct ksmbd_conn *conn = work->conn;
@@ -280,11 +293,6 @@ int ksmbd_init_smb_server(struct ksmbd_work *work)
        return 0;
 }
 
-bool ksmbd_pdu_size_has_room(unsigned int pdu)
-{
-       return (pdu >= KSMBD_MIN_SUPPORTED_HEADER_SIZE - 4);
-}
-
 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
                                      struct ksmbd_file *dir,
                                      struct ksmbd_dir_info *d_info,
@@ -419,7 +427,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
 
 static int __smb2_negotiate(struct ksmbd_conn *conn)
 {
-       return (conn->dialect >= SMB20_PROT_ID &&
+       return (conn->dialect >= SMB21_PROT_ID &&
                conn->dialect <= SMB311_PROT_ID);
 }
 
@@ -449,7 +457,7 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
                }
        }
 
-       if (command == SMB2_NEGOTIATE_HE) {
+       if (command == SMB2_NEGOTIATE_HE && __smb2_negotiate(conn)) {
                ret = smb2_handle_negotiate(work);
                init_smb2_neg_rsp(work);
                return ret;
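
The next_dialect() change above bounds each string scan by the bytes that remain in the negotiate buffer, and ksmbd_negotiate_smb_dialect() now also checks that the advertised dialect array fits inside the PDU before it is walked. The sketch below shows the same bounded-walk idea in plain userspace C; the helper name and the sample buffer are illustrative, not the ksmbd interfaces.

#include <stddef.h>
#include <string.h>
#include <stdio.h>

/*
 * Return p if it holds a NUL-terminated string within 'remaining' bytes,
 * storing its length in *len; return NULL when no terminator is found.
 */
static const char *bounded_string(const char *p, size_t remaining, size_t *len)
{
        *len = strnlen(p, remaining);
        if (*len == remaining)
                return NULL;            /* not terminated inside the budget */
        return p;
}

int main(void)
{
        /* Two dialect strings packed back to back, as in an SMB1 negotiate body. */
        const char buf[] = "NT LM 0.12\0SMB 2.???";
        size_t remaining = sizeof(buf), len;
        const char *p = buf;

        while (remaining > 0 && bounded_string(p, remaining, &len)) {
                printf("dialect: %s\n", p);
                p += len + 1;           /* skip the string and its NUL */
                remaining -= len + 1;
        }
        return 0;
}
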
index 57c667c1be0694289fc2f314d568ec396b77a4f8..6e79e7577f6b7b1d31837101e8e9bdd0dbe38933 100644 (file)
 #define CIFS_DEFAULT_IOSIZE    (64 * 1024)
 #define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
 
-/* RFC 1002 session packet types */
-#define RFC1002_SESSION_MESSAGE                        0x00
-#define RFC1002_SESSION_REQUEST                        0x81
-#define RFC1002_POSITIVE_SESSION_RESPONSE      0x82
-#define RFC1002_NEGATIVE_SESSION_RESPONSE      0x83
-#define RFC1002_RETARGET_SESSION_RESPONSE      0x84
-#define RFC1002_SESSION_KEEP_ALIVE             0x85
+#define MAX_STREAM_PROT_LEN    0x00FFFFFF
 
 /* Responses when opening a file. */
 #define F_SUPERSEDED   0
                FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES)
 
 #define SMB1_PROTO_NUMBER              cpu_to_le32(0x424d53ff)
+#define SMB_COM_NEGOTIATE              0x72
 
 #define SMB1_CLIENT_GUID_SIZE          (16)
 struct smb_hdr {
@@ -500,8 +495,6 @@ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
 
 int ksmbd_init_smb_server(struct ksmbd_work *work);
 
-bool ksmbd_pdu_size_has_room(unsigned int pdu);
-
 struct ksmbd_kstat;
 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
                                      int info_level,
index 0a95cdec8c800d1f437d2e3c1555ac432ec7af42..bd792db326239410982461467eef79d33886cd2a 100644 (file)
@@ -380,7 +380,7 @@ static void parse_dacl(struct user_namespace *user_ns,
 {
        int i, ret;
        int num_aces = 0;
-       int acl_size;
+       unsigned int acl_size;
        char *acl_base;
        struct smb_ace **ppace;
        struct posix_acl_entry *cf_pace, *cf_pdace;
@@ -392,7 +392,7 @@ static void parse_dacl(struct user_namespace *user_ns,
                return;
 
        /* validate that we do not go past end of acl */
-       if (end_of_acl <= (char *)pdacl ||
+       if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) ||
            end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
                pr_err("ACL too small to parse DACL\n");
                return;
@@ -431,8 +431,22 @@ static void parse_dacl(struct user_namespace *user_ns,
         * user/group/other have no permissions
         */
        for (i = 0; i < num_aces; ++i) {
+               if (end_of_acl - acl_base < acl_size)
+                       break;
+
                ppace[i] = (struct smb_ace *)(acl_base + acl_size);
                acl_base = (char *)ppace[i];
+               acl_size = offsetof(struct smb_ace, sid) +
+                       offsetof(struct smb_sid, sub_auth);
+
+               if (end_of_acl - acl_base < acl_size ||
+                   ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||
+                   (end_of_acl - acl_base <
+                    acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||
+                   (le16_to_cpu(ppace[i]->size) <
+                    acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth))
+                       break;
+
                acl_size = le16_to_cpu(ppace[i]->size);
                ppace[i]->access_req =
                        smb_map_generic_desired_access(ppace[i]->access_req);
@@ -807,6 +821,9 @@ int parse_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
        if (!pntsd)
                return -EIO;
 
+       if (acl_len < sizeof(struct smb_ntsd))
+               return -EINVAL;
+
        owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
                        le32_to_cpu(pntsd->osidoffset));
        group_sid_ptr = (struct smb_sid *)((char *)pntsd +
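
The parse_dacl() hunk stops trusting each ACE's self-declared size until both the fixed header and that size are known to fit inside the ACL buffer. Below is a standalone sketch of that walk with a deliberately simplified ACE header; the real smb_ace/smb_sid layout carries SIDs and sub-authority arrays that are elided here.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Simplified ACE header for illustration only. */
struct ace_hdr {
        uint8_t  type;
        uint8_t  flags;
        uint16_t size;          /* total size of this ACE, self-declared */
};

/* Count the ACEs that are fully contained in buf[0..buf_len). */
static unsigned int count_valid_aces(const unsigned char *buf, size_t buf_len,
                                     unsigned int num_aces)
{
        const unsigned char *end = buf + buf_len;
        const unsigned char *p = buf;
        unsigned int i;

        for (i = 0; i < num_aces; i++) {
                const struct ace_hdr *ace;

                if ((size_t)(end - p) < sizeof(*ace))
                        break;                  /* header itself would overflow */

                ace = (const struct ace_hdr *)p;
                if (ace->size < sizeof(*ace) || (size_t)(end - p) < ace->size)
                        break;                  /* declared size is bogus */

                p += ace->size;                 /* advance by the now-trusted size */
        }
        return i;
}

int main(void)
{
        unsigned char buf[8] = { 0 };
        struct ace_hdr first = { .type = 0, .flags = 0, .size = 4 };
        struct ace_hdr second = { .type = 0, .flags = 0, .size = 16 };  /* lies */

        memcpy(buf, &first, sizeof(first));
        memcpy(buf + sizeof(first), &second, sizeof(second));
        /* Prints 1: the second entry claims more bytes than remain. */
        printf("valid ACEs: %u\n", count_valid_aces(buf, sizeof(buf), 2));
        return 0;
}
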
index 52b2556e76b1bde3ceb23d9202df72dd4c75a05c..3a7fa23ba8508c952eb498154adbb9ca8eaa0ee0 100644 (file)
@@ -20,7 +20,6 @@
 #define SUBMOD_NAME    "smb_direct"
 
 #include <linux/kthread.h>
-#include <linux/rwlock.h>
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/highmem.h>
index dc15a5ecd2e028daa56bc6272021d454ac111ce5..c14320e03b698629ecb3199e37ded26b284ed37c 100644 (file)
@@ -215,7 +215,7 @@ out_error:
  * ksmbd_kthread_fn() - listen to new SMB connections and callback server
  * @p:         arguments to forker thread
  *
- * Return:     Returns a task_struct or ERR_PTR
+ * Return:     0 on success, error number otherwise
  */
 static int ksmbd_kthread_fn(void *p)
 {
@@ -387,7 +387,7 @@ static void tcp_destroy_socket(struct socket *ksmbd_socket)
 /**
  * create_socket - create socket for ksmbd/0
  *
- * Return:     Returns a task_struct or ERR_PTR
+ * Return:     0 on success, error number otherwise
  */
 static int create_socket(struct interface *iface)
 {
index b047f2980d96c6a32190c8822760b8e1d01fa907..b41954294d38081b0e32a0b62bd0dafb6c1273ff 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/sched/xacct.h>
 #include <linux/crc32c.h>
 
+#include "../internal.h"       /* for vfs_path_lookup */
+
 #include "glob.h"
 #include "oplock.h"
 #include "connection.h"
@@ -44,7 +46,6 @@ static char *extract_last_component(char *path)
                p++;
        } else {
                p = NULL;
-               pr_err("Invalid path %s\n", path);
        }
        return p;
 }
@@ -155,7 +156,7 @@ int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
 /**
  * ksmbd_vfs_create() - vfs helper for smb create file
  * @work:      work
- * @name:      file name
+ * @name:      file name, relative to the share
  * @mode:      file create mode
  *
  * Return:     0 on success, otherwise error
@@ -166,7 +167,8 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
        struct dentry *dentry;
        int err;
 
-       dentry = kern_path_create(AT_FDCWD, name, &path, 0);
+       dentry = ksmbd_vfs_kern_path_create(work, name,
+                                           LOOKUP_NO_SYMLINKS, &path);
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                if (err != -ENOENT)
@@ -191,7 +193,7 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
 /**
  * ksmbd_vfs_mkdir() - vfs helper for smb create directory
  * @work:      work
- * @name:      directory name
+ * @name:      directory name, relative to the share
  * @mode:      directory create mode
  *
  * Return:     0 on success, otherwise error
@@ -203,7 +205,9 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
        struct dentry *dentry;
        int err;
 
-       dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
+       dentry = ksmbd_vfs_kern_path_create(work, name,
+                                           LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
+                                           &path);
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                if (err != -EEXIST)
@@ -578,7 +582,7 @@ int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id)
 
 /**
  * ksmbd_vfs_remove_file() - vfs helper for smb rmdir or unlink
- * @name:      absolute directory or file name
+ * @name:      directory or file name, relative to the share
  *
  * Return:     0 on success, otherwise error
  */
@@ -588,16 +592,11 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name)
        struct path path;
        struct dentry *parent;
        int err;
-       int flags = 0;
 
        if (ksmbd_override_fsids(work))
                return -ENOMEM;
 
-       if (test_share_config_flag(work->tcon->share_conf,
-                                  KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
-               flags = LOOKUP_FOLLOW;
-
-       err = kern_path(name, flags, &path);
+       err = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, false);
        if (err) {
                ksmbd_debug(VFS, "can't get %s, err %d\n", name, err);
                ksmbd_revert_fsids(work);
@@ -642,7 +641,7 @@ out_err:
 /**
  * ksmbd_vfs_link() - vfs helper for creating smb hardlink
  * @oldname:   source file name
- * @newname:   hardlink name
+ * @newname:   hardlink name, relative to the share
  *
  * Return:     0 on success, otherwise error
  */
@@ -652,24 +651,20 @@ int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname,
        struct path oldpath, newpath;
        struct dentry *dentry;
        int err;
-       int flags = 0;
 
        if (ksmbd_override_fsids(work))
                return -ENOMEM;
 
-       if (test_share_config_flag(work->tcon->share_conf,
-                                  KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
-               flags = LOOKUP_FOLLOW;
-
-       err = kern_path(oldname, flags, &oldpath);
+       err = kern_path(oldname, LOOKUP_NO_SYMLINKS, &oldpath);
        if (err) {
                pr_err("cannot get linux path for %s, err = %d\n",
                       oldname, err);
                goto out1;
        }
 
-       dentry = kern_path_create(AT_FDCWD, newname, &newpath,
-                                 flags | LOOKUP_REVAL);
+       dentry = ksmbd_vfs_kern_path_create(work, newname,
+                                           LOOKUP_NO_SYMLINKS | LOOKUP_REVAL,
+                                           &newpath);
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                pr_err("path create err for %s, err %d\n", newname, err);
@@ -788,21 +783,19 @@ int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
        struct dentry *src_dent, *trap_dent, *src_child;
        char *dst_name;
        int err;
-       int flags;
 
        dst_name = extract_last_component(newname);
-       if (!dst_name)
-               return -EINVAL;
+       if (!dst_name) {
+               dst_name = newname;
+               newname = "";
+       }
 
        src_dent_parent = dget_parent(fp->filp->f_path.dentry);
        src_dent = fp->filp->f_path.dentry;
 
-       flags = LOOKUP_DIRECTORY;
-       if (test_share_config_flag(work->tcon->share_conf,
-                                  KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
-               flags |= LOOKUP_FOLLOW;
-
-       err = kern_path(newname, flags, &dst_path);
+       err = ksmbd_vfs_kern_path(work, newname,
+                                 LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
+                                 &dst_path, false);
        if (err) {
                ksmbd_debug(VFS, "Cannot get path for %s [%d]\n", newname, err);
                goto out;
@@ -848,61 +841,43 @@ out:
 /**
  * ksmbd_vfs_truncate() - vfs helper for smb file truncate
  * @work:      work
- * @name:      old filename
  * @fid:       file id of old file
  * @size:      truncate to given size
  *
  * Return:     0 on success, otherwise error
  */
-int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name,
+int ksmbd_vfs_truncate(struct ksmbd_work *work,
                       struct ksmbd_file *fp, loff_t size)
 {
-       struct path path;
        int err = 0;
+       struct file *filp;
 
-       if (name) {
-               err = kern_path(name, 0, &path);
-               if (err) {
-                       pr_err("cannot get linux path for %s, err %d\n",
-                              name, err);
-                       return err;
-               }
-               err = vfs_truncate(&path, size);
-               if (err)
-                       pr_err("truncate failed for %s err %d\n",
-                              name, err);
-               path_put(&path);
-       } else {
-               struct file *filp;
-
-               filp = fp->filp;
-
-               /* Do we need to break any of a levelII oplock? */
-               smb_break_all_levII_oplock(work, fp, 1);
+       filp = fp->filp;
 
-               if (!work->tcon->posix_extensions) {
-                       struct inode *inode = file_inode(filp);
+       /* Do we need to break any of a levelII oplock? */
+       smb_break_all_levII_oplock(work, fp, 1);
 
-                       if (size < inode->i_size) {
-                               err = check_lock_range(filp, size,
-                                                      inode->i_size - 1, WRITE);
-                       } else {
-                               err = check_lock_range(filp, inode->i_size,
-                                                      size - 1, WRITE);
-                       }
+       if (!work->tcon->posix_extensions) {
+               struct inode *inode = file_inode(filp);
 
-                       if (err) {
-                               pr_err("failed due to lock\n");
-                               return -EAGAIN;
-                       }
+               if (size < inode->i_size) {
+                       err = check_lock_range(filp, size,
+                                              inode->i_size - 1, WRITE);
+               } else {
+                       err = check_lock_range(filp, inode->i_size,
+                                              size - 1, WRITE);
                }
 
-               err = vfs_truncate(&filp->f_path, size);
-               if (err)
-                       pr_err("truncate failed for filename : %s err %d\n",
-                              fp->filename, err);
+               if (err) {
+                       pr_err("failed due to lock\n");
+                       return -EAGAIN;
+               }
        }
 
+       err = vfs_truncate(&filp->f_path, size);
+       if (err)
+               pr_err("truncate failed for filename : %s err %d\n",
+                      fp->filename, err);
        return err;
 }
 
@@ -1220,22 +1195,25 @@ static int ksmbd_vfs_lookup_in_dir(struct path *dir, char *name, size_t namelen)
 
 /**
  * ksmbd_vfs_kern_path() - lookup a file and get path info
- * @name:      name of file for lookup
+ * @name:      file path, relative to the share
  * @flags:     lookup flags
  * @path:      if lookup succeed, return path info
  * @caseless:  caseless filename lookup
  *
  * Return:     0 on success, otherwise error
  */
-int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
-                       bool caseless)
+int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name,
+                       unsigned int flags, struct path *path, bool caseless)
 {
+       struct ksmbd_share_config *share_conf = work->tcon->share_conf;
        int err;
 
-       if (name[0] != '/')
-               return -EINVAL;
-
-       err = kern_path(name, flags, path);
+       flags |= LOOKUP_BENEATH;
+       err = vfs_path_lookup(share_conf->vfs_path.dentry,
+                             share_conf->vfs_path.mnt,
+                             name,
+                             flags,
+                             path);
        if (!err)
                return 0;
 
@@ -1249,11 +1227,10 @@ int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
                        return -ENOMEM;
 
                path_len = strlen(filepath);
-               remain_len = path_len - 1;
+               remain_len = path_len;
 
-               err = kern_path("/", flags, &parent);
-               if (err)
-                       goto out;
+               parent = share_conf->vfs_path;
+               path_get(&parent);
 
                while (d_can_lookup(parent.dentry)) {
                        char *filename = filepath + path_len - remain_len;
@@ -1266,21 +1243,21 @@ int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
 
                        err = ksmbd_vfs_lookup_in_dir(&parent, filename,
                                                      filename_len);
-                       if (err) {
-                               path_put(&parent);
+                       path_put(&parent);
+                       if (err)
                                goto out;
-                       }
 
-                       path_put(&parent);
                        next[0] = '\0';
 
-                       err = kern_path(filepath, flags, &parent);
+                       err = vfs_path_lookup(share_conf->vfs_path.dentry,
+                                             share_conf->vfs_path.mnt,
+                                             filepath,
+                                             flags,
+                                             &parent);
                        if (err)
                                goto out;
-
-                       if (is_last) {
-                               path->mnt = parent.mnt;
-                               path->dentry = parent.dentry;
+                       else if (is_last) {
+                               *path = parent;
                                goto out;
                        }
 
@@ -1296,6 +1273,23 @@ out:
        return err;
 }
 
+struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+                                         const char *name,
+                                         unsigned int flags,
+                                         struct path *path)
+{
+       char *abs_name;
+       struct dentry *dent;
+
+       abs_name = convert_to_unix_name(work->tcon->share_conf, name);
+       if (!abs_name)
+               return ERR_PTR(-ENOMEM);
+
+       dent = kern_path_create(AT_FDCWD, abs_name, path, flags);
+       kfree(abs_name);
+       return dent;
+}
+
 int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
                                struct dentry *dentry)
 {
index 85db50abdb24465bc7f4ef680b98dfa22ca127b1..7b1dcaa3fbdc3121ac42bb967f4b13b1794ca05b 100644 (file)
@@ -126,7 +126,7 @@ int ksmbd_vfs_link(struct ksmbd_work *work,
 int ksmbd_vfs_getattr(struct path *path, struct kstat *stat);
 int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
                        char *newname);
-int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name,
+int ksmbd_vfs_truncate(struct ksmbd_work *work,
                       struct ksmbd_file *fp, loff_t size);
 struct srv_copychunk;
 int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
@@ -152,8 +152,13 @@ int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
                                size_t *xattr_stream_name_size, int s_type);
 int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns,
                           struct dentry *dentry, char *attr_name);
-int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
+int ksmbd_vfs_kern_path(struct ksmbd_work *work,
+                       char *name, unsigned int flags, struct path *path,
                        bool caseless);
+struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+                                         const char *name,
+                                         unsigned int flags,
+                                         struct path *path);
 int ksmbd_vfs_empty_dir(struct ksmbd_file *fp);
 void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option);
 int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
index c69a0bb76c940f400004bb660c2d94974628efc8..4f1a451da5ba23794903bb34697dfa9197993580 100644 (file)
@@ -134,18 +134,9 @@ svcxdr_decode_owner(struct xdr_stream *xdr, struct xdr_netobj *obj)
 static inline bool
 svcxdr_encode_owner(struct xdr_stream *xdr, const struct xdr_netobj *obj)
 {
-       unsigned int quadlen = XDR_QUADLEN(obj->len);
-       __be32 *p;
-
-       if (xdr_stream_encode_u32(xdr, obj->len) < 0)
-               return false;
-       p = xdr_reserve_space(xdr, obj->len);
-       if (!p)
+       if (obj->len > XDR_MAX_NETOBJ)
                return false;
-       p[quadlen - 1] = 0;     /* XDR pad */
-       memcpy(p, obj->data, obj->len);
-
-       return true;
+       return xdr_stream_encode_opaque(xdr, obj->data, obj->len) > 0;
 }
 
 #endif /* _LOCKD_SVCXDR_H_ */
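
The lockd change replaces the open-coded owner encoding with xdr_stream_encode_opaque() plus an explicit cap against XDR_MAX_NETOBJ. For reference, an XDR variable-length opaque is a 4-byte big-endian length word followed by the data, zero-padded to a 4-byte boundary. The small encoder below illustrates that framing; the 1024-byte cap is a stand-in for XDR_MAX_NETOBJ, not a value taken from this patch.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>

#define MAX_NETOBJ 1024         /* stand-in cap (assumption) */

/*
 * Encode 'len' bytes at 'data' as an XDR opaque into 'out' (capacity 'avail').
 * Returns the number of bytes written, or 0 on error.
 */
static size_t xdr_encode_opaque(unsigned char *out, size_t avail,
                                const void *data, uint32_t len)
{
        size_t padded = ((size_t)len + 3) & ~(size_t)3;  /* round up to a quadlet */
        uint32_t be_len = htonl(len);

        if (len > MAX_NETOBJ || avail < 4 + padded)
                return 0;

        memcpy(out, &be_len, 4);                /* length word */
        memcpy(out + 4, data, len);             /* payload */
        memset(out + 4 + len, 0, padded - len); /* zero padding */
        return 4 + padded;
}

int main(void)
{
        unsigned char buf[32];
        size_t n = xdr_encode_opaque(buf, sizeof(buf), "owner", 5);

        printf("encoded %zu bytes (expect 12: 4 length + 5 data + 3 pad)\n", n);
        return 0;
}
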
index 0b6cd3b8734c6e1643baad8c5bd668cd161c6988..994ec22d40402b7a7c3c5206e434d114ca81298a 100644 (file)
@@ -150,7 +150,7 @@ static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
 {
        struct iov_iter iter;
 
-       iov_iter_xarray(&iter, WRITE, &subreq->rreq->mapping->i_pages,
+       iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
                        subreq->start + subreq->transferred,
                        subreq->len   - subreq->transferred);
        iov_iter_zero(iov_iter_count(&iter), &iter);
index edec458315854631e59ae0f0a4a9070b3ef6d4be..0a9b72685f984857f3e7a04651f8c5ca2e2b631f 100644 (file)
@@ -42,7 +42,6 @@ EXPORT_SYMBOL_GPL(locks_start_grace);
 
 /**
  * locks_end_grace
- * @net: net namespace that this lock manager belongs to
  * @lm: who this grace period is for
  *
  * Call this function to state that the given lock manager is ready to
index 7629248fdd532885d35423968c396dbbe9d56a6a..be3c1aad50ea3974433f39b3e6f362f2f0839155 100644 (file)
@@ -542,7 +542,7 @@ nfsd_file_close_inode_sync(struct inode *inode)
 }
 
 /**
- * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
+ * nfsd_file_close_inode - attempt a delayed close of an nfsd_file
  * @inode: inode of the file to attempt to remove
  *
  * Walk the whole hash bucket, looking for any files that correspond to "inode".
index 42356416f0a0e895456c73939b11e9aef7e7c034..3f4027a5de8834d9b9641fc81723958fb24de954 100644 (file)
@@ -3570,7 +3570,7 @@ static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_s
 }
 
 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
-                               struct nfsd4_session *session, u32 req)
+               struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
 {
        struct nfs4_client *clp = session->se_client;
        struct svc_xprt *xpt = rqst->rq_xprt;
@@ -3593,6 +3593,8 @@ static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
        else
                status = nfserr_inval;
        spin_unlock(&clp->cl_lock);
+       if (status == nfs_ok && conn)
+               *conn = c;
        return status;
 }
 
@@ -3617,8 +3619,16 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
        status = nfserr_wrong_cred;
        if (!nfsd4_mach_creds_match(session->se_client, rqstp))
                goto out;
-       status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
-       if (status == nfs_ok || status == nfserr_inval)
+       status = nfsd4_match_existing_connection(rqstp, session,
+                       bcts->dir, &conn);
+       if (status == nfs_ok) {
+               if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
+                               bcts->dir == NFS4_CDFC4_BACK)
+                       conn->cn_flags |= NFS4_CDFC4_BACK;
+               nfsd4_probe_callback(session->se_client);
+               goto out;
+       }
+       if (status == nfserr_inval)
                goto out;
        status = nfsd4_map_bcts_dir(&bcts->dir);
        if (status)
index 7abeccb975b22703cb40ab36b310da1657d8bc34..cf030ebe28275f04ee5bb7809bc55af193778761 100644 (file)
@@ -3544,15 +3544,18 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
                goto fail;
        cd->rd_maxcount -= entry_bytes;
        /*
-        * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
-        * let's always let through the first entry, at least:
+        * RFC 3530 14.2.24 describes rd_dircount as only a "hint", and
+        * notes that it could be zero. If it is zero, then the server
+        * should enforce only the rd_maxcount value.
         */
-       if (!cd->rd_dircount)
-               goto fail;
-       name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
-       if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
-               goto fail;
-       cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
+       if (cd->rd_dircount) {
+               name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
+               if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
+                       goto fail;
+               cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
+               if (!cd->rd_dircount)
+                       cd->rd_maxcount = 0;
+       }
 
        cd->cookie_offset = cookie_offset;
 skip_entry:
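
The nfsd4_encode_dirent() hunk above only charges an entry against the rd_dircount hint when the client supplied a non-zero value, and zeroes rd_maxcount once the hint is exhausted so the listing stops after the current entry. A compact sketch of that accounting (the early-failure path tied to cookie_offset is left out for brevity):

#include <stdint.h>
#include <stdio.h>

#define XDR_QUADLEN(n)  (((n) + 3) >> 2)        /* length in 4-byte XDR units */

struct readdir_state {
        uint32_t rd_dircount;   /* client hint: directory bytes, may be zero */
        uint32_t rd_maxcount;   /* hard reply-size budget */
};

/* Charge one entry whose name is 'namlen' bytes long against the hint. */
static void charge_entry(struct readdir_state *st, unsigned int namlen)
{
        uint32_t name_and_cookie;

        if (!st->rd_dircount)
                return;                         /* zero hint: only rd_maxcount applies */

        /* 4-byte length + padded name + 8-byte cookie, as in the patch. */
        name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
        st->rd_dircount -= name_and_cookie < st->rd_dircount ?
                           name_and_cookie : st->rd_dircount;
        if (!st->rd_dircount)
                st->rd_maxcount = 0;            /* hint exhausted: stop after this entry */
}

int main(void)
{
        struct readdir_state st = { .rd_dircount = 64, .rd_maxcount = 4096 };

        charge_entry(&st, 20);  /* costs 4 + 4 * 5 + 8 = 32 */
        charge_entry(&st, 20);  /* exhausts the remaining 32 */
        printf("dircount=%u maxcount=%u\n", st.rd_dircount, st.rd_maxcount);
        return 0;
}
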
index c2c3d9077dc5813fbd63ee1df4bd154d7a376813..070e5dd03e26f3b92049edd7d1a7b98683fba64d 100644 (file)
@@ -793,7 +793,10 @@ out_close:
                svc_xprt_put(xprt);
        }
 out_err:
-       nfsd_destroy(net);
+       if (!list_empty(&nn->nfsd_serv->sv_permsocks))
+               nn->nfsd_serv->sv_nrthreads--;
+       else
+               nfsd_destroy(net);
        return err;
 }
 
@@ -1545,7 +1548,7 @@ static int __init init_nfsd(void)
                goto out_free_all;
        return 0;
 out_free_all:
-       unregister_pernet_subsys(&nfsd_net_ops);
+       unregister_filesystem(&nfsd_fs_type);
 out_free_exports:
        remove_proc_entry("fs/nfs/exports", NULL);
        remove_proc_entry("fs/nfs", NULL);
index 34c4cbf7e29bc49bdb3fa060fd2c235c3fb68536..e8c00dda42adbbff372908fdcb3d7d6814567cdf 100644 (file)
@@ -6,13 +6,9 @@
  * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/hash.h>
-#include <linux/nls.h>
-#include <linux/ratelimit.h>
 #include <linux/slab.h>
+#include <linux/kernel.h>
 
 #include "debug.h"
 #include "ntfs.h"
@@ -291,7 +287,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
                if (!rsize) {
                        /* Empty resident -> Non empty nonresident. */
                } else if (!is_data) {
-                       err = ntfs_sb_write_run(sbi, run, 0, data, rsize);
+                       err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
                        if (err)
                                goto out2;
                } else if (!page) {
@@ -451,11 +447,8 @@ again:
 again_1:
        align = sbi->cluster_size;
 
-       if (is_ext) {
+       if (is_ext)
                align <<= attr_b->nres.c_unit;
-               if (is_attr_sparsed(attr_b))
-                       keep_prealloc = false;
-       }
 
        old_valid = le64_to_cpu(attr_b->nres.valid_size);
        old_size = le64_to_cpu(attr_b->nres.data_size);
@@ -465,9 +458,6 @@ again_1:
        new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
        new_alen = new_alloc >> cluster_bits;
 
-       if (keep_prealloc && is_ext)
-               keep_prealloc = false;
-
        if (keep_prealloc && new_size < old_size) {
                attr_b->nres.data_size = cpu_to_le64(new_size);
                mi_b->dirty = true;
@@ -529,7 +519,7 @@ add_alloc_in_same_attr_seg:
                } else if (pre_alloc == -1) {
                        pre_alloc = 0;
                        if (type == ATTR_DATA && !name_len &&
-                           sbi->options.prealloc) {
+                           sbi->options->prealloc) {
                                CLST new_alen2 = bytes_to_cluster(
                                        sbi, get_pre_allocated(new_size));
                                pre_alloc = new_alen2 - new_alen;
@@ -1966,7 +1956,7 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
                        return 0;
 
                from = vbo;
-               to = (vbo + bytes) < data_size ? (vbo + bytes) : data_size;
+               to = min_t(u64, vbo + bytes, data_size);
                memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
                return 0;
        }
index fa32399eb5171ce25f037fbd1bd8e483a37b839c..bad6d8a849a24b3a4a9fc2c4c372496d6ac1b54a 100644 (file)
@@ -5,10 +5,7 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
 
 #include "debug.h"
 #include "ntfs.h"
@@ -336,7 +333,7 @@ int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
 
        if (attr && attr->non_res) {
                err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
-                                       al->size);
+                                       al->size, 0);
                if (err)
                        return err;
                al->dirty = false;
@@ -423,7 +420,7 @@ next:
        return true;
 }
 
-int al_update(struct ntfs_inode *ni)
+int al_update(struct ntfs_inode *ni, int sync)
 {
        int err;
        struct ATTRIB *attr;
@@ -445,7 +442,7 @@ int al_update(struct ntfs_inode *ni)
                memcpy(resident_data(attr), al->le, al->size);
        } else {
                err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
-                                       al->size);
+                                       al->size, sync);
                if (err)
                        goto out;
 
index ce304d40b5e1642cc84d48143e2aa961ac3ea735..50d838093790a1f07474ea201510dde42d2431f3 100644 (file)
@@ -5,13 +5,8 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
-#include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/types.h>
 
-#include "debug.h"
-#include "ntfs.h"
 #include "ntfs_fs.h"
 
 #define BITS_IN_SIZE_T (sizeof(size_t) * 8)
@@ -124,8 +119,7 @@ bool are_bits_set(const ulong *lmap, size_t bit, size_t nbits)
 
        pos = nbits & 7;
        if (pos) {
-               u8 mask = fill_mask[pos];
-
+               mask = fill_mask[pos];
                if ((*map & mask) != mask)
                        return false;
        }
index 831501555009533d8b3eac04b6258e7072bd6304..aa184407520f0263844839e03d2f1edde3c6628f 100644 (file)
  *
  */
 
-#include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
 
-#include "debug.h"
 #include "ntfs.h"
 #include "ntfs_fs.h"
 
@@ -435,7 +433,7 @@ static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
                ;
        } else {
                n3 = rb_next(&e->count.node);
-               max_new_len = len > new_len ? len : new_len;
+               max_new_len = max(len, new_len);
                if (!n3) {
                        wnd->extent_max = max_new_len;
                } else {
@@ -731,7 +729,7 @@ int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
                        wbits = wnd->bits_last;
 
                tail = wbits - wbit;
-               op = tail < bits ? tail : bits;
+               op = min_t(u32, tail, bits);
 
                bh = wnd_map(wnd, iw);
                if (IS_ERR(bh)) {
@@ -784,7 +782,7 @@ int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
                        wbits = wnd->bits_last;
 
                tail = wbits - wbit;
-               op = tail < bits ? tail : bits;
+               op = min_t(u32, tail, bits);
 
                bh = wnd_map(wnd, iw);
                if (IS_ERR(bh)) {
@@ -834,7 +832,7 @@ static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
                        wbits = wnd->bits_last;
 
                tail = wbits - wbit;
-               op = tail < bits ? tail : bits;
+               op = min_t(u32, tail, bits);
 
                if (wbits != wnd->free_bits[iw]) {
                        bool ret;
@@ -926,7 +924,7 @@ use_wnd:
                        wbits = wnd->bits_last;
 
                tail = wbits - wbit;
-               op = tail < bits ? tail : bits;
+               op = min_t(u32, tail, bits);
 
                if (wnd->free_bits[iw]) {
                        bool ret;
index 31120569a87b9b07cf91f8e0b8c7291db059f4f3..53ef7489c75fd70f239d4e50af9a5e2f097c6bde 100644 (file)
@@ -11,6 +11,9 @@
 #ifndef _LINUX_NTFS3_DEBUG_H
 #define _LINUX_NTFS3_DEBUG_H
 
+struct super_block;
+struct inode;
+
 #ifndef Add2Ptr
 #define Add2Ptr(P, I)          ((void *)((u8 *)(P) + (I)))
 #define PtrOffset(B, O)                ((size_t)((size_t)(O) - (size_t)(B)))
index 93f6d485564e0103e56ad417f184f1ace0926eea..fb438d6040409838389ba7454645034ab2fa7a08 100644 (file)
@@ -7,10 +7,7 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/iversion.h>
 #include <linux/nls.h>
 
 #include "debug.h"
 #include "ntfs_fs.h"
 
 /* Convert little endian UTF-16 to NLS string. */
-int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
+int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const __le16 *name, u32 len,
                      u8 *buf, int buf_len)
 {
-       int ret, uni_len, warn;
-       const __le16 *ip;
+       int ret, warn;
        u8 *op;
-       struct nls_table *nls = sbi->options.nls;
+       struct nls_table *nls = sbi->options->nls;
 
        static_assert(sizeof(wchar_t) == sizeof(__le16));
 
        if (!nls) {
                /* UTF-16 -> UTF-8 */
-               ret = utf16s_to_utf8s((wchar_t *)uni->name, uni->len,
-                                     UTF16_LITTLE_ENDIAN, buf, buf_len);
+               ret = utf16s_to_utf8s(name, len, UTF16_LITTLE_ENDIAN, buf,
+                                     buf_len);
                buf[ret] = '\0';
                return ret;
        }
 
-       ip = uni->name;
        op = buf;
-       uni_len = uni->len;
        warn = 0;
 
-       while (uni_len--) {
+       while (len--) {
                u16 ec;
                int charlen;
                char dump[5];
@@ -52,7 +46,7 @@ int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
                        break;
                }
 
-               ec = le16_to_cpu(*ip++);
+               ec = le16_to_cpu(*name++);
                charlen = nls->uni2char(ec, op, buf_len);
 
                if (charlen > 0) {
@@ -186,7 +180,7 @@ int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
 {
        int ret, slen;
        const u8 *end;
-       struct nls_table *nls = sbi->options.nls;
+       struct nls_table *nls = sbi->options->nls;
        u16 *uname = uni->name;
 
        static_assert(sizeof(wchar_t) == sizeof(u16));
@@ -301,14 +295,14 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
                return 0;
 
        /* Skip meta files. Unless option to show metafiles is set. */
-       if (!sbi->options.showmeta && ntfs_is_meta_file(sbi, ino))
+       if (!sbi->options->showmeta && ntfs_is_meta_file(sbi, ino))
                return 0;
 
-       if (sbi->options.nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
+       if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
                return 0;
 
-       name_len = ntfs_utf16_to_nls(sbi, (struct le_str *)&fname->name_len,
-                                    name, PATH_MAX);
+       name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name,
+                                    PATH_MAX);
        if (name_len <= 0) {
                ntfs_warn(sbi->sb, "failed to convert name for inode %lx.",
                          ino);
index 424450e77ad52ae550e8de444dc393411d97cde6..43b1451bff539576cd03c0f624708bf65b959270 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/compat.h>
 #include <linux/falloc.h>
 #include <linux/fiemap.h>
-#include <linux/nls.h>
 
 #include "debug.h"
 #include "ntfs.h"
@@ -588,8 +587,11 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
                truncate_pagecache(inode, vbo_down);
 
                if (!is_sparsed(ni) && !is_compressed(ni)) {
-                       /* Normal file. */
-                       err = ntfs_zero_range(inode, vbo, end);
+                       /*
+                        * Normal file: cannot make a hole here.
+                        * TODO: find a way to record information about the hole.
+                        */
+                       err = -EOPNOTSUPP;
                        goto out;
                }
 
@@ -737,7 +739,7 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
        umode_t mode = inode->i_mode;
        int err;
 
-       if (sbi->options.no_acs_rules) {
+       if (sbi->options->noacsrules) {
                /* "No access rules" - Force any changes of time etc. */
                attr->ia_valid |= ATTR_FORCE;
                /* and disable for editing some attributes. */
@@ -1185,7 +1187,7 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
        int err = 0;
 
        /* If we are last writer on the inode, drop the block reservation. */
-       if (sbi->options.prealloc && ((file->f_mode & FMODE_WRITE) &&
+       if (sbi->options->prealloc && ((file->f_mode & FMODE_WRITE) &&
                                      atomic_read(&inode->i_writecount) == 1)) {
                ni_lock(ni);
                down_write(&ni->file.run_lock);
index 938b12d56ca676308d2af82666a4f6f19cc8f122..6f47a9c17f896c62e355db7a6948075a88247378 100644 (file)
@@ -5,11 +5,8 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fiemap.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
 #include <linux/vmalloc.h>
 
 #include "debug.h"
@@ -708,18 +705,35 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
                        continue;
 
                mi = ni_find_mi(ni, ino_get(&le->ref));
+               if (!mi) {
+                       /* Should never happen; this was already checked. */
+                       goto bad;
+               }
 
                attr = mi_find_attr(mi, NULL, le->type, le_name(le),
                                    le->name_len, &le->id);
+               if (!attr) {
+                       /* Should never happen; this was already checked. */
+                       goto bad;
+               }
                asize = le32_to_cpu(attr->size);
 
                /* Insert into primary record. */
                attr_ins = mi_insert_attr(&ni->mi, le->type, le_name(le),
                                          le->name_len, asize,
                                          le16_to_cpu(attr->name_off));
-               id = attr_ins->id;
+               if (!attr_ins) {
+                       /*
+                        * Internal error.
+                        * Either there is no space in the primary record
+                        * (already checked), or we tried to insert another
+                        * non-indexed attribute (logic error).
+                        */
+                       goto bad;
+               }
 
                /* Copy all except id. */
+               id = attr_ins->id;
                memcpy(attr_ins, attr, asize);
                attr_ins->id = id;
 
@@ -735,6 +749,10 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
        ni->attr_list.dirty = false;
 
        return 0;
+bad:
+       ntfs_inode_err(&ni->vfs_inode, "Internal error");
+       make_bad_inode(&ni->vfs_inode);
+       return -EINVAL;
 }
 
 /*
@@ -956,6 +974,13 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
                        continue;
                }
 
+               /*
+                * Do not try to insert this attribute
+                * if there is no room in the record.
+                */
+               if (le32_to_cpu(mi->mrec->used) + asize > sbi->record_size)
+                       continue;
+
                /* Try to insert attribute into this subrecord. */
                attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
                                       name_off, svcn, ins_le);
@@ -1451,7 +1476,7 @@ int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
                attr->res.flags = RESIDENT_FLAG_INDEXED;
 
                /* is_attr_indexed(attr)) == true */
-               le16_add_cpu(&ni->mi.mrec->hard_links, +1);
+               le16_add_cpu(&ni->mi.mrec->hard_links, 1);
                ni->mi.dirty = true;
        }
        attr->res.res = 0;
@@ -1606,7 +1631,7 @@ struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
 
        *le = NULL;
 
-       if (FILE_NAME_POSIX == name_type)
+       if (name_type == FILE_NAME_POSIX)
                return NULL;
 
        /* Enumerate all names. */
@@ -1706,18 +1731,16 @@ out:
 /*
  * ni_parse_reparse
  *
- * Buffer is at least 24 bytes.
+ * buffer - memory for reparse buffer header
  */
 enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
-                                  void *buffer)
+                                  struct REPARSE_DATA_BUFFER *buffer)
 {
        const struct REPARSE_DATA_BUFFER *rp = NULL;
        u8 bits;
        u16 len;
        typeof(rp->CompressReparseBuffer) *cmpr;
 
-       static_assert(sizeof(struct REPARSE_DATA_BUFFER) <= 24);
-
        /* Try to estimate reparse point. */
        if (!attr->non_res) {
                rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
@@ -1803,6 +1826,9 @@ enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
                return REPARSE_NONE;
        }
 
+       if (buffer != rp)
+               memcpy(buffer, rp, sizeof(struct REPARSE_DATA_BUFFER));
+
        /* Looks like normal symlink. */
        return REPARSE_LINK;
 }
@@ -2906,9 +2932,8 @@ bool ni_remove_name_undo(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
                memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de + 1, de_key_size);
                mi_get_ref(&ni->mi, &de->ref);
 
-               if (indx_insert_entry(&dir_ni->dir, dir_ni, de, sbi, NULL, 1)) {
+               if (indx_insert_entry(&dir_ni->dir, dir_ni, de, sbi, NULL, 1))
                        return false;
-               }
        }
 
        return true;
@@ -3077,7 +3102,9 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
                        const struct EA_INFO *info;
 
                        info = resident_data_ex(attr, sizeof(struct EA_INFO));
-                       dup->ea_size = info->size_pack;
+                       /* If ATTR_EA_INFO exists, 'info' can't be NULL. */
+                       if (info)
+                               dup->ea_size = info->size_pack;
                }
        }
 
@@ -3205,7 +3232,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
                                        goto out;
                        }
 
-                       err = al_update(ni);
+                       err = al_update(ni, sync);
                        if (err)
                                goto out;
                }
index b5853aed0e25bc6d93b3f6f667ae36feda3d980b..06492f088d6020592bd4fdb8b1a6ecd22b823324 100644 (file)
@@ -6,12 +6,8 @@
  */
 
 #include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/hash.h>
-#include <linux/nls.h>
 #include <linux/random.h>
-#include <linux/ratelimit.h>
 #include <linux/slab.h>
 
 #include "debug.h"
@@ -2219,7 +2215,7 @@ file_is_valid:
 
                        err = ntfs_sb_write_run(log->ni->mi.sbi,
                                                &log->ni->file.run, off, page,
-                                               log->page_size);
+                                               log->page_size, 0);
 
                        if (err)
                                goto out;
@@ -3710,7 +3706,7 @@ move_data:
 
        if (a_dirty) {
                attr = oa->attr;
-               err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes);
+               err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes, 0);
                if (err)
                        goto out;
        }
@@ -5152,10 +5148,10 @@ end_reply:
 
        ntfs_fix_pre_write(&rh->rhdr, log->page_size);
 
-       err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size);
+       err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size, 0);
        if (!err)
                err = ntfs_sb_write_run(sbi, &log->ni->file.run, log->page_size,
-                                       rh, log->page_size);
+                                       rh, log->page_size, 0);
 
        kfree(rh);
        if (err)
index 91e3743e1442f10cfc727eba0587a9ef0b20c039..4de9acb1696898057fe3a096984ab3c6c1119810 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
 
 #include "debug.h"
 #include "ntfs.h"
@@ -358,7 +358,7 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
                             enum ALLOCATE_OPT opt)
 {
        int err;
-       CLST alen = 0;
+       CLST alen;
        struct super_block *sb = sbi->sb;
        size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
        struct wnd_bitmap *wnd = &sbi->used.bitmap;
@@ -370,27 +370,28 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
                if (!zlen) {
                        err = ntfs_refresh_zone(sbi);
                        if (err)
-                               goto out;
+                               goto up_write;
+
                        zlen = wnd_zone_len(wnd);
                }
 
                if (!zlen) {
                        ntfs_err(sbi->sb, "no free space to extend mft");
-                       goto out;
+                       err = -ENOSPC;
+                       goto up_write;
                }
 
                lcn = wnd_zone_bit(wnd);
-               alen = zlen > len ? len : zlen;
+               alen = min_t(CLST, len, zlen);
 
                wnd_zone_set(wnd, lcn + alen, zlen - alen);
 
                err = wnd_set_used(wnd, lcn, alen);
-               if (err) {
-                       up_write(&wnd->rw_lock);
-                       return err;
-               }
+               if (err)
+                       goto up_write;
+
                alcn = lcn;
-               goto out;
+               goto space_found;
        }
        /*
         * 'Cause cluster 0 is always used this value means that we should use
@@ -404,49 +405,45 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
 
        alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
        if (alen)
-               goto out;
+               goto space_found;
 
        /* Try to use clusters from MftZone. */
        zlen = wnd_zone_len(wnd);
        zeroes = wnd_zeroes(wnd);
 
        /* Check too big request */
-       if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE)
-               goto out;
+       if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
+               err = -ENOSPC;
+               goto up_write;
+       }
 
        /* How many clusters to cat from zone. */
        zlcn = wnd_zone_bit(wnd);
        zlen2 = zlen >> 1;
-       ztrim = len > zlen ? zlen : (len > zlen2 ? len : zlen2);
-       new_zlen = zlen - ztrim;
-
-       if (new_zlen < NTFS_MIN_MFT_ZONE) {
-               new_zlen = NTFS_MIN_MFT_ZONE;
-               if (new_zlen > zlen)
-                       new_zlen = zlen;
-       }
+       ztrim = clamp_val(len, zlen2, zlen);
+       new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
 
        wnd_zone_set(wnd, zlcn, new_zlen);
 
        /* Allocate continues clusters. */
        alen = wnd_find(wnd, len, 0,
                        BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
-
-out:
-       if (alen) {
-               err = 0;
-               *new_len = alen;
-               *new_lcn = alcn;
-
-               ntfs_unmap_meta(sb, alcn, alen);
-
-               /* Set hint for next requests. */
-               if (!(opt & ALLOCATE_MFT))
-                       sbi->used.next_free_lcn = alcn + alen;
-       } else {
+       if (!alen) {
                err = -ENOSPC;
+               goto up_write;
        }
 
+space_found:
+       err = 0;
+       *new_len = alen;
+       *new_lcn = alcn;
+
+       ntfs_unmap_meta(sb, alcn, alen);
+
+       /* Set hint for next requests. */
+       if (!(opt & ALLOCATE_MFT))
+               sbi->used.next_free_lcn = alcn + alen;
+up_write:
        up_write(&wnd->rw_lock);
        return err;
 }
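
The ntfs_look_for_free_space() rewrite above folds the MFT-zone trimming into two expressions: trim between half of the zone and the whole zone, clamped by the request, and never let the reserved zone drop below NTFS_MIN_MFT_ZONE. A tiny userspace rendering of that arithmetic follows; the constant's value here is only an assumption for illustration.

#include <stdio.h>

#define MIN_MFT_ZONE 100UL      /* stand-in for NTFS_MIN_MFT_ZONE (assumption) */

static unsigned long clampul(unsigned long v, unsigned long lo, unsigned long hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned long maxul(unsigned long a, unsigned long b)
{
        return a > b ? a : b;
}

/* Zone length that stays reserved after trimming for a request of 'len' clusters. */
static unsigned long new_zone_len(unsigned long zlen, unsigned long len)
{
        unsigned long ztrim = clampul(len, zlen / 2, zlen);     /* clamp_val(len, zlen2, zlen) */

        return maxul(zlen - ztrim, MIN_MFT_ZONE);               /* max_t(size_t, ...) */
}

int main(void)
{
        printf("%lu\n", new_zone_len(1000, 100));   /* small request: give up half -> 500 */
        printf("%lu\n", new_zone_len(1000, 700));   /* larger request: give up 700 -> 300 */
        printf("%lu\n", new_zone_len(1000, 5000));  /* huge request: keep the minimum -> 100 */
        return 0;
}
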
@@ -1080,7 +1077,7 @@ int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
 }
 
 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
-                     u64 vbo, const void *buf, size_t bytes)
+                     u64 vbo, const void *buf, size_t bytes, int sync)
 {
        struct super_block *sb = sbi->sb;
        u8 cluster_bits = sbi->cluster_bits;
@@ -1099,8 +1096,8 @@ int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
        len = ((u64)clen << cluster_bits) - off;
 
        for (;;) {
-               u32 op = len < bytes ? len : bytes;
-               int err = ntfs_sb_write(sb, lbo, op, buf, 0);
+               u32 op = min_t(u64, len, bytes);
+               int err = ntfs_sb_write(sb, lbo, op, buf, sync);
 
                if (err)
                        return err;
@@ -1300,7 +1297,7 @@ int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
        nb->off = off = lbo & (blocksize - 1);
 
        for (;;) {
-               u32 len32 = len < bytes ? len : bytes;
+               u32 len32 = min_t(u64, len, bytes);
                sector_t block = lbo >> sb->s_blocksize_bits;
 
                do {
@@ -2175,7 +2172,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
 
        /* Write main SDS bucket. */
        err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
-                               d_security, aligned_sec_size);
+                               d_security, aligned_sec_size, 0);
 
        if (err)
                goto out;
@@ -2193,7 +2190,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
 
        /* Write copy SDS bucket. */
        err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
-                               aligned_sec_size);
+                               aligned_sec_size, 0);
        if (err)
                goto out;
 
index 0daca9adc54c79084743efc2487b49a0ac0d3227..6f81e3a49abfb30f7aa570e26cf595b3e8599032 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
 
 #include "debug.h"
 #include "ntfs.h"
@@ -671,138 +671,74 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
                                  const struct INDEX_HDR *hdr, const void *key,
                                  size_t key_len, const void *ctx, int *diff)
 {
-       struct NTFS_DE *e;
+       struct NTFS_DE *e, *found = NULL;
        NTFS_CMP_FUNC cmp = indx->cmp;
+       int min_idx = 0, mid_idx, max_idx = 0;
+       int diff2;
+       int table_size = 8;
        u32 e_size, e_key_len;
        u32 end = le32_to_cpu(hdr->used);
        u32 off = le32_to_cpu(hdr->de_off);
+       u16 offs[128];
 
-#ifdef NTFS3_INDEX_BINARY_SEARCH
-       int max_idx = 0, fnd, min_idx;
-       int nslots = 64;
-       u16 *offs;
-
-       if (end > 0x10000)
-               goto next;
-
-       offs = kmalloc(sizeof(u16) * nslots, GFP_NOFS);
-       if (!offs)
-               goto next;
+fill_table:
+       if (off + sizeof(struct NTFS_DE) > end)
+               return NULL;
 
-       /* Use binary search algorithm. */
-next1:
-       if (off + sizeof(struct NTFS_DE) > end) {
-               e = NULL;
-               goto out1;
-       }
        e = Add2Ptr(hdr, off);
        e_size = le16_to_cpu(e->size);
 
-       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end) {
-               e = NULL;
-               goto out1;
-       }
-
-       if (max_idx >= nslots) {
-               u16 *ptr;
-               int new_slots = ALIGN(2 * nslots, 8);
-
-               ptr = kmalloc(sizeof(u16) * new_slots, GFP_NOFS);
-               if (ptr)
-                       memcpy(ptr, offs, sizeof(u16) * max_idx);
-               kfree(offs);
-               offs = ptr;
-               nslots = new_slots;
-               if (!ptr)
-                       goto next;
-       }
-
-       /* Store entry table. */
-       offs[max_idx] = off;
+       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
+               return NULL;
 
        if (!de_is_last(e)) {
+               offs[max_idx] = off;
                off += e_size;
-               max_idx += 1;
-               goto next1;
-       }
 
-       /*
-        * Table of pointers is created.
-        * Use binary search to find entry that is <= to the search value.
-        */
-       fnd = -1;
-       min_idx = 0;
+               max_idx++;
+               if (max_idx < table_size)
+                       goto fill_table;
 
-       while (min_idx <= max_idx) {
-               int mid_idx = min_idx + ((max_idx - min_idx) >> 1);
-               int diff2;
-
-               e = Add2Ptr(hdr, offs[mid_idx]);
+               max_idx--;
+       }
 
-               e_key_len = le16_to_cpu(e->key_size);
+binary_search:
+       e_key_len = le16_to_cpu(e->key_size);
 
-               diff2 = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
+       diff2 = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
+       if (diff2 > 0) {
+               if (found) {
+                       min_idx = mid_idx + 1;
+               } else {
+                       if (de_is_last(e))
+                               return NULL;
 
-               if (!diff2) {
-                       *diff = 0;
-                       goto out1;
+                       max_idx = 0;
+                       table_size = min(table_size * 2,
+                                        (int)ARRAY_SIZE(offs));
+                       goto fill_table;
                }
-
-               if (diff2 < 0) {
+       } else if (diff2 < 0) {
+               if (found)
                        max_idx = mid_idx - 1;
-                       fnd = mid_idx;
-                       if (!fnd)
-                               break;
-               } else {
-                       min_idx = mid_idx + 1;
-               }
-       }
+               else
+                       max_idx--;
 
-       if (fnd == -1) {
-               e = NULL;
-               goto out1;
+               found = e;
+       } else {
+               *diff = 0;
+               return e;
        }
 
-       *diff = -1;
-       e = Add2Ptr(hdr, offs[fnd]);
-
-out1:
-       kfree(offs);
-
-       return e;
-#endif
-
-next:
-       /*
-        * Entries index are sorted.
-        * Enumerate all entries until we find entry
-        * that is <= to the search value.
-        */
-       if (off + sizeof(struct NTFS_DE) > end)
-               return NULL;
-
-       e = Add2Ptr(hdr, off);
-       e_size = le16_to_cpu(e->size);
-
-       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
-               return NULL;
-
-       off += e_size;
-
-       e_key_len = le16_to_cpu(e->key_size);
-
-       *diff = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
-       if (!*diff)
-               return e;
+       if (min_idx > max_idx) {
+               *diff = -1;
+               return found;
+       }
 
-       if (*diff <= 0)
-               return e;
+       mid_idx = (min_idx + max_idx) >> 1;
+       e = Add2Ptr(hdr, offs[mid_idx]);
 
-       if (de_is_last(e)) {
-               *diff = 1;
-               return e;
-       }
-       goto next;
+       goto binary_search;
 }
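
hdr_find_e() above now collects entry offsets into a small on-stack table (starting at 8 slots and doubling up to 128) and binary-searches that table instead of allocating one with kmalloc(). A simplified user-space model of the search step; plain ints stand in for the variable-length NTFS_DE records that the real code compares through indx->cmp():

#include <stdio.h>

/*
 * Return the index of the first key that is >= 'key', mirroring the
 * descend direction used by the index lookup, or -1 if every key is
 * smaller.  *diff is 0 on an exact match, -1 otherwise.
 */
static int find_ge(const int *keys, int nkeys, int key, int *diff)
{
        int lo = 0, hi = nkeys - 1, found = -1;

        while (lo <= hi) {
                int mid = lo + ((hi - lo) >> 1);

                if (keys[mid] == key) {
                        *diff = 0;
                        return mid;
                }
                if (keys[mid] > key) {
                        found = mid;    /* candidate: first entry above the key */
                        hi = mid - 1;
                } else {
                        lo = mid + 1;
                }
        }

        *diff = -1;
        return found;
}

int main(void)
{
        int keys[] = { 3, 7, 9, 14, 21 };
        int diff;
        int idx = find_ge(keys, 5, 10, &diff);

        printf("idx=%d diff=%d\n", idx, diff);  /* idx=3 (key 14), diff=-1 */
        return 0;
}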
 
 /*
@@ -1136,9 +1072,7 @@ int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
        if (!e)
                return -EINVAL;
 
-       if (fnd)
-               fnd->root_de = e;
-
+       fnd->root_de = e;
        err = 0;
 
        for (;;) {
@@ -1401,7 +1335,7 @@ ok:
 static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
                                CLST *vbn)
 {
-       int err = -ENOMEM;
+       int err;
        struct ntfs_sb_info *sbi = ni->mi.sbi;
        struct ATTRIB *bitmap;
        struct ATTRIB *alloc;
index db2a5a4c38e4d62abfb2c7ca16947cd766982b13..859951d785cb2f4380c85b1d799f66f008da7d45 100644 (file)
@@ -5,10 +5,8 @@
  *
  */
 
-#include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/iversion.h>
 #include <linux/mpage.h>
 #include <linux/namei.h>
 #include <linux/nls.h>
@@ -49,8 +47,8 @@ static struct inode *ntfs_read_mft(struct inode *inode,
 
        inode->i_op = NULL;
        /* Setup 'uid' and 'gid' */
-       inode->i_uid = sbi->options.fs_uid;
-       inode->i_gid = sbi->options.fs_gid;
+       inode->i_uid = sbi->options->fs_uid;
+       inode->i_gid = sbi->options->fs_gid;
 
        err = mi_init(&ni->mi, sbi, ino);
        if (err)
@@ -224,12 +222,9 @@ next_attr:
                if (!attr->non_res) {
                        ni->i_valid = inode->i_size = rsize;
                        inode_set_bytes(inode, rsize);
-                       t32 = asize;
-               } else {
-                       t32 = le16_to_cpu(attr->nres.run_off);
                }
 
-               mode = S_IFREG | (0777 & sbi->options.fs_fmask_inv);
+               mode = S_IFREG | (0777 & sbi->options->fs_fmask_inv);
 
                if (!attr->non_res) {
                        ni->ni_flags |= NI_FLAG_RESIDENT;
@@ -272,7 +267,7 @@ next_attr:
                        goto out;
 
                mode = sb->s_root
-                              ? (S_IFDIR | (0777 & sbi->options.fs_dmask_inv))
+                              ? (S_IFDIR | (0777 & sbi->options->fs_dmask_inv))
                               : (S_IFDIR | 0777);
                goto next_attr;
 
@@ -315,17 +310,14 @@ next_attr:
                rp_fa = ni_parse_reparse(ni, attr, &rp);
                switch (rp_fa) {
                case REPARSE_LINK:
-                       if (!attr->non_res) {
-                               inode->i_size = rsize;
-                               inode_set_bytes(inode, rsize);
-                               t32 = asize;
-                       } else {
-                               inode->i_size =
-                                       le64_to_cpu(attr->nres.data_size);
-                               t32 = le16_to_cpu(attr->nres.run_off);
-                       }
+                       /*
+                        * Normal symlink.
+                        * Assume one Unicode symbol == one UTF-8 character.
+                        */
+                       inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer
+                                                           .PrintNameLength) /
+                                       sizeof(u16);
 
-                       /* Looks like normal symlink. */
                        ni->i_valid = inode->i_size;
 
                        /* Clear directory bit. */
@@ -422,7 +414,7 @@ end_enum:
                ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
                inode->i_op = &ntfs_link_inode_operations;
                inode->i_fop = NULL;
-               inode_nohighmem(inode); // ??
+               inode_nohighmem(inode);
        } else if (S_ISREG(mode)) {
                ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
                inode->i_op = &ntfs_file_inode_operations;
@@ -443,7 +435,7 @@ end_enum:
                goto out;
        }
 
-       if ((sbi->options.sys_immutable &&
+       if ((sbi->options->sys_immutable &&
             (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
            !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
                inode->i_flags |= S_IMMUTABLE;
@@ -1200,9 +1192,13 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
        struct REPARSE_DATA_BUFFER *rp = NULL;
        bool rp_inserted = false;
 
+       ni_lock_dir(dir_ni);
+
        dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
-       if (!dir_root)
-               return ERR_PTR(-EINVAL);
+       if (!dir_root) {
+               err = -EINVAL;
+               goto out1;
+       }
 
        if (S_ISDIR(mode)) {
                /* Use parent's directory attributes. */
@@ -1244,7 +1240,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
                 *      }
                 */
        } else if (S_ISREG(mode)) {
-               if (sbi->options.sparse) {
+               if (sbi->options->sparse) {
                        /* Sparsed regular file, cause option 'sparse'. */
                        fa = FILE_ATTRIBUTE_SPARSE_FILE |
                             FILE_ATTRIBUTE_ARCHIVE;
@@ -1486,7 +1482,10 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
                asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
                t16 = PtrOffset(rec, attr);
 
-               /* 0x78 - the size of EA + EAINFO to store WSL */
+               /*
+                * The function 'ntfs_save_wsl_perm' below requires 0x78 bytes.
+                * It is a good idea to keep extended attributes resident.
+                */
                if (asize + t16 + 0x78 + 8 > sbi->record_size) {
                        CLST alen;
                        CLST clst = bytes_to_cluster(sbi, nsize);
@@ -1521,14 +1520,14 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
                        }
 
                        asize = SIZEOF_NONRESIDENT + ALIGN(err, 8);
-                       inode->i_size = nsize;
                } else {
                        attr->res.data_off = SIZEOF_RESIDENT_LE;
                        attr->res.data_size = cpu_to_le32(nsize);
                        memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
-                       inode->i_size = nsize;
                        nsize = 0;
                }
+               /* Size of symlink equals the length of the input string. */
+               inode->i_size = size;
 
                attr->size = cpu_to_le32(asize);
 
@@ -1551,6 +1550,9 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
        if (err)
                goto out6;
 
+       /* Unlock parent directory before ntfs_init_acl. */
+       ni_unlock(dir_ni);
+
        inode->i_generation = le16_to_cpu(rec->seq);
 
        dir->i_mtime = dir->i_ctime = inode->i_atime;
@@ -1562,6 +1564,8 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
                inode->i_op = &ntfs_link_inode_operations;
                inode->i_fop = NULL;
                inode->i_mapping->a_ops = &ntfs_aops;
+               inode->i_size = size;
+               inode_nohighmem(inode);
        } else if (S_ISREG(mode)) {
                inode->i_op = &ntfs_file_inode_operations;
                inode->i_fop = &ntfs_file_operations;
@@ -1577,7 +1581,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
        if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) {
                err = ntfs_init_acl(mnt_userns, inode, dir);
                if (err)
-                       goto out6;
+                       goto out7;
        } else
 #endif
        {
@@ -1586,7 +1590,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
 
        /* Write non resident data. */
        if (nsize) {
-               err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize);
+               err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize, 0);
                if (err)
                        goto out7;
        }
@@ -1607,8 +1611,10 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
 out7:
 
        /* Undo 'indx_insert_entry'. */
+       ni_lock_dir(dir_ni);
        indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
                          le16_to_cpu(new_de->key_size), sbi);
+       /* ni_unlock(dir_ni) will be called later. */
 out6:
        if (rp_inserted)
                ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
@@ -1632,8 +1638,10 @@ out2:
        kfree(rp);
 
 out1:
-       if (err)
+       if (err) {
+               ni_unlock(dir_ni);
                return ERR_PTR(err);
+       }
 
        unlock_new_inode(inode);
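
With this change ntfs_create_inode() takes and releases the parent-directory lock itself (ni_lock_dir()/ni_unlock()), dropping it before the ACL setup step and re-taking it only to undo indx_insert_entry() on failure. A rough user-space model of that lock-ownership pattern, using a pthread mutex and invented helper names:

#include <pthread.h>

struct dir {
        pthread_mutex_t lock;
        int entries;
};

static int insert_entry(struct dir *d)  { d->entries++; return 0; }
static void delete_entry(struct dir *d) { d->entries--; }
static int init_acl(void)               { return 0; }   /* must run unlocked */

static int create_in_dir(struct dir *d)
{
        int err;

        pthread_mutex_lock(&d->lock);

        err = insert_entry(d);
        if (err)
                goto out_unlock;

        /* Drop the parent lock before work that must run unlocked. */
        pthread_mutex_unlock(&d->lock);

        err = init_acl();
        if (err) {
                /* Undo the insert: re-take the lock just for the cleanup. */
                pthread_mutex_lock(&d->lock);
                delete_entry(d);
                goto out_unlock;
        }
        return 0;

out_unlock:
        pthread_mutex_unlock(&d->lock);
        return err;
}

int main(void)
{
        struct dir d = { .lock = PTHREAD_MUTEX_INITIALIZER, .entries = 0 };

        return create_in_dir(&d) ? 1 : 0;
}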
 
@@ -1754,15 +1762,15 @@ void ntfs_evict_inode(struct inode *inode)
 static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
                                      int buflen)
 {
-       int i, err = 0;
+       int i, err = -EINVAL;
        struct ntfs_inode *ni = ntfs_i(inode);
        struct super_block *sb = inode->i_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
-       u64 i_size = inode->i_size;
-       u16 nlen = 0;
+       u64 size;
+       u16 ulen = 0;
        void *to_free = NULL;
        struct REPARSE_DATA_BUFFER *rp;
-       struct le_str *uni;
+       const __le16 *uname;
        struct ATTRIB *attr;
 
        /* Reparse data present. Try to parse it. */
@@ -1771,68 +1779,64 @@ static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
 
        *buffer = 0;
 
-       /* Read into temporal buffer. */
-       if (i_size > sbi->reparse.max_size || i_size <= sizeof(u32)) {
-               err = -EINVAL;
-               goto out;
-       }
-
        attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
-       if (!attr) {
-               err = -EINVAL;
+       if (!attr)
                goto out;
-       }
 
        if (!attr->non_res) {
-               rp = resident_data_ex(attr, i_size);
-               if (!rp) {
-                       err = -EINVAL;
+               rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
+               if (!rp)
                        goto out;
-               }
+               size = le32_to_cpu(attr->res.data_size);
        } else {
-               rp = kmalloc(i_size, GFP_NOFS);
+               size = le64_to_cpu(attr->nres.data_size);
+               rp = NULL;
+       }
+
+       if (size > sbi->reparse.max_size || size <= sizeof(u32))
+               goto out;
+
+       if (!rp) {
+               rp = kmalloc(size, GFP_NOFS);
                if (!rp) {
                        err = -ENOMEM;
                        goto out;
                }
                to_free = rp;
-               err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, i_size, NULL);
+               /* Read into a temporary buffer. */
+               err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, size, NULL);
                if (err)
                        goto out;
        }
 
-       err = -EINVAL;
-
        /* Microsoft Tag. */
        switch (rp->ReparseTag) {
        case IO_REPARSE_TAG_MOUNT_POINT:
                /* Mount points and junctions. */
                /* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
-               if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
-                                      MountPointReparseBuffer.PathBuffer))
+               if (size <= offsetof(struct REPARSE_DATA_BUFFER,
+                                    MountPointReparseBuffer.PathBuffer))
                        goto out;
-               uni = Add2Ptr(rp,
-                             offsetof(struct REPARSE_DATA_BUFFER,
-                                      MountPointReparseBuffer.PathBuffer) +
-                                     le16_to_cpu(rp->MountPointReparseBuffer
-                                                         .PrintNameOffset) -
-                                     2);
-               nlen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
+               uname = Add2Ptr(rp,
+                               offsetof(struct REPARSE_DATA_BUFFER,
+                                        MountPointReparseBuffer.PathBuffer) +
+                                       le16_to_cpu(rp->MountPointReparseBuffer
+                                                           .PrintNameOffset));
+               ulen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
                break;
 
        case IO_REPARSE_TAG_SYMLINK:
                /* FolderSymbolicLink */
                /* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
-               if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
-                                      SymbolicLinkReparseBuffer.PathBuffer))
+               if (size <= offsetof(struct REPARSE_DATA_BUFFER,
+                                    SymbolicLinkReparseBuffer.PathBuffer))
                        goto out;
-               uni = Add2Ptr(rp,
-                             offsetof(struct REPARSE_DATA_BUFFER,
-                                      SymbolicLinkReparseBuffer.PathBuffer) +
-                                     le16_to_cpu(rp->SymbolicLinkReparseBuffer
-                                                         .PrintNameOffset) -
-                                     2);
-               nlen = le16_to_cpu(
+               uname = Add2Ptr(
+                       rp, offsetof(struct REPARSE_DATA_BUFFER,
+                                    SymbolicLinkReparseBuffer.PathBuffer) +
+                                   le16_to_cpu(rp->SymbolicLinkReparseBuffer
+                                                       .PrintNameOffset));
+               ulen = le16_to_cpu(
                        rp->SymbolicLinkReparseBuffer.PrintNameLength);
                break;
 
@@ -1864,29 +1868,28 @@ static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
                        goto out;
                }
                if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
-                   i_size <= sizeof(struct REPARSE_POINT)) {
+                   size <= sizeof(struct REPARSE_POINT)) {
                        goto out;
                }
 
                /* Users tag. */
-               uni = Add2Ptr(rp, sizeof(struct REPARSE_POINT) - 2);
-               nlen = le16_to_cpu(rp->ReparseDataLength) -
+               uname = Add2Ptr(rp, sizeof(struct REPARSE_POINT));
+               ulen = le16_to_cpu(rp->ReparseDataLength) -
                       sizeof(struct REPARSE_POINT);
        }
 
        /* Convert ulen from bytes to UNICODE chars. */
-       nlen >>= 1;
+       ulen >>= 1;
 
        /* Check that name is available. */
-       if (!nlen || &uni->name[nlen] > (__le16 *)Add2Ptr(rp, i_size))
+       if (!ulen || uname + ulen > (__le16 *)Add2Ptr(rp, size))
                goto out;
 
        /* If name is already zero terminated then truncate it now. */
-       if (!uni->name[nlen - 1])
-               nlen -= 1;
-       uni->len = nlen;
+       if (!uname[ulen - 1])
+               ulen -= 1;
 
-       err = ntfs_utf16_to_nls(sbi, uni, buffer, buflen);
+       err = ntfs_utf16_to_nls(sbi, uname, ulen, buffer, buflen);
 
        if (err < 0)
                goto out;
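
The rewritten helper above locates the reparse point's print name by byte offset and byte length, converts the length to UTF-16 code units, and bounds-checks the name against the end of the buffer before handing it to ntfs_utf16_to_nls(). A minimal model of that validation, using host-endian 16-bit units for simplicity; the layout here is invented for illustration and is not the on-disk REPARSE_DATA_BUFFER:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int extract_name(const uint8_t *buf, size_t buf_size,
                        size_t name_off, uint16_t name_bytes,
                        const uint16_t **name, uint16_t *nchars)
{
        const uint16_t *uname = (const uint16_t *)(buf + name_off);
        uint16_t ulen = name_bytes >> 1;        /* bytes -> 16-bit units */

        /* The whole name must lie inside the buffer. */
        if (!ulen || name_off + (size_t)name_bytes > buf_size)
                return -1;

        /* If the name is already zero terminated, truncate it now. */
        if (!uname[ulen - 1])
                ulen--;

        *name = uname;
        *nchars = ulen;
        return 0;
}

int main(void)
{
        /* "C:\x" followed by a terminating zero, as 16-bit units. */
        uint16_t raw[5] = { 'C', ':', '\\', 'x', 0 };
        const uint16_t *name;
        uint16_t nchars;

        if (!extract_name((const uint8_t *)raw, sizeof(raw), 0, sizeof(raw),
                          &name, &nchars))
                printf("%u name characters\n", (unsigned)nchars); /* prints 4 */
        return 0;
}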
index 2d70ae42f1b511f3b43af7a9b886ac1ffd21822b..dd7ced000d0e755ca1b337273dab7577c194a7c4 100644 (file)
@@ -5,6 +5,9 @@
  * Copyright (C) 2015 Eric Biggers
  */
 
+#ifndef _LINUX_NTFS3_LIB_DECOMPRESS_COMMON_H
+#define _LINUX_NTFS3_LIB_DECOMPRESS_COMMON_H
+
 #include <linux/string.h>
 #include <linux/compiler.h>
 #include <linux/types.h>
@@ -336,3 +339,5 @@ static forceinline u8 *lz_copy(u8 *dst, u32 length, u32 offset, const u8 *bufend
 
        return dst;
 }
+
+#endif /* _LINUX_NTFS3_LIB_DECOMPRESS_COMMON_H */
index f508fbad2e712d946274b13f0ec7b244dc264a4d..90309a5ae59c7a98f3f0b61f5883fe390790eb50 100644 (file)
@@ -7,6 +7,10 @@
  * - linux kernel code style
  */
 
+#ifndef _LINUX_NTFS3_LIB_LIB_H
+#define _LINUX_NTFS3_LIB_LIB_H
+
+#include <linux/types.h>
 
 /* globals from xpress_decompress.c */
 struct xpress_decompressor *xpress_allocate_decompressor(void);
@@ -24,3 +28,5 @@ int lzx_decompress(struct lzx_decompressor *__restrict d,
                   const void *__restrict compressed_data,
                   size_t compressed_size, void *__restrict uncompressed_data,
                   size_t uncompressed_size);
+
+#endif /* _LINUX_NTFS3_LIB_LIB_H */
index f1f691a67cc490254957f062cfc38ec12ac7e834..28f654561f279a5d09ddd4a1352930b0fefb5416 100644 (file)
@@ -5,13 +5,13 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
-#include <linux/fs.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
 
 #include "debug.h"
-#include "ntfs.h"
 #include "ntfs_fs.h"
 
 // clang-format off
@@ -292,7 +292,7 @@ next:
 /*
  * get_lznt_ctx
  * @level: 0 - Standard compression.
- *        !0 - Best compression, requires a lot of cpu.
+ *        !0 - Best compression, requires a lot of cpu.
  */
 struct lznt *get_lznt_ctx(int level)
 {
index e58415d0713280297df1b7bac30eb17642431fa0..bc741213ad84833b7fd434c32d4b784c6e7cb2f7 100644 (file)
@@ -5,11 +5,7 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/iversion.h>
-#include <linux/namei.h>
 #include <linux/nls.h>
 
 #include "debug.h"
@@ -99,16 +95,11 @@ static struct dentry *ntfs_lookup(struct inode *dir, struct dentry *dentry,
 static int ntfs_create(struct user_namespace *mnt_userns, struct inode *dir,
                       struct dentry *dentry, umode_t mode, bool excl)
 {
-       struct ntfs_inode *ni = ntfs_i(dir);
        struct inode *inode;
 
-       ni_lock_dir(ni);
-
        inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFREG | mode,
                                  0, NULL, 0, NULL);
 
-       ni_unlock(ni);
-
        return IS_ERR(inode) ? PTR_ERR(inode) : 0;
 }
 
@@ -120,16 +111,11 @@ static int ntfs_create(struct user_namespace *mnt_userns, struct inode *dir,
 static int ntfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
                      struct dentry *dentry, umode_t mode, dev_t rdev)
 {
-       struct ntfs_inode *ni = ntfs_i(dir);
        struct inode *inode;
 
-       ni_lock_dir(ni);
-
        inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, mode, rdev,
                                  NULL, 0, NULL);
 
-       ni_unlock(ni);
-
        return IS_ERR(inode) ? PTR_ERR(inode) : 0;
 }
 
@@ -200,15 +186,10 @@ static int ntfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
 {
        u32 size = strlen(symname);
        struct inode *inode;
-       struct ntfs_inode *ni = ntfs_i(dir);
-
-       ni_lock_dir(ni);
 
        inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFLNK | 0777,
                                  0, symname, size, NULL);
 
-       ni_unlock(ni);
-
        return IS_ERR(inode) ? PTR_ERR(inode) : 0;
 }
 
@@ -219,15 +200,10 @@ static int ntfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
                      struct dentry *dentry, umode_t mode)
 {
        struct inode *inode;
-       struct ntfs_inode *ni = ntfs_i(dir);
-
-       ni_lock_dir(ni);
 
        inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFDIR | mode,
                                  0, NULL, 0, NULL);
 
-       ni_unlock(ni);
-
        return IS_ERR(inode) ? PTR_ERR(inode) : 0;
 }
 
index 6bb3e595263b67c7cc66a0fecc5dfa68310826d4..9cc396b117bfd9b02531ad6c6de89ded53bba7f3 100644 (file)
 #ifndef _LINUX_NTFS3_NTFS_H
 #define _LINUX_NTFS3_NTFS_H
 
-/* TODO: Check 4K MFT record and 512 bytes cluster. */
+#include <linux/blkdev.h>
+#include <linux/build_bug.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "debug.h"
 
-/* Activate this define to use binary search in indexes. */
-#define NTFS3_INDEX_BINARY_SEARCH
+/* TODO: Check 4K MFT record and 512 bytes cluster. */
 
 /* Check each run for marked clusters. */
 #define NTFS3_CHECK_FREE_CLST
 
 #define NTFS_NAME_LEN 255
 
-/* ntfs.sys used 500 maximum links on-disk struct allows up to 0xffff. */
-#define NTFS_LINK_MAX 0x400
-//#define NTFS_LINK_MAX 0xffff
+/*
+ * ntfs.sys used a maximum of 500 links; the on-disk struct allows up to 0xffff.
+ * xfstest generic/041 creates 3003 hardlinks.
+ */
+#define NTFS_LINK_MAX 4000
 
 /*
  * Activate to use 64 bit clusters instead of 32 bits in ntfs.sys.
index dc71c59fd44545118b6ab3023492aa8e4e75b017..8aaec7e0804efaa4b66727bf0d7360f68abba8c0 100644 (file)
@@ -9,6 +9,37 @@
 #ifndef _LINUX_NTFS3_NTFS_FS_H
 #define _LINUX_NTFS3_NTFS_FS_H
 
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+#include <linux/cleancache.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+#include <linux/uidgid.h>
+#include <asm/div64.h>
+#include <asm/page.h>
+
+#include "debug.h"
+#include "ntfs.h"
+
+struct dentry;
+struct fiemap_extent_info;
+struct user_namespace;
+struct page;
+struct writeback_control;
+enum utf16_endian;
+
+
 #define MINUS_ONE_T                    ((size_t)(-1))
 /* Biggest MFT / smallest cluster */
 #define MAXIMUM_BYTES_PER_MFT          4096
@@ -52,6 +83,7 @@
 // clang-format on
 
 struct ntfs_mount_options {
+       char *nls_name;
        struct nls_table *nls;
 
        kuid_t fs_uid;
@@ -59,19 +91,16 @@ struct ntfs_mount_options {
        u16 fs_fmask_inv;
        u16 fs_dmask_inv;
 
-       unsigned uid : 1, /* uid was set. */
-               gid : 1, /* gid was set. */
-               fmask : 1, /* fmask was set. */
-               dmask : 1, /* dmask was set. */
-               sys_immutable : 1, /* Immutable system files. */
-               discard : 1, /* Issue discard requests on deletions. */
-               sparse : 1, /* Create sparse files. */
-               showmeta : 1, /* Show meta files. */
-               nohidden : 1, /* Do not show hidden files. */
-               force : 1, /* Rw mount dirty volume. */
-               no_acs_rules : 1, /*Exclude acs rules. */
-               prealloc : 1 /* Preallocate space when file is growing. */
-               ;
+       unsigned fmask : 1; /* fmask was set. */
+       unsigned dmask : 1; /* dmask was set. */
+       unsigned sys_immutable : 1; /* Immutable system files. */
+       unsigned discard : 1; /* Issue discard requests on deletions. */
+       unsigned sparse : 1; /* Create sparse files. */
+       unsigned showmeta : 1; /* Show meta files. */
+       unsigned nohidden : 1; /* Do not show hidden files. */
+       unsigned force : 1; /* RW mount dirty volume. */
+       unsigned noacsrules : 1; /* Exclude acs rules. */
+       unsigned prealloc : 1; /* Preallocate space when file is growing. */
 };
 
 /* Special value to unpack and deallocate. */
@@ -182,10 +211,8 @@ struct ntfs_sb_info {
        u32 blocks_per_cluster; // cluster_size / sb->s_blocksize
 
        u32 record_size;
-       u32 sector_size;
        u32 index_size;
 
-       u8 sector_bits;
        u8 cluster_bits;
        u8 record_bits;
 
@@ -279,7 +306,7 @@ struct ntfs_sb_info {
 #endif
        } compress;
 
-       struct ntfs_mount_options options;
+       struct ntfs_mount_options *options;
        struct ratelimit_state msg_ratelimit;
 };
 
@@ -436,7 +463,7 @@ bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le);
 bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
                  const __le16 *name, size_t name_len,
                  const struct MFT_REF *ref);
-int al_update(struct ntfs_inode *ni);
+int al_update(struct ntfs_inode *ni, int sync);
 static inline size_t al_aligned(size_t size)
 {
        return (size + 1023) & ~(size_t)1023;
@@ -448,7 +475,7 @@ bool are_bits_set(const ulong *map, size_t bit, size_t nbits);
 size_t get_set_bits_ex(const ulong *map, size_t bit, size_t nbits);
 
 /* Globals from dir.c */
-int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
+int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const __le16 *name, u32 len,
                      u8 *buf, int buf_len);
 int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
                      struct cpu_str *uni, u32 max_ulen,
@@ -520,7 +547,7 @@ struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
                                     struct ATTR_LIST_ENTRY **entry);
 int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa);
 enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
-                                  void *buffer);
+                                  struct REPARSE_DATA_BUFFER *buffer);
 int ni_write_inode(struct inode *inode, int sync, const char *hint);
 #define _ni_write_inode(i, w) ni_write_inode(i, w, __func__)
 int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
@@ -577,7 +604,7 @@ int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer);
 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
                  const void *buffer, int wait);
 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
-                     u64 vbo, const void *buf, size_t bytes);
+                     u64 vbo, const void *buf, size_t bytes, int sync);
 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
                                   const struct runs_tree *run, u64 vbo);
 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
index 103705c86772f45a90227e6434449e99c4d7d94b..861e35791506e801dc446414d935d5463e64a1fb 100644 (file)
@@ -5,10 +5,7 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
 
 #include "debug.h"
 #include "ntfs.h"
index 26ed2b64345e665a39c6b37d88b29c5b81592ecd..a8fec651f9732878ad871cc476d8a6fcf28f9312 100644 (file)
@@ -7,10 +7,8 @@
  */
 
 #include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
 #include <linux/log2.h>
-#include <linux/nls.h>
 
 #include "debug.h"
 #include "ntfs.h"
index 55bbc9200a10ebdf3011e0a517eb44afd8a35f3f..d41d76979e121fd9e836ef0072b122ab6de3735b 100644 (file)
  *
  */
 
-#include <linux/backing-dev.h>
 #include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/exportfs.h>
 #include <linux/fs.h>
-#include <linux/iversion.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
 #include <linux/log2.h>
 #include <linux/module.h>
 #include <linux/nls.h>
-#include <linux/parser.h>
 #include <linux/seq_file.h>
 #include <linux/statfs.h>
 
@@ -205,9 +204,11 @@ void *ntfs_put_shared(void *ptr)
        return ret;
 }
 
-static inline void clear_mount_options(struct ntfs_mount_options *options)
+static inline void put_mount_options(struct ntfs_mount_options *options)
 {
+       kfree(options->nls_name);
        unload_nls(options->nls);
+       kfree(options);
 }
 
 enum Opt {
@@ -223,218 +224,175 @@ enum Opt {
        Opt_nohidden,
        Opt_showmeta,
        Opt_acl,
-       Opt_noatime,
-       Opt_nls,
+       Opt_iocharset,
        Opt_prealloc,
-       Opt_no_acs_rules,
+       Opt_noacsrules,
        Opt_err,
 };
 
-static const match_table_t ntfs_tokens = {
-       { Opt_uid, "uid=%u" },
-       { Opt_gid, "gid=%u" },
-       { Opt_umask, "umask=%o" },
-       { Opt_dmask, "dmask=%o" },
-       { Opt_fmask, "fmask=%o" },
-       { Opt_immutable, "sys_immutable" },
-       { Opt_discard, "discard" },
-       { Opt_force, "force" },
-       { Opt_sparse, "sparse" },
-       { Opt_nohidden, "nohidden" },
-       { Opt_acl, "acl" },
-       { Opt_noatime, "noatime" },
-       { Opt_showmeta, "showmeta" },
-       { Opt_nls, "nls=%s" },
-       { Opt_prealloc, "prealloc" },
-       { Opt_no_acs_rules, "no_acs_rules" },
-       { Opt_err, NULL },
+static const struct fs_parameter_spec ntfs_fs_parameters[] = {
+       fsparam_u32("uid",                      Opt_uid),
+       fsparam_u32("gid",                      Opt_gid),
+       fsparam_u32oct("umask",                 Opt_umask),
+       fsparam_u32oct("dmask",                 Opt_dmask),
+       fsparam_u32oct("fmask",                 Opt_fmask),
+       fsparam_flag_no("sys_immutable",        Opt_immutable),
+       fsparam_flag_no("discard",              Opt_discard),
+       fsparam_flag_no("force",                Opt_force),
+       fsparam_flag_no("sparse",               Opt_sparse),
+       fsparam_flag_no("hidden",               Opt_nohidden),
+       fsparam_flag_no("acl",                  Opt_acl),
+       fsparam_flag_no("showmeta",             Opt_showmeta),
+       fsparam_flag_no("prealloc",             Opt_prealloc),
+       fsparam_flag_no("acsrules",             Opt_noacsrules),
+       fsparam_string("iocharset",             Opt_iocharset),
+       {}
 };
 
-static noinline int ntfs_parse_options(struct super_block *sb, char *options,
-                                      int silent,
-                                      struct ntfs_mount_options *opts)
+/*
+ * Load the nls table, or return NULL if @nls is utf8.
+ */
+static struct nls_table *ntfs_load_nls(char *nls)
 {
-       char *p;
-       substring_t args[MAX_OPT_ARGS];
-       int option;
-       char nls_name[30];
-       struct nls_table *nls;
+       struct nls_table *ret;
 
-       opts->fs_uid = current_uid();
-       opts->fs_gid = current_gid();
-       opts->fs_fmask_inv = opts->fs_dmask_inv = ~current_umask();
-       nls_name[0] = 0;
+       if (!nls)
+               nls = CONFIG_NLS_DEFAULT;
 
-       if (!options)
-               goto out;
+       if (strcmp(nls, "utf8") == 0)
+               return NULL;
 
-       while ((p = strsep(&options, ","))) {
-               int token;
+       if (strcmp(nls, CONFIG_NLS_DEFAULT) == 0)
+               return load_nls_default();
 
-               if (!*p)
-                       continue;
+       ret = load_nls(nls);
+       if (ret)
+               return ret;
 
-               token = match_token(p, ntfs_tokens, args);
-               switch (token) {
-               case Opt_immutable:
-                       opts->sys_immutable = 1;
-                       break;
-               case Opt_uid:
-                       if (match_int(&args[0], &option))
-                               return -EINVAL;
-                       opts->fs_uid = make_kuid(current_user_ns(), option);
-                       if (!uid_valid(opts->fs_uid))
-                               return -EINVAL;
-                       opts->uid = 1;
-                       break;
-               case Opt_gid:
-                       if (match_int(&args[0], &option))
-                               return -EINVAL;
-                       opts->fs_gid = make_kgid(current_user_ns(), option);
-                       if (!gid_valid(opts->fs_gid))
-                               return -EINVAL;
-                       opts->gid = 1;
-                       break;
-               case Opt_umask:
-                       if (match_octal(&args[0], &option))
-                               return -EINVAL;
-                       opts->fs_fmask_inv = opts->fs_dmask_inv = ~option;
-                       opts->fmask = opts->dmask = 1;
-                       break;
-               case Opt_dmask:
-                       if (match_octal(&args[0], &option))
-                               return -EINVAL;
-                       opts->fs_dmask_inv = ~option;
-                       opts->dmask = 1;
-                       break;
-               case Opt_fmask:
-                       if (match_octal(&args[0], &option))
-                               return -EINVAL;
-                       opts->fs_fmask_inv = ~option;
-                       opts->fmask = 1;
-                       break;
-               case Opt_discard:
-                       opts->discard = 1;
-                       break;
-               case Opt_force:
-                       opts->force = 1;
-                       break;
-               case Opt_sparse:
-                       opts->sparse = 1;
-                       break;
-               case Opt_nohidden:
-                       opts->nohidden = 1;
-                       break;
-               case Opt_acl:
+       return ERR_PTR(-EINVAL);
+}
+
+static int ntfs_fs_parse_param(struct fs_context *fc,
+                              struct fs_parameter *param)
+{
+       struct ntfs_mount_options *opts = fc->fs_private;
+       struct fs_parse_result result;
+       int opt;
+
+       opt = fs_parse(fc, ntfs_fs_parameters, param, &result);
+       if (opt < 0)
+               return opt;
+
+       switch (opt) {
+       case Opt_uid:
+               opts->fs_uid = make_kuid(current_user_ns(), result.uint_32);
+               if (!uid_valid(opts->fs_uid))
+                       return invalf(fc, "ntfs3: Invalid value for uid.");
+               break;
+       case Opt_gid:
+               opts->fs_gid = make_kgid(current_user_ns(), result.uint_32);
+               if (!gid_valid(opts->fs_gid))
+                       return invalf(fc, "ntfs3: Invalid value for gid.");
+               break;
+       case Opt_umask:
+               if (result.uint_32 & ~07777)
+                       return invalf(fc, "ntfs3: Invalid value for umask.");
+               opts->fs_fmask_inv = ~result.uint_32;
+               opts->fs_dmask_inv = ~result.uint_32;
+               opts->fmask = 1;
+               opts->dmask = 1;
+               break;
+       case Opt_dmask:
+               if (result.uint_32 & ~07777)
+                       return invalf(fc, "ntfs3: Invalid value for dmask.");
+               opts->fs_dmask_inv = ~result.uint_32;
+               opts->dmask = 1;
+               break;
+       case Opt_fmask:
+               if (result.uint_32 & ~07777)
+                       return invalf(fc, "ntfs3: Invalid value for fmask.");
+               opts->fs_fmask_inv = ~result.uint_32;
+               opts->fmask = 1;
+               break;
+       case Opt_immutable:
+               opts->sys_immutable = result.negated ? 0 : 1;
+               break;
+       case Opt_discard:
+               opts->discard = result.negated ? 0 : 1;
+               break;
+       case Opt_force:
+               opts->force = result.negated ? 0 : 1;
+               break;
+       case Opt_sparse:
+               opts->sparse = result.negated ? 0 : 1;
+               break;
+       case Opt_nohidden:
+               opts->nohidden = result.negated ? 1 : 0;
+               break;
+       case Opt_acl:
+               if (!result.negated)
 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
-                       sb->s_flags |= SB_POSIXACL;
-                       break;
+                       fc->sb_flags |= SB_POSIXACL;
 #else
-                       ntfs_err(sb, "support for ACL not compiled in!");
-                       return -EINVAL;
+                       return invalf(fc, "ntfs3: Support for ACL not compiled in!");
 #endif
-               case Opt_noatime:
-                       sb->s_flags |= SB_NOATIME;
-                       break;
-               case Opt_showmeta:
-                       opts->showmeta = 1;
-                       break;
-               case Opt_nls:
-                       match_strlcpy(nls_name, &args[0], sizeof(nls_name));
-                       break;
-               case Opt_prealloc:
-                       opts->prealloc = 1;
-                       break;
-               case Opt_no_acs_rules:
-                       opts->no_acs_rules = 1;
-                       break;
-               default:
-                       if (!silent)
-                               ntfs_err(
-                                       sb,
-                                       "Unrecognized mount option \"%s\" or missing value",
-                                       p);
-                       //return -EINVAL;
-               }
-       }
-
-out:
-       if (!strcmp(nls_name[0] ? nls_name : CONFIG_NLS_DEFAULT, "utf8")) {
-               /*
-                * For UTF-8 use utf16s_to_utf8s()/utf8s_to_utf16s()
-                * instead of NLS.
-                */
-               nls = NULL;
-       } else if (nls_name[0]) {
-               nls = load_nls(nls_name);
-               if (!nls) {
-                       ntfs_err(sb, "failed to load \"%s\"", nls_name);
-                       return -EINVAL;
-               }
-       } else {
-               nls = load_nls_default();
-               if (!nls) {
-                       ntfs_err(sb, "failed to load default nls");
-                       return -EINVAL;
-               }
+               else
+                       fc->sb_flags &= ~SB_POSIXACL;
+               break;
+       case Opt_showmeta:
+               opts->showmeta = result.negated ? 0 : 1;
+               break;
+       case Opt_iocharset:
+               kfree(opts->nls_name);
+               opts->nls_name = param->string;
+               param->string = NULL;
+               break;
+       case Opt_prealloc:
+               opts->prealloc = result.negated ? 0 : 1;
+               break;
+       case Opt_noacsrules:
+               opts->noacsrules = result.negated ? 1 : 0;
+               break;
+       default:
+               /* Should not be here unless we forgot to add a case. */
+               return -EINVAL;
        }
-       opts->nls = nls;
-
        return 0;
 }
 
-static int ntfs_remount(struct super_block *sb, int *flags, char *data)
+static int ntfs_fs_reconfigure(struct fs_context *fc)
 {
-       int err, ro_rw;
+       struct super_block *sb = fc->root->d_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
-       struct ntfs_mount_options old_opts;
-       char *orig_data = kstrdup(data, GFP_KERNEL);
-
-       if (data && !orig_data)
-               return -ENOMEM;
+       struct ntfs_mount_options *new_opts = fc->fs_private;
+       int ro_rw;
 
-       /* Store  original options. */
-       memcpy(&old_opts, &sbi->options, sizeof(old_opts));
-       clear_mount_options(&sbi->options);
-       memset(&sbi->options, 0, sizeof(sbi->options));
-
-       err = ntfs_parse_options(sb, data, 0, &sbi->options);
-       if (err)
-               goto restore_opts;
-
-       ro_rw = sb_rdonly(sb) && !(*flags & SB_RDONLY);
+       ro_rw = sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY);
        if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) {
-               ntfs_warn(
-                       sb,
-                       "Couldn't remount rw because journal is not replayed. Please umount/remount instead\n");
-               err = -EINVAL;
-               goto restore_opts;
+               errorf(fc, "ntfs3: Couldn't remount rw because journal is not replayed. Please umount/remount instead\n");
+               return -EINVAL;
        }
 
+       new_opts->nls = ntfs_load_nls(new_opts->nls_name);
+       if (IS_ERR(new_opts->nls)) {
+               new_opts->nls = NULL;
+               errorf(fc, "ntfs3: Cannot load iocharset %s", new_opts->nls_name);
+               return -EINVAL;
+       }
+       if (new_opts->nls != sbi->options->nls)
+               return invalf(fc, "ntfs3: Cannot use different iocharset when remounting!");
+
        sync_filesystem(sb);
 
        if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) &&
-           !sbi->options.force) {
-               ntfs_warn(sb, "volume is dirty and \"force\" flag is not set!");
-               err = -EINVAL;
-               goto restore_opts;
+           !new_opts->force) {
+               errorf(fc, "ntfs3: Volume is dirty and \"force\" flag is not set!");
+               return -EINVAL;
        }
 
-       clear_mount_options(&old_opts);
-
-       *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME) |
-                SB_NODIRATIME | SB_NOATIME;
-       ntfs_info(sb, "re-mounted. Opts: %s", orig_data);
-       err = 0;
-       goto out;
+       memcpy(sbi->options, new_opts, sizeof(*new_opts));
 
-restore_opts:
-       clear_mount_options(&sbi->options);
-       memcpy(&sbi->options, &old_opts, sizeof(old_opts));
-
-out:
-       kfree(orig_data);
-       return err;
+       return 0;
 }
 
 static struct kmem_cache *ntfs_inode_cachep;
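
The mount-time parsing above is now table driven: ntfs_fs_parameters[] describes each option via fsparam_u32(), fsparam_u32oct(), fsparam_flag_no() and fsparam_string(), and ntfs_fs_parse_param() switches on the id returned by fs_parse(). A small user-space model of that table-driven shape, including the implicit "no" prefix that fsparam_flag_no() accepts; it only illustrates the pattern and is not the kernel API:

#include <stdio.h>
#include <string.h>

enum opt_type { OPT_FLAG, OPT_U32 };

struct opt_spec {
        const char *name;
        enum opt_type type;
        int id;
};

enum { OPT_DISCARD, OPT_SPARSE, OPT_UID };

static const struct opt_spec specs[] = {
        { "discard", OPT_FLAG, OPT_DISCARD },
        { "sparse",  OPT_FLAG, OPT_SPARSE  },
        { "uid",     OPT_U32,  OPT_UID     },
};

/* Returns the option id, sets *negated for "no<flag>", or -1 if unknown. */
static int parse_opt(const char *key, int *negated)
{
        size_t i;

        for (i = 0; i < sizeof(specs) / sizeof(specs[0]); i++) {
                const struct opt_spec *s = &specs[i];

                if (!strcmp(key, s->name)) {
                        *negated = 0;
                        return s->id;
                }
                if (s->type == OPT_FLAG && !strncmp(key, "no", 2) &&
                    !strcmp(key + 2, s->name)) {
                        *negated = 1;
                        return s->id;
                }
        }
        return -1;
}

int main(void)
{
        int negated;
        int id = parse_opt("nodiscard", &negated);

        printf("id=%d negated=%d\n", id, negated);      /* id=0 negated=1 */
        return 0;
}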
@@ -513,8 +471,6 @@ static noinline void put_ntfs(struct ntfs_sb_info *sbi)
        xpress_free_decompressor(sbi->compress.xpress);
        lzx_free_decompressor(sbi->compress.lzx);
 #endif
-       clear_mount_options(&sbi->options);
-
        kfree(sbi);
 }
 
@@ -525,7 +481,9 @@ static void ntfs_put_super(struct super_block *sb)
        /* Mark rw ntfs as clear, if possible. */
        ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
 
+       put_mount_options(sbi->options);
        put_ntfs(sbi);
+       sb->s_fs_info = NULL;
 
        sync_blockdev(sb->s_bdev);
 }
@@ -552,23 +510,21 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
 {
        struct super_block *sb = root->d_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
-       struct ntfs_mount_options *opts = &sbi->options;
+       struct ntfs_mount_options *opts = sbi->options;
        struct user_namespace *user_ns = seq_user_ns(m);
 
-       if (opts->uid)
-               seq_printf(m, ",uid=%u",
-                          from_kuid_munged(user_ns, opts->fs_uid));
-       if (opts->gid)
-               seq_printf(m, ",gid=%u",
-                          from_kgid_munged(user_ns, opts->fs_gid));
+       seq_printf(m, ",uid=%u",
+                 from_kuid_munged(user_ns, opts->fs_uid));
+       seq_printf(m, ",gid=%u",
+                 from_kgid_munged(user_ns, opts->fs_gid));
        if (opts->fmask)
                seq_printf(m, ",fmask=%04o", ~opts->fs_fmask_inv);
        if (opts->dmask)
                seq_printf(m, ",dmask=%04o", ~opts->fs_dmask_inv);
        if (opts->nls)
-               seq_printf(m, ",nls=%s", opts->nls->charset);
+               seq_printf(m, ",iocharset=%s", opts->nls->charset);
        else
-               seq_puts(m, ",nls=utf8");
+               seq_puts(m, ",iocharset=utf8");
        if (opts->sys_immutable)
                seq_puts(m, ",sys_immutable");
        if (opts->discard)
@@ -581,14 +537,12 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
                seq_puts(m, ",nohidden");
        if (opts->force)
                seq_puts(m, ",force");
-       if (opts->no_acs_rules)
-               seq_puts(m, ",no_acs_rules");
+       if (opts->noacsrules)
+               seq_puts(m, ",noacsrules");
        if (opts->prealloc)
                seq_puts(m, ",prealloc");
        if (sb->s_flags & SB_POSIXACL)
                seq_puts(m, ",acl");
-       if (sb->s_flags & SB_NOATIME)
-               seq_puts(m, ",noatime");
 
        return 0;
 }
@@ -643,7 +597,6 @@ static const struct super_operations ntfs_sops = {
        .statfs = ntfs_statfs,
        .show_options = ntfs_show_options,
        .sync_fs = ntfs_sync_fs,
-       .remount_fs = ntfs_remount,
        .write_inode = ntfs3_write_inode,
 };
 
@@ -729,7 +682,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        int err;
        u32 mb, gb, boot_sector_size, sct_per_clst, record_size;
-       u64 sectors, clusters, fs_size, mlcn, mlcn2;
+       u64 sectors, clusters, mlcn, mlcn2;
        struct NTFS_BOOT *boot;
        struct buffer_head *bh;
        struct MFT_REC *rec;
@@ -787,20 +740,20 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
                goto out;
        }
 
-       sbi->sector_size = boot_sector_size;
-       sbi->sector_bits = blksize_bits(boot_sector_size);
-       fs_size = (sectors + 1) << sbi->sector_bits;
+       sbi->volume.size = sectors * boot_sector_size;
 
-       gb = format_size_gb(fs_size, &mb);
+       gb = format_size_gb(sbi->volume.size + boot_sector_size, &mb);
 
        /*
         * - Volume formatted and mounted with the same sector size.
         * - Volume formatted 4K and mounted as 512.
         * - Volume formatted 512 and mounted as 4K.
         */
-       if (sbi->sector_size != sector_size) {
-               ntfs_warn(sb,
-                         "Different NTFS' sector size and media sector size");
+       if (boot_sector_size != sector_size) {
+               ntfs_warn(
+                       sb,
+                       "Different NTFS' sector size (%u) and media sector size (%u)",
+                       boot_sector_size, sector_size);
                dev_size += sector_size - 1;
        }
 
@@ -810,8 +763,19 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
        sbi->mft.lbo = mlcn << sbi->cluster_bits;
        sbi->mft.lbo2 = mlcn2 << sbi->cluster_bits;
 
-       if (sbi->cluster_size < sbi->sector_size)
+       /* Compare boot's cluster and sector. */
+       if (sbi->cluster_size < boot_sector_size)
+               goto out;
+
+       /* Compare boot's cluster and media sector. */
+       if (sbi->cluster_size < sector_size) {
+               /* No way to use ntfs_get_block in this case. */
+               ntfs_err(
+                       sb,
+                       "Failed to mount 'cause NTFS's cluster size (%u) is less than media sector size (%u)",
+                       sbi->cluster_size, sector_size);
                goto out;
+       }
 
        sbi->cluster_mask = sbi->cluster_size - 1;
        sbi->cluster_mask_inv = ~(u64)sbi->cluster_mask;
@@ -836,10 +800,9 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
                                  : (u32)boot->index_size << sbi->cluster_bits;
 
        sbi->volume.ser_num = le64_to_cpu(boot->serial_num);
-       sbi->volume.size = sectors << sbi->sector_bits;
 
        /* Warning if RAW volume. */
-       if (dev_size < fs_size) {
+       if (dev_size < sbi->volume.size + boot_sector_size) {
                u32 mb0, gb0;
 
                gb0 = format_size_gb(dev_size, &mb0);
@@ -883,8 +846,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
        rec->total = cpu_to_le32(sbi->record_size);
        ((struct ATTRIB *)Add2Ptr(rec, ao))->type = ATTR_END;
 
-       if (sbi->cluster_size < PAGE_SIZE)
-               sb_set_blocksize(sb, sbi->cluster_size);
+       sb_set_blocksize(sb, min_t(u32, sbi->cluster_size, PAGE_SIZE));
 
        sbi->block_mask = sb->s_blocksize - 1;
        sbi->blocks_per_cluster = sbi->cluster_size >> sb->s_blocksize_bits;
@@ -897,9 +859,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
        if (clusters >= (1ull << (64 - sbi->cluster_bits)))
                sbi->maxbytes = -1;
        sbi->maxbytes_sparse = -1;
+       sb->s_maxbytes = MAX_LFS_FILESIZE;
 #else
        /* Maximum size for sparse file. */
        sbi->maxbytes_sparse = (1ull << (sbi->cluster_bits + 32)) - 1;
+       sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
 #endif
 
        err = 0;
@@ -913,14 +877,13 @@ out:
 /*
  * ntfs_fill_super - Try to mount.
  */
-static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
+static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
 {
        int err;
-       struct ntfs_sb_info *sbi;
+       struct ntfs_sb_info *sbi = sb->s_fs_info;
        struct block_device *bdev = sb->s_bdev;
-       struct inode *bd_inode = bdev->bd_inode;
-       struct request_queue *rq = bdev_get_queue(bdev);
-       struct inode *inode = NULL;
+       struct request_queue *rq;
+       struct inode *inode;
        struct ntfs_inode *ni;
        size_t i, tt;
        CLST vcn, lcn, len;
@@ -928,18 +891,11 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        const struct VOLUME_INFO *info;
        u32 idx, done, bytes;
        struct ATTR_DEF_ENTRY *t;
-       u16 *upcase = NULL;
        u16 *shared;
-       bool is_ro;
        struct MFT_REF ref;
 
        ref.high = 0;
 
-       sbi = kzalloc(sizeof(struct ntfs_sb_info), GFP_NOFS);
-       if (!sbi)
-               return -ENOMEM;
-
-       sb->s_fs_info = sbi;
        sbi->sb = sb;
        sb->s_flags |= SB_NODIRATIME;
        sb->s_magic = 0x7366746e; // "ntfs"
@@ -948,41 +904,27 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec
        sb->s_xattr = ntfs_xattr_handlers;
 
-       ratelimit_state_init(&sbi->msg_ratelimit, DEFAULT_RATELIMIT_INTERVAL,
-                            DEFAULT_RATELIMIT_BURST);
-
-       err = ntfs_parse_options(sb, data, silent, &sbi->options);
-       if (err)
+       sbi->options->nls = ntfs_load_nls(sbi->options->nls_name);
+       if (IS_ERR(sbi->options->nls)) {
+               sbi->options->nls = NULL;
+               errorf(fc, "Cannot load nls %s", sbi->options->nls_name);
+               err = -EINVAL;
                goto out;
+       }
 
-       if (!rq || !blk_queue_discard(rq) || !rq->limits.discard_granularity) {
-               ;
-       } else {
+       rq = bdev_get_queue(bdev);
+       if (blk_queue_discard(rq) && rq->limits.discard_granularity) {
                sbi->discard_granularity = rq->limits.discard_granularity;
                sbi->discard_granularity_mask_inv =
                        ~(u64)(sbi->discard_granularity - 1);
        }
 
-       sb_set_blocksize(sb, PAGE_SIZE);
-
        /* Parse boot. */
        err = ntfs_init_from_boot(sb, rq ? queue_logical_block_size(rq) : 512,
-                                 bd_inode->i_size);
+                                 bdev->bd_inode->i_size);
        if (err)
                goto out;
 
-#ifdef CONFIG_NTFS3_64BIT_CLUSTER
-       sb->s_maxbytes = MAX_LFS_FILESIZE;
-#else
-       sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
-#endif
-
-       mutex_init(&sbi->compress.mtx_lznt);
-#ifdef CONFIG_NTFS3_LZX_XPRESS
-       mutex_init(&sbi->compress.mtx_xpress);
-       mutex_init(&sbi->compress.mtx_lzx);
-#endif
-
        /*
         * Load $Volume. This should be done before $LogFile
         * 'cause 'sbi->volume.ni' is used 'ntfs_set_state'.
@@ -991,9 +933,8 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        ref.seq = cpu_to_le16(MFT_REC_VOL);
        inode = ntfs_iget5(sb, &ref, &NAME_VOLUME);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load $Volume.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
@@ -1015,36 +956,33 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        } else {
                /* Should we break mounting here? */
                //err = -EINVAL;
-               //goto out;
+               //goto put_inode_out;
        }
 
        attr = ni_find_attr(ni, attr, NULL, ATTR_VOL_INFO, NULL, 0, NULL, NULL);
        if (!attr || is_attr_ext(attr)) {
                err = -EINVAL;
-               goto out;
+               goto put_inode_out;
        }
 
        info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
        if (!info) {
                err = -EINVAL;
-               goto out;
+               goto put_inode_out;
        }
 
        sbi->volume.major_ver = info->major_ver;
        sbi->volume.minor_ver = info->minor_ver;
        sbi->volume.flags = info->flags;
-
        sbi->volume.ni = ni;
-       inode = NULL;
 
        /* Load $MFTMirr to estimate recs_mirr. */
        ref.low = cpu_to_le32(MFT_REC_MIRR);
        ref.seq = cpu_to_le16(MFT_REC_MIRR);
        inode = ntfs_iget5(sb, &ref, &NAME_MIRROR);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load $MFTMirr.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
@@ -1058,9 +996,8 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        ref.seq = cpu_to_le16(MFT_REC_LOG);
        inode = ntfs_iget5(sb, &ref, &NAME_LOGFILE);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load \x24LogFile.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
@@ -1068,22 +1005,19 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
 
        err = ntfs_loadlog_and_replay(ni, sbi);
        if (err)
-               goto out;
+               goto put_inode_out;
 
        iput(inode);
-       inode = NULL;
-
-       is_ro = sb_rdonly(sbi->sb);
 
        if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
-               if (!is_ro) {
+               if (!sb_rdonly(sb)) {
                        ntfs_warn(sb,
                                  "failed to replay log file. Can't mount rw!");
                        err = -EINVAL;
                        goto out;
                }
        } else if (sbi->volume.flags & VOLUME_FLAG_DIRTY) {
-               if (!is_ro && !sbi->options.force) {
+               if (!sb_rdonly(sb) && !sbi->options->force) {
                        ntfs_warn(
                                sb,
                                "volume is dirty and \"force\" flag is not set!");
@@ -1098,9 +1032,8 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
 
        inode = ntfs_iget5(sb, &ref, &NAME_MFT);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load $MFT.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
@@ -1112,11 +1045,11 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
 
        err = wnd_init(&sbi->mft.bitmap, sb, tt);
        if (err)
-               goto out;
+               goto put_inode_out;
 
        err = ni_load_all_mi(ni);
        if (err)
-               goto out;
+               goto put_inode_out;
 
        sbi->mft.ni = ni;
 
@@ -1125,9 +1058,8 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        ref.seq = cpu_to_le16(MFT_REC_BADCLUST);
        inode = ntfs_iget5(sb, &ref, &NAME_BADCLUS);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load $BadClus.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
@@ -1150,18 +1082,15 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        ref.seq = cpu_to_le16(MFT_REC_BITMAP);
        inode = ntfs_iget5(sb, &ref, &NAME_BITMAP);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load $Bitmap.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
-       ni = ntfs_i(inode);
-
 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
        if (inode->i_size >> 32) {
                err = -EINVAL;
-               goto out;
+               goto put_inode_out;
        }
 #endif
 
@@ -1169,14 +1098,14 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        tt = sbi->used.bitmap.nbits;
        if (inode->i_size < bitmap_size(tt)) {
                err = -EINVAL;
-               goto out;
+               goto put_inode_out;
        }
 
        /* Not necessary. */
        sbi->used.bitmap.set_tail = true;
-       err = wnd_init(&sbi->used.bitmap, sbi->sb, tt);
+       err = wnd_init(&sbi->used.bitmap, sb, tt);
        if (err)
-               goto out;
+               goto put_inode_out;
 
        iput(inode);
 
@@ -1188,23 +1117,22 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        /* Load $AttrDef. */
        ref.low = cpu_to_le32(MFT_REC_ATTR);
        ref.seq = cpu_to_le16(MFT_REC_ATTR);
-       inode = ntfs_iget5(sbi->sb, &ref, &NAME_ATTRDEF);
+       inode = ntfs_iget5(sb, &ref, &NAME_ATTRDEF);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load $AttrDef -> %d", err);
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
        if (inode->i_size < sizeof(struct ATTR_DEF_ENTRY)) {
                err = -EINVAL;
-               goto out;
+               goto put_inode_out;
        }
        bytes = inode->i_size;
        sbi->def_table = t = kmalloc(bytes, GFP_NOFS);
        if (!t) {
                err = -ENOMEM;
-               goto out;
+               goto put_inode_out;
        }
 
        for (done = idx = 0; done < bytes; done += PAGE_SIZE, idx++) {
@@ -1213,7 +1141,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
 
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
-                       goto out;
+                       goto put_inode_out;
                }
                memcpy(Add2Ptr(t, done), page_address(page),
                       min(PAGE_SIZE, tail));
@@ -1221,7 +1149,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
 
                if (!idx && ATTR_STD != t->type) {
                        err = -EINVAL;
-                       goto out;
+                       goto put_inode_out;
                }
        }
 
@@ -1254,33 +1182,24 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
        ref.seq = cpu_to_le16(MFT_REC_UPCASE);
        inode = ntfs_iget5(sb, &ref, &NAME_UPCASE);
        if (IS_ERR(inode)) {
+               ntfs_err(sb, "Failed to load $UpCase.");
                err = PTR_ERR(inode);
-               ntfs_err(sb, "Failed to load \x24LogFile.");
-               inode = NULL;
                goto out;
        }
 
-       ni = ntfs_i(inode);
-
        if (inode->i_size != 0x10000 * sizeof(short)) {
                err = -EINVAL;
-               goto out;
-       }
-
-       sbi->upcase = upcase = kvmalloc(0x10000 * sizeof(short), GFP_KERNEL);
-       if (!upcase) {
-               err = -ENOMEM;
-               goto out;
+               goto put_inode_out;
        }
 
        for (idx = 0; idx < (0x10000 * sizeof(short) >> PAGE_SHIFT); idx++) {
                const __le16 *src;
-               u16 *dst = Add2Ptr(upcase, idx << PAGE_SHIFT);
+               u16 *dst = Add2Ptr(sbi->upcase, idx << PAGE_SHIFT);
                struct page *page = ntfs_map_page(inode->i_mapping, idx);
 
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
-                       goto out;
+                       goto put_inode_out;
                }
 
                src = page_address(page);
@@ -1294,14 +1213,13 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
                ntfs_unmap_page(page);
        }
 
-       shared = ntfs_set_shared(upcase, 0x10000 * sizeof(short));
-       if (shared && upcase != shared) {
+       shared = ntfs_set_shared(sbi->upcase, 0x10000 * sizeof(short));
+       if (shared && sbi->upcase != shared) {
+               kvfree(sbi->upcase);
                sbi->upcase = shared;
-               kvfree(upcase);
        }
 
        iput(inode);
-       inode = NULL;
 
        if (is_ntfs3(sbi)) {
                /* Load $Secure. */
@@ -1331,34 +1249,31 @@ load_root:
        ref.seq = cpu_to_le16(MFT_REC_ROOT);
        inode = ntfs_iget5(sb, &ref, &NAME_ROOT);
        if (IS_ERR(inode)) {
-               err = PTR_ERR(inode);
                ntfs_err(sb, "Failed to load root.");
-               inode = NULL;
+               err = PTR_ERR(inode);
                goto out;
        }
 
-       ni = ntfs_i(inode);
-
        sb->s_root = d_make_root(inode);
-
        if (!sb->s_root) {
-               err = -EINVAL;
-               goto out;
+               err = -ENOMEM;
+               goto put_inode_out;
        }
 
+       fc->fs_private = NULL;
+
        return 0;
 
-out:
+put_inode_out:
        iput(inode);
-
-       if (sb->s_root) {
-               d_drop(sb->s_root);
-               sb->s_root = NULL;
-       }
-
+out:
+       /*
+        * Free resources here.
+        * ntfs_fs_free will be called with fc->s_fs_info = NULL
+        */
        put_ntfs(sbi);
-
        sb->s_fs_info = NULL;
+
        return err;
 }
 
@@ -1403,7 +1318,7 @@ int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
        if (sbi->flags & NTFS_FLAGS_NODISCARD)
                return -EOPNOTSUPP;
 
-       if (!sbi->options.discard)
+       if (!sbi->options->discard)
                return -EOPNOTSUPP;
 
        lbo = (u64)lcn << sbi->cluster_bits;
@@ -1428,19 +1343,99 @@ int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
        return err;
 }
 
-static struct dentry *ntfs_mount(struct file_system_type *fs_type, int flags,
-                                const char *dev_name, void *data)
+static int ntfs_fs_get_tree(struct fs_context *fc)
+{
+       return get_tree_bdev(fc, ntfs_fill_super);
+}
+
+/*
+ * ntfs_fs_free - Free fs_context.
+ *
+ * Note that this will be called after fill_super and reconfigure
+ * even when they succeed, so on success they must take over the
+ * pointers they need and clear them in the fs_context.
+ */
+static void ntfs_fs_free(struct fs_context *fc)
 {
-       return mount_bdev(fs_type, flags, dev_name, data, ntfs_fill_super);
+       struct ntfs_mount_options *opts = fc->fs_private;
+       struct ntfs_sb_info *sbi = fc->s_fs_info;
+
+       if (sbi)
+               put_ntfs(sbi);
+
+       if (opts)
+               put_mount_options(opts);
+}
+
+static const struct fs_context_operations ntfs_context_ops = {
+       .parse_param    = ntfs_fs_parse_param,
+       .get_tree       = ntfs_fs_get_tree,
+       .reconfigure    = ntfs_fs_reconfigure,
+       .free           = ntfs_fs_free,
+};
+
+/*
+ * ntfs_init_fs_context - Initialize sbi and opts
+ *
+ * This is called on mount/remount. We initialize the options first,
+ * so that on remount we can reuse just them.
+ */
+static int ntfs_init_fs_context(struct fs_context *fc)
+{
+       struct ntfs_mount_options *opts;
+       struct ntfs_sb_info *sbi;
+
+       opts = kzalloc(sizeof(struct ntfs_mount_options), GFP_NOFS);
+       if (!opts)
+               return -ENOMEM;
+
+       /* Default options. */
+       opts->fs_uid = current_uid();
+       opts->fs_gid = current_gid();
+       opts->fs_fmask_inv = ~current_umask();
+       opts->fs_dmask_inv = ~current_umask();
+
+       if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE)
+               goto ok;
+
+       sbi = kzalloc(sizeof(struct ntfs_sb_info), GFP_NOFS);
+       if (!sbi)
+               goto free_opts;
+
+       sbi->upcase = kvmalloc(0x10000 * sizeof(short), GFP_KERNEL);
+       if (!sbi->upcase)
+               goto free_sbi;
+
+       ratelimit_state_init(&sbi->msg_ratelimit, DEFAULT_RATELIMIT_INTERVAL,
+                            DEFAULT_RATELIMIT_BURST);
+
+       mutex_init(&sbi->compress.mtx_lznt);
+#ifdef CONFIG_NTFS3_LZX_XPRESS
+       mutex_init(&sbi->compress.mtx_xpress);
+       mutex_init(&sbi->compress.mtx_lzx);
+#endif
+
+       sbi->options = opts;
+       fc->s_fs_info = sbi;
+ok:
+       fc->fs_private = opts;
+       fc->ops = &ntfs_context_ops;
+
+       return 0;
+free_sbi:
+       kfree(sbi);
+free_opts:
+       kfree(opts);
+       return -ENOMEM;
 }
 
 // clang-format off
 static struct file_system_type ntfs_fs_type = {
-       .owner          = THIS_MODULE,
-       .name           = "ntfs3",
-       .mount          = ntfs_mount,
-       .kill_sb        = kill_block_super,
-       .fs_flags       = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+       .owner                  = THIS_MODULE,
+       .name                   = "ntfs3",
+       .init_fs_context        = ntfs_init_fs_context,
+       .parameters             = ntfs_fs_parameters,
+       .kill_sb                = kill_block_super,
+       .fs_flags               = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
 };
 // clang-format on
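As a side note on the new mount API wiring above, the following standalone userspace sketch mimics the lifecycle the VFS drives: init_fs_context allocates per-mount state, get_tree consumes it on success, and the free hook runs unconditionally afterwards, which is why ntfs_fill_super clears fc->fs_private when it succeeds. The toy_* names and structs are invented for illustration and are not kernel APIs.

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_ctx {
	char *opts;     /* stands in for fc->fs_private */
	char *sb_info;  /* stands in for fc->s_fs_info  */
};

struct toy_sb {
	char *opts;
	char *info;
};

static struct toy_sb sb;  /* the "mounted superblock" */

static int toy_init_fs_context(struct toy_ctx *fc)
{
	fc->opts = strdup("default options");
	fc->sb_info = strdup("superblock info");
	return (fc->opts && fc->sb_info) ? 0 : -1;
}

static int toy_get_tree(struct toy_ctx *fc)
{
	/* "fill_super" succeeded: the superblock takes ownership, so the
	 * context drops its pointers, mirroring fc->fs_private = NULL above. */
	sb.opts = fc->opts;
	sb.info = fc->sb_info;
	fc->opts = NULL;
	fc->sb_info = NULL;
	return 0;
}

static void toy_fs_free(struct toy_ctx *fc)
{
	/* Runs on success and on failure; frees whatever is still owned. */
	free(fc->opts);
	free(fc->sb_info);
}

static void toy_kill_sb(void)
{
	free(sb.opts);
	free(sb.info);
}

int main(void)
{
	struct toy_ctx fc = { 0 };

	if (toy_init_fs_context(&fc) == 0)
		toy_get_tree(&fc);
	toy_fs_free(&fc);   /* the context is always torn down */
	toy_kill_sb();      /* "umount" */
	printf("done\n");
	return 0;
}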
 
index bbeba778237eefc318b00e04b05ad0ac1592e1ec..b5e8256fd710d5263b2be07b92df14cb8eea3c35 100644 (file)
@@ -5,13 +5,9 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
-#include <linux/module.h>
-#include <linux/nls.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
 
-#include "debug.h"
-#include "ntfs.h"
 #include "ntfs_fs.h"
 
 static inline u16 upcase_unicode_char(const u16 *upcase, u16 chr)
index 7282d85c4ece447c42118b81ae82d9924003a0ce..afd0ddad826ff49f661ce758bcbecec46921fd1c 100644 (file)
@@ -5,10 +5,7 @@
  *
  */
 
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/fs.h>
-#include <linux/nls.h>
 #include <linux/posix_acl.h>
 #include <linux/posix_acl_xattr.h>
 #include <linux/xattr.h>
@@ -78,6 +75,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
                        size_t add_bytes, const struct EA_INFO **info)
 {
        int err;
+       struct ntfs_sb_info *sbi = ni->mi.sbi;
        struct ATTR_LIST_ENTRY *le = NULL;
        struct ATTRIB *attr_info, *attr_ea;
        void *ea_p;
@@ -102,10 +100,10 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
 
        /* Check Ea limit. */
        size = le32_to_cpu((*info)->size);
-       if (size > ni->mi.sbi->ea_max_size)
+       if (size > sbi->ea_max_size)
                return -EFBIG;
 
-       if (attr_size(attr_ea) > ni->mi.sbi->ea_max_size)
+       if (attr_size(attr_ea) > sbi->ea_max_size)
                return -EFBIG;
 
        /* Allocate memory for packed Ea. */
@@ -113,15 +111,16 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
        if (!ea_p)
                return -ENOMEM;
 
-       if (attr_ea->non_res) {
+       if (!size) {
+               ;
+       } else if (attr_ea->non_res) {
                struct runs_tree run;
 
                run_init(&run);
 
                err = attr_load_runs(attr_ea, ni, &run, NULL);
                if (!err)
-                       err = ntfs_read_run_nb(ni->mi.sbi, &run, 0, ea_p, size,
-                                              NULL);
+                       err = ntfs_read_run_nb(sbi, &run, 0, ea_p, size, NULL);
                run_close(&run);
 
                if (err)
@@ -260,7 +259,7 @@ out:
 
 static noinline int ntfs_set_ea(struct inode *inode, const char *name,
                                size_t name_len, const void *value,
-                               size_t val_size, int flags, int locked)
+                               size_t val_size, int flags)
 {
        struct ntfs_inode *ni = ntfs_i(inode);
        struct ntfs_sb_info *sbi = ni->mi.sbi;
@@ -279,8 +278,7 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
        u64 new_sz;
        void *p;
 
-       if (!locked)
-               ni_lock(ni);
+       ni_lock(ni);
 
        run_init(&ea_run);
 
@@ -370,21 +368,22 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
        new_ea->name[name_len] = 0;
        memcpy(new_ea->name + name_len + 1, value, val_size);
        new_pack = le16_to_cpu(ea_info.size_pack) + packed_ea_size(new_ea);
-
-       /* Should fit into 16 bits. */
-       if (new_pack > 0xffff) {
-               err = -EFBIG; // -EINVAL?
-               goto out;
-       }
        ea_info.size_pack = cpu_to_le16(new_pack);
-
        /* New size of ATTR_EA. */
        size += add;
-       if (size > sbi->ea_max_size) {
+       ea_info.size = cpu_to_le32(size);
+
+       /*
+        * 1. Check ea_info.size_pack for overflow.
+        * 2. The new attribute size must fit the limit from $AttrDef.
+        */
+       if (new_pack > 0xffff || size > sbi->ea_max_size) {
+               ntfs_inode_warn(
+                       inode,
+                       "The size of extended attributes must not exceed 64KiB");
                err = -EFBIG; // -EINVAL?
                goto out;
        }
-       ea_info.size = cpu_to_le32(size);
 
 update_ea:
 
@@ -444,7 +443,7 @@ update_ea:
                /* Delete xattr, ATTR_EA */
                ni_remove_attr_le(ni, attr, mi, le);
        } else if (attr->non_res) {
-               err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size);
+               err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size, 0);
                if (err)
                        goto out;
        } else {
@@ -468,8 +467,7 @@ update_ea:
        mark_inode_dirty(&ni->vfs_inode);
 
 out:
-       if (!locked)
-               ni_unlock(ni);
+       ni_unlock(ni);
 
        run_close(&ea_run);
        kfree(ea_all);
@@ -478,12 +476,6 @@ out:
 }
 
 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
-static inline void ntfs_posix_acl_release(struct posix_acl *acl)
-{
-       if (acl && refcount_dec_and_test(&acl->a_refcount))
-               kfree(acl);
-}
-
 static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
                                         struct inode *inode, int type,
                                         int locked)
@@ -521,12 +513,15 @@ static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
        /* Translate extended attribute to acl. */
        if (err >= 0) {
                acl = posix_acl_from_xattr(mnt_userns, buf, err);
-               if (!IS_ERR(acl))
-                       set_cached_acl(inode, type, acl);
+       } else if (err == -ENODATA) {
+               acl = NULL;
        } else {
-               acl = err == -ENODATA ? NULL : ERR_PTR(err);
+               acl = ERR_PTR(err);
        }
 
+       if (!IS_ERR(acl))
+               set_cached_acl(inode, type, acl);
+
        __putname(buf);
 
        return acl;
@@ -546,12 +541,13 @@ struct posix_acl *ntfs_get_acl(struct inode *inode, int type, bool rcu)
 
 static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
                                    struct inode *inode, struct posix_acl *acl,
-                                   int type, int locked)
+                                   int type)
 {
        const char *name;
        size_t size, name_len;
        void *value = NULL;
        int err = 0;
+       int flags;
 
        if (S_ISLNK(inode->i_mode))
                return -EOPNOTSUPP;
@@ -561,22 +557,15 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
                if (acl) {
                        umode_t mode = inode->i_mode;
 
-                       err = posix_acl_equiv_mode(acl, &mode);
-                       if (err < 0)
-                               return err;
+                       err = posix_acl_update_mode(mnt_userns, inode, &mode,
+                                                   &acl);
+                       if (err)
+                               goto out;
 
                        if (inode->i_mode != mode) {
                                inode->i_mode = mode;
                                mark_inode_dirty(inode);
                        }
-
-                       if (!err) {
-                               /*
-                                * ACL can be exactly represented in the
-                                * traditional file mode permission bits.
-                                */
-                               acl = NULL;
-                       }
                }
                name = XATTR_NAME_POSIX_ACL_ACCESS;
                name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
@@ -594,20 +583,24 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
        }
 
        if (!acl) {
+               /* Remove the xattr if the ACL can be represented via the mode bits. */
                size = 0;
                value = NULL;
+               flags = XATTR_REPLACE;
        } else {
                size = posix_acl_xattr_size(acl->a_count);
                value = kmalloc(size, GFP_NOFS);
                if (!value)
                        return -ENOMEM;
-
                err = posix_acl_to_xattr(mnt_userns, acl, value, size);
                if (err < 0)
                        goto out;
+               flags = 0;
        }
 
-       err = ntfs_set_ea(inode, name, name_len, value, size, 0, locked);
+       err = ntfs_set_ea(inode, name, name_len, value, size, flags);
+       if (err == -ENODATA && !size)
+               err = 0; /* Removing a nonexistent xattr. */
        if (!err)
                set_cached_acl(inode, type, acl);
 
@@ -623,68 +616,7 @@ out:
 int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
                 struct posix_acl *acl, int type)
 {
-       return ntfs_set_acl_ex(mnt_userns, inode, acl, type, 0);
-}
-
-static int ntfs_xattr_get_acl(struct user_namespace *mnt_userns,
-                             struct inode *inode, int type, void *buffer,
-                             size_t size)
-{
-       struct posix_acl *acl;
-       int err;
-
-       if (!(inode->i_sb->s_flags & SB_POSIXACL)) {
-               ntfs_inode_warn(inode, "add mount option \"acl\" to use acl");
-               return -EOPNOTSUPP;
-       }
-
-       acl = ntfs_get_acl(inode, type, false);
-       if (IS_ERR(acl))
-               return PTR_ERR(acl);
-
-       if (!acl)
-               return -ENODATA;
-
-       err = posix_acl_to_xattr(mnt_userns, acl, buffer, size);
-       ntfs_posix_acl_release(acl);
-
-       return err;
-}
-
-static int ntfs_xattr_set_acl(struct user_namespace *mnt_userns,
-                             struct inode *inode, int type, const void *value,
-                             size_t size)
-{
-       struct posix_acl *acl;
-       int err;
-
-       if (!(inode->i_sb->s_flags & SB_POSIXACL)) {
-               ntfs_inode_warn(inode, "add mount option \"acl\" to use acl");
-               return -EOPNOTSUPP;
-       }
-
-       if (!inode_owner_or_capable(mnt_userns, inode))
-               return -EPERM;
-
-       if (!value) {
-               acl = NULL;
-       } else {
-               acl = posix_acl_from_xattr(mnt_userns, value, size);
-               if (IS_ERR(acl))
-                       return PTR_ERR(acl);
-
-               if (acl) {
-                       err = posix_acl_valid(mnt_userns, acl);
-                       if (err)
-                               goto release_and_out;
-               }
-       }
-
-       err = ntfs_set_acl(mnt_userns, inode, acl, type);
-
-release_and_out:
-       ntfs_posix_acl_release(acl);
-       return err;
+       return ntfs_set_acl_ex(mnt_userns, inode, acl, type);
 }
 
 /*
@@ -698,54 +630,27 @@ int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
        struct posix_acl *default_acl, *acl;
        int err;
 
-       /*
-        * TODO: Refactoring lock.
-        * ni_lock(dir) ... -> posix_acl_create(dir,...) -> ntfs_get_acl -> ni_lock(dir)
-        */
-       inode->i_default_acl = NULL;
-
-       default_acl = ntfs_get_acl_ex(mnt_userns, dir, ACL_TYPE_DEFAULT, 1);
-
-       if (!default_acl || default_acl == ERR_PTR(-EOPNOTSUPP)) {
-               inode->i_mode &= ~current_umask();
-               err = 0;
-               goto out;
-       }
-
-       if (IS_ERR(default_acl)) {
-               err = PTR_ERR(default_acl);
-               goto out;
-       }
-
-       acl = default_acl;
-       err = __posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
-       if (err < 0)
-               goto out1;
-       if (!err) {
-               posix_acl_release(acl);
-               acl = NULL;
-       }
+       err = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
+       if (err)
+               return err;
 
-       if (!S_ISDIR(inode->i_mode)) {
+       if (default_acl) {
+               err = ntfs_set_acl_ex(mnt_userns, inode, default_acl,
+                                     ACL_TYPE_DEFAULT);
                posix_acl_release(default_acl);
-               default_acl = NULL;
+       } else {
+               inode->i_default_acl = NULL;
        }
 
-       if (default_acl)
-               err = ntfs_set_acl_ex(mnt_userns, inode, default_acl,
-                                     ACL_TYPE_DEFAULT, 1);
-
        if (!acl)
                inode->i_acl = NULL;
-       else if (!err)
-               err = ntfs_set_acl_ex(mnt_userns, inode, acl, ACL_TYPE_ACCESS,
-                                     1);
-
-       posix_acl_release(acl);
-out1:
-       posix_acl_release(default_acl);
+       else {
+               if (!err)
+                       err = ntfs_set_acl_ex(mnt_userns, inode, acl,
+                                             ACL_TYPE_ACCESS);
+               posix_acl_release(acl);
+       }
 
-out:
        return err;
 }
 #endif
@@ -772,7 +677,7 @@ int ntfs_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode)
 int ntfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
                    int mask)
 {
-       if (ntfs_sb(inode->i_sb)->options.no_acs_rules) {
+       if (ntfs_sb(inode->i_sb)->options->noacsrules) {
                /* "No access rules" mode - Allow all changes. */
                return 0;
        }
@@ -880,23 +785,6 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
                goto out;
        }
 
-#ifdef CONFIG_NTFS3_FS_POSIX_ACL
-       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
-            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
-                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
-           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
-            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
-                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
-               /* TODO: init_user_ns? */
-               err = ntfs_xattr_get_acl(
-                       &init_user_ns, inode,
-                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
-                               ? ACL_TYPE_ACCESS
-                               : ACL_TYPE_DEFAULT,
-                       buffer, size);
-               goto out;
-       }
-#endif
        /* Deal with NTFS extended attribute. */
        err = ntfs_get_ea(inode, name, name_len, buffer, size, NULL);
 
@@ -1009,24 +897,8 @@ set_new_fa:
                goto out;
        }
 
-#ifdef CONFIG_NTFS3_FS_POSIX_ACL
-       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
-            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
-                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
-           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
-            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
-                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
-               err = ntfs_xattr_set_acl(
-                       mnt_userns, inode,
-                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
-                               ? ACL_TYPE_ACCESS
-                               : ACL_TYPE_DEFAULT,
-                       value, size);
-               goto out;
-       }
-#endif
        /* Deal with NTFS extended attribute. */
-       err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
+       err = ntfs_set_ea(inode, name, name_len, value, size, flags);
 
 out:
        return err;
@@ -1042,28 +914,29 @@ int ntfs_save_wsl_perm(struct inode *inode)
        int err;
        __le32 value;
 
+       /* TODO: refactor this, so we don't lock 4 times in ntfs_set_ea */
        value = cpu_to_le32(i_uid_read(inode));
        err = ntfs_set_ea(inode, "$LXUID", sizeof("$LXUID") - 1, &value,
-                         sizeof(value), 0, 0);
+                         sizeof(value), 0);
        if (err)
                goto out;
 
        value = cpu_to_le32(i_gid_read(inode));
        err = ntfs_set_ea(inode, "$LXGID", sizeof("$LXGID") - 1, &value,
-                         sizeof(value), 0, 0);
+                         sizeof(value), 0);
        if (err)
                goto out;
 
        value = cpu_to_le32(inode->i_mode);
        err = ntfs_set_ea(inode, "$LXMOD", sizeof("$LXMOD") - 1, &value,
-                         sizeof(value), 0, 0);
+                         sizeof(value), 0);
        if (err)
                goto out;
 
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                value = cpu_to_le32(inode->i_rdev);
                err = ntfs_set_ea(inode, "$LXDEV", sizeof("$LXDEV") - 1, &value,
-                                 sizeof(value), 0, 0);
+                                 sizeof(value), 0);
                if (err)
                        goto out;
        }
index 359524b7341fb3ba72d47ef35ab1f50221b001b5..801e60bab95557659c73614e228086df90837303 100644 (file)
@@ -3951,7 +3951,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
                oi = OCFS2_I(inode);
                oi->ip_dir_lock_gen++;
                mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
-               goto out;
+               goto out_forget;
        }
 
        if (!S_ISREG(inode->i_mode))
@@ -3982,6 +3982,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
                filemap_fdatawait(mapping);
        }
 
+out_forget:
        forget_all_cached_acls(inode);
 
 out:
index 1fefb2b8960e94a8c9a7a1b81a5e70b83af4520f..93c7c267de934de9b10040151d1646887b3596bf 100644 (file)
@@ -1219,9 +1219,13 @@ static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
                                goto out_dput;
                }
        } else {
-               if (!d_is_negative(newdentry) &&
-                   (!new_opaque || !ovl_is_whiteout(newdentry)))
-                       goto out_dput;
+               if (!d_is_negative(newdentry)) {
+                       if (!new_opaque || !ovl_is_whiteout(newdentry))
+                               goto out_dput;
+               } else {
+                       if (flags & RENAME_EXCHANGE)
+                               goto out_dput;
+               }
        }
 
        if (olddentry == trap)
index d081faa55e830e3e66469475e5e236d107c8b681..c88ac571593dc1924e9ea0fbf3ab5aeaa588a69f 100644 (file)
@@ -296,6 +296,12 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
        if (ret)
                return ret;
 
+       ret = -EINVAL;
+       if (iocb->ki_flags & IOCB_DIRECT &&
+           (!real.file->f_mapping->a_ops ||
+            !real.file->f_mapping->a_ops->direct_IO))
+               goto out_fdput;
+
        old_cred = ovl_override_creds(file_inode(file)->i_sb);
        if (is_sync_kiocb(iocb)) {
                ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
@@ -320,7 +326,7 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 out:
        revert_creds(old_cred);
        ovl_file_accessed(file);
-
+out_fdput:
        fdput(real);
 
        return ret;
@@ -349,6 +355,12 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
        if (ret)
                goto out_unlock;
 
+       ret = -EINVAL;
+       if (iocb->ki_flags & IOCB_DIRECT &&
+           (!real.file->f_mapping->a_ops ||
+            !real.file->f_mapping->a_ops->direct_IO))
+               goto out_fdput;
+
        if (!ovl_should_sync(OVL_FS(inode->i_sb)))
                ifl &= ~(IOCB_DSYNC | IOCB_SYNC);
 
@@ -384,6 +396,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
        }
 out:
        revert_creds(old_cred);
+out_fdput:
        fdput(real);
 
 out_unlock:
index a6ee23aadd2837829037a61beb22bfa24e99c50c..66645a5a35f306bc2213baff61166f03f9cb9e91 100644 (file)
 #include <linux/buffer_head.h>
 #include "qnx4.h"
 
+/*
+ * A qnx4 directory entry is an inode entry or link info
+ * depending on the status field in the last byte. The
+ * first byte is where the name starts either way, and a
+ * zero means it's empty.
+ *
+ * Also, due to a bug in gcc, we don't want to use the
+ * real (differently sized) name arrays in the inode and
+ * link entries, but always the 'de_name[]' one in the
+ * fake struct entry.
+ *
+ * See
+ *
+ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99578#c6
+ *
+ * for details, but basically gcc will take the size of the
+ * 'name' array from one of the used union entries randomly.
+ *
+ * This use of 'de_name[]' (48 bytes) avoids the false positive
+ * warnings that would happen if gcc decides to use 'inode.di_fname'
+ * (16 bytes) even when the pointer and size were to come from
+ * 'link.dl_fname' (48 bytes).
+ *
+ * In all cases the actual name pointer itself is the same, it's
+ * only the gcc internal 'what is the size of this field' logic
+ * that can get confused.
+ */
+union qnx4_directory_entry {
+       struct {
+               const char de_name[48];
+               u8 de_pad[15];
+               u8 de_status;
+       };
+       struct qnx4_inode_entry inode;
+       struct qnx4_link_info link;
+};
+
 static int qnx4_readdir(struct file *file, struct dir_context *ctx)
 {
        struct inode *inode = file_inode(file);
        unsigned int offset;
        struct buffer_head *bh;
-       struct qnx4_inode_entry *de;
-       struct qnx4_link_info *le;
        unsigned long blknum;
        int ix, ino;
        int size;
@@ -38,27 +73,27 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx)
                }
                ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
                for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
+                       union qnx4_directory_entry *de;
+
                        offset = ix * QNX4_DIR_ENTRY_SIZE;
-                       de = (struct qnx4_inode_entry *) (bh->b_data + offset);
-                       if (!de->di_fname[0])
+                       de = (union qnx4_directory_entry *) (bh->b_data + offset);
+
+                       if (!de->de_name[0])
                                continue;
-                       if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
+                       if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
                                continue;
-                       if (!(de->di_status & QNX4_FILE_LINK))
-                               size = QNX4_SHORT_NAME_MAX;
-                       else
-                               size = QNX4_NAME_MAX;
-                       size = strnlen(de->di_fname, size);
-                       QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname));
-                       if (!(de->di_status & QNX4_FILE_LINK))
+                       if (!(de->de_status & QNX4_FILE_LINK)) {
+                               size = sizeof(de->inode.di_fname);
                                ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
-                       else {
-                               le  = (struct qnx4_link_info*)de;
-                               ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) *
+                       else {
+                               size = sizeof(de->link.dl_fname);
+                               ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) *
                                        QNX4_INODES_PER_BLOCK +
-                                       le->dl_inode_ndx;
+                                       de->link.dl_inode_ndx;
                        }
-                       if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) {
+                       size = strnlen(de->de_name, size);
+                       QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->de_name));
+                       if (!dir_emit(ctx, de->de_name, size, ino, DT_UNKNOWN)) {
                                brelse(bh);
                                return 0;
                        }
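The union workaround above can be reproduced outside the kernel. A minimal sketch, with simplified stand-in layouts rather than the real qnx4 on-disk structures, shows the size being taken from the concrete member while the pointer goes through the always-large de_name[]:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>

struct short_entry {                    /* like qnx4_inode_entry */
	char di_fname[16];
	char di_pad[47];
	unsigned char di_status;
};

struct long_entry {                     /* like qnx4_link_info */
	char dl_fname[48];
	unsigned char dl_pad[15];
	unsigned char dl_status;
};

union dir_entry {
	struct {
		const char de_name[48]; /* big enough for either name */
		unsigned char de_pad[15];
		unsigned char de_status;
	};
	struct short_entry inode;
	struct long_entry link;
};

int main(void)
{
	union dir_entry de = { .link = { .dl_fname = "a-long-link-name" } };
	size_t n;

	/*
	 * The length bound comes from the concrete member, but the pointer
	 * goes through de_name, so gcc does not flag a 48-byte read as an
	 * overread of the 16-byte di_fname array.
	 */
	n = strnlen(de.de_name, sizeof(de.link.dl_fname));
	printf("%.*s\n", (int)n, de.de_name);
	return 0;
}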
index d01e8c9d7a3118eec4c0c6253e6ffc161e7be15f..926f87cd6af025f7bebadc47e3a568b3f36acb4b 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: LGPL-2.1+ */
 /*
- *   fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
+ *   SMB, CIFS, SMB2 FSCTL definitions
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2013
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 4f5e59f062846afb243333a08b6751616e8d03d0..37dd3fe5b1e981a127d9b718d65f1bb625dc470a 100644 (file)
 
 #define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
 
-#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
-#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
-#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
-#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')
+static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";
 
 static int follow_symlinks;
 module_param(follow_symlinks, int, 0444);
@@ -386,12 +383,7 @@ fail_nomem:
 
 static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
 {
-       unsigned char *options = data;
-
-       if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
-                      options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
-                      options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
-                      options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
+       if (data && !memcmp(data, VBSF_MOUNT_SIGNATURE, 4)) {
                vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
                return -EINVAL;
        }
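For illustration, the array-plus-memcmp form of the check above reduces to something like the following userspace sketch (the sample buffer contents are made up):

#include <stdio.h>
#include <string.h>

static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";

int main(void)
{
	/* Hypothetical monolithic mount-data blob starting with the old
	 * binary signature bytes 0x00 0xff 0xfe 0xfd. */
	unsigned char data[8] = { 0x00, 0xff, 0xfe, 0xfd, 1, 2, 3, 4 };

	if (!memcmp(data, VBSF_MOUNT_SIGNATURE, sizeof(VBSF_MOUNT_SIGNATURE)))
		puts("old binary mount data detected");
	return 0;
}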
index 77e159a0346b17078701dd1b3bf2cc5416f9e4e4..60a4372aa4d75f5157865bbf525bfb1a2cd4bb7e 100644 (file)
@@ -177,7 +177,7 @@ static int build_merkle_tree(struct file *filp,
         * (level 0) and ascending to the root node (level 'num_levels - 1').
         * Then at the end (level 'num_levels'), calculate the root hash.
         */
-       blocks = (inode->i_size + params->block_size - 1) >>
+       blocks = ((u64)inode->i_size + params->block_size - 1) >>
                 params->log_blocksize;
        for (level = 0; level <= params->num_levels; level++) {
                err = build_merkle_tree_level(filp, level, blocks, params,
index 60ff8af7219feafe90b06d5c8c5b0df301498186..92df87f5fa3881abec4f18d20686cc991558ced4 100644 (file)
@@ -89,7 +89,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
         */
 
        /* Compute number of levels and the number of blocks in each level */
-       blocks = (inode->i_size + params->block_size - 1) >> log_blocksize;
+       blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize;
        pr_debug("Data is %lld bytes (%llu blocks)\n", inode->i_size, blocks);
        while (blocks > 1) {
                if (params->num_levels >= FS_VERITY_MAX_LEVELS) {
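The (u64) cast above matters because i_size is a signed 64-bit quantity and block_size is an unsigned int, so the rounding addition would otherwise be performed in signed 64-bit arithmetic and can overflow (undefined behaviour) for sizes near S64_MAX. A small userspace sketch of the same arithmetic, with deliberately pathological values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t      i_size     = INT64_MAX - 100; /* near-S64_MAX file size */
	unsigned int block_size = 4096;
	unsigned int log_bs     = 12;

	/*
	 * Without the cast, i_size + block_size - 1 would be evaluated as
	 * int64_t and overflow.  Casting to u64 first keeps the rounding
	 * and the shift well defined.
	 */
	uint64_t blocks = ((uint64_t)i_size + block_size - 1) >> log_bs;

	printf("%llu blocks\n", (unsigned long long)blocks);
	return 0;
}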
index a0212e67d6f44d29fd11fcddd6ba5a5c351f431f..027faa8883aab6804aa2b18480b8df2a4e4c68bb 100644 (file)
@@ -14,14 +14,6 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
 }
 #endif
 
-#ifndef acpi_os_memmap
-static inline void __iomem *acpi_os_memmap(acpi_physical_address phys,
-                                           acpi_size size)
-{
-       return ioremap_cache(phys, size);
-}
-#endif
-
 extern bool acpi_permanent_mmap;
 
 void __iomem __ref
index e93375c710b932504d629bb68c3c08bc3ec24072..7ce93aaf69f8dbd45d054ff0b708d802cf786bfb 100644 (file)
@@ -957,7 +957,7 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
 
 #ifndef iounmap
 #define iounmap iounmap
-static inline void iounmap(void __iomem *addr)
+static inline void iounmap(volatile void __iomem *addr)
 {
 }
 #endif
@@ -1023,16 +1023,7 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
        port &= IO_SPACE_LIMIT;
        return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
 }
-#define __pci_ioport_unmap __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p)
-{
-       uintptr_t start = (uintptr_t) PCI_IOBASE;
-       uintptr_t addr = (uintptr_t) p;
-
-       if (addr >= start && addr < start + IO_SPACE_LIMIT)
-               return;
-       iounmap(p);
-}
+#define ARCH_HAS_GENERIC_IOPORT_MAP
 #endif
 
 #ifndef ioport_unmap
@@ -1048,21 +1039,10 @@ extern void ioport_unmap(void __iomem *p);
 #endif /* CONFIG_HAS_IOPORT_MAP */
 
 #ifndef CONFIG_GENERIC_IOMAP
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-
-#ifndef __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p) {}
-#endif
-
 #ifndef pci_iounmap
-#define pci_iounmap pci_iounmap
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
-       __pci_ioport_unmap(p);
-}
+#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
+#endif
 #endif
-#endif /* CONFIG_GENERIC_IOMAP */
 
 #ifndef xlate_dev_mem_ptr
 #define xlate_dev_mem_ptr xlate_dev_mem_ptr
index 9b3eb6d86200180d1fe6ff40687c807bb6107bc4..08237ae8b840d8baf5ce3028b1ceccaa5ff0199c 100644 (file)
@@ -110,16 +110,6 @@ static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
 }
 #endif
 
-#ifdef CONFIG_PCI
-/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
-struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-#elif defined(CONFIG_GENERIC_IOMAP)
-struct pci_dev;
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{ }
-#endif
-
 #include <asm-generic/pci_iomap.h>
 
 #endif
index c1ab6a6e72b535a0cecdc29a1a55dd6e65650f78..d3eae6cdbacbb81688bc8834c01524ff9f79a047 100644 (file)
@@ -197,10 +197,12 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number)
        return hv_vp_index[cpu_number];
 }
 
-static inline int cpumask_to_vpset(struct hv_vpset *vpset,
-                                   const struct cpumask *cpus)
+static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
+                                   const struct cpumask *cpus,
+                                   bool exclude_self)
 {
        int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+       int this_cpu = smp_processor_id();
 
        /* valid_bank_mask can represent up to 64 banks */
        if (hv_max_vp_index / 64 >= 64)
@@ -218,6 +220,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
         * Some banks may end up being empty but this is acceptable.
         */
        for_each_cpu(cpu, cpus) {
+               if (exclude_self && cpu == this_cpu)
+                       continue;
                vcpu = hv_cpu_number_to_vp_number(cpu);
                if (vcpu == VP_INVAL)
                        return -1;
@@ -232,6 +236,19 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
        return nr_bank;
 }
 
+static inline int cpumask_to_vpset(struct hv_vpset *vpset,
+                                   const struct cpumask *cpus)
+{
+       return __cpumask_to_vpset(vpset, cpus, false);
+}
+
+static inline int cpumask_to_vpset_noself(struct hv_vpset *vpset,
+                                   const struct cpumask *cpus)
+{
+       WARN_ON_ONCE(preemptible());
+       return __cpumask_to_vpset(vpset, cpus, true);
+}
+
 void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
 bool hv_is_hyperv_initialized(void);
 bool hv_is_hibernation_supported(void);
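The refactoring above funnels both callers through one walker that can skip the current CPU. A rough userspace sketch of that pattern, with a plain 64-bit mask standing in for the cpumask/vpset machinery and invented names:

#include <stdint.h>
#include <stdio.h>

static int mask_to_list(uint64_t cpus, int this_cpu, int exclude_self,
			int *out, int max)
{
	int n = 0;

	for (int cpu = 0; cpu < 64; cpu++) {
		if (!(cpus & (UINT64_C(1) << cpu)))
			continue;
		if (exclude_self && cpu == this_cpu)
			continue;
		if (n == max)
			return -1;      /* caller's buffer too small */
		out[n++] = cpu;
	}
	return n;
}

static int mask_to_list_all(uint64_t cpus, int *out, int max)
{
	return mask_to_list(cpus, -1, 0, out, max);
}

static int mask_to_list_noself(uint64_t cpus, int this_cpu, int *out, int max)
{
	return mask_to_list(cpus, this_cpu, 1, out, max);
}

int main(void)
{
	int list[64];
	uint64_t cpus = 0x17;                            /* {0, 1, 2, 4} */
	int all  = mask_to_list_all(cpus, list, 64);
	int rest = mask_to_list_noself(cpus, 2, list, 64);

	printf("%d cpus total, %d excluding self\n", all, rest); /* 4, 3 */
	return 0;
}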
index df636c6d8e6c3f8344af1b0a198b4d682500937c..5a2f9bf53384b600ba41a40085a1dafbec6e6f1e 100644 (file)
@@ -18,6 +18,7 @@ extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
 extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
                                        unsigned long offset,
                                        unsigned long maxlen);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
 /* Create a virtual mapping cookie for a port on a given PCI device.
  * Do not call this directly, it exists to make it easier for architectures
  * to override */
@@ -50,6 +51,8 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
 {
        return NULL;
 }
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
 #endif
 
 #endif /* __ASM_GENERIC_PCI_IOMAP_H */
index aa50bf2959fef5c0fe4ac6c837117491cc0f9fe2..f2984af2b85bd54383453e5f4e96f645db769ef4 100644 (file)
  * GCC 4.5 and later have a 32 bytes section alignment for structures.
  * Except GCC 4.9, that feels the need to align on 64 bytes.
  */
-#if __GNUC__ == 4 && __GNUC_MINOR__ == 9
-#define STRUCT_ALIGNMENT 64
-#else
 #define STRUCT_ALIGNMENT 32
-#endif
 #define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
 
 /*
index 24b40e5c160b2b43bdbe58bfedfc58ba2855aeff..018e776a34b9a90c06a0b2540b5aa0b0ef7fa36b 100644 (file)
@@ -613,7 +613,7 @@ void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
  * and is automatically cleaned up after the test case concludes. See &struct
  * kunit_resource for more information.
  */
-void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t flags);
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp);
 
 /**
  * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
@@ -657,9 +657,9 @@ static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
  *
  * See kcalloc() and kunit_kmalloc_array() for more information.
  */
-static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t flags)
+static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t gfp)
 {
-       return kunit_kmalloc_array(test, n, size, flags | __GFP_ZERO);
+       return kunit_kmalloc_array(test, n, size, gfp | __GFP_ZERO);
 }
 
 void kunit_cleanup(struct kunit *test);
index 864b9997efb20d55c5df3fd2b544575134e070e9..90f21898aad838660e993c3e9fce553ea4b7af56 100644 (file)
@@ -61,7 +61,6 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
-int kvm_pmu_probe_pmuver(void);
 #else
 struct kvm_pmu {
 };
@@ -118,8 +117,6 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
        return 0;
 }
 
-static inline int kvm_pmu_probe_pmuver(void) { return 0xf; }
-
 #endif
 
 #endif
index 7d1cabe152622f7b42bcc691af539f4e0dd85989..63ccb525219022e17e39771c669de161ed54666a 100644 (file)
@@ -321,10 +321,20 @@ asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0);
  * from register 0 to 3 on return from the SMC instruction.  An optional
  * quirk structure provides vendor specific behavior.
  */
+#ifdef CONFIG_HAVE_ARM_SMCCC
 asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
                        unsigned long a2, unsigned long a3, unsigned long a4,
                        unsigned long a5, unsigned long a6, unsigned long a7,
                        struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+#else
+static inline void __arm_smccc_smc(unsigned long a0, unsigned long a1,
+                       unsigned long a2, unsigned long a3, unsigned long a4,
+                       unsigned long a5, unsigned long a6, unsigned long a7,
+                       struct arm_smccc_res *res, struct arm_smccc_quirk *quirk)
+{
+       *res = (struct arm_smccc_res){};
+}
+#endif
 
 /**
  * __arm_smccc_hvc() - make HVC calls
index f4c16f19f83e3edcf11abb4541a538ef468d395a..020a7d5bf4701afbf1e504476ecd41323bd1fa6a 100644 (file)
@@ -578,11 +578,12 @@ struct btf_func_model {
  * programs only. Should not be used with normal calls and indirect calls.
  */
 #define BPF_TRAMP_F_SKIP_FRAME         BIT(2)
-
 /* Store IP address of the caller on the trampoline stack,
  * so it's available for trampoline's programs.
  */
 #define BPF_TRAMP_F_IP_ARG             BIT(3)
+/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
+#define BPF_TRAMP_F_RET_FENTRY_RET     BIT(4)
 
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
index 6486d3c19463240024b2f7a65fc8412ed146d968..36f33685c8c00adca9a2d8f1c9814d9e78a83fd3 100644 (file)
@@ -194,7 +194,7 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
 struct buffer_head *__bread_gfp(struct block_device *,
                                sector_t block, unsigned size, gfp_t gfp);
 void invalidate_bh_lrus(void);
-void invalidate_bh_lrus_cpu(int cpu);
+void invalidate_bh_lrus_cpu(void);
 bool has_bh_in_lru(int cpu, void *dummy);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
@@ -408,7 +408,7 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; }
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
-static inline void invalidate_bh_lrus_cpu(int cpu) {}
+static inline void invalidate_bh_lrus_cpu(void) {}
 static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
 #define buffer_heads_over_limit 0
 
index e1c705fdfa7c531c0aaa8c3148853e2bcc8b83ba..db2e147e069fe52555bd8f8d39c2cd1e28341934 100644 (file)
@@ -752,107 +752,54 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
  * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
  * per-socket cgroup information except for memcg association.
  *
- * On legacy hierarchies, net_prio and net_cls controllers directly set
- * attributes on each sock which can then be tested by the network layer.
- * On the default hierarchy, each sock is associated with the cgroup it was
- * created in and the networking layer can match the cgroup directly.
- *
- * To avoid carrying all three cgroup related fields separately in sock,
- * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
- * On boot, sock_cgroup_data records the cgroup that the sock was created
- * in so that cgroup2 matches can be made; however, once either net_prio or
- * net_cls starts being used, the area is overridden to carry prioidx and/or
- * classid.  The two modes are distinguished by whether the lowest bit is
- * set.  Clear bit indicates cgroup pointer while set bit prioidx and
- * classid.
- *
- * While userland may start using net_prio or net_cls at any time, once
- * either is used, cgroup2 matching no longer works.  There is no reason to
- * mix the two and this is in line with how legacy and v2 compatibility is
- * handled.  On mode switch, cgroup references which are already being
- * pointed to by socks may be leaked.  While this can be remedied by adding
- * synchronization around sock_cgroup_data, given that the number of leaked
- * cgroups is bound and highly unlikely to be high, this seems to be the
- * better trade-off.
+ * On legacy hierarchies, net_prio and net_cls controllers directly
+ * set attributes on each sock which can then be tested by the network
+ * layer. On the default hierarchy, each sock is associated with the
+ * cgroup it was created in and the networking layer can match the
+ * cgroup directly.
  */
 struct sock_cgroup_data {
-       union {
-#ifdef __LITTLE_ENDIAN
-               struct {
-                       u8      is_data : 1;
-                       u8      no_refcnt : 1;
-                       u8      unused : 6;
-                       u8      padding;
-                       u16     prioidx;
-                       u32     classid;
-               } __packed;
-#else
-               struct {
-                       u32     classid;
-                       u16     prioidx;
-                       u8      padding;
-                       u8      unused : 6;
-                       u8      no_refcnt : 1;
-                       u8      is_data : 1;
-               } __packed;
+       struct cgroup   *cgroup; /* v2 */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+       u32             classid; /* v1 */
+#endif
+#ifdef CONFIG_CGROUP_NET_PRIO
+       u16             prioidx; /* v1 */
 #endif
-               u64             val;
-       };
 };
 
-/*
- * There's a theoretical window where the following accessors race with
- * updaters and return part of the previous pointer as the prioidx or
- * classid.  Such races are short-lived and the result isn't critical.
- */
 static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
 {
-       /* fallback to 1 which is always the ID of the root cgroup */
-       return (skcd->is_data & 1) ? skcd->prioidx : 1;
+#ifdef CONFIG_CGROUP_NET_PRIO
+       return READ_ONCE(skcd->prioidx);
+#else
+       return 1;
+#endif
 }
 
 static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
 {
-       /* fallback to 0 which is the unconfigured default classid */
-       return (skcd->is_data & 1) ? skcd->classid : 0;
+#ifdef CONFIG_CGROUP_NET_CLASSID
+       return READ_ONCE(skcd->classid);
+#else
+       return 0;
+#endif
 }
 
-/*
- * If invoked concurrently, the updaters may clobber each other.  The
- * caller is responsible for synchronization.
- */
 static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
                                           u16 prioidx)
 {
-       struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
-       if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
-               return;
-
-       if (!(skcd_buf.is_data & 1)) {
-               skcd_buf.val = 0;
-               skcd_buf.is_data = 1;
-       }
-
-       skcd_buf.prioidx = prioidx;
-       WRITE_ONCE(skcd->val, skcd_buf.val);    /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_PRIO
+       WRITE_ONCE(skcd->prioidx, prioidx);
+#endif
 }
 
 static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
                                           u32 classid)
 {
-       struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
-       if (sock_cgroup_classid(&skcd_buf) == classid)
-               return;
-
-       if (!(skcd_buf.is_data & 1)) {
-               skcd_buf.val = 0;
-               skcd_buf.is_data = 1;
-       }
-
-       skcd_buf.classid = classid;
-       WRITE_ONCE(skcd->val, skcd_buf.val);    /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+       WRITE_ONCE(skcd->classid, classid);
+#endif
 }
 
 #else  /* CONFIG_SOCK_CGROUP_DATA */
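With the packed union gone, the v1 fields become plain struct members accessed with READ_ONCE/WRITE_ONCE. A userspace approximation of those accessors, modelling READ_ONCE/WRITE_ONCE with volatile casts purely for illustration (the kernel macros are more involved, and the config options turn into compile-time field selection):

#include <stdint.h>
#include <stdio.h>

/* Illustration-only stand-ins; the real macros live in the kernel. */
#define READ_ONCE(x)      (*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)  (*(volatile typeof(x) *)&(x) = (v))

struct sock_cgroup_data_demo {
	void     *cgroup;   /* v2 association */
	uint32_t  classid;  /* v1 net_cls     */
	uint16_t  prioidx;  /* v1 net_prio    */
};

static uint32_t demo_classid(struct sock_cgroup_data_demo *d)
{
	return READ_ONCE(d->classid);
}

static void demo_set_classid(struct sock_cgroup_data_demo *d, uint32_t id)
{
	WRITE_ONCE(d->classid, id);
}

int main(void)
{
	struct sock_cgroup_data_demo d = { 0 };

	demo_set_classid(&d, 42);
	printf("classid=%u prioidx=%u\n", demo_classid(&d), (unsigned)d.prioidx);
	return 0;
}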
index 7bf60454a31361f3a0dd8b24fca91a743c8eaead..75c151413fda8c9ae1436983932ad7b09c3fda23 100644 (file)
@@ -829,33 +829,13 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
  */
 #ifdef CONFIG_SOCK_CGROUP_DATA
 
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-extern spinlock_t cgroup_sk_update_lock;
-#endif
-
-void cgroup_sk_alloc_disable(void);
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
 void cgroup_sk_clone(struct sock_cgroup_data *skcd);
 void cgroup_sk_free(struct sock_cgroup_data *skcd);
 
 static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
 {
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-       unsigned long v;
-
-       /*
-        * @skcd->val is 64bit but the following is safe on 32bit too as we
-        * just need the lower ulong to be written and read atomically.
-        */
-       v = READ_ONCE(skcd->val);
-
-       if (v & 3)
-               return &cgrp_dfl_root.cgrp;
-
-       return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
-#else
-       return (struct cgroup *)(unsigned long)skcd->val;
-#endif
+       return skcd->cgroup;
 }
 
 #else  /* CONFIG_SOCK_CGROUP_DATA */
index 49b0ac8b6fd323b422c047043126951bd0c68531..3c4de9b6c6e3e07e06e48c27dc01eae012e3391c 100644 (file)
 #define __no_sanitize_coverage
 #endif
 
-/*
- * Not all versions of clang implement the type-generic versions
- * of the builtin overflow checkers. Fortunately, clang implements
- * __has_builtin allowing us to avoid awkward version
- * checks. Unfortunately, we don't know which version of gcc clang
- * pretends to be, so the macro may or may not be defined.
- */
-#if __has_builtin(__builtin_mul_overflow) && \
-    __has_builtin(__builtin_add_overflow) && \
-    __has_builtin(__builtin_sub_overflow)
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
-#endif
-
 #if __has_feature(shadow_call_stack)
 # define __noscs       __attribute__((__no_sanitize__("shadow-call-stack")))
 #endif
index 21c36b69eb06c15d4306f8c29ccbc251893095f2..bd2b881c6b63a883a118b0895bb50d3e1ae3cbb5 100644 (file)
 
 #if GCC_VERSION >= 70000
 #define KASAN_ABI_VERSION 5
-#elif GCC_VERSION >= 50000
+#else
 #define KASAN_ABI_VERSION 4
-#elif GCC_VERSION >= 40902
-#define KASAN_ABI_VERSION 3
 #endif
 
 #if __has_attribute(__no_sanitize_address__)
 #define __no_sanitize_coverage
 #endif
 
-#if GCC_VERSION >= 50100
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
-#endif
-
 /*
  * Turn individual warnings and errors on and off locally, depending
  * on version.
index b67261a1e3e9cf382398befac327f7ec3108b4a3..3d5af56337bdb5ea0c88dca48a45e7524bf86420 100644 (file)
@@ -188,6 +188,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
     (typeof(ptr)) (__ptr + (off)); })
 #endif
 
+#define absolute_pointer(val)  RELOC_HIDE((void *)(val), 0)
+
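A hedged usage sketch of the new helper: it hides a constant address from the optimizer so accesses to fixed locations do not trigger array-bounds/stringop warnings (the address and buffer below are made up for illustration):

        char id_buf[8];

        /* read from a fixed legacy address without the compiler treating the
         * integer constant as a tiny out-of-bounds object */
        memcpy(id_buf, absolute_pointer(0x000ff000), sizeof(id_buf));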
 #ifndef OPTIMIZER_HIDE_VAR
 /* Make the optimizer believe the variable can be manipulated arbitrarily. */
 #define OPTIMIZER_HIDE_VAR(var)                                                \
index 8f2106e9e5c124285f7474953d84966d91c5e24d..e6ec634039658e714f1c9ad9f2854580d646a318 100644 (file)
  * Provide links to the documentation of each supported compiler, if it exists.
  */
 
-/*
- * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
- * In the meantime, to support gcc < 5, we implement __has_attribute
- * by hand.
- */
-#ifndef __has_attribute
-# define __has_attribute(x) __GCC4_has_attribute_##x
-# define __GCC4_has_attribute___assume_aligned__      1
-# define __GCC4_has_attribute___copy__                0
-# define __GCC4_has_attribute___designated_init__     0
-# define __GCC4_has_attribute___error__               1
-# define __GCC4_has_attribute___externally_visible__  1
-# define __GCC4_has_attribute___no_caller_saved_registers__ 0
-# define __GCC4_has_attribute___noclone__             1
-# define __GCC4_has_attribute___no_profile_instrument_function__ 0
-# define __GCC4_has_attribute___nonstring__           0
-# define __GCC4_has_attribute___no_sanitize_address__ 1
-# define __GCC4_has_attribute___no_sanitize_undefined__ 1
-# define __GCC4_has_attribute___no_sanitize_coverage__ 0
-# define __GCC4_has_attribute___fallthrough__         0
-# define __GCC4_has_attribute___warning__             1
-#endif
-
 /*
  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
  */
@@ -77,7 +54,6 @@
  * compiler should see some alignment anyway, when the return value is
  * massaged by 'flags = ptr & 3; ptr &= ~3;').
  *
- * Optional: only supported since gcc >= 4.9
  * Optional: not supported by icc
  *
  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
index 5d4d07a9e1ed15e67c249cc402ba1908ef412b06..1e7399fc69c0a1e5304b892bb8accc15567cdfd3 100644 (file)
@@ -996,14 +996,15 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
  * cpumask; Typically used by bin_attribute to export cpumask bitmask
  * ABI.
  *
- * Returns the length of how many bytes have been copied.
+ * Returns the length of how many bytes have been copied, excluding
+ * terminating '\0'.
  */
 static inline ssize_t
 cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
                loff_t off, size_t count)
 {
        return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
-                                  nr_cpu_ids, off, count);
+                                  nr_cpu_ids, off, count) - 1;
 }
 
 /**
@@ -1018,7 +1019,7 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
                loff_t off, size_t count)
 {
        return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
-                                  nr_cpu_ids, off, count);
+                                  nr_cpu_ids, off, count) - 1;
 }
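A minimal sketch of the intended caller (names assumed), a sysfs bin_attribute read handler exporting a cpumask; the adjusted return value no longer counts the trailing '\0':

static ssize_t foo_cpus_read(struct file *file, struct kobject *kobj,
                             struct bin_attribute *attr, char *buf,
                             loff_t off, size_t count)
{
        return cpumap_print_bitmask_to_buf(buf, cpu_online_mask, off, count);
}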
 
 #if NR_CPUS <= BITS_PER_LONG
diff --git a/include/linux/dsa/mv88e6xxx.h b/include/linux/dsa/mv88e6xxx.h
new file mode 100644 (file)
index 0000000..8c3d45e
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2021 NXP
+ */
+
+#ifndef _NET_DSA_TAG_MV88E6XXX_H
+#define _NET_DSA_TAG_MV88E6XXX_H
+
+#include <linux/if_vlan.h>
+
+#define MV88E6XXX_VID_STANDALONE       0
+#define MV88E6XXX_VID_BRIDGED          (VLAN_N_VID - 1)
+
+#endif
index c6bc45ae5e03a4116c49ee22d91081288257111f..8ae999f587c48eb74b7c018a479a4d6eb836b2fe 100644 (file)
@@ -1,11 +1,32 @@
 /* SPDX-License-Identifier: GPL-2.0
- * Copyright 2019-2021 NXP Semiconductors
+ * Copyright 2019-2021 NXP
  */
 
 #ifndef _NET_DSA_TAG_OCELOT_H
 #define _NET_DSA_TAG_OCELOT_H
 
+#include <linux/kthread.h>
 #include <linux/packing.h>
+#include <linux/skbuff.h>
+
+struct ocelot_skb_cb {
+       struct sk_buff *clone;
+       unsigned int ptp_class; /* valid only for clones */
+       u8 ptp_cmd;
+       u8 ts_id;
+};
+
+#define OCELOT_SKB_CB(skb) \
+       ((struct ocelot_skb_cb *)((skb)->cb))
+
+#define IFH_TAG_TYPE_C                 0
+#define IFH_TAG_TYPE_S                 1
+
+#define IFH_REW_OP_NOOP                        0x0
+#define IFH_REW_OP_DSCP                        0x1
+#define IFH_REW_OP_ONE_STEP_PTP                0x2
+#define IFH_REW_OP_TWO_STEP_PTP                0x3
+#define IFH_REW_OP_ORIGIN_PTP          0x5
 
 #define OCELOT_TAG_LEN                 16
 #define OCELOT_SHORT_PREFIX_LEN                4
  *         +------+------+------+------+------+------+------+------+
  */
 
+struct felix_deferred_xmit_work {
+       struct dsa_port *dp;
+       struct sk_buff *skb;
+       struct kthread_work work;
+};
+
+struct felix_port {
+       void (*xmit_work_fn)(struct kthread_work *work);
+       struct kthread_worker *xmit_worker;
+};
+
 static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val)
 {
        packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
@@ -215,4 +247,21 @@ static inline void ocelot_ifh_set_vid(void *injection, u64 vid)
        packing(injection, &vid, 11, 0, OCELOT_TAG_LEN, PACK, 0);
 }
 
+/* Determine the PTP REW_OP to use for injecting the given skb */
+static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
+{
+       struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+       u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
+       u32 rew_op = 0;
+
+       if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
+               rew_op = ptp_cmd;
+               rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
+       } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+               rew_op = ptp_cmd;
+       }
+
+       return rew_op;
+}
+
 #endif
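A hedged sketch of how a tagger/driver xmit path might consume this helper; the injection call follows the ocelot_port_inject_frame() prototype partially visible later in this diff, and the surrounding variables are assumed:

        u32 rew_op = ocelot_ptp_rew_op(skb);

        /* rew_op is 0 for non-PTP traffic, so it can be passed unconditionally */
        ocelot_port_inject_frame(ocelot, port, grp, rew_op, skb);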
index 171106202fe5bef538778f5bd681223e6c430421..9e07079528a53fe39e3e4974998785cefcecd47d 100644 (file)
@@ -48,6 +48,10 @@ struct sja1105_tagger_data {
        spinlock_t meta_lock;
        unsigned long state;
        u8 ts_id;
+       /* Used on SJA1110 where meta frames are generated only for
+        * 2-step TX timestamps
+        */
+       struct sk_buff_head skb_txtstamp_queue;
 };
 
 struct sja1105_skb_cb {
@@ -69,42 +73,24 @@ struct sja1105_port {
        bool hwts_tx_en;
 };
 
-enum sja1110_meta_tstamp {
-       SJA1110_META_TSTAMP_TX = 0,
-       SJA1110_META_TSTAMP_RX = 1,
-};
-
-#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
-
-void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
-                                enum sja1110_meta_tstamp dir, u64 tstamp);
-
-#else
+/* Timestamps are in units of 8 ns clock ticks (equivalent to
+ * a fixed 125 MHz clock).
+ */
+#define SJA1105_TICK_NS                        8
 
-static inline void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
-                                              u8 ts_id, enum sja1110_meta_tstamp dir,
-                                              u64 tstamp)
+static inline s64 ns_to_sja1105_ticks(s64 ns)
 {
+       return ns / SJA1105_TICK_NS;
 }
 
-#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
-
-#if IS_ENABLED(CONFIG_NET_DSA_SJA1105)
-
-extern const struct dsa_switch_ops sja1105_switch_ops;
-
-static inline bool dsa_port_is_sja1105(struct dsa_port *dp)
+static inline s64 sja1105_ticks_to_ns(s64 ticks)
 {
-       return dp->ds->ops == &sja1105_switch_ops;
+       return ticks * SJA1105_TICK_NS;
 }
 
-#else
-
 static inline bool dsa_port_is_sja1105(struct dsa_port *dp)
 {
-       return false;
+       return true;
 }
 
-#endif
-
 #endif /* _NET_DSA_SJA1105_H */
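As a quick sanity check of the tick helpers above (values picked for illustration), 8 ns per tick corresponds to the fixed 125 MHz timestamp clock:

        s64 ticks = ns_to_sja1105_ticks(1000); /* 1000 ns / 8 ns  == 125 ticks */
        s64 ns    = sja1105_ticks_to_ns(125);  /* 125 ticks * 8 ns == 1000 ns  */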
index 928c411bd50963278407ad317b22db4eaaed0895..c58d5045148547a136a523669b0f1007342ea56d 100644 (file)
@@ -308,7 +308,7 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src)
  */
 static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
 {
-       ether_addr_copy(dev->dev_addr, addr);
+       __dev_addr_set(dev, addr, ETH_ALEN);
 }
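A minimal usage sketch, assuming a driver-specific helper that fetches the MAC from hardware; the point is that drivers call eth_hw_addr_set() instead of writing dev->dev_addr directly:

        u8 addr[ETH_ALEN];

        foo_read_mac_from_hw(priv, addr);       /* assumed, driver-specific */
        eth_hw_addr_set(netdev, addr);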
 
 /**
index 59828516ebaf1932a098eb9988f95fe8d17da5fb..9f4ad719bfe3f8b5f49361db3750359bf954dfdc 100644 (file)
@@ -22,10 +22,15 @@ struct device;
  * LINKS_ADDED:        The fwnode has already be parsed to add fwnode links.
  * NOT_DEVICE: The fwnode will never be populated as a struct device.
  * INITIALIZED: The hardware corresponding to fwnode has been initialized.
+ * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its
+ *                          driver needs its child devices to be bound with
+ *                          their respective drivers as soon as they are
+ *                          added.
  */
-#define FWNODE_FLAG_LINKS_ADDED                BIT(0)
-#define FWNODE_FLAG_NOT_DEVICE         BIT(1)
-#define FWNODE_FLAG_INITIALIZED                BIT(2)
+#define FWNODE_FLAG_LINKS_ADDED                        BIT(0)
+#define FWNODE_FLAG_NOT_DEVICE                 BIT(1)
+#define FWNODE_FLAG_INITIALIZED                        BIT(2)
+#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD   BIT(3)
 
 struct fwnode_handle {
        struct fwnode_handle *secondary;
index 23e4ee5235769367c8ba3e8fd5b22245ca9ba60d..9ee238ad29ce91ebe9c888c6e9ee9b8b3aaccf85 100644 (file)
@@ -251,7 +251,7 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
 }
 
 void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
                                    irq_hw_number_t hwirq_max, int direct_max,
                                    const struct irq_domain_ops *ops,
                                    void *host_data);
index 041ca7f15ea4514f750f3f200c82d3fa377f94a7..0f18df7fe87491c8880e20c4934798bd6efc36fc 100644 (file)
@@ -608,7 +608,6 @@ struct kvm {
        unsigned long mmu_notifier_range_start;
        unsigned long mmu_notifier_range_end;
 #endif
-       long tlbs_dirty;
        struct list_head devices;
        u64 manual_dirty_log_protect;
        struct dentry *debugfs_dentry;
@@ -721,11 +720,6 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
        return NULL;
 }
 
-static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
-{
-       return vcpu->vcpu_idx;
-}
-
 #define kvm_for_each_memslot(memslot, slots)                           \
        for (memslot = &slots->memslots[0];                             \
             memslot < slots->memslots + slots->used_slots; memslot++)  \
index ffb787d5ebde3045dea36bcce8b37953dae43b6e..5e6dc38f418e48c3b2ce3f3d08a10ae6bbb3e675 100644 (file)
@@ -80,6 +80,9 @@ struct mdio_driver {
 
        /* Clears up any memory if needed */
        void (*remove)(struct mdio_device *mdiodev);
+
+       /* Quiesces the device on system shutdown, turns off interrupts etc */
+       void (*shutdown)(struct mdio_device *mdiodev);
 };
 
 static inline struct mdio_driver *
index b066024c62e3f4937849c0fa5ac0d7091166160a..34de69b3b8badcdf971cd3f34b805d1b5b23f52d 100644 (file)
@@ -118,6 +118,7 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
 int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
 
 void memblock_free_all(void);
+void memblock_free_ptr(void *ptr, size_t size);
 void reset_node_managed_pages(pg_data_t *pgdat);
 void reset_all_zones_managed_pages(void);
 
index 326250996b4e06f0d53196697822b0f2d8b998fa..c8077e936691997f452bb2aeef0da9fd3de6467b 100644 (file)
@@ -19,6 +19,11 @@ struct migration_target_control;
  */
 #define MIGRATEPAGE_SUCCESS            0
 
+/*
+ * Keep sync with:
+ * - macro MIGRATE_REASON in include/trace/events/migrate.h
+ * - migrate_reason_names[MR_TYPES] in mm/debug.c
+ */
 enum migrate_reason {
        MR_COMPACTION,
        MR_MEMORY_FAILURE,
@@ -32,7 +37,6 @@ enum migrate_reason {
        MR_TYPES
 };
 
-/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
 extern const char *migrate_reason_names[MR_TYPES];
 
 #ifdef CONFIG_MIGRATION
index f3638d09ba776468ec0f84439d0116e791993d18..993204a6c1a1376e47490ab0f72e0254921f85d3 100644 (file)
@@ -9475,16 +9475,22 @@ struct mlx5_ifc_pcmr_reg_bits {
        u8         reserved_at_0[0x8];
        u8         local_port[0x8];
        u8         reserved_at_10[0x10];
+
        u8         entropy_force_cap[0x1];
        u8         entropy_calc_cap[0x1];
        u8         entropy_gre_calc_cap[0x1];
-       u8         reserved_at_23[0x1b];
+       u8         reserved_at_23[0xf];
+       u8         rx_ts_over_crc_cap[0x1];
+       u8         reserved_at_33[0xb];
        u8         fcs_cap[0x1];
        u8         reserved_at_3f[0x1];
+
        u8         entropy_force[0x1];
        u8         entropy_calc[0x1];
        u8         entropy_gre_calc[0x1];
-       u8         reserved_at_43[0x1b];
+       u8         reserved_at_43[0xf];
+       u8         rx_ts_over_crc[0x1];
+       u8         reserved_at_53[0xb];
        u8         fcs_chk[0x1];
        u8         reserved_at_5f[0x1];
 };
index b179f1e3541a29c30487977f5ca45c2acc7291ce..96e113e23d04ef6d5976b246e8527201b5f5f902 100644 (file)
@@ -144,15 +144,6 @@ static inline void mmap_read_unlock(struct mm_struct *mm)
        up_read(&mm->mmap_lock);
 }
 
-static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
-{
-       if (mmap_read_trylock(mm)) {
-               rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
-               return true;
-       }
-       return false;
-}
-
 static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
 {
        __mmap_lock_trace_released(mm, false);
index 923dada24eb4163b2e5fb7f9be88ae78db2c34ca..c0c0cefc3b925658e2b5f61de820b8b90a5b2d39 100644 (file)
@@ -150,6 +150,20 @@ static inline int nvmem_cell_read_u64(struct device *dev,
        return -EOPNOTSUPP;
 }
 
+static inline int nvmem_cell_read_variable_le_u32(struct device *dev,
+                                                const char *cell_id,
+                                                u32 *val)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int nvmem_cell_read_variable_le_u64(struct device *dev,
+                                                 const char *cell_id,
+                                                 u64 *val)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline struct nvmem_device *nvmem_device_get(struct device *dev,
                                                    const char *name)
 {
index 0f12345c21fb58430ea93884c4ad5f4688ad7d18..4669632bd72bcd4f8cdf586243022f8383c43337 100644 (file)
@@ -6,12 +6,9 @@
 #include <linux/limits.h>
 
 /*
- * In the fallback code below, we need to compute the minimum and
- * maximum values representable in a given type. These macros may also
- * be useful elsewhere, so we provide them outside the
- * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
- *
- * It would seem more obvious to do something like
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
  *
  * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
  * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
@@ -54,7 +51,6 @@ static inline bool __must_check __must_check_overflow(bool overflow)
        return unlikely(overflow);
 }
 
-#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
 /*
  * For simplicity and code hygiene, the fallback code below insists on
  * a, b and *d having the same type (similar to the min() and max()
@@ -90,134 +86,6 @@ static inline bool __must_check __must_check_overflow(bool overflow)
        __builtin_mul_overflow(__a, __b, __d);  \
 }))
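A minimal usage sketch of the type-generic checkers kept above (operand names assumed); each returns true when the operation overflowed:

        size_t bytes;

        if (check_mul_overflow(nmemb, elem_size, &bytes))
                return -EOVERFLOW;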
 
-#else
-
-
-/* Checking for unsigned overflow is relatively easy without causing UB. */
-#define __unsigned_add_overflow(a, b, d) ({    \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = __a + __b;                       \
-       *__d < __a;                             \
-})
-#define __unsigned_sub_overflow(a, b, d) ({    \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = __a - __b;                       \
-       __a < __b;                              \
-})
-/*
- * If one of a or b is a compile-time constant, this avoids a division.
- */
-#define __unsigned_mul_overflow(a, b, d) ({            \
-       typeof(a) __a = (a);                            \
-       typeof(b) __b = (b);                            \
-       typeof(d) __d = (d);                            \
-       (void) (&__a == &__b);                          \
-       (void) (&__a == __d);                           \
-       *__d = __a * __b;                               \
-       __builtin_constant_p(__b) ?                     \
-         __b > 0 && __a > type_max(typeof(__a)) / __b : \
-         __a > 0 && __b > type_max(typeof(__b)) / __a;  \
-})
-
-/*
- * For signed types, detecting overflow is much harder, especially if
- * we want to avoid UB. But the interface of these macros is such that
- * we must provide a result in *d, and in fact we must produce the
- * result promised by gcc's builtins, which is simply the possibly
- * wrapped-around value. Fortunately, we can just formally do the
- * operations in the widest relevant unsigned type (u64) and then
- * truncate the result - gcc is smart enough to generate the same code
- * with and without the (u64) casts.
- */
-
-/*
- * Adding two signed integers can overflow only if they have the same
- * sign, and overflow has happened iff the result has the opposite
- * sign.
- */
-#define __signed_add_overflow(a, b, d) ({      \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = (u64)__a + (u64)__b;             \
-       (((~(__a ^ __b)) & (*__d ^ __a))        \
-               & type_min(typeof(__a))) != 0;  \
-})
-
-/*
- * Subtraction is similar, except that overflow can now happen only
- * when the signs are opposite. In this case, overflow has happened if
- * the result has the opposite sign of a.
- */
-#define __signed_sub_overflow(a, b, d) ({      \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = (u64)__a - (u64)__b;             \
-       ((((__a ^ __b)) & (*__d ^ __a))         \
-               & type_min(typeof(__a))) != 0;  \
-})
-
-/*
- * Signed multiplication is rather hard. gcc always follows C99, so
- * division is truncated towards 0. This means that we can write the
- * overflow check like this:
- *
- * (a > 0 && (b > MAX/a || b < MIN/a)) ||
- * (a < -1 && (b > MIN/a || b < MAX/a) ||
- * (a == -1 && b == MIN)
- *
- * The redundant casts of -1 are to silence an annoying -Wtype-limits
- * (included in -Wextra) warning: When the type is u8 or u16, the
- * __b_c_e in check_mul_overflow obviously selects
- * __unsigned_mul_overflow, but unfortunately gcc still parses this
- * code and warns about the limited range of __b.
- */
-
-#define __signed_mul_overflow(a, b, d) ({                              \
-       typeof(a) __a = (a);                                            \
-       typeof(b) __b = (b);                                            \
-       typeof(d) __d = (d);                                            \
-       typeof(a) __tmax = type_max(typeof(a));                         \
-       typeof(a) __tmin = type_min(typeof(a));                         \
-       (void) (&__a == &__b);                                          \
-       (void) (&__a == __d);                                           \
-       *__d = (u64)__a * (u64)__b;                                     \
-       (__b > 0   && (__a > __tmax/__b || __a < __tmin/__b)) ||        \
-       (__b < (typeof(__b))-1  && (__a > __tmin/__b || __a < __tmax/__b)) || \
-       (__b == (typeof(__b))-1 && __a == __tmin);                      \
-})
-
-
-#define check_add_overflow(a, b, d)    __must_check_overflow(          \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_add_overflow(a, b, d),                 \
-                       __unsigned_add_overflow(a, b, d)))
-
-#define check_sub_overflow(a, b, d)    __must_check_overflow(          \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_sub_overflow(a, b, d),                 \
-                       __unsigned_sub_overflow(a, b, d)))
-
-#define check_mul_overflow(a, b, d)    __must_check_overflow(          \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_mul_overflow(a, b, d),                 \
-                       __unsigned_mul_overflow(a, b, d)))
-
-#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
-
 /** check_shl_overflow() - Calculate a left-shifted value and check overflow
  *
  * @a: Value to be shifted
index 54667735cc67588da572105b6cbe8be6b75e0503..8d6571feb95de84ef4b0472e16c4bdfede15c3a6 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright 2016-2018 NXP
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _LINUX_PACKING_H
index 505480217cf1a9b4f779b454d42bc78c464e189b..2512e2f9cd4e7964fe08c42052d4313f5cfc6679 100644 (file)
@@ -163,6 +163,12 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 #endif
 
+#ifdef CONFIG_KVM
+void kvm_host_pmu_init(struct arm_pmu *pmu);
+#else
+#define kvm_host_pmu_init(x)   do { } while(0)
+#endif
+
 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
 struct arm_pmu *armpmu_alloc_atomic(void);
index fe156a8170aa39ab4236a22174aa73ac0959d93b..9b60bb89d86ab817737139c18b8b6186d08de875 100644 (file)
@@ -683,7 +683,9 @@ struct perf_event {
        /*
         * timestamp shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
-        * context time as it was when the event was last scheduled in.
+        * context time as it was when the event was last scheduled in,
+        * or when ctx_sched_in failed to schedule the event because we
+        * ran out of PMCs.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
index 6beb26b7151d21c18c7c3dd52c73290c0d8d48c5..86be8bf27b41b773f3957c7b5ff63f6592e8ab18 100644 (file)
@@ -4,6 +4,8 @@
 
 #include <linux/mm.h>
 
+#define ARCH_DEFAULT_PKEY      0
+
 #ifdef CONFIG_ARCH_HAS_PKEYS
 #include <asm/pkeys.h>
 #else /* ! CONFIG_ARCH_HAS_PKEYS */
index 43b5ce139c375e9af1527b462b1cd04a3992dbc1..878e572a78bf60ce2870809b6919f741e05c01d7 100644 (file)
@@ -48,6 +48,8 @@ struct omap_usb_config {
        u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup);
 
        int (*ocpi_enable)(void);
+
+       void (*lb_reset)(void);
 };
 
 #endif /* __LINUX_USB_OMAP1_H */
index c0475d1c98850b1058c83a728bc14c3ff504c5c6..81cad9e1e4120f5db1146cb65f4e6e7a25ceea84 100644 (file)
@@ -61,7 +61,6 @@ enum qcom_scm_ice_cipher {
 #define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
 #define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
 
-#if IS_ENABLED(CONFIG_QCOM_SCM)
 extern bool qcom_scm_is_available(void);
 
 extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
@@ -115,74 +114,4 @@ extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
 extern int qcom_scm_lmh_profile_change(u32 profile_id);
 extern bool qcom_scm_lmh_dcvsh_available(void);
 
-#else
-
-#include <linux/errno.h>
-
-static inline bool qcom_scm_is_available(void) { return false; }
-
-static inline int qcom_scm_set_cold_boot_addr(void *entry,
-               const cpumask_t *cpus) { return -ENODEV; }
-static inline int qcom_scm_set_warm_boot_addr(void *entry,
-               const cpumask_t *cpus) { return -ENODEV; }
-static inline void qcom_scm_cpu_power_down(u32 flags) {}
-static inline u32 qcom_scm_set_remote_state(u32 state,u32 id)
-               { return -ENODEV; }
-
-static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
-               size_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
-               phys_addr_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_auth_and_reset(u32 peripheral)
-               { return -ENODEV; }
-static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
-static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }
-
-static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
-               { return -ENODEV; }
-static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
-               { return -ENODEV; }
-
-static inline bool qcom_scm_restore_sec_cfg_available(void) { return false; }
-static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
-               { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
-               { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
-               { return -ENODEV; }
-extern inline int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
-                                                u32 cp_nonpixel_start,
-                                                u32 cp_nonpixel_size)
-               { return -ENODEV; }
-static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
-               unsigned int *src, const struct qcom_scm_vmperm *newvm,
-               unsigned int dest_cnt) { return -ENODEV; }
-
-static inline bool qcom_scm_ocmem_lock_available(void) { return false; }
-static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
-               u32 size, u32 mode) { return -ENODEV; }
-static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id,
-               u32 offset, u32 size) { return -ENODEV; }
-
-static inline bool qcom_scm_ice_available(void) { return false; }
-static inline int qcom_scm_ice_invalidate_key(u32 index) { return -ENODEV; }
-static inline int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
-                                      enum qcom_scm_ice_cipher cipher,
-                                      u32 data_unit_size) { return -ENODEV; }
-
-static inline bool qcom_scm_hdcp_available(void) { return false; }
-static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
-               u32 *resp) { return -ENODEV; }
-
-static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
-               { return -ENODEV; }
-
-static inline int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
-                                    u64 limit_node, u32 node_id, u64 version)
-               { return -ENODEV; }
-
-static inline int qcom_scm_lmh_profile_change(u32 profile_id) { return -ENODEV; }
-
-static inline bool qcom_scm_lmh_dcvsh_available(void) { return -ENODEV; }
-#endif
 #endif
index e12b524426b0286be06751bb3ae891a5e55b055e..c1a927ddec646c60d5d22b212852116eecb08327 100644 (file)
@@ -1471,6 +1471,7 @@ struct task_struct {
                                        mce_whole_page : 1,
                                        __mce_reserved : 62;
        struct callback_head            mce_kill_me;
+       int                             mce_count;
 #endif
 
 #ifdef CONFIG_KRETPROBES
@@ -1719,7 +1720,7 @@ extern struct pid *cad_pid;
 #define tsk_used_math(p)                       ((p)->flags & PF_USED_MATH)
 #define used_math()                            tsk_used_math(current)
 
-static inline bool is_percpu_thread(void)
+static __always_inline bool is_percpu_thread(void)
 {
 #ifdef CONFIG_SMP
        return (current->flags & PF_NO_SETAFFINITY) &&
index 6bdb0db3e8258ad2745705a9b046eb1c93e05840..841e2f0f5240ba9e210bb9a3fc1cbedc2162b2a8 100644 (file)
@@ -1940,7 +1940,7 @@ static inline void __skb_insert(struct sk_buff *newsk,
        WRITE_ONCE(newsk->prev, prev);
        WRITE_ONCE(next->prev, newsk);
        WRITE_ONCE(prev->next, newsk);
-       list->qlen++;
+       WRITE_ONCE(list->qlen, list->qlen + 1);
 }
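The WRITE_ONCE() above is meant to pair with lockless readers of qlen, roughly along the lines of the READ_ONCE()-based accessor sketched here for context:

static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list)
{
        return READ_ONCE(list->qlen);
}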
 
 static inline void __skb_queue_splice(const struct sk_buff_head *list,
index 8371bca13729b9f9541730fdcf52b1c664ffd619..6b0b686f6f9042ec732ca3b2476656a07a1adc6a 100644 (file)
@@ -531,6 +531,9 @@ struct spi_controller {
        /* I/O mutex */
        struct mutex            io_mutex;
 
+       /* Used to avoid adding the same CS twice */
+       struct mutex            add_lock;
+
        /* lock and mutex for SPI bus locking */
        spinlock_t              bus_lock_spinlock;
        struct mutex            bus_lock_mutex;
index 3e80c4bc66f7cdc2f27dbde3e9301c0afec32d07..2564b7434b4d73fbaa74a9d5ea587f50f84a3ebf 100644 (file)
@@ -197,6 +197,8 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
 
        mem_cgroup_handle_over_high();
        blkcg_maybe_throttle_current();
+
+       rseq_handle_notify_resume(NULL, regs);
 }
 
 /*
index 5265024e8b9094cae8b211928e030980f7ec7d12..207101a9c5c326958eaae5b4be2208f0b6e2028e 100644 (file)
@@ -27,6 +27,12 @@ enum iter_type {
        ITER_DISCARD,
 };
 
+struct iov_iter_state {
+       size_t iov_offset;
+       size_t count;
+       unsigned long nr_segs;
+};
+
 struct iov_iter {
        u8 iter_type;
        bool data_source;
@@ -47,7 +53,6 @@ struct iov_iter {
                };
                loff_t xarray_start;
        };
-       size_t truncated;
 };
 
 static inline enum iter_type iov_iter_type(const struct iov_iter *i)
@@ -55,6 +60,14 @@ static inline enum iter_type iov_iter_type(const struct iov_iter *i)
        return i->iter_type;
 }
 
+static inline void iov_iter_save_state(struct iov_iter *iter,
+                                      struct iov_iter_state *state)
+{
+       state->iov_offset = iter->iov_offset;
+       state->count = iter->count;
+       state->nr_segs = iter->nr_segs;
+}
+
 static inline bool iter_is_iovec(const struct iov_iter *i)
 {
        return iov_iter_type(i) == ITER_IOVEC;
@@ -233,6 +246,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
                        size_t maxsize, size_t *start);
 int iov_iter_npages(const struct iov_iter *i, int maxpages);
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
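A hedged usage sketch (the callee name is assumed): save the iterator state before an operation that may need to be retried, and roll it back on failure:

        struct iov_iter_state state;
        ssize_t ret;

        iov_iter_save_state(iter, &state);
        ret = do_copy_that_may_fail(iter);      /* assumed callee that advances iter */
        if (ret == -EAGAIN)
                iov_iter_restore(iter, &state);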
 
 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
 
@@ -255,10 +269,8 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
         * conversion in assignement is by definition greater than all
         * values of size_t, including old i->count.
         */
-       if (i->count > count) {
-               i->truncated += i->count - count;
+       if (i->count > count)
                i->count = count;
-       }
 }
 
 /*
@@ -267,7 +279,6 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
  */
 static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 {
-       i->truncated -= count - i->count;
        i->count = count;
 }
 
index 548a028f2dabb70fd4e6c7465abea66619fd7183..2c1fc9212cf287ae4ef8622a3e59e54081fed0d6 100644 (file)
@@ -124,6 +124,7 @@ struct usb_hcd {
 #define HCD_FLAG_RH_RUNNING            5       /* root hub is running? */
 #define HCD_FLAG_DEAD                  6       /* controller has died? */
 #define HCD_FLAG_INTF_AUTHORIZED       7       /* authorize interfaces? */
+#define HCD_FLAG_DEFER_RH_REGISTER     8       /* Defer roothub registration */
 
        /* The flags can be tested using these macros; they are likely to
         * be slightly faster than test_bit().
@@ -134,6 +135,7 @@ struct usb_hcd {
 #define HCD_WAKEUP_PENDING(hcd)        ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
 #define HCD_RH_RUNNING(hcd)    ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
 #define HCD_DEAD(hcd)          ((hcd)->flags & (1U << HCD_FLAG_DEAD))
+#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
 
        /*
         * Specifies if interfaces are authorized by default
index 2ebef6b1a3d6cd8f429be752b14c110acff63f90..74d3c1efd9bb5c8d8d6507f5f9a0ff607d893789 100644 (file)
@@ -399,9 +399,8 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
  * RETURNS:
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
-struct workqueue_struct *alloc_workqueue(const char *fmt,
-                                        unsigned int flags,
-                                        int max_active, ...);
+__printf(1, 4) struct workqueue_struct *
+alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
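With the __printf(1, 4) annotation the compiler now type-checks the format arguments of callers such as this sketch ('id' is assumed from context):

        struct workqueue_struct *wq;

        wq = alloc_workqueue("foo_wq_%d", WQ_UNBOUND, 0, id);
        if (!wq)
                return -ENOMEM;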
 
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
index f9a17145255a3606d434db0e0b2ef6ec0bf76e51..d784e76113b8d4593f07b29f6cf4fe6465eae39b 100644 (file)
@@ -447,6 +447,11 @@ static inline bool dsa_port_is_user(struct dsa_port *dp)
        return dp->type == DSA_PORT_TYPE_USER;
 }
 
+static inline bool dsa_port_is_unused(struct dsa_port *dp)
+{
+       return dp->type == DSA_PORT_TYPE_UNUSED;
+}
+
 static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
 {
        return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
@@ -580,8 +585,16 @@ struct dsa_switch_ops {
        int     (*change_tag_protocol)(struct dsa_switch *ds, int port,
                                       enum dsa_tag_protocol proto);
 
+       /* Optional switch-wide initialization and destruction methods */
        int     (*setup)(struct dsa_switch *ds);
        void    (*teardown)(struct dsa_switch *ds);
+
+       /* Per-port initialization and destruction methods. Mandatory if the
+        * driver registers devlink port regions, optional otherwise.
+        */
+       int     (*port_setup)(struct dsa_switch *ds, int port);
+       void    (*port_teardown)(struct dsa_switch *ds, int port);
+
        u32     (*get_phy_flags)(struct dsa_switch *ds, int port);
 
        /*
@@ -1041,6 +1054,7 @@ static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
 
 void dsa_unregister_switch(struct dsa_switch *ds);
 int dsa_register_switch(struct dsa_switch *ds);
+void dsa_switch_shutdown(struct dsa_switch *ds);
 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
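A hedged sketch of how this pairs with the new mdio_driver .shutdown hook earlier in this diff (driver name and private data layout are assumptions):

static void foo_mdio_shutdown(struct mdio_device *mdiodev)
{
        struct foo_priv *priv = dev_get_drvdata(&mdiodev->dev);

        dsa_switch_shutdown(priv->ds);
}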
 #ifdef CONFIG_PM_SLEEP
 int dsa_switch_suspend(struct dsa_switch *ds);
index 21c5386d4a6dc9108b9fd17aeb4f1e6bc365c074..ab5348e57db1a627cbce2dededb2e9b754d1f2cd 100644 (file)
@@ -597,5 +597,5 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
 int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
                     u8 rt_family, unsigned char *flags, bool skip_oif);
 int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
-                   int nh_weight, u8 rt_family);
+                   int nh_weight, u8 rt_family, u32 nh_tclassid);
 #endif  /* _NET_FIB_H */
index af0fc13cea3499e60e910adfcd98033d258ec937..618d1f427cb2761cee907d3615d19df39a6b50de 100644 (file)
@@ -2818,13 +2818,13 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
  * Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag
  * when they are able to replace in-use PTK keys according to the following
  * requirements:
- * 1) They do not hand over frames decrypted with the old key to
-      mac80211 once the call to set_key() with command %DISABLE_KEY has been
-      completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key,
+ * 1) They do not hand over frames decrypted with the old key to mac80211
+      once the call to set_key() with command %DISABLE_KEY has been completed,
    2) either drop or continue to use the old key for any outgoing frames queued
       at the time of the key deletion (including re-transmits),
    3) never send out a frame queued prior to the set_key() %SET_KEY command
-      encrypted with the new key and
+      encrypted with the new key when also needing
+      @IEEE80211_KEY_FLAG_GENERATE_IV and
    4) never send out a frame unencrypted when it should be encrypted.
    Mac80211 will not queue any new frames for a deleted key to the driver.
  */
index 0fd8a4159662d094fd5f02c34c856a9bb08d2813..ceadf8ba25a446b648f21adb71361609ff58beee 100644 (file)
@@ -17,7 +17,6 @@ struct inet_frags_ctl;
 struct nft_ct_frag6_pernet {
        struct ctl_table_header *nf_frag_frags_hdr;
        struct fqdir    *fqdir;
-       unsigned int users;
 };
 
 #endif /* _NF_DEFRAG_IPV6_H */
index 148f5d8ee5ab35fc002060d854ce2a89e2ef9f21..a16171c5fd9ebc8c9cb214e700584ca34eb1a016 100644 (file)
@@ -1202,7 +1202,7 @@ struct nft_object *nft_obj_lookup(const struct net *net,
 
 void nft_obj_notify(struct net *net, const struct nft_table *table,
                    struct nft_object *obj, u32 portid, u32 seq,
-                   int event, int family, int report, gfp_t gfp);
+                   int event, u16 flags, int family, int report, gfp_t gfp);
 
 /**
  *     struct nft_object_type - stateful object type
index 986a2a9cfdfa444fe8c2087e1a72507baf469a81..b593f95e99913e9946da7bd2d3b28462350217ee 100644 (file)
@@ -27,5 +27,11 @@ struct netns_nf {
 #if IS_ENABLED(CONFIG_DECNET)
        struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];
 #endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+       unsigned int defrag_ipv4_users;
+#endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+       unsigned int defrag_ipv6_users;
+#endif
 };
 #endif
index 10e1777877e6aaf818c7fba442e5dd297c925bd9..28085b995ddcfe2269f1f8524ea747a881d835a4 100644 (file)
@@ -325,7 +325,7 @@ int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
                struct fib_nh_common *nhc = &nhi->fib_nhc;
                int weight = nhg->nh_entries[i].weight;
 
-               if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
+               if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)
                        return -EMSGSIZE;
        }
 
index 6d7b12cba0158f9a954949b1fc42ee418d4b56a7..bf79f3a890af263dcb52056a56bbdf751e28064f 100644 (file)
@@ -11,6 +11,7 @@
 #include <uapi/linux/pkt_sched.h>
 
 #define DEFAULT_TX_QUEUE_LEN   1000
+#define STAB_SIZE_LOG_MAX      30
 
 struct qdisc_walker {
        int     stop;
index 66a9a90f9558e4c739e01192b3f8cee9fa271ae9..ea6fbc88c8f90f44f660bbca2cd2288e73039ec7 100644 (file)
@@ -307,6 +307,7 @@ struct bpf_local_storage;
   *    @sk_priority: %SO_PRIORITY setting
   *    @sk_type: socket type (%SOCK_STREAM, etc)
   *    @sk_protocol: which protocol this socket belongs in this network family
+  *    @sk_peer_lock: lock protecting @sk_peer_pid and @sk_peer_cred
   *    @sk_peer_pid: &struct pid for this socket's peer
   *    @sk_peer_cred: %SO_PEERCRED setting
   *    @sk_rcvlowat: %SO_RCVLOWAT setting
@@ -488,8 +489,10 @@ struct sock {
        u8                      sk_prefer_busy_poll;
        u16                     sk_busy_poll_budget;
 #endif
+       spinlock_t              sk_peer_lock;
        struct pid              *sk_peer_pid;
        const struct cred       *sk_peer_cred;
+
        long                    sk_rcvtimeo;
        ktime_t                 sk_stamp;
 #if BITS_PER_LONG==32
@@ -1623,7 +1626,36 @@ void release_sock(struct sock *sk);
                                SINGLE_DEPTH_NESTING)
 #define bh_unlock_sock(__sk)   spin_unlock(&((__sk)->sk_lock.slock))
 
-bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
+
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small sections, where the process
+ * won't block. Returns false if the fast path is taken:
+ *
+ *   sk_lock.slock locked, owned = 0, BH disabled
+ *
+ * Returns true if the slow path is taken:
+ *
+ *   sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+static inline bool lock_sock_fast(struct sock *sk)
+{
+       /* The sk_lock has mutex_lock() semantics here. */
+       mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+
+       return __lock_sock_fast(sk);
+}
+
+/* fast socket lock variant for caller already holding a [different] socket lock */
+static inline bool lock_sock_fast_nested(struct sock *sk)
+{
+       mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+
+       return __lock_sock_fast(sk);
+}
 
 /**
  * unlock_sock_fast - complement of lock_sock_fast
@@ -1640,6 +1672,7 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
                release_sock(sk);
                __release(&sk->sk_lock.slock);
        } else {
+               mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
                spin_unlock_bh(&sk->sk_lock.slock);
        }
 }
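A minimal usage sketch of the pair defined above:

        bool slow = lock_sock_fast(sk);

        /* short, non-blocking critical section */

        unlock_sock_fast(sk, slow);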
index 09a17f6e93a7921b52a91ba7a611724876c1d7a1..b97e142a7ca92d5f45fa21374c2e627ff29afef5 100644 (file)
@@ -146,7 +146,6 @@ struct scsi_device {
        struct scsi_vpd __rcu *vpd_pg83;
        struct scsi_vpd __rcu *vpd_pg80;
        struct scsi_vpd __rcu *vpd_pg89;
-       unsigned char current_tag;      /* current tag */
        struct scsi_target      *sdev_target;
 
        blist_flags_t           sdev_bflags; /* black/white flags as also found in
index 06706a9fd5b1c2d1a1e5dc4ad03ff5a5e32983ef..d7055b41982dfaa7f99189728c14b1cd0316dbc9 100644 (file)
 /* Source PGIDs, one per physical port */
 #define PGID_SRC                       80
 
-#define IFH_TAG_TYPE_C                 0
-#define IFH_TAG_TYPE_S                 1
-
-#define IFH_REW_OP_NOOP                        0x0
-#define IFH_REW_OP_DSCP                        0x1
-#define IFH_REW_OP_ONE_STEP_PTP                0x2
-#define IFH_REW_OP_TWO_STEP_PTP                0x3
-#define IFH_REW_OP_ORIGIN_PTP          0x5
-
 #define OCELOT_NUM_TC                  8
 
 #define OCELOT_SPEED_2500              0
@@ -603,10 +594,10 @@ struct ocelot_port {
        /* The VLAN ID that will be transmitted as untagged, on egress */
        struct ocelot_vlan              native_vlan;
 
+       unsigned int                    ptp_skbs_in_flight;
        u8                              ptp_cmd;
        struct sk_buff_head             tx_skbs;
        u8                              ts_id;
-       spinlock_t                      ts_id_lock;
 
        phy_interface_t                 phy_mode;
 
@@ -680,6 +671,9 @@ struct ocelot {
        struct ptp_clock                *ptp_clock;
        struct ptp_clock_info           ptp_info;
        struct hwtstamp_config          hwtstamp_config;
+       unsigned int                    ptp_skbs_in_flight;
+       /* Protects the 2-step TX timestamp ID logic */
+       spinlock_t                      ts_id_lock;
        /* Protects the PTP interface state */
        struct mutex                    ptp_lock;
        /* Protects the PTP clock */
@@ -692,15 +686,6 @@ struct ocelot_policer {
        u32 burst; /* bytes */
 };
 
-struct ocelot_skb_cb {
-       struct sk_buff *clone;
-       u8 ptp_cmd;
-       u8 ts_id;
-};
-
-#define OCELOT_SKB_CB(skb) \
-       ((struct ocelot_skb_cb *)((skb)->cb))
-
 #define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
 #define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
 #define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
@@ -752,8 +737,6 @@ u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
 void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
                              u32 val, u32 reg, u32 offset);
 
-#if IS_ENABLED(CONFIG_MSCC_OCELOT_SWITCH_LIB)
-
 /* Packet I/O */
 bool ocelot_can_inject(struct ocelot *ocelot, int grp);
 void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
@@ -761,36 +744,6 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
 int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb);
 void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp);
 
-u32 ocelot_ptp_rew_op(struct sk_buff *skb);
-#else
-
-static inline bool ocelot_can_inject(struct ocelot *ocelot, int grp)
-{
-       return false;
-}
-
-static inline void ocelot_port_inject_frame(struct ocelot *ocelot, int port,
-                                           int grp, u32 rew_op,
-                                           struct sk_buff *skb)
-{
-}
-
-static inline int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp,
-                                       struct sk_buff **skb)
-{
-       return -EIO;
-}
-
-static inline void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
-{
-}
-
-static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
-{
-       return 0;
-}
-#endif
-
 /* Hardware initialization */
 int ocelot_regfields_init(struct ocelot *ocelot,
                          const struct reg_field *const regfields);
index ded497d72bdbbcad572be9636416b04bf24c8546..f085884b1fa27ffc14293deb5a3a7bb8083b5662 100644 (file)
@@ -13,6 +13,9 @@
 #include <linux/ptp_clock_kernel.h>
 #include <soc/mscc/ocelot.h>
 
+#define OCELOT_MAX_PTP_ID              63
+#define OCELOT_PTP_FIFO_SIZE           128
+
 #define PTP_PIN_CFG_RSZ                        0x20
 #define PTP_PIN_TOD_SEC_MSB_RSZ                PTP_PIN_CFG_RSZ
 #define PTP_PIN_TOD_SEC_LSB_RSZ                PTP_PIN_CFG_RSZ
index 25fd525aaf92844e75b7e709415a4fdfc530671f..4869ebbd438d9a66c584bce7573ccfa1cba25431 100644 (file)
@@ -694,7 +694,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
 int ocelot_vcap_filter_del(struct ocelot *ocelot,
                           struct ocelot_vcap_filter *rule);
 struct ocelot_vcap_filter *
-ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id,
-                                   bool tc_offload);
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
+                                   unsigned long cookie, bool tc_offload);
 
 #endif /* _OCELOT_VCAP_H_ */
index 01570dbda503850a0a7eb5fb2da4cc3d806401e1..0e45963bb767f767f85eeb59e8bc6dc8603959a2 100644 (file)
@@ -224,6 +224,7 @@ struct hda_codec {
 #endif
 
        /* misc flags */
+       unsigned int configured:1; /* codec was configured */
        unsigned int in_freeing:1; /* being released */
        unsigned int registered:1; /* codec was registered */
        unsigned int display_power_control:1; /* needs display power */
index 989e1517332d63184602a78e1f51351832e58f4d..7a08ed2acd609f7a969a4d480cb5a2739939d677 100644 (file)
@@ -98,6 +98,7 @@ struct snd_rawmidi_file {
        struct snd_rawmidi *rmidi;
        struct snd_rawmidi_substream *input;
        struct snd_rawmidi_substream *output;
+       unsigned int user_pversion;     /* supported protocol version */
 };
 
 struct snd_rawmidi_str {
index 9f73ed2cf06116b8fcfee52d701f77bb93af713b..bca73e8c8cdec8db0ab38f9cc4bfa46b3d4b51a2 100644 (file)
@@ -306,11 +306,13 @@ enum afs_flock_operation {
 
 enum afs_cb_break_reason {
        afs_cb_break_no_break,
+       afs_cb_break_no_promise,
        afs_cb_break_for_callback,
        afs_cb_break_for_deleted,
        afs_cb_break_for_lapsed,
+       afs_cb_break_for_s_reinit,
        afs_cb_break_for_unlink,
-       afs_cb_break_for_vsbreak,
+       afs_cb_break_for_v_break,
        afs_cb_break_for_volume_callback,
        afs_cb_break_for_zap,
 };
@@ -602,11 +604,13 @@ enum afs_cb_break_reason {
 
 #define afs_cb_break_reasons                                           \
        EM(afs_cb_break_no_break,               "no-break")             \
+       EM(afs_cb_break_no_promise,             "no-promise")           \
        EM(afs_cb_break_for_callback,           "break-cb")             \
        EM(afs_cb_break_for_deleted,            "break-del")            \
        EM(afs_cb_break_for_lapsed,             "break-lapsed")         \
+       EM(afs_cb_break_for_s_reinit,           "s-reinit")             \
        EM(afs_cb_break_for_unlink,             "break-unlink")         \
-       EM(afs_cb_break_for_vsbreak,            "break-vs")             \
+       EM(afs_cb_break_for_v_break,            "break-v")              \
        EM(afs_cb_break_for_volume_callback,    "break-v-cb")           \
        E_(afs_cb_break_for_zap,                "break-zap")
 
index 9a448fe9355dbebaf08148b2f70a29b652a596fc..920b6a303d60cbfbb9276f3aef8ad2c571173b07 100644 (file)
@@ -178,7 +178,7 @@ TRACE_EVENT(cachefiles_unlink,
                             ),
 
            TP_fast_assign(
-                   __entry->obj        = obj->fscache.debug_id;
+                   __entry->obj        = obj ? obj->fscache.debug_id : UINT_MAX;
                    __entry->de         = de;
                    __entry->why        = why;
                           ),
@@ -205,7 +205,7 @@ TRACE_EVENT(cachefiles_rename,
                             ),
 
            TP_fast_assign(
-                   __entry->obj        = obj->fscache.debug_id;
+                   __entry->obj        = obj ? obj->fscache.debug_id : UINT_MAX;
                    __entry->de         = de;
                    __entry->to         = to;
                    __entry->why        = why;
@@ -305,7 +305,7 @@ TRACE_EVENT(cachefiles_mark_buried,
                             ),
 
            TP_fast_assign(
-                   __entry->obj        = obj->fscache.debug_id;
+                   __entry->obj        = obj ? obj->fscache.debug_id : UINT_MAX;
                    __entry->de         = de;
                    __entry->why        = why;
                           ),
index bf9806fd1306547e8ac6bc6bec6df71dfd8c5d14..db4f2cec8360629e5d7d19b6208b13a86e4c271c 100644 (file)
@@ -35,20 +35,20 @@ TRACE_EVENT(erofs_lookup,
        TP_STRUCT__entry(
                __field(dev_t,          dev     )
                __field(erofs_nid_t,    nid     )
-               __field(const char *,   name    )
+               __string(name,          dentry->d_name.name     )
                __field(unsigned int,   flags   )
        ),
 
        TP_fast_assign(
                __entry->dev    = dir->i_sb->s_dev;
                __entry->nid    = EROFS_I(dir)->nid;
-               __entry->name   = dentry->d_name.name;
+               __assign_str(name, dentry->d_name.name);
                __entry->flags  = flags;
        ),
 
        TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x",
                show_dev_nid(__entry),
-               __entry->name,
+               __get_str(name),
                __entry->flags)
 );
 
index 20e435fe657a1ffe82a685d24fb4fadff59324e6..3246f2c746969c1c35bfc3780c94a4ab516f7a26 100644 (file)
@@ -225,7 +225,14 @@ struct binder_freeze_info {
 
 struct binder_frozen_status_info {
        __u32            pid;
+
+       /* process received sync transactions since last frozen
+        * bit 0: received sync transaction after being frozen
+        * bit 1: new pending sync transaction during freezing
+        */
        __u32            sync_recv;
+
+       /* process received async transactions since last frozen */
        __u32            async_recv;
 };
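A hedged userspace-side sketch of decoding the documented bits, assuming 'info' is a struct binder_frozen_status_info already filled in by the kernel and the helpers are made up:

        if (info.sync_recv & 1)         /* bit 0: sync transaction arrived while frozen */
                handle_missed_sync();
        if (info.sync_recv & 2)         /* bit 1: sync transaction was pending during freeze */
                retry_freeze();
        if (info.async_recv)
                handle_missed_async();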
 
index 69829205fdb5e8f388488466678261788dd15e9a..8e87d27b095165f3b941a55474f1530294e70d2a 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
 /*
- *   include/uapi/linux/cifs/cifs_mount.h
  *
  *   Author(s): Scott Lovenberg (scott.lovenberg@gmail.com)
  *
index 6135d92e0d47bf2f7fd5b79f9d982dc598c5c184..daf82a230c0e711dfbe8efcdffdddf48bd617890 100644 (file)
@@ -26,7 +26,7 @@
 #ifndef _UAPI_HYPERV_H
 #define _UAPI_HYPERV_H
 
-#include <linux/uuid.h>
+#include <linux/types.h>
 
 /*
  * Framework version for util services.
index 59ef35154e3d812b931294ad29e92c78b91d8595..b270a07b285eccf0938a3620b583a0bcb58d622c 100644 (file)
@@ -317,13 +317,19 @@ enum {
        IORING_REGISTER_IOWQ_AFF                = 17,
        IORING_UNREGISTER_IOWQ_AFF              = 18,
 
-       /* set/get max number of workers */
+       /* set/get max number of io-wq workers */
        IORING_REGISTER_IOWQ_MAX_WORKERS        = 19,
 
        /* this goes last */
        IORING_REGISTER_LAST
 };
 
+/* io-wq worker categories */
+enum {
+       IO_WQ_BOUND,
+       IO_WQ_UNBOUND,
+};
+
 /* deprecated, see struct io_uring_rsrc_update */
 struct io_uring_files_update {
        __u32 offset;
index b96c1ea7166d6d277e50dc476f0927a16651a0c8..eda0426ec4c2bf09db2ec9f5cd94ee11019e62ab 100644 (file)
@@ -213,13 +213,13 @@ enum {
        XFRM_MSG_GETSPDINFO,
 #define XFRM_MSG_GETSPDINFO XFRM_MSG_GETSPDINFO
 
+       XFRM_MSG_MAPPING,
+#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING
+
        XFRM_MSG_SETDEFAULT,
 #define XFRM_MSG_SETDEFAULT XFRM_MSG_SETDEFAULT
        XFRM_MSG_GETDEFAULT,
 #define XFRM_MSG_GETDEFAULT XFRM_MSG_GETDEFAULT
-
-       XFRM_MSG_MAPPING,
-#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING
        __XFRM_MSG_MAX
 };
 #define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -514,9 +514,12 @@ struct xfrm_user_offload {
 #define XFRM_OFFLOAD_INBOUND   2
 
 struct xfrm_userpolicy_default {
-#define XFRM_USERPOLICY_DIRMASK_MAX    (sizeof(__u8) * 8)
-       __u8                            dirmask;
-       __u8                            action;
+#define XFRM_USERPOLICY_UNSPEC 0
+#define XFRM_USERPOLICY_BLOCK  1
+#define XFRM_USERPOLICY_ACCEPT 2
+       __u8                            in;
+       __u8                            fwd;
+       __u8                            out;
 };
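A hedged sketch of how userspace might fill the new per-direction defaults using the values defined above:

        struct xfrm_userpolicy_default def = {
                .in  = XFRM_USERPOLICY_ACCEPT,
                .fwd = XFRM_USERPOLICY_ACCEPT,
                .out = XFRM_USERPOLICY_BLOCK,
        };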
 
 #ifndef __KERNEL__
index 7cc2a0f3f2f56e462bc2cef69aa73eb5c459bb49..d13bb8c1b45058b81bc2ba52f9c07a64f8a2ce3d 100644 (file)
@@ -917,7 +917,6 @@ struct hl_wait_cs_in {
 #define HL_WAIT_CS_STATUS_BUSY         1
 #define HL_WAIT_CS_STATUS_TIMEDOUT     2
 #define HL_WAIT_CS_STATUS_ABORTED      3
-#define HL_WAIT_CS_STATUS_INTERRUPTED  4
 
 #define HL_WAIT_CS_STATUS_FLAG_GONE            0x1
 #define HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD   0x2
@@ -1286,7 +1285,8 @@ struct hl_debug_args {
  * EIO       - The CS was aborted (usually because the device was reset)
  * ENODEV    - The device wants to do hard-reset (so user need to close FD)
  *
- * The driver also returns a custom define inside the IOCTL which can be:
+ * The driver also returns a custom define in case the IOCTL call returned 0.
+ * The define can be one of the following:
  *
  * HL_WAIT_CS_STATUS_COMPLETED   - The CS has been completed successfully (0)
  * HL_WAIT_CS_STATUS_BUSY        - The CS is still executing (0)
@@ -1294,8 +1294,6 @@ struct hl_debug_args {
  *                                 (ETIMEDOUT)
  * HL_WAIT_CS_STATUS_ABORTED     - The CS was aborted, usually because the
  *                                 device was reset (EIO)
- * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
- *
  */
 
 #define HL_IOCTL_WAIT_CS                       \
index 1d84ec9db93bd34e77ab31643cd8b5534480f1a1..5859ca0a1439be4cc3276fd54d9cc20c3c28fa25 100644 (file)
@@ -784,6 +784,7 @@ struct snd_rawmidi_status {
 
 #define SNDRV_RAWMIDI_IOCTL_PVERSION   _IOR('W', 0x00, int)
 #define SNDRV_RAWMIDI_IOCTL_INFO       _IOR('W', 0x01, struct snd_rawmidi_info)
+#define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int)
 #define SNDRV_RAWMIDI_IOCTL_PARAMS     _IOWR('W', 0x10, struct snd_rawmidi_params)
 #define SNDRV_RAWMIDI_IOCTL_STATUS     _IOWR('W', 0x20, struct snd_rawmidi_status)
 #define SNDRV_RAWMIDI_IOCTL_DROP       _IOW('W', 0x30, int)
index 39a5580f8feb08e6c39b591ae1dd524fa382ecdc..a3584a357f353c86ba162e2edbecfda1dffa4e1e 100644 (file)
@@ -46,30 +46,18 @@ extern unsigned long *xen_contiguous_bitmap;
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                unsigned int address_bits,
                                dma_addr_t *dma_handle);
-
 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-#else
-static inline int xen_create_contiguous_region(phys_addr_t pstart,
-                                              unsigned int order,
-                                              unsigned int address_bits,
-                                              dma_addr_t *dma_handle)
-{
-       return 0;
-}
-
-static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
-                                                unsigned int order) { }
 #endif
 
 #if defined(CONFIG_XEN_PV)
 int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
                  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
-                 unsigned int domid, bool no_translate, struct page **pages);
+                 unsigned int domid, bool no_translate);
 #else
 static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
                                xen_pfn_t *pfn, int nr, int *err_ptr,
                                pgprot_t prot,  unsigned int domid,
-                               bool no_translate, struct page **pages)
+                               bool no_translate)
 {
        BUG();
        return 0;
@@ -146,7 +134,7 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
         */
        BUG_ON(err_ptr == NULL);
        return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
-                            false, pages);
+                            false);
 }
 
 /*
@@ -158,7 +146,6 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
  * @err_ptr: Returns per-MFN error status.
  * @prot:    page protection mask
  * @domid:   Domain owning the pages
- * @pages:   Array of pages if this domain has an auto-translated physmap
  *
  * @mfn and @err_ptr may point to the same buffer, the MFNs will be
  * overwritten by the error codes after they are mapped.
@@ -169,14 +156,13 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
                                             unsigned long addr, xen_pfn_t *mfn,
                                             int nr, int *err_ptr,
-                                            pgprot_t prot, unsigned int domid,
-                                            struct page **pages)
+                                            pgprot_t prot, unsigned int domid)
 {
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -EOPNOTSUPP;
 
        return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
-                            true, pages);
+                            true);
 }
 
 /* xen_remap_domain_gfn_range() - map a range of foreign frames
@@ -200,8 +186,7 @@ static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -EOPNOTSUPP;
 
-       return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
-                            pages);
+       return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);
 }
 
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
index 2ed30ff6c90625b7944d45c6d4618a011fcc94f7..762b534978d95d3bed72f8b2f3323572db6b1e0a 100644 (file)
@@ -338,20 +338,19 @@ __setup("rootflags=", root_data_setup);
 __setup("rootfstype=", fs_names_setup);
 __setup("rootdelay=", root_delay_setup);
 
-static int __init split_fs_names(char *page, char *names)
+/* This can return zero length strings. Caller should check */
+static int __init split_fs_names(char *page, size_t size, char *names)
 {
-       int count = 0;
+       int count = 1;
        char *p = page;
 
-       strcpy(p, root_fs_names);
+       strlcpy(p, root_fs_names, size);
        while (*p++) {
-               if (p[-1] == ',')
+               if (p[-1] == ',') {
                        p[-1] = '\0';
+                       count++;
+               }
        }
-       *p = '\0';
-
-       for (p = page; *p; p += strlen(p)+1)
-               count++;
 
        return count;
 }
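
The rewritten split_fs_names() above copies the comma-separated list with a bounded copy and counts one entry per separator, so empty entries (from ",," or a trailing comma) now reach the callers, which is why the later hunks add the !*p checks. A standalone sketch of the same split-and-skip behaviour; the function name, buffer size and demo string are illustrative, not kernel code:

#include <stdio.h>
#include <string.h>

/* Split a comma-separated list in place, NUL-terminating each entry.
 * Mirrors the counting rule above: the count starts at 1 and each ','
 * adds one more entry, so ",," or a trailing ',' yields empty strings.
 */
static int split_names(char *buf, size_t size, const char *names)
{
        int count = 1;
        char *p = buf;

        snprintf(buf, size, "%s", names);       /* bounded copy, like strlcpy() */
        while (*p++) {
                if (p[-1] == ',') {
                        p[-1] = '\0';
                        count++;
                }
        }
        return count;
}

int main(void)
{
        char buf[64];
        int n = split_names(buf, sizeof(buf), "ext4,,xfs,");
        char *p = buf;

        for (int i = 0; i < n; i++, p += strlen(p) + 1) {
                if (!*p)        /* zero-length entry: skip, as the callers now do */
                        continue;
                printf("fs: %s\n", p);
        }
        return 0;
}
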
@@ -404,12 +403,16 @@ void __init mount_block_root(char *name, int flags)
        scnprintf(b, BDEVNAME_SIZE, "unknown-block(%u,%u)",
                  MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
        if (root_fs_names)
-               num_fs = split_fs_names(fs_names, root_fs_names);
+               num_fs = split_fs_names(fs_names, PAGE_SIZE, root_fs_names);
        else
                num_fs = list_bdev_fs_names(fs_names, PAGE_SIZE);
 retry:
        for (i = 0, p = fs_names; i < num_fs; i++, p += strlen(p)+1) {
-               int err = do_mount_root(name, p, flags, root_mount_data);
+               int err;
+
+               if (!*p)
+                       continue;
+               err = do_mount_root(name, p, flags, root_mount_data);
                switch (err) {
                        case 0:
                                goto out;
@@ -543,19 +546,18 @@ static int __init mount_nodev_root(void)
        fs_names = (void *)__get_free_page(GFP_KERNEL);
        if (!fs_names)
                return -EINVAL;
-       num_fs = split_fs_names(fs_names, root_fs_names);
+       num_fs = split_fs_names(fs_names, PAGE_SIZE, root_fs_names);
 
        for (i = 0, fstype = fs_names; i < num_fs;
             i++, fstype += strlen(fstype) + 1) {
+               if (!*fstype)
+                       continue;
                if (!fs_is_nodev(fstype))
                        continue;
                err = do_mount_root(root_device_name, fstype, root_mountflags,
                                    root_mount_data);
                if (!err)
                        break;
-               if (err != -EACCES && err != -EINVAL)
-                       panic("VFS: Unable to mount root \"%s\" (%s), err=%d\n",
-                             root_device_name, fstype, err);
        }
 
        free_page((unsigned long)fs_names);
index 5c9a48df90e1b499f88299e6410f606b879072ff..3c4054a9554580a8b65ea5caa33493d1ee825748 100644 (file)
@@ -382,6 +382,7 @@ static char * __init xbc_make_cmdline(const char *key)
        ret = xbc_snprint_cmdline(new_cmdline, len + 1, root);
        if (ret < 0 || ret > len) {
                pr_err("Failed to print extra kernel cmdline.\n");
+               memblock_free_ptr(new_cmdline, len + 1);
                return NULL;
        }
 
@@ -924,7 +925,7 @@ static void __init print_unknown_bootoptions(void)
                end += sprintf(end, " %s", *p);
 
        pr_notice("Unknown command line parameters:%s\n", unknown_options);
-       memblock_free(__pa(unknown_options), len);
+       memblock_free_ptr(unknown_options, len);
 }
 
 asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
@@ -1242,7 +1243,7 @@ trace_initcall_start_cb(void *data, initcall_t fn)
 {
        ktime_t *calltime = (ktime_t *)data;
 
-       printk(KERN_DEBUG "calling  %pS @ %i irqs_disabled() %d\n", fn, task_pid_nr(current), irqs_disabled());
+       printk(KERN_DEBUG "calling  %pS @ %i\n", fn, task_pid_nr(current));
        *calltime = ktime_get();
 }
 
@@ -1256,8 +1257,8 @@ trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
        rettime = ktime_get();
        delta = ktime_sub(rettime, *calltime);
        duration = (unsigned long long) ktime_to_ns(delta) >> 10;
-       printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs, irqs_disabled() %d\n",
-                fn, ret, duration, irqs_disabled());
+       printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n",
+                fn, ret, duration);
 }
 
 static ktime_t initcall_calltime;
index f833238df1ce2e08bb71907db47386c88b655a2b..6693daf4fe1124b3a69a345c42f30cf9767a739b 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -2238,7 +2238,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
                return -EINVAL;
 
        if (nsops > SEMOPM_FAST) {
-               sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL_ACCOUNT);
+               sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
                if (sops == NULL)
                        return -ENOMEM;
        }
index d6731c32864eb5c9002b56e5465650950dfea265..9abcc33f02cf01b552e7362c3ed11ce41fc4756d 100644 (file)
@@ -368,6 +368,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
                const struct btf_type *mtype, *ptype;
                struct bpf_prog *prog;
                u32 moff;
+               u32 flags;
 
                moff = btf_member_bit_offset(t, member) / 8;
                ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
@@ -431,10 +432,12 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 
                tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
                tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
+               flags = st_ops->func_models[i].ret_size > 0 ?
+                       BPF_TRAMP_F_RET_FENTRY_RET : 0;
                err = arch_prepare_bpf_trampoline(NULL, image,
                                                  st_map->image + PAGE_SIZE,
-                                                 &st_ops->func_models[i], 0,
-                                                 tprogs, NULL);
+                                                 &st_ops->func_models[i],
+                                                 flags, tprogs, NULL);
                if (err < 0)
                        goto reset_unlock;
 
index 9f4636d021b108f178343c9822c673e547b0a8cb..d6b7dfdd806610165a223a48b77cfc1abe6d156a 100644 (file)
@@ -827,7 +827,7 @@ int bpf_jit_charge_modmem(u32 pages)
 {
        if (atomic_long_add_return(pages, &bpf_jit_current) >
            (bpf_jit_limit >> PAGE_SHIFT)) {
-               if (!capable(CAP_SYS_ADMIN)) {
+               if (!bpf_capable()) {
                        atomic_long_sub(pages, &bpf_jit_current);
                        return -EPERM;
                }
index ca3cd9aaa6ced0e65bad5617a805f9ca4ad9a715..7b4afb7d96db18306006f51b7805b1150df0789c 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  * Copyright (c) 2016 Facebook
  */
index e546b18d27daeda77548d00ad9f56af24a55d58f..a4b040793f443cc6a4ffa38b12721dd27cd9c690 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  * Copyright (c) 2016 Facebook
  */
index e8eefdf8cf3ecd2555c42634af408249fd1e06f1..6e75bbee39f0b55aa74af7f702ce60f30713cd33 100644 (file)
@@ -63,7 +63,8 @@ static inline int stack_map_data_size(struct bpf_map *map)
 
 static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 {
-       u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
+       u64 elem_size = sizeof(struct stack_map_bucket) +
+                       (u64)smap->map.value_size;
        int err;
 
        smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
@@ -179,7 +180,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
         * with build_id.
         */
        if (!user || !current || !current->mm || irq_work_busy ||
-           !mmap_read_trylock_non_owner(current->mm)) {
+           !mmap_read_trylock(current->mm)) {
                /* cannot access current->mm, fall back to ips */
                for (i = 0; i < trace_nr; i++) {
                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
@@ -204,9 +205,15 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
        }
 
        if (!work) {
-               mmap_read_unlock_non_owner(current->mm);
+               mmap_read_unlock(current->mm);
        } else {
                work->mm = current->mm;
+
+               /* The lock will be released once we're out of interrupt
+                * context. Tell lockdep that we've released it now so
+                * it doesn't complain that we forgot to release it.
+                */
+               rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_);
                irq_work_queue(&work->irq_work);
        }
 }
index 047ac4b4703b08a72902f423a0a629803f2358cc..e76b559179054b93191b8959ddcad9ad01535e8c 100644 (file)
@@ -9912,6 +9912,8 @@ static int check_btf_line(struct bpf_verifier_env *env,
        nr_linfo = attr->line_info_cnt;
        if (!nr_linfo)
                return 0;
+       if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
+               return -EINVAL;
 
        rec_size = attr->line_info_rec_size;
        if (rec_size < MIN_BPF_LINEINFO_SIZE ||
index 881ce1470bebad74af7f37c8ae6b6579dd433cb8..570b0c97392a95fdfddf00cfed35c9a91e2e07fe 100644 (file)
@@ -6572,74 +6572,51 @@ int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
  */
 #ifdef CONFIG_SOCK_CGROUP_DATA
 
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-
-DEFINE_SPINLOCK(cgroup_sk_update_lock);
-static bool cgroup_sk_alloc_disabled __read_mostly;
-
-void cgroup_sk_alloc_disable(void)
-{
-       if (cgroup_sk_alloc_disabled)
-               return;
-       pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
-       cgroup_sk_alloc_disabled = true;
-}
-
-#else
-
-#define cgroup_sk_alloc_disabled       false
-
-#endif
-
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
 {
-       if (cgroup_sk_alloc_disabled) {
-               skcd->no_refcnt = 1;
-               return;
-       }
-
-       /* Don't associate the sock with unrelated interrupted task's cgroup. */
-       if (in_interrupt())
-               return;
+       struct cgroup *cgroup;
 
        rcu_read_lock();
+       /* Don't associate the sock with unrelated interrupted task's cgroup. */
+       if (in_interrupt()) {
+               cgroup = &cgrp_dfl_root.cgrp;
+               cgroup_get(cgroup);
+               goto out;
+       }
 
        while (true) {
                struct css_set *cset;
 
                cset = task_css_set(current);
                if (likely(cgroup_tryget(cset->dfl_cgrp))) {
-                       skcd->val = (unsigned long)cset->dfl_cgrp;
-                       cgroup_bpf_get(cset->dfl_cgrp);
+                       cgroup = cset->dfl_cgrp;
                        break;
                }
                cpu_relax();
        }
-
+out:
+       skcd->cgroup = cgroup;
+       cgroup_bpf_get(cgroup);
        rcu_read_unlock();
 }
 
 void cgroup_sk_clone(struct sock_cgroup_data *skcd)
 {
-       if (skcd->val) {
-               if (skcd->no_refcnt)
-                       return;
-               /*
-                * We might be cloning a socket which is left in an empty
-                * cgroup and the cgroup might have already been rmdir'd.
-                * Don't use cgroup_get_live().
-                */
-               cgroup_get(sock_cgroup_ptr(skcd));
-               cgroup_bpf_get(sock_cgroup_ptr(skcd));
-       }
+       struct cgroup *cgrp = sock_cgroup_ptr(skcd);
+
+       /*
+        * We might be cloning a socket which is left in an empty
+        * cgroup and the cgroup might have already been rmdir'd.
+        * Don't use cgroup_get_live().
+        */
+       cgroup_get(cgrp);
+       cgroup_bpf_get(cgrp);
 }
 
 void cgroup_sk_free(struct sock_cgroup_data *skcd)
 {
        struct cgroup *cgrp = sock_cgroup_ptr(skcd);
 
-       if (skcd->no_refcnt)
-               return;
        cgroup_bpf_put(cgrp);
        cgroup_put(cgrp);
 }
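
cgroup_sk_alloc() above now always pins a cgroup: the default root when called in interrupt context, otherwise the task's default cgroup via a tryget that is retried while the candidate may be mid-release. A compressed stdatomic sketch of that tryget pattern; the object and function names are invented, and the real code pins css_set/cgroup objects rather than this toy refcount:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
        atomic_int refcnt;      /* 0 means the object is being destroyed */
};

/* Take a reference only if the object is still live. */
static bool obj_tryget(struct obj *o)
{
        int r = atomic_load(&o->refcnt);

        while (r > 0) {
                if (atomic_compare_exchange_weak(&o->refcnt, &r, r + 1))
                        return true;
        }
        return false;
}

static void obj_put(struct obj *o)
{
        atomic_fetch_sub(&o->refcnt, 1);
}

int main(void)
{
        struct obj live = { 1 }, dying = { 0 };
        struct obj *picked;

        /* As in the hunk above: try the preferred object first and fall
         * back to a default one if the candidate is already dying. */
        picked = obj_tryget(&dying) ? &dying :
                 (obj_tryget(&live) ? &live : NULL);

        printf("pinned %s\n", picked == &live ? "the live object" : "nothing");
        if (picked)
                obj_put(picked);
        return 0;
}
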
index df1ccf4558f8242904aad112bdf86f68ed3fa76b..2a9695ccb65f539c713fdce1cc9bb2fd15c4779f 100644 (file)
@@ -311,17 +311,19 @@ static struct cpuset top_cpuset = {
                if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
 
 /*
- * There are two global locks guarding cpuset structures - cpuset_mutex and
+ * There are two global locks guarding cpuset structures - cpuset_rwsem and
  * callback_lock. We also require taking task_lock() when dereferencing a
  * task's cpuset pointer. See "The task_lock() exception", at the end of this
- * comment.
+ * comment.  The cpuset code uses only cpuset_rwsem write lock.  Other
+ * kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
+ * prevent change to cpuset structures.
  *
  * A task must hold both locks to modify cpusets.  If a task holds
- * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
+ * cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
  * is the only task able to also acquire callback_lock and be able to
  * modify cpusets.  It can perform various checks on the cpuset structure
  * first, knowing nothing will change.  It can also allocate memory while
- * just holding cpuset_mutex.  While it is performing these checks, various
+ * just holding cpuset_rwsem.  While it is performing these checks, various
  * callback routines can briefly acquire callback_lock to query cpusets.
  * Once it is ready to make the changes, it takes callback_lock, blocking
  * everyone else.
@@ -393,7 +395,7 @@ static inline bool is_in_v2_mode(void)
  * One way or another, we guarantee to return some non-empty subset
  * of cpu_online_mask.
  *
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
  */
 static void guarantee_online_cpus(struct task_struct *tsk,
                                  struct cpumask *pmask)
@@ -435,7 +437,7 @@ out_unlock:
  * One way or another, we guarantee to return some non-empty subset
  * of node_states[N_MEMORY].
  *
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
  */
 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 {
@@ -447,7 +449,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 /*
  * update task's spread flag if cpuset's page/slab spread flag is set
  *
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
  */
 static void cpuset_update_task_spread_flag(struct cpuset *cs,
                                        struct task_struct *tsk)
@@ -468,7 +470,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
  *
  * One cpuset is a subset of another if all its allowed CPUs and
  * Memory Nodes are a subset of the other, and its exclusive flags
- * are only set if the other's are set.  Call holding cpuset_mutex.
+ * are only set if the other's are set.  Call holding cpuset_rwsem.
  */
 
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -577,7 +579,7 @@ static inline void free_cpuset(struct cpuset *cs)
  * If we replaced the flag and mask values of the current cpuset
  * (cur) with those values in the trial cpuset (trial), would
  * our various subset and exclusive rules still be valid?  Presumes
- * cpuset_mutex held.
+ * cpuset_rwsem held.
  *
  * 'cur' is the address of an actual, in-use cpuset.  Operations
  * such as list traversal that depend on the actual address of the
@@ -700,7 +702,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
        rcu_read_unlock();
 }
 
-/* Must be called with cpuset_mutex held.  */
+/* Must be called with cpuset_rwsem held.  */
 static inline int nr_cpusets(void)
 {
        /* jump label reference count + the top-level cpuset */
@@ -726,7 +728,7 @@ static inline int nr_cpusets(void)
  * domains when operating in the severe memory shortage situations
  * that could cause allocation failures below.
  *
- * Must be called with cpuset_mutex held.
+ * Must be called with cpuset_rwsem held.
  *
  * The three key local variables below are:
  *    cp - cpuset pointer, used (together with pos_css) to perform a
@@ -1005,7 +1007,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
  * 'cpus' is removed, then call this routine to rebuild the
  * scheduler's dynamic sched domains.
  *
- * Call with cpuset_mutex held.  Takes cpus_read_lock().
+ * Call with cpuset_rwsem held.  Takes cpus_read_lock().
  */
 static void rebuild_sched_domains_locked(void)
 {
@@ -1078,7 +1080,7 @@ void rebuild_sched_domains(void)
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
  *
  * Iterate through each task of @cs updating its cpus_allowed to the
- * effective cpuset's.  As this function is called with cpuset_mutex held,
+ * effective cpuset's.  As this function is called with cpuset_rwsem held,
  * cpuset membership stays stable.
  */
 static void update_tasks_cpumask(struct cpuset *cs)
@@ -1347,7 +1349,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
  *
  * On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
  *
- * Called with cpuset_mutex held
+ * Called with cpuset_rwsem held
  */
 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
 {
@@ -1704,12 +1706,12 @@ static void *cpuset_being_rebound;
  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
  *
  * Iterate through each task of @cs updating its mems_allowed to the
- * effective cpuset's.  As this function is called with cpuset_mutex held,
+ * effective cpuset's.  As this function is called with cpuset_rwsem held,
  * cpuset membership stays stable.
  */
 static void update_tasks_nodemask(struct cpuset *cs)
 {
-       static nodemask_t newmems;      /* protected by cpuset_mutex */
+       static nodemask_t newmems;      /* protected by cpuset_rwsem */
        struct css_task_iter it;
        struct task_struct *task;
 
@@ -1722,7 +1724,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
         * take while holding tasklist_lock.  Forks can happen - the
         * mpol_dup() cpuset_being_rebound check will catch such forks,
         * and rebind their vma mempolicies too.  Because we still hold
-        * the global cpuset_mutex, we know that no other rebind effort
+        * the global cpuset_rwsem, we know that no other rebind effort
         * will be contending for the global variable cpuset_being_rebound.
         * It's ok if we rebind the same mm twice; mpol_rebind_mm()
         * is idempotent.  Also migrate pages in each mm to new nodes.
@@ -1768,7 +1770,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
  *
  * On legacy hierarchy, effective_mems will be the same with mems_allowed.
  *
- * Called with cpuset_mutex held
+ * Called with cpuset_rwsem held
  */
 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
 {
@@ -1821,7 +1823,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
  * mempolicies and if the cpuset is marked 'memory_migrate',
  * migrate the tasks pages to the new memory.
  *
- * Call with cpuset_mutex held. May take callback_lock during call.
+ * Call with cpuset_rwsem held. May take callback_lock during call.
  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
  * lock each such tasks mm->mmap_lock, scan its vma's and rebind
  * their mempolicies to the cpusets new mems_allowed.
@@ -1911,7 +1913,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
  * @cs: the cpuset in which each task's spread flags needs to be changed
  *
  * Iterate through each task of @cs updating its spread flags.  As this
- * function is called with cpuset_mutex held, cpuset membership stays
+ * function is called with cpuset_rwsem held, cpuset membership stays
  * stable.
  */
 static void update_tasks_flags(struct cpuset *cs)
@@ -1931,7 +1933,7 @@ static void update_tasks_flags(struct cpuset *cs)
  * cs:         the cpuset to update
  * turning_on:         whether the flag is being set or cleared
  *
- * Call with cpuset_mutex held.
+ * Call with cpuset_rwsem held.
  */
 
 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
@@ -1980,7 +1982,7 @@ out:
  * cs: the cpuset to update
  * new_prs: new partition root state
  *
- * Call with cpuset_mutex held.
+ * Call with cpuset_rwsem held.
  */
 static int update_prstate(struct cpuset *cs, int new_prs)
 {
@@ -2167,7 +2169,7 @@ static int fmeter_getrate(struct fmeter *fmp)
 
 static struct cpuset *cpuset_attach_old_cs;
 
-/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
+/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
 static int cpuset_can_attach(struct cgroup_taskset *tset)
 {
        struct cgroup_subsys_state *css;
@@ -2219,7 +2221,7 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
 }
 
 /*
- * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
+ * Protected by cpuset_rwsem.  cpus_attach is used only by cpuset_attach()
  * but we can't allocate it dynamically there.  Define it global and
  * allocate from cpuset_init().
  */
@@ -2227,7 +2229,7 @@ static cpumask_var_t cpus_attach;
 
 static void cpuset_attach(struct cgroup_taskset *tset)
 {
-       /* static buf protected by cpuset_mutex */
+       /* static buf protected by cpuset_rwsem */
        static nodemask_t cpuset_attach_nodemask_to;
        struct task_struct *task;
        struct task_struct *leader;
@@ -2417,7 +2419,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
         * operation like this one can lead to a deadlock through kernfs
         * active_ref protection.  Let's break the protection.  Losing the
         * protection is okay as we check whether @cs is online after
-        * grabbing cpuset_mutex anyway.  This only happens on the legacy
+        * grabbing cpuset_rwsem anyway.  This only happens on the legacy
         * hierarchies.
         */
        css_get(&cs->css);
@@ -3672,7 +3674,7 @@ void __cpuset_memory_pressure_bump(void)
  *  - Used for /proc/<pid>/cpuset.
  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
  *    doesn't really matter if tsk->cpuset changes after we read it,
- *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
+ *    and we take cpuset_rwsem, keeping cpuset_attach() from changing it
  *    anyway.
  */
 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
index 6c90c69e5311afa9347b4f0df90412b544ed8280..95445bd6eb7258d9d6a9fd26acc79748574fa12d 100644 (file)
@@ -567,7 +567,8 @@ static void add_dma_entry(struct dma_debug_entry *entry)
                pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
        } else if (rc == -EEXIST) {
-               pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n");
+               err_printk(entry->dev, entry,
+                       "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
        }
 }
 
index 7ee5284bff585d33f05fd029d65e5d3002cd1540..06fec5547e7c790a58ab02183d31228cb94d5f09 100644 (file)
@@ -206,7 +206,8 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 /**
  * dma_map_sg_attrs - Map the given buffer for DMA
  * @dev:       The device for which to perform the DMA operation
- * @sg:        The sg_table object describing the buffer
+ * @sg:                The sg_table object describing the buffer
+ * @nents:     Number of entries to map
  * @dir:       DMA direction
  * @attrs:     Optional DMA attributes for the map operation
  *
index bf16395b9e1351f843edce4335df8bddb5a49686..d5a61d565ad5d186e8a9e206a9e17b4d285f93f6 100644 (file)
@@ -171,10 +171,8 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                        handle_signal_work(regs, ti_work);
 
-               if (ti_work & _TIF_NOTIFY_RESUME) {
+               if (ti_work & _TIF_NOTIFY_RESUME)
                        tracehook_notify_resume(regs);
-                       rseq_handle_notify_resume(NULL, regs);
-               }
 
                /* Architecture specific TIF work */
                arch_exit_to_user_mode_work(regs, ti_work);
index 744e8726c5b2fa226ef4514f04d480fc9015d52a..f23ca260307f02dbc501bd0b8e36ece31f82faf5 100644 (file)
@@ -3707,6 +3707,29 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
        return 0;
 }
 
+static inline bool event_update_userpage(struct perf_event *event)
+{
+       if (likely(!atomic_read(&event->mmap_count)))
+               return false;
+
+       perf_event_update_time(event);
+       perf_set_shadow_time(event, event->ctx);
+       perf_event_update_userpage(event);
+
+       return true;
+}
+
+static inline void group_update_userpage(struct perf_event *group_event)
+{
+       struct perf_event *event;
+
+       if (!event_update_userpage(group_event))
+               return;
+
+       for_each_sibling_event(event, group_event)
+               event_update_userpage(event);
+}
+
 static int merge_sched_in(struct perf_event *event, void *data)
 {
        struct perf_event_context *ctx = event->ctx;
@@ -3725,14 +3748,15 @@ static int merge_sched_in(struct perf_event *event, void *data)
        }
 
        if (event->state == PERF_EVENT_STATE_INACTIVE) {
+               *can_add_hw = 0;
                if (event->attr.pinned) {
                        perf_cgroup_event_disable(event, ctx);
                        perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+               } else {
+                       ctx->rotate_necessary = 1;
+                       perf_mux_hrtimer_restart(cpuctx);
+                       group_update_userpage(event);
                }
-
-               *can_add_hw = 0;
-               ctx->rotate_necessary = 1;
-               perf_mux_hrtimer_restart(cpuctx);
        }
 
        return 0;
@@ -6324,6 +6348,8 @@ accounting:
 
                ring_buffer_attach(event, rb);
 
+               perf_event_update_time(event);
+               perf_set_shadow_time(event, event->ctx);
                perf_event_init_userpage(event);
                perf_event_update_userpage(event);
        } else {
@@ -10193,7 +10219,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
                return;
 
        if (ifh->nr_file_filters) {
-               mm = get_task_mm(event->ctx->task);
+               mm = get_task_mm(task);
                if (!mm)
                        goto restart;
 
index 19e83e9b723ce0bea51340c5b65756516ef44dff..4d8fc65cf38f4177c34b3138a94fcab86830892e 100644 (file)
@@ -136,7 +136,7 @@ EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
  * Allocates and initializes an irq_domain structure.
  * Returns pointer to IRQ domain, or NULL on failure.
  */
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
                                    irq_hw_number_t hwirq_max, int direct_max,
                                    const struct irq_domain_ops *ops,
                                    void *host_data)
index 4ba15088e6405bfe7ec16d956e0cba9ce9f72a09..88191f6e252c34260252e18a4cc7ee8fdbdbda59 100644 (file)
  * The risk of writer starvation is there, but the pathological use cases
  * which trigger it are not necessarily the typical RT workloads.
  *
+ * Fast-path orderings:
+ * The lock/unlock of readers can run in fast paths: lock and unlock are only
+ * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
+ * semantics of rwbase_rt. Atomic ops should thus provide _acquire()
+ * and _release() (or stronger).
+ *
  * Common code shared between RT rw_semaphore and rwlock
  */
 
@@ -53,6 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
         * set.
         */
        for (r = atomic_read(&rwb->readers); r < 0;) {
+               /* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
                if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
                        return 1;
        }
@@ -162,6 +169,8 @@ static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
        /*
         * rwb->readers can only hit 0 when a writer is waiting for the
         * active readers to leave the critical section.
+        *
+        * dec_and_test() is fully ordered, provides RELEASE.
         */
        if (unlikely(atomic_dec_and_test(&rwb->readers)))
                __rwbase_read_unlock(rwb, state);
@@ -172,7 +181,11 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
 {
        struct rt_mutex_base *rtm = &rwb->rtmutex;
 
-       atomic_add(READER_BIAS - bias, &rwb->readers);
+       /*
+        * _release() is needed in case a reader is in the fast path, pairing
+        * with atomic_try_cmpxchg() in rwbase_read_trylock(), provides RELEASE
+        */
+       (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        rwbase_rtmutex_unlock(rtm);
 }
@@ -196,6 +209,23 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
        __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
 }
 
+static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
+{
+       /* Can do without CAS because we're serialized by wait_lock. */
+       lockdep_assert_held(&rwb->rtmutex.wait_lock);
+
+       /*
+        * _acquire is needed in case the reader is in the fast path, pairing
+        * with rwbase_read_unlock(), provides ACQUIRE.
+        */
+       if (!atomic_read_acquire(&rwb->readers)) {
+               atomic_set(&rwb->readers, WRITER_BIAS);
+               return 1;
+       }
+
+       return 0;
+}
+
 static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
                                     unsigned int state)
 {
@@ -210,34 +240,30 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
        atomic_sub(READER_BIAS, &rwb->readers);
 
        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-       /*
-        * set_current_state() for rw_semaphore
-        * current_save_and_set_rtlock_wait_state() for rwlock
-        */
-       rwbase_set_and_save_current_state(state);
+       if (__rwbase_write_trylock(rwb))
+               goto out_unlock;
 
-       /* Block until all readers have left the critical section. */
-       for (; atomic_read(&rwb->readers);) {
+       rwbase_set_and_save_current_state(state);
+       for (;;) {
                /* Optimized out for rwlocks */
                if (rwbase_signal_pending_state(state, current)) {
-                       __set_current_state(TASK_RUNNING);
+                       rwbase_restore_current_state();
                        __rwbase_write_unlock(rwb, 0, flags);
                        return -EINTR;
                }
+
+               if (__rwbase_write_trylock(rwb))
+                       break;
+
                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+               rwbase_schedule();
+               raw_spin_lock_irqsave(&rtm->wait_lock, flags);
 
-               /*
-                * Schedule and wait for the readers to leave the critical
-                * section. The last reader leaving it wakes the waiter.
-                */
-               if (atomic_read(&rwb->readers) != 0)
-                       rwbase_schedule();
                set_current_state(state);
-               raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        }
-
-       atomic_set(&rwb->readers, WRITER_BIAS);
        rwbase_restore_current_state();
+
+out_unlock:
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        return 0;
 }
@@ -253,8 +279,7 @@ static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
        atomic_sub(READER_BIAS, &rwb->readers);
 
        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-       if (!atomic_read(&rwb->readers)) {
-               atomic_set(&rwb->readers, WRITER_BIAS);
+       if (__rwbase_write_trylock(rwb)) {
                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
                return 1;
        }
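
The ordering comments added above reduce to a pairing rule: the reader fast path takes the lock with an acquire cmpxchg and drops it with a release decrement, and the writer pairs with both through an acquire read and a release store on the same counter. A self-contained C11 stdatomic sketch of that pairing on a simplified counter; it deliberately does not use the kernel's READER_BIAS/WRITER_BIAS encoding and omits the rtmutex slow path entirely:

#include <stdatomic.h>
#include <stdio.h>

/* Simplified counter: >= 0 means "n readers, no writer", -1 means the
 * writer holds it.  Only the acquire/release pairing matters here. */
static atomic_int readers;

static int read_trylock(void)
{
        int r = atomic_load(&readers);

        while (r >= 0) {
                /* acquire on success: pairs with the writer's release */
                if (atomic_compare_exchange_weak_explicit(&readers, &r, r + 1,
                                memory_order_acquire, memory_order_relaxed))
                        return 1;
        }
        return 0;       /* writer holds the lock: callers would take the slow path */
}

static void read_unlock(void)
{
        /* release: pairs with the writer's acquire in write_trylock() */
        atomic_fetch_sub_explicit(&readers, 1, memory_order_release);
}

static int write_trylock(void)
{
        int expected = 0;

        /* acquire so the writer sees everything readers did before unlocking */
        return atomic_compare_exchange_strong_explicit(&readers, &expected, -1,
                        memory_order_acquire, memory_order_relaxed);
}

static void write_unlock(void)
{
        /* release so the next reader's acquire sees the writer's stores */
        atomic_store_explicit(&readers, 0, memory_order_release);
}

int main(void)
{
        if (read_trylock()) {
                printf("reader in\n");
                read_unlock();
        }
        if (write_trylock()) {
                printf("writer in, readers blocked: %d\n", read_trylock());
                write_unlock();
        }
        return 0;
}
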
index 40ec9a030eecf41d5597e6c910746aab385b1da0..5c26a76e800b579a1e450b1aab962da2df7fc0d6 100644 (file)
@@ -4489,8 +4489,10 @@ static void cfi_init(struct module *mod)
        /* Fix init/exit functions to point to the CFI jump table */
        if (init)
                mod->init = *init;
+#ifdef CONFIG_MODULE_UNLOAD
        if (exit)
                mod->exit = *exit;
+#endif
 
        cfi_module_add(mod, module_addr_min);
 #endif
index 825277e1e742ddfaacd6cc99f4ed7a4156d88778..a8d0a58deebc708b08574bee0e29bbbe5ff961b1 100644 (file)
@@ -1166,9 +1166,9 @@ void __init setup_log_buf(int early)
        return;
 
 err_free_descs:
-       memblock_free(__pa(new_descs), new_descs_size);
+       memblock_free_ptr(new_descs, new_descs_size);
 err_free_log_buf:
-       memblock_free(__pa(new_log_buf), new_log_buf_len);
+       memblock_free_ptr(new_log_buf, new_log_buf_len);
 }
 
 static bool __read_mostly ignore_loglevel;
index 35f7bd0fced0e2dd8aed819e054dac03f024388a..6d45ac3dae7fb6714b9910ec8f2bca12f9481fa1 100644 (file)
@@ -282,9 +282,17 @@ void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
 
        if (unlikely(t->flags & PF_EXITING))
                return;
-       ret = rseq_ip_fixup(regs);
-       if (unlikely(ret < 0))
-               goto error;
+
+       /*
+        * regs is NULL if and only if the caller is in a syscall path.  Skip
+        * fixup and leave rseq_cs as is so that rseq_syscall() will detect and
+        * kill a misbehaving userspace on debug kernels.
+        */
+       if (regs) {
+               ret = rseq_ip_fixup(regs);
+               if (unlikely(ret < 0))
+                       goto error;
+       }
        if (unlikely(rseq_update_cpu_id(t)))
                goto error;
        return;
index 49716228efb4bbed0a362e2ae0533adb5edde72c..17a653b67006af6fed33ceae0ee3608d8e8eb0ad 100644 (file)
@@ -173,16 +173,22 @@ static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
                                   size_t cnt, loff_t *ppos)
 {
        char buf[16];
+       unsigned int scaling;
 
        if (cnt > 15)
                cnt = 15;
 
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
+       buf[cnt] = '\0';
 
-       if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))
+       if (kstrtouint(buf, 10, &scaling))
                return -EINVAL;
 
+       if (scaling >= SCHED_TUNABLESCALING_END)
+               return -EINVAL;
+
+       sysctl_sched_tunable_scaling = scaling;
        if (sched_update_scaling())
                return -EINVAL;
 
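
The fix above NUL-terminates the copied buffer, parses into a local variable and range-checks it before publishing the value to sysctl_sched_tunable_scaling. A plain userspace sketch of that parse-validate-publish order; the buffer size, range limit and function names are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define SCALING_END 3   /* stand-in for SCHED_TUNABLESCALING_END */

static unsigned int tunable_scaling;    /* the value readers see */

/* Parse and validate first; only then update the shared variable. */
static int scaling_write(const char *ubuf, size_t cnt)
{
        char buf[16];
        char *end;
        unsigned long scaling;

        if (cnt > sizeof(buf) - 1)
                cnt = sizeof(buf) - 1;
        memcpy(buf, ubuf, cnt);
        buf[cnt] = '\0';                /* the terminator the old code lacked */

        errno = 0;
        scaling = strtoul(buf, &end, 10);
        if (errno || end == buf)
                return -EINVAL;
        if (scaling >= SCALING_END)     /* reject out-of-range values up front */
                return -EINVAL;

        tunable_scaling = (unsigned int)scaling;
        return 0;
}

int main(void)
{
        printf("write \"2\"  -> %d, value=%u\n", scaling_write("2", 1), tunable_scaling);
        printf("write \"9\"  -> %d, value=%u\n", scaling_write("9", 1), tunable_scaling);
        printf("write \"xy\" -> %d, value=%u\n", scaling_write("xy", 2), tunable_scaling);
        return 0;
}
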
index ff69f245b939595ab914c70c1f7fa01c1843778a..f6a05d9b54436ae0353d88574b8669e98c468d4e 100644 (file)
@@ -4936,8 +4936,12 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        /* update hierarchical throttle state */
        walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
 
-       if (!cfs_rq->load.weight)
+       /* Nothing to run but something to decay (on_list)? Complete the branch */
+       if (!cfs_rq->load.weight) {
+               if (cfs_rq->on_list)
+                       goto unthrottle_throttle;
                return;
+       }
 
        task_delta = cfs_rq->h_nr_running;
        idle_task_delta = cfs_rq->idle_h_nr_running;
index ee736861b18f7e5a30df8d259ce4bd1f9b6d50e7..643d412ac6235eae110975f60247c97a8f278225 100644 (file)
@@ -1404,7 +1404,8 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
                        }
                }
 
-               *newval += now;
+               if (*newval)
+                       *newval += now;
        }
 
        /*
index c221e4c3f625cf9760cdbc85e4f0c4a6fc714924..fa91f398f28b734f9474219a459628e946c716b4 100644 (file)
@@ -1605,6 +1605,14 @@ static int blk_trace_remove_queue(struct request_queue *q)
        if (bt == NULL)
                return -EINVAL;
 
+       if (bt->trace_state == Blktrace_running) {
+               bt->trace_state = Blktrace_stopped;
+               spin_lock_irq(&running_trace_lock);
+               list_del_init(&bt->running_list);
+               spin_unlock_irq(&running_trace_lock);
+               relay_flush(bt->rchan);
+       }
+
        put_probe_ref();
        synchronize_rcu();
        blk_trace_free(bt);
index 7896d30d90f76f023d40c9e9f2cea2f1cbdc3878..bc677cd6422401889f77b7d60ba50fbb2ad1bd5a 100644 (file)
@@ -1744,16 +1744,15 @@ void latency_fsnotify(struct trace_array *tr)
        irq_work_queue(&tr->fsnotify_irqwork);
 }
 
-/*
- * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
- *  defined(CONFIG_FSNOTIFY)
- */
-#else
+#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
+       || defined(CONFIG_OSNOISE_TRACER)
 
 #define trace_create_maxlat_file(tr, d_tracer)                         \
        trace_create_file("tracing_max_latency", 0644, d_tracer,        \
                          &tr->max_latency, &tracing_max_lat_fops)
 
+#else
+#define trace_create_maxlat_file(tr, d_tracer)  do { } while (0)
 #endif
 
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -9473,9 +9472,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 
        create_trace_options_dir(tr);
 
-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
        trace_create_maxlat_file(tr, d_tracer);
-#endif
 
        if (ftrace_create_function_files(tr, d_tracer))
                MEM_FAIL(1, "Could not allocate function filter files");
index 3044b762cbd733545ee25a83c91760e28a4825b9..c4a15aef36afa3c1e3a97ad6030167e69604796a 100644 (file)
@@ -119,10 +119,58 @@ static bool eprobe_dyn_event_match(const char *system, const char *event,
                        int argc, const char **argv, struct dyn_event *ev)
 {
        struct trace_eprobe *ep = to_trace_eprobe(ev);
+       const char *slash;
 
-       return strcmp(trace_probe_name(&ep->tp), event) == 0 &&
-           (!system || strcmp(trace_probe_group_name(&ep->tp), system) == 0) &&
-           trace_probe_match_command_args(&ep->tp, argc, argv);
+       /*
+        * We match the following:
+        *  event only                  - match all eprobes with event name
+        *  system and event only       - match all system/event probes
+        *
+        * The cases below require the above to match and take more arguments:
+        *
+        *  attached system/event       - If the arg has the system and event
+        *                                the probe is attached to, match
+        *                                probes with the attachment.
+        *
+        *  If any more args are given, then it requires a full match.
+        */
+
+       /*
+        * If system exists, but this probe is not part of that system
+        * do not match.
+        */
+       if (system && strcmp(trace_probe_group_name(&ep->tp), system) != 0)
+               return false;
+
+       /* Must match the event name */
+       if (strcmp(trace_probe_name(&ep->tp), event) != 0)
+               return false;
+
+       /* No arguments match all */
+       if (argc < 1)
+               return true;
+
+       /* First argument is the system/event the probe is attached to */
+
+       slash = strchr(argv[0], '/');
+       if (!slash)
+               slash = strchr(argv[0], '.');
+       if (!slash)
+               return false;
+
+       if (strncmp(ep->event_system, argv[0], slash - argv[0]))
+               return false;
+       if (strcmp(ep->event_name, slash + 1))
+               return false;
+
+       argc--;
+       argv++;
+
+       /* If there are no other args, then match */
+       if (argc < 1)
+               return true;
+
+       return trace_probe_match_command_args(&ep->tp, argc, argv);
 }
 
 static struct dyn_event_operations eprobe_dyn_event_ops = {
@@ -632,6 +680,13 @@ static int disable_eprobe(struct trace_eprobe *ep,
 
        trace_event_trigger_enable_disable(file, 0);
        update_cond_flag(file);
+
+       /* Make sure nothing is using the edata or trigger */
+       tracepoint_synchronize_unregister();
+
+       kfree(edata);
+       kfree(trigger);
+
        return 0;
 }
 
index a6061a69aa84e2c88070f090b4a2496e02615b8a..f01e442716e2f19d8dc920a3812b31d9dcaf5d80 100644 (file)
@@ -2506,7 +2506,7 @@ find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
  * events.  However, for convenience, users are allowed to directly
  * specify an event field in an action, which will be automatically
  * converted into a variable on their behalf.
-
+ *
  * If a user specifies a field on an event that isn't the event the
  * histogram currently being defined (the target event histogram), the
  * only way that can be accomplished is if a new hist trigger is
index 33a6b4a2443d274de3dfb1f31c11a460da118747..1b3eb1e9531f4ac71ebced9bac749ecea747064f 100644 (file)
@@ -4830,8 +4830,16 @@ void show_workqueue_state(void)
 
                for_each_pwq(pwq, wq) {
                        raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-                       if (pwq->nr_active || !list_empty(&pwq->inactive_works))
+                       if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+                               /*
+                                * Defer printing to avoid deadlocks in console
+                                * drivers that queue work while holding locks
+                                * also taken in their write paths.
+                                */
+                               printk_deferred_enter();
                                show_pwq(pwq);
+                               printk_deferred_exit();
+                       }
                        raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
                        /*
                         * We could be printing a lot from atomic context, e.g.
@@ -4849,7 +4857,12 @@ void show_workqueue_state(void)
                raw_spin_lock_irqsave(&pool->lock, flags);
                if (pool->nr_workers == pool->nr_idle)
                        goto next_pool;
-
+               /*
+                * Defer printing to avoid deadlocks in console drivers that
+                * queue work while holding locks also taken in their write
+                * paths.
+                */
+               printk_deferred_enter();
                pr_info("pool %d:", pool->id);
                pr_cont_pool_info(pool);
                pr_cont(" hung=%us workers=%d",
@@ -4864,6 +4877,7 @@ void show_workqueue_state(void)
                        first = false;
                }
                pr_cont("\n");
+               printk_deferred_exit();
        next_pool:
                raw_spin_unlock_irqrestore(&pool->lock, flags);
                /*
index ed4a31e34098defa61c88823c9e566dbea422417..2a9b6dcdac4ff8909f76724db74b290208488211 100644 (file)
@@ -295,7 +295,7 @@ config DEBUG_INFO_DWARF4
 
 config DEBUG_INFO_DWARF5
        bool "Generate DWARF Version 5 debuginfo"
-       depends on GCC_VERSION >= 50000 || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
+       depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
        depends on !DEBUG_INFO_BTF
        help
          Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
@@ -346,7 +346,7 @@ config FRAME_WARN
        int "Warn for stack frames larger than"
        range 0 8192
        default 2048 if GCC_PLUGIN_LATENT_ENTROPY
-       default 1536 if (!64BIT && PARISC)
+       default 1536 if (!64BIT && (PARISC || XTENSA))
        default 1024 if (!64BIT && !PARISC)
        default 2048 if 64BIT
        help
index 1e2d10f86011718f9795638afa642d8c8bcc2067..cdc842d090db388dc446e4016392e106ed8b4856 100644 (file)
@@ -66,6 +66,7 @@ choice
 config KASAN_GENERIC
        bool "Generic mode"
        depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
+       depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
        select SLUB_DEBUG if SLUB
        select CONSTRUCTORS
        help
@@ -86,6 +87,7 @@ config KASAN_GENERIC
 config KASAN_SW_TAGS
        bool "Software tag-based mode"
        depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
+       depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
        select SLUB_DEBUG if SLUB
        select CONSTRUCTORS
        help
index 5efd1b435a37c5d138d15c608cb9e7b685ef20f6..a841be5244ac666cc727168c750c0d174f8d0698 100644 (file)
@@ -351,7 +351,7 @@ obj-$(CONFIG_OBJAGG) += objagg.o
 obj-$(CONFIG_PLDMFW) += pldmfw/
 
 # KUnit tests
-CFLAGS_bitfield_kunit.o := $(call cc-option,-Wframe-larger-than=10240)
+CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
 obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
 obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
 obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
index f8419cff11471c94874a5e20f72ef32abae7a5d6..5ae248b2937389363c9519954a14f86877def4c7 100644 (file)
@@ -792,7 +792,7 @@ void __init xbc_destroy_all(void)
        xbc_data = NULL;
        xbc_data_size = 0;
        xbc_node_num = 0;
-       memblock_free(__pa(xbc_nodes), sizeof(struct xbc_node) * XBC_NODE_MAX);
+       memblock_free_ptr(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
        xbc_nodes = NULL;
        brace_index = 0;
 }
index f2d50d69a6c364c06b9e7fa4f37527224dd712c6..755c10c5138cdb51d5da166ee2900565dfdfde38 100644 (file)
@@ -1972,3 +1972,39 @@ int import_single_range(int rw, void __user *buf, size_t len,
        return 0;
 }
 EXPORT_SYMBOL(import_single_range);
+
+/**
+ * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
+ *     iov_iter_save_state() was called.
+ *
+ * @i: &struct iov_iter to restore
+ * @state: state to restore from
+ *
+ * Used after iov_iter_save_state() to restore @i, if operations may
+ * have advanced it.
+ *
+ * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
+ */
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
+{
+       if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
+                        !iov_iter_is_kvec(i))
+               return;
+       i->iov_offset = state->iov_offset;
+       i->count = state->count;
+       /*
+        * For the *vec iters, nr_segs + iov is constant - if we increment
+        * the vec, then we also decrement the nr_segs count. Hence we don't
+        * need to track both of these, just one is enough and we can deduce
+        * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
+        * size, so we can just increment the iov pointer as they are unionized.
+        * ITER_BVEC _may_ be the same size on some archs, but on others it is
+        * not. Be safe and handle it separately.
+        */
+       BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
+       if (iov_iter_is_bvec(i))
+               i->bvec -= state->nr_segs - i->nr_segs;
+       else
+               i->iov -= state->nr_segs - i->nr_segs;
+       i->nr_segs = state->nr_segs;
+}
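
iov_iter_restore() above saves only iov_offset, count and nr_segs, and recovers the segment pointer from the invariant that pointer plus nr_segs stays constant while the iterator advances. A small standalone sketch of the same save/restore trick over a plain segment array; the cursor type and field names are illustrative, not the kernel's iov_iter:

#include <stdio.h>
#include <stddef.h>

struct seg { const char *base; size_t len; };

struct cursor {                 /* simplified iterator over an array of segments */
        const struct seg *seg;
        size_t nr_segs;
        size_t seg_off;         /* offset into *seg */
        size_t count;           /* bytes left overall */
};

struct cursor_state { size_t nr_segs, seg_off, count; };

static void cursor_save(const struct cursor *c, struct cursor_state *s)
{
        s->nr_segs = c->nr_segs;
        s->seg_off = c->seg_off;
        s->count = c->count;
}

static void cursor_restore(struct cursor *c, const struct cursor_state *s)
{
        /* seg + nr_segs is constant, so the old pointer is recoverable */
        c->seg -= s->nr_segs - c->nr_segs;
        c->nr_segs = s->nr_segs;
        c->seg_off = s->seg_off;
        c->count = s->count;
}

static void cursor_advance(struct cursor *c, size_t bytes)
{
        c->count -= bytes;
        while (bytes) {
                size_t step = c->seg->len - c->seg_off;

                if (bytes < step) {
                        c->seg_off += bytes;
                        return;
                }
                bytes -= step;
                c->seg++;               /* move to the next segment... */
                c->nr_segs--;           /* ...keeping seg + nr_segs constant */
                c->seg_off = 0;
        }
}

int main(void)
{
        struct seg segs[] = { { "hello ", 6 }, { "world", 5 } };
        struct cursor c = { segs, 2, 0, 11 };
        struct cursor_state st;

        cursor_save(&c, &st);
        cursor_advance(&c, 8);          /* lands 2 bytes into the second segment */
        printf("after advance: seg=%zu off=%zu left=%zu\n",
               (size_t)(c.seg - segs), c.seg_off, c.count);

        cursor_restore(&c, &st);
        printf("after restore: seg=%zu off=%zu left=%zu\n",
               (size_t)(c.seg - segs), c.seg_off, c.count);
        return 0;
}
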
index cdbe54b165017e4a70c9e2aa12d021911519f2a2..e14a18af573dd71c8e0ad1641d2d53ae2a0c405f 100644 (file)
@@ -116,8 +116,8 @@ static void kfree_at_end(struct kunit *test, const void *to_free)
        /* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
        if (IS_ERR_OR_NULL(to_free))
                return;
-       kunit_alloc_and_get_resource(test, NULL, kfree_res_free, GFP_KERNEL,
-                                    (void *)to_free);
+       kunit_alloc_resource(test, NULL, kfree_res_free, GFP_KERNEL,
+                            (void *)to_free);
 }
 
 static struct kunit_suite *alloc_fake_suite(struct kunit *test,
index 6ed72dccfdb5d667cfa7a4a67d3d727eb5d417ad..9a72f4bbf0e2638d6171b62e834224d7958a3c7a 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #include <linux/packing.h>
index 2d3eb1cb73b8f5b9ee3307e8700d64379cdea19d..ce39ce9f3526ea2eee7891b62302369b076e8974 100644 (file)
@@ -134,4 +134,47 @@ void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
        return pci_iomap_wc_range(dev, bar, 0, maxlen);
 }
 EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+       uintptr_t start = (uintptr_t) PCI_IOBASE;
+       uintptr_t addr = (uintptr_t) p;
+
+       if (addr >= start && addr < start + IO_SPACE_LIMIT)
+               return;
+       iounmap(p);
+#endif
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
+
 #endif /* CONFIG_PCI */
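
The generic pci_iounmap() above only calls iounmap() when the cookie lies outside the fixed I/O-port window that starts at PCI_IOBASE. A tiny userspace sketch of just that range check; the window base and limit below are made-up stand-ins for PCI_IOBASE and IO_SPACE_LIMIT:

#include <stdio.h>
#include <stdint.h>

#define FAKE_IOBASE         0xfee00000u /* stand-in for PCI_IOBASE */
#define FAKE_IO_SPACE_LIMIT 0xffffu     /* stand-in for IO_SPACE_LIMIT */

/* Returns 1 if the cookie is an I/O-port mapping and needs no unmap. */
static int is_ioport_cookie(uintptr_t addr)
{
        uintptr_t start = FAKE_IOBASE;

        return addr >= start && addr < start + FAKE_IO_SPACE_LIMIT;
}

int main(void)
{
        uintptr_t port_cookie = FAKE_IOBASE + 0x60;     /* inside the window */
        uintptr_t mmio_cookie = 0xd0000000u;            /* outside: would be iounmap()ed */

        printf("port cookie: %s\n", is_ioport_cookie(port_cookie) ?
               "skip unmap" : "iounmap");
        printf("mmio cookie: %s\n", is_ioport_cookie(mmio_cookie) ?
               "skip unmap" : "iounmap");
        return 0;
}
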
index f19c4fbe1be7832abd60e75d03afa765acaca65b..2843f9bb42acdf674f370e08031d5333c34dfdaa 100644 (file)
@@ -253,13 +253,12 @@ void inflate_fast(z_streamp strm, unsigned start)
 
                        sfrom = (unsigned short *)(from);
                        loops = len >> 1;
-                       do
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-                           *sout++ = *sfrom++;
-#else
-                           *sout++ = get_unaligned16(sfrom++);
-#endif
-                       while (--loops);
+                       do {
+                           if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+                               *sout++ = *sfrom++;
+                           else
+                               *sout++ = get_unaligned16(sfrom++);
+                       } while (--loops);
                        out = (unsigned char *)sout;
                        from = (unsigned char *)sfrom;
                    } else { /* dist == 1 or dist == 2 */
index 930e83bceef03767f9a2556f7ca6501a76c4cf6e..4eddcfa73996f24672344652cbb087e1a50bc58a 100644 (file)
@@ -20,27 +20,27 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
        ssize_t nr_integers = 0, i;
 
        question = "123";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
        KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
        kfree(answers);
 
        question = "123abc";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
        KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
        kfree(answers);
 
        question = "a123";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
        kfree(answers);
 
        question = "12 35";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
        for (i = 0; i < nr_integers; i++)
@@ -48,7 +48,7 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
        kfree(answers);
 
        question = "12 35 46";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers);
        for (i = 0; i < nr_integers; i++)
@@ -56,7 +56,7 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
        kfree(answers);
 
        question = "12 35 abc 46";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
        for (i = 0; i < 2; i++)
@@ -64,13 +64,13 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
        kfree(answers);
 
        question = "";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
        kfree(answers);
 
        question = "\n";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
        kfree(answers);
index e73fe0a8ec3d25fe4af305cb89c83288fbba1d18..fae0f81ad831302462b99257602c067359668ab6 100644 (file)
@@ -24,7 +24,9 @@ const char *migrate_reason_names[MR_TYPES] = {
        "syscall_or_cpuset",
        "mempolicy_mbind",
        "numa_misplaced",
-       "cma",
+       "contig_range",
+       "longterm_pin",
+       "demotion",
 };
 
 const struct trace_print_flags pageflag_names[] = {
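The mm/debug.c hunk brings migrate_reason_names[] back in sync with the migrate_reason enum. The usual way to keep such a table and its enum from drifting apart again is a compile-time size check; a sketch with an invented, cut-down enum (not the kernel's real MR_* list):

    enum migrate_reason {           /* invented subset, for illustration only */
            MR_COMPACTION,
            MR_CONTIG_RANGE,
            MR_LONGTERM_PIN,
            MR_DEMOTION,
            MR_TYPES
    };

    static const char * const migrate_reason_names[] = {
            "compaction",
            "contig_range",
            "longterm_pin",
            "demotion",
    };

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Fails the build if a new enum value is added without a matching string. */
    _Static_assert(ARRAY_SIZE(migrate_reason_names) == MR_TYPES,
                   "migrate_reason_names out of sync with enum migrate_reason");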
index 025338128cd92415ba57ce989aa8b9ebba888bf7..a5716fdec1aac0ac9bf6f2305d5369887e671e39 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -651,10 +651,8 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
         * from &migrate_nodes. This will verify that future list.h changes
         * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
         */
-#if defined(GCC_VERSION) && GCC_VERSION >= 40903
        BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
        BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
-#endif
 
        if (stable_node->head == &migrate_nodes)
                list_del(&stable_node->list);
index 0ab5a749bfa69e07f124e9d378f8468e459053de..5c3503c98b2f113ca3de1f0a76e863a316d5c8cd 100644 (file)
@@ -472,7 +472,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
-               memblock_free(__pa(old_array), old_alloc_size);
+               memblock_free_ptr(old_array, old_alloc_size);
 
        /*
         * Reserve the new array if that comes from the memblock.  Otherwise, we
@@ -795,6 +795,20 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
        return memblock_remove_range(&memblock.memory, base, size);
 }
 
+/**
+ * memblock_free_ptr - free boot memory allocation
+ * @ptr: starting address of the boot memory allocation
+ * @size: size of the boot memory block in bytes
+ *
+ * Free a boot memory block previously allocated by the memblock_alloc_xx() API.
+ * The freed memory will not be released to the buddy allocator.
+ */
+void __init_memblock memblock_free_ptr(void *ptr, size_t size)
+{
+       if (ptr)
+               memblock_free(__pa(ptr), size);
+}
+
 /**
  * memblock_free - free boot memory block
  * @base: phys starting address of the  boot memory block
@@ -922,7 +936,12 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
  */
 int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
 {
-       return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
+       int ret = memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
+
+       if (!ret)
+               kmemleak_free_part_phys(base, size);
+
+       return ret;
 }
 
 /**
index b762215d73eb99bcce600de14175ea68ae95a8db..6da5020a8656da81761fbd3ca48efd8317dd1304 100644 (file)
@@ -106,9 +106,6 @@ static bool do_memsw_account(void)
 /* memcg and lruvec stats flushing */
 static void flush_memcg_stats_dwork(struct work_struct *w);
 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
-static void flush_memcg_stats_work(struct work_struct *w);
-static DECLARE_WORK(stats_flush_work, flush_memcg_stats_work);
-static DEFINE_PER_CPU(unsigned int, stats_flush_threshold);
 static DEFINE_SPINLOCK(stats_flush_lock);
 
 #define THRESHOLDS_EVENTS_TARGET 128
@@ -682,8 +679,6 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 
        /* Update lruvec */
        __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
-       if (!(__this_cpu_inc_return(stats_flush_threshold) % MEMCG_CHARGE_BATCH))
-               queue_work(system_unbound_wq, &stats_flush_work);
 }
 
 /**
@@ -5361,11 +5356,6 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
        queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
 }
 
-static void flush_memcg_stats_work(struct work_struct *w)
-{
-       mem_cgroup_flush_stats();
-}
-
 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
index 54879c3390243603ecdcb39686def20d66c6facb..3e6449f2102a77fce4ac5416f960c801e616a887 100644 (file)
@@ -306,6 +306,7 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
                struct vm_area_struct *vma)
 {
        unsigned long address = vma_address(page, vma);
+       unsigned long ret = 0;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
@@ -329,11 +330,10 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
        if (pmd_devmap(*pmd))
                return PMD_SHIFT;
        pte = pte_offset_map(pmd, address);
-       if (!pte_present(*pte))
-               return 0;
-       if (pte_devmap(*pte))
-               return PAGE_SHIFT;
-       return 0;
+       if (pte_present(*pte) && pte_devmap(*pte))
+               ret = PAGE_SHIFT;
+       pte_unmap(pte);
+       return ret;
 }
 
 /*
@@ -1126,7 +1126,7 @@ static int page_action(struct page_state *ps, struct page *p,
  */
 static inline bool HWPoisonHandlable(struct page *page)
 {
-       return PageLRU(page) || __PageMovable(page);
+       return PageLRU(page) || __PageMovable(page) || is_free_buddy_page(page);
 }
 
 static int __get_hwpoison_page(struct page *page)
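The dev_pagemap_mapping_shift() change converts several early returns into a single exit so that pte_unmap() runs on every path. The same "acquire, compute into a local, release once, return" shape in plain userspace C, with a file handle standing in for the mapped PTE:

    #include <stdio.h>

    /* Returns the file size in bytes, or -1 on error. After fopen() succeeds
     * there is exactly one fclose() before returning, on every path. */
    static long file_size(const char *path)
    {
            FILE *f = fopen(path, "rb");
            long ret = -1;

            if (!f)
                    return -1;
            if (fseek(f, 0, SEEK_END) == 0)
                    ret = ftell(f);
            fclose(f);              /* single release point; no early return can skip it */
            return ret;
    }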
index 25fc46e872142a11692e3b44402cab89ca644fb3..adf9b9ef8277da3dab7500ee26b0067bd6763e71 100644 (file)
@@ -3403,6 +3403,7 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
        i_mmap_unlock_write(mapping);
 }
+EXPORT_SYMBOL_GPL(unmap_mapping_pages);
 
 /**
  * unmap_mapping_range - unmap the portion of all mmaps in the specified
index 88742953532cce47eaf63200eaf26fac10c6b6f6..b5860f4a2738ee3861e30bd4e506578a0f9218ba 100644 (file)
@@ -490,9 +490,9 @@ bool shmem_is_huge(struct vm_area_struct *vma,
        case SHMEM_HUGE_ALWAYS:
                return true;
        case SHMEM_HUGE_WITHIN_SIZE:
-               index = round_up(index, HPAGE_PMD_NR);
+               index = round_up(index + 1, HPAGE_PMD_NR);
                i_size = round_up(i_size_read(inode), PAGE_SIZE);
-               if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
+               if (i_size >> PAGE_SHIFT >= index)
                        return true;
                fallthrough;
        case SHMEM_HUGE_ADVISE:
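The SHMEM_HUGE_WITHIN_SIZE change fixes an off-by-one: when index already sits on a huge-page boundary, round_up(index, HPAGE_PMD_NR) leaves it unchanged, so the old test accepted files that only reached the start of that huge page rather than its end. Rounding up index + 1 asks whether i_size covers the whole huge page containing index, which is how the hunk reads. A small worked example, assuming 512 base pages per huge page:

    #include <stdio.h>

    #define ROUND_UP(x, a)  ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
            unsigned long hpage_nr = 512;   /* 4 KiB pages per 2 MiB huge page */
            unsigned long index = 512;      /* first page of the *second* huge page */

            /* Old check: 512 rounds to 512, so an i_size of only 512 pages qualified. */
            printf("old bound: %lu pages\n", ROUND_UP(index, hpage_nr));        /* 512 */
            /* New check: the second huge page spans pages 512..1023, so require 1024. */
            printf("new bound: %lu pages\n", ROUND_UP(index + 1, hpage_nr));    /* 1024 */
            return 0;
    }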
index 897200d27dd075de42d8ffabcd8265b437652506..af3cad4e5378334c9d7da8fefde2081abf838678 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -620,7 +620,6 @@ void lru_add_drain_cpu(int cpu)
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
 
        activate_page_drain(cpu);
-       invalidate_bh_lrus_cpu(cpu);
 }
 
 /**
@@ -703,6 +702,20 @@ void lru_add_drain(void)
        local_unlock(&lru_pvecs.lock);
 }
 
+/*
+ * In the SMP case this is called from per-cpu workqueue context, so
+ * lru_add_drain_cpu and invalidate_bh_lrus_cpu run on the same cpu.
+ * The !SMP case is also fine, since there is only one core and the
+ * locks disable preemption.
+ */
+static void lru_add_and_bh_lrus_drain(void)
+{
+       local_lock(&lru_pvecs.lock);
+       lru_add_drain_cpu(smp_processor_id());
+       local_unlock(&lru_pvecs.lock);
+       invalidate_bh_lrus_cpu();
+}
+
 void lru_add_drain_cpu_zone(struct zone *zone)
 {
        local_lock(&lru_pvecs.lock);
@@ -717,7 +730,7 @@ static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
-       lru_add_drain();
+       lru_add_and_bh_lrus_drain();
 }
 
 /*
@@ -858,7 +871,7 @@ void lru_cache_disable(void)
         */
        __lru_add_drain_all(true);
 #else
-       lru_add_drain();
+       lru_add_and_bh_lrus_drain();
 #endif
 }
 
index 499b6b5767ed11955ed40fce7b75bcc87e23c09f..bacabe446906558f6c3b1d4ff7801d187c64c256 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -787,7 +787,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
                size_t *lenp, loff_t *ppos)
 {
        struct ctl_table t;
-       int new_policy;
+       int new_policy = -1;
        int ret;
 
        /*
@@ -805,7 +805,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
                t = *table;
                t.data = &new_policy;
                ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
-               if (ret)
+               if (ret || new_policy == -1)
                        return ret;
 
                mm_compute_batch(new_policy);
index d4268d8e9a82dcb2399c8e069bb3305982f2f47f..d5b81e4f4cbe8cdc1c1434a21ffc743d322b2227 100644 (file)
@@ -352,6 +352,7 @@ void workingset_refault(struct page *page, void *shadow)
 
        inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
 
+       mem_cgroup_flush_stats();
        /*
         * Compare the distance to the existing workingset size. We
         * don't activate pages that couldn't stay resident even if
index 2eb0e55ef54d221f61e1b721b3c9016f3f6a9780..b5f4ef35357c808a52f3dfc70dad00b8513984e9 100644 (file)
@@ -552,6 +552,12 @@ static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
        __skb->gso_segs = skb_shinfo(skb)->gso_segs;
 }
 
+static struct proto bpf_dummy_proto = {
+       .name   = "bpf_dummy",
+       .owner  = THIS_MODULE,
+       .obj_size = sizeof(struct sock),
+};
+
 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
 {
@@ -596,20 +602,19 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                break;
        }
 
-       sk = kzalloc(sizeof(struct sock), GFP_USER);
+       sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
        if (!sk) {
                kfree(data);
                kfree(ctx);
                return -ENOMEM;
        }
-       sock_net_set(sk, net);
        sock_init_data(NULL, sk);
 
        skb = build_skb(data, 0);
        if (!skb) {
                kfree(data);
                kfree(ctx);
-               kfree(sk);
+               sk_free(sk);
                return -ENOMEM;
        }
        skb->sk = sk;
@@ -682,8 +687,7 @@ out:
        if (dev && dev != net->loopback_dev)
                dev_put(dev);
        kfree_skb(skb);
-       bpf_sk_storage_free(sk);
-       kfree(sk);
+       sk_free(sk);
        kfree(ctx);
        return ret;
 }
index 3523c8c7068fdd0dd4847951290d5c1390f0153d..f3d751105343ce035c3885ded7f778b07ada4ff0 100644 (file)
@@ -1677,8 +1677,6 @@ static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
                                        int ifindex,
                                        struct br_ip *saddr)
 {
-       lockdep_assert_held_once(&brmctx->br->multicast_lock);
-
        write_seqcount_begin(&querier->seq);
        querier->port_ifidx = ifindex;
        memcpy(&querier->addr, saddr, sizeof(*saddr));
@@ -3867,13 +3865,13 @@ void br_multicast_ctx_init(struct net_bridge *br,
 
        brmctx->ip4_other_query.delay_time = 0;
        brmctx->ip4_querier.port_ifidx = 0;
-       seqcount_init(&brmctx->ip4_querier.seq);
+       seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
        brmctx->multicast_igmp_version = 2;
 #if IS_ENABLED(CONFIG_IPV6)
        brmctx->multicast_mld_version = 1;
        brmctx->ip6_other_query.delay_time = 0;
        brmctx->ip6_querier.port_ifidx = 0;
-       seqcount_init(&brmctx->ip6_querier.seq);
+       seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
 #endif
 
        timer_setup(&brmctx->ip4_mc_router_timer,
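The bridge querier hunks switch from a bare seqcount_t to seqcount_spinlock_t, recording that br->multicast_lock serializes writers (which makes the dropped lockdep assertion redundant) while readers stay lockless and simply retry if they raced with an update. A rough userspace analog of that reader/writer protocol, using C11 atomics and a pthread mutex in place of the kernel primitives; the memory-ordering and lockdep guarantees of the real seqcount API are glossed over, and every name here is invented:

    #include <stdatomic.h>
    #include <pthread.h>

    /* Two-word querier state published to lockless readers. Every shared word
     * is atomic so this sketch is free of data races; the kernel relies on the
     * seqcount barriers instead. The lock must be set up with
     * pthread_mutex_init() before use. */
    struct querier {
            _Atomic unsigned int seq;       /* odd while a writer is mid-update */
            _Atomic int port_ifidx;
            _Atomic unsigned int addr4;
            pthread_mutex_t lock;           /* plays the role of br->multicast_lock */
    };

    static void querier_update(struct querier *q, int ifidx, unsigned int addr4)
    {
            pthread_mutex_lock(&q->lock);   /* writers must be serialized */
            atomic_fetch_add(&q->seq, 1);   /* seq becomes odd: update in progress */
            atomic_store(&q->port_ifidx, ifidx);
            atomic_store(&q->addr4, addr4);
            atomic_fetch_add(&q->seq, 1);   /* seq even again: update complete */
            pthread_mutex_unlock(&q->lock);
    }

    static void querier_read(struct querier *q, int *ifidx, unsigned int *addr4)
    {
            unsigned int s1, s2;

            do {                            /* retry if a writer was active */
                    s1 = atomic_load(&q->seq);
                    *ifidx = atomic_load(&q->port_ifidx);
                    *addr4 = atomic_load(&q->addr4);
                    s2 = atomic_load(&q->seq);
            } while ((s1 & 1) || s1 != s2);
    }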
index 6c58fc14d2cb2de8bcd8364fc5e766247aba2e97..5c6c4305ed235891b2ed5c5a17eb8382f2aec1a0 100644 (file)
@@ -1666,7 +1666,8 @@ static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
        }
 
        return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
-              nla_total_size(sizeof(struct br_mcast_stats)) +
+              nla_total_size_64bit(sizeof(struct br_mcast_stats)) +
+              (p ? nla_total_size_64bit(sizeof(p->stp_xstats)) : 0) +
               nla_total_size(0);
 }
 
index b4cef3a97f12b48cb429eb64765124787a471b72..e8136db44462c6a868800209010832539a2b554b 100644 (file)
@@ -82,7 +82,7 @@ struct bridge_mcast_other_query {
 struct bridge_mcast_querier {
        struct br_ip addr;
        int port_ifidx;
-       seqcount_t seq;
+       seqcount_spinlock_t seq;
 };
 
 /* IGMP/MLD statistics */
index 37b67194c0dfe412d51b92d9b5f8513e5b2db34a..414dc5671c45edb447342b934683b0eaf1695b39 100644 (file)
@@ -53,20 +53,6 @@ struct chnl_net {
        enum caif_states state;
 };
 
-static void robust_list_del(struct list_head *delete_node)
-{
-       struct list_head *list_node;
-       struct list_head *n;
-       ASSERT_RTNL();
-       list_for_each_safe(list_node, n, &chnl_net_list) {
-               if (list_node == delete_node) {
-                       list_del(list_node);
-                       return;
-               }
-       }
-       WARN_ON(1);
-}
-
 static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
 {
        struct sk_buff *skb;
@@ -364,6 +350,7 @@ static int chnl_net_init(struct net_device *dev)
        ASSERT_RTNL();
        priv = netdev_priv(dev);
        strncpy(priv->name, dev->name, sizeof(priv->name));
+       INIT_LIST_HEAD(&priv->list_field);
        return 0;
 }
 
@@ -372,7 +359,7 @@ static void chnl_net_uninit(struct net_device *dev)
        struct chnl_net *priv;
        ASSERT_RTNL();
        priv = netdev_priv(dev);
-       robust_list_del(&priv->list_field);
+       list_del_init(&priv->list_field);
 }
 
 static const struct net_device_ops netdev_ops = {
@@ -537,7 +524,7 @@ static void __exit chnl_exit_module(void)
        rtnl_lock();
        list_for_each_safe(list_node, _tmp, &chnl_net_list) {
                dev = list_entry(list_node, struct chnl_net, list_field);
-               list_del(list_node);
+               list_del_init(list_node);
                delete_device(dev);
        }
        rtnl_unlock();
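The chnl_net change replaces a hand-rolled "search the list before deleting" helper with list_del_init() plus an INIT_LIST_HEAD() at init time: an entry that points at itself unlinks harmlessly, so deleting it twice, or before it was ever added, is safe. A stripped-down version of the two list helpers involved, loosely mirroring the kernel's list.h:

    struct list_head {
            struct list_head *next, *prev;
    };

    static void INIT_LIST_HEAD(struct list_head *h)
    {
            h->next = h;
            h->prev = h;
    }

    static void list_add(struct list_head *e, struct list_head *head)
    {
            e->next = head->next;
            e->prev = head;
            head->next->prev = e;
            head->next = e;
    }

    /* Unlink 'e' and reinitialize it to point at itself, so a later
     * list_del_init() on the same entry is a harmless self-unlink rather
     * than a dereference of stale pointers. */
    static void list_del_init(struct list_head *e)
    {
            e->prev->next = e->next;
            e->next->prev = e->prev;
            INIT_LIST_HEAD(e);
    }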
index 74fd402d26ddeba725d7447b855f4d3c1fea8238..7ee9fecd3affc4cb99eef4dca95a7e8757602545 100644 (file)
@@ -6923,12 +6923,16 @@ EXPORT_SYMBOL(napi_disable);
  */
 void napi_enable(struct napi_struct *n)
 {
-       BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
-       smp_mb__before_atomic();
-       clear_bit(NAPI_STATE_SCHED, &n->state);
-       clear_bit(NAPI_STATE_NPSVC, &n->state);
-       if (n->dev->threaded && n->thread)
-               set_bit(NAPI_STATE_THREADED, &n->state);
+       unsigned long val, new;
+
+       do {
+               val = READ_ONCE(n->state);
+               BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
+
+               new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
+               if (n->dev->threaded && n->thread)
+                       new |= NAPIF_STATE_THREADED;
+       } while (cmpxchg(&n->state, val, new) != val);
 }
 EXPORT_SYMBOL(napi_enable);
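napi_enable() now updates the multi-bit state word with a cmpxchg() retry loop instead of separate clear_bit()/set_bit() calls, so the whole transition is one atomic step even against concurrent writers. The same shape with C11 atomics; the bit names are invented stand-ins for the NAPIF_STATE_* flags:

    #include <stdatomic.h>

    #define STATE_SCHED     (1UL << 0)      /* invented stand-ins for NAPIF_STATE_* */
    #define STATE_NPSVC     (1UL << 1)
    #define STATE_THREADED  (1UL << 2)

    /* Recompute the new value from a snapshot and retry if another thread
     * changed the word in between; compare_exchange reloads 'val' on failure. */
    static void enable_state(_Atomic unsigned long *state, int threaded)
    {
            unsigned long val = atomic_load(state);
            unsigned long new;

            do {
                    new = val & ~(STATE_SCHED | STATE_NPSVC);
                    if (threaded)
                            new |= STATE_THREADED;
            } while (!atomic_compare_exchange_weak(state, &val, new));
    }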
 
index 8c39283c26ae6470b47a5503ccd5688b265137d0..f0cb38344126a0ce327a9afc7aafffb07b2a0cfb 100644 (file)
@@ -50,6 +50,11 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
        if (addr_len > MAX_ADDR_LEN)
                return -EINVAL;
 
+       ha = list_first_entry(&list->list, struct netdev_hw_addr, list);
+       if (ha && !memcmp(addr, ha->addr, addr_len) &&
+           (!addr_type || addr_type == ha->type))
+               goto found_it;
+
        while (*ins_point) {
                int diff;
 
@@ -64,6 +69,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
                } else if (diff > 0) {
                        ins_point = &parent->rb_right;
                } else {
+found_it:
                        if (exclusive)
                                return -EEXIST;
                        if (global) {
index eab5fc88a002871d7487825c6895a97cb12a4af8..d8b9dbabd4a439137c408fb548ea058b1349c3a1 100644 (file)
@@ -77,8 +77,8 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 
-       seq_printf(seq, "%9s: %16llu %12llu %4llu %6llu %4llu %5llu %10llu %9llu "
-                  "%16llu %12llu %4llu %6llu %4llu %5llu %7llu %10llu\n",
+       seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
+                  "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
                   dev->name, stats->rx_bytes, stats->rx_packets,
                   stats->rx_errors,
                   stats->rx_dropped + stats->rx_missed_errors,
@@ -103,11 +103,11 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 static int dev_seq_show(struct seq_file *seq, void *v)
 {
        if (v == SEQ_START_TOKEN)
-               seq_puts(seq, "Interface|                            Receive                   "
-                             "                    |                                 Transmit\n"
-                             "         |            bytes      packets errs   drop fifo frame "
-                             "compressed multicast|            bytes      packets errs "
-                             "  drop fifo colls carrier compressed\n");
+               seq_puts(seq, "Inter-|   Receive                            "
+                             "                    |  Transmit\n"
+                             " face |bytes    packets errs drop fifo frame "
+                             "compressed multicast|bytes    packets errs "
+                             "drop fifo colls carrier compressed\n");
        else
                dev_seq_printf_stats(seq, v);
        return 0;
@@ -259,14 +259,14 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
        struct packet_type *pt = v;
 
        if (v == SEQ_START_TOKEN)
-               seq_puts(seq, "Type      Device      Function\n");
+               seq_puts(seq, "Type Device      Function\n");
        else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
                if (pt->type == htons(ETH_P_ALL))
                        seq_puts(seq, "ALL ");
                else
                        seq_printf(seq, "%04x", ntohs(pt->type));
 
-               seq_printf(seq, "      %-9s   %ps\n",
+               seq_printf(seq, " %-8s %ps\n",
                           pt->dev ? pt->dev->name : "", pt->func);
        }
 
@@ -327,14 +327,12 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
        struct netdev_hw_addr *ha;
        struct net_device *dev = v;
 
-       if (v == SEQ_START_TOKEN) {
-               seq_puts(seq, "Ifindex Interface Refcount Global_use Address\n");
+       if (v == SEQ_START_TOKEN)
                return 0;
-       }
 
        netif_addr_lock_bh(dev);
        netdev_for_each_mc_addr(ha, dev) {
-               seq_printf(seq, "%-7d %-9s %-8d %-10d %*phN\n",
+               seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
                           dev->ifindex, dev->name,
                           ha->refcount, ha->global_use,
                           (int)dev->addr_len, ha->addr);
index b49c57d35a88e62915fe1534e082f7a912197bbe..1a6a86693b745984dbc3fe0c3b39d30042a47186 100644 (file)
@@ -71,11 +71,8 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
        struct update_classid_context *ctx = (void *)v;
        struct socket *sock = sock_from_file(file);
 
-       if (sock) {
-               spin_lock(&cgroup_sk_update_lock);
+       if (sock)
                sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
-               spin_unlock(&cgroup_sk_update_lock);
-       }
        if (--ctx->batch == 0) {
                ctx->batch = UPDATE_CLASSID_BATCH;
                return n + 1;
@@ -121,8 +118,6 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
        struct css_task_iter it;
        struct task_struct *p;
 
-       cgroup_sk_alloc_disable();
-
        cs->classid = (u32)value;
 
        css_task_iter_start(css, 0, &it);
index 99a431c56f230b3655ffec56b7e4e2a7f24871a6..8456dfbe2eb40b371901c1e753acf8513262ac41 100644 (file)
@@ -207,8 +207,6 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
        if (!dev)
                return -ENODEV;
 
-       cgroup_sk_alloc_disable();
-
        rtnl_lock();
 
        ret = netprio_set_prio(of_css(of), dev, prio);
@@ -221,12 +219,10 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
 static int update_netprio(const void *v, struct file *file, unsigned n)
 {
        struct socket *sock = sock_from_file(file);
-       if (sock) {
-               spin_lock(&cgroup_sk_update_lock);
+
+       if (sock)
                sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
                                        (unsigned long)v);
-               spin_unlock(&cgroup_sk_update_lock);
-       }
        return 0;
 }
 
@@ -235,8 +231,6 @@ static void net_prio_attach(struct cgroup_taskset *tset)
        struct task_struct *p;
        struct cgroup_subsys_state *css;
 
-       cgroup_sk_alloc_disable();
-
        cgroup_taskset_for_each(p, css, tset) {
                void *v = (void *)(unsigned long)css->id;
 
index 972c8cb303a514758278307cd9fcb974e37f2b96..8ccce85562a1da2a5285aebd19a6a4cb7d6a163e 100644 (file)
@@ -5262,7 +5262,7 @@ nla_put_failure:
 static size_t if_nlmsg_stats_size(const struct net_device *dev,
                                  u32 filter_mask)
 {
-       size_t size = 0;
+       size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
 
        if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
                size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
index 62627e868e03603a5aff99a0dfc7c99c6a4457e2..c1601f75ec4b3eac90c1b82c0dc9533e9b1b73eb 100644 (file)
@@ -1376,6 +1376,16 @@ set_sndbuf:
 }
 EXPORT_SYMBOL(sock_setsockopt);
 
+static const struct cred *sk_get_peer_cred(struct sock *sk)
+{
+       const struct cred *cred;
+
+       spin_lock(&sk->sk_peer_lock);
+       cred = get_cred(sk->sk_peer_cred);
+       spin_unlock(&sk->sk_peer_lock);
+
+       return cred;
+}
 
 static void cred_to_ucred(struct pid *pid, const struct cred *cred,
                          struct ucred *ucred)
@@ -1552,7 +1562,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                struct ucred peercred;
                if (len > sizeof(peercred))
                        len = sizeof(peercred);
+
+               spin_lock(&sk->sk_peer_lock);
                cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
+               spin_unlock(&sk->sk_peer_lock);
+
                if (copy_to_user(optval, &peercred, len))
                        return -EFAULT;
                goto lenout;
@@ -1560,20 +1574,23 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 
        case SO_PEERGROUPS:
        {
+               const struct cred *cred;
                int ret, n;
 
-               if (!sk->sk_peer_cred)
+               cred = sk_get_peer_cred(sk);
+               if (!cred)
                        return -ENODATA;
 
-               n = sk->sk_peer_cred->group_info->ngroups;
+               n = cred->group_info->ngroups;
                if (len < n * sizeof(gid_t)) {
                        len = n * sizeof(gid_t);
+                       put_cred(cred);
                        return put_user(len, optlen) ? -EFAULT : -ERANGE;
                }
                len = n * sizeof(gid_t);
 
-               ret = groups_to_user((gid_t __user *)optval,
-                                    sk->sk_peer_cred->group_info);
+               ret = groups_to_user((gid_t __user *)optval, cred->group_info);
+               put_cred(cred);
                if (ret)
                        return ret;
                goto lenout;
@@ -1935,9 +1952,10 @@ static void __sk_destruct(struct rcu_head *head)
                sk->sk_frag.page = NULL;
        }
 
-       if (sk->sk_peer_cred)
-               put_cred(sk->sk_peer_cred);
+       /* We do not need to acquire sk->sk_peer_lock, we are the last user. */
+       put_cred(sk->sk_peer_cred);
        put_pid(sk->sk_peer_pid);
+
        if (likely(sk->sk_net_refcnt))
                put_net(sock_net(sk));
        sk_prot_free(sk->sk_prot_creator, sk);
@@ -3145,6 +3163,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
        sk->sk_peer_pid         =       NULL;
        sk->sk_peer_cred        =       NULL;
+       spin_lock_init(&sk->sk_peer_lock);
+
        sk->sk_write_pending    =       0;
        sk->sk_rcvlowat         =       1;
        sk->sk_rcvtimeo         =       MAX_SCHEDULE_TIMEOUT;
@@ -3179,17 +3199,15 @@ EXPORT_SYMBOL(sock_init_data);
 
 void lock_sock_nested(struct sock *sk, int subclass)
 {
+       /* The sk_lock has mutex_lock() semantics here. */
+       mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_lock.owned)
                __lock_sock(sk);
        sk->sk_lock.owned = 1;
-       spin_unlock(&sk->sk_lock.slock);
-       /*
-        * The sk_lock has mutex_lock() semantics here:
-        */
-       mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
-       local_bh_enable();
+       spin_unlock_bh(&sk->sk_lock.slock);
 }
 EXPORT_SYMBOL(lock_sock_nested);
 
@@ -3212,42 +3230,37 @@ void release_sock(struct sock *sk)
 }
 EXPORT_SYMBOL(release_sock);
 
-/**
- * lock_sock_fast - fast version of lock_sock
- * @sk: socket
- *
- * This version should be used for very small section, where process wont block
- * return false if fast path is taken:
- *
- *   sk_lock.slock locked, owned = 0, BH disabled
- *
- * return true if slow path is taken:
- *
- *   sk_lock.slock unlocked, owned = 1, BH enabled
- */
-bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
 {
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
 
-       if (!sk->sk_lock.owned)
+       if (!sk->sk_lock.owned) {
                /*
-                * Note : We must disable BH
+                * Fast path return with bottom halves disabled and
+                * sock::sk_lock.slock held.
+                *
+                * The 'mutex' is not contended and holding
+                * sock::sk_lock.slock prevents all other lockers from
+                * proceeding, so the corresponding unlock_sock_fast() can
+                * avoid the slow path of release_sock() completely and
+                * just release slock.
+                *
+                * From a semantic POV this is equivalent to 'acquiring'
+                * the 'mutex', hence the corresponding lockdep
+                * mutex_release() has to happen in the fast path of
+                * unlock_sock_fast().
                 */
                return false;
+       }
 
        __lock_sock(sk);
        sk->sk_lock.owned = 1;
-       spin_unlock(&sk->sk_lock.slock);
-       /*
-        * The sk_lock has mutex_lock() semantics here:
-        */
-       mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
        __acquire(&sk->sk_lock.slock);
-       local_bh_enable();
+       spin_unlock_bh(&sk->sk_lock.slock);
        return true;
 }
-EXPORT_SYMBOL(lock_sock_fast);
+EXPORT_SYMBOL(__lock_sock_fast);
 
 int sock_gettstamp(struct socket *sock, void __user *userstamp,
                   bool timeval, bool time32)
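The sock.c hunks protect sk_peer_pid/sk_peer_cred with a new sk_peer_lock and, for SO_PEERGROUPS, take a reference on the cred under that lock so it can be used and dropped safely after the lock is released. A userspace sketch of that "grab a reference under the lock, use the snapshot outside" shape; all names and types here are invented:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct cred {
            _Atomic int refcount;
            /* uid, groups, ... */
    };

    static struct cred *get_cred(struct cred *c)
    {
            if (c)
                    atomic_fetch_add(&c->refcount, 1);
            return c;
    }

    static void put_cred(struct cred *c)
    {
            if (c && atomic_fetch_sub(&c->refcount, 1) == 1)
                    free(c);            /* last reference dropped */
    }

    struct peer {
            pthread_mutex_t lock;       /* plays the role of sk->sk_peer_lock */
            struct cred *cred;          /* may be replaced by another thread */
    };

    /* Snapshot the current cred with its own reference; the caller may read
     * it at leisure and must drop it with put_cred() when done. */
    static struct cred *get_peer_cred(struct peer *p)
    {
            struct cred *c;

            pthread_mutex_lock(&p->lock);
            c = get_cred(p->cred);
            pthread_mutex_unlock(&p->lock);
            return c;
    }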
index c5c74a34d139d99fb58dc5cebe4d6a8d72a214ed..91e7a2202697143fddd1e81e2c528ea9bbc4ae4e 100644 (file)
@@ -94,6 +94,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
                newdp->dccps_role           = DCCP_ROLE_SERVER;
                newdp->dccps_hc_rx_ackvec   = NULL;
                newdp->dccps_service_list   = NULL;
+               newdp->dccps_hc_rx_ccid     = NULL;
+               newdp->dccps_hc_tx_ccid     = NULL;
                newdp->dccps_service        = dreq->dreq_service;
                newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
                newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
index 54828553975236d8d1d04582220ba701a03b9032..d8ee15f1c7a9ff14b8a172ecf9a5a614b461cdda 100644 (file)
@@ -101,8 +101,6 @@ config NET_DSA_TAG_RTL4_A
 
 config NET_DSA_TAG_OCELOT
        tristate "Tag driver for Ocelot family of switches, using NPI port"
-       depends on MSCC_OCELOT_SWITCH_LIB || \
-                  (MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
        select PACKING
        help
          Say Y or M if you want to enable NPI tagging for the Ocelot switches
@@ -114,8 +112,6 @@ config NET_DSA_TAG_OCELOT
 
 config NET_DSA_TAG_OCELOT_8021Q
        tristate "Tag driver for Ocelot family of switches, using VLAN"
-       depends on MSCC_OCELOT_SWITCH_LIB || \
-                 (MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
        help
          Say Y or M if you want to enable support for tagging frames with a
          custom VLAN-based header. Frames that require timestamping, such as
@@ -138,7 +134,6 @@ config NET_DSA_TAG_LAN9303
 
 config NET_DSA_TAG_SJA1105
        tristate "Tag driver for NXP SJA1105 switches"
-       depends on NET_DSA_SJA1105 || !NET_DSA_SJA1105
        select PACKING
        help
          Say Y or M if you want to enable support for tagging frames with the
index 1dc45e40f961c319bf3954642b997b61a22c1265..41f36ad8b0ec674f39ce4904763dc618ebe233d1 100644 (file)
@@ -345,6 +345,11 @@ bool dsa_schedule_work(struct work_struct *work)
        return queue_work(dsa_owq, work);
 }
 
+void dsa_flush_workqueue(void)
+{
+       flush_workqueue(dsa_owq);
+}
+
 int dsa_devlink_param_get(struct devlink *dl, u32 id,
                          struct devlink_param_gset_ctx *ctx)
 {
index 1b2b25d7bd025c13102c6629dbfe3c131ba50db8..da18094b5a049b0e5e77c74c881cc15aa2ffe206 100644 (file)
@@ -170,7 +170,7 @@ void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num)
        /* Check if the bridge is still in use, otherwise it is time
         * to clean it up so we can reuse this bridge_num later.
         */
-       if (!dsa_bridge_num_find(bridge_dev))
+       if (dsa_bridge_num_find(bridge_dev) < 0)
                clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
 }
 
@@ -429,6 +429,7 @@ static int dsa_port_setup(struct dsa_port *dp)
 {
        struct devlink_port *dlp = &dp->devlink_port;
        bool dsa_port_link_registered = false;
+       struct dsa_switch *ds = dp->ds;
        bool dsa_port_enabled = false;
        int err = 0;
 
@@ -438,6 +439,12 @@ static int dsa_port_setup(struct dsa_port *dp)
        INIT_LIST_HEAD(&dp->fdbs);
        INIT_LIST_HEAD(&dp->mdbs);
 
+       if (ds->ops->port_setup) {
+               err = ds->ops->port_setup(ds, dp->index);
+               if (err)
+                       return err;
+       }
+
        switch (dp->type) {
        case DSA_PORT_TYPE_UNUSED:
                dsa_port_disable(dp);
@@ -480,8 +487,11 @@ static int dsa_port_setup(struct dsa_port *dp)
                dsa_port_disable(dp);
        if (err && dsa_port_link_registered)
                dsa_port_link_unregister_of(dp);
-       if (err)
+       if (err) {
+               if (ds->ops->port_teardown)
+                       ds->ops->port_teardown(ds, dp->index);
                return err;
+       }
 
        dp->setup = true;
 
@@ -533,11 +543,15 @@ static int dsa_port_devlink_setup(struct dsa_port *dp)
 static void dsa_port_teardown(struct dsa_port *dp)
 {
        struct devlink_port *dlp = &dp->devlink_port;
+       struct dsa_switch *ds = dp->ds;
        struct dsa_mac_addr *a, *tmp;
 
        if (!dp->setup)
                return;
 
+       if (ds->ops->port_teardown)
+               ds->ops->port_teardown(ds, dp->index);
+
        devlink_port_type_clear(dlp);
 
        switch (dp->type) {
@@ -581,6 +595,36 @@ static void dsa_port_devlink_teardown(struct dsa_port *dp)
        dp->devlink_port_setup = false;
 }
 
+/* Destroy the current devlink port, and create a new one which has the UNUSED
+ * flavour. At this point, any call to ds->ops->port_setup has already been
+ * balanced out by a call to ds->ops->port_teardown, so we know that any
+ * devlink port regions the driver had are now unregistered. We then call its
+ * ds->ops->port_setup again, in order for the driver to re-create them on the
+ * new devlink port.
+ */
+static int dsa_port_reinit_as_unused(struct dsa_port *dp)
+{
+       struct dsa_switch *ds = dp->ds;
+       int err;
+
+       dsa_port_devlink_teardown(dp);
+       dp->type = DSA_PORT_TYPE_UNUSED;
+       err = dsa_port_devlink_setup(dp);
+       if (err)
+               return err;
+
+       if (ds->ops->port_setup) {
+               /* On error, leave the devlink port registered,
+                * dsa_switch_teardown will clean it up later.
+                */
+               err = ds->ops->port_setup(ds, dp->index);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
 static int dsa_devlink_info_get(struct devlink *dl,
                                struct devlink_info_req *req,
                                struct netlink_ext_ack *extack)
@@ -767,7 +811,9 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
                if (!dsa_is_cpu_port(ds, port))
                        continue;
 
+               rtnl_lock();
                err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
+               rtnl_unlock();
                if (err) {
                        dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
                                tag_ops->name, ERR_PTR(err));
@@ -836,7 +882,7 @@ static int dsa_switch_setup(struct dsa_switch *ds)
        devlink_params_publish(ds->devlink);
 
        if (!ds->slave_mii_bus && ds->ops->phy_read) {
-               ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+               ds->slave_mii_bus = mdiobus_alloc();
                if (!ds->slave_mii_bus) {
                        err = -ENOMEM;
                        goto teardown;
@@ -846,13 +892,16 @@ static int dsa_switch_setup(struct dsa_switch *ds)
 
                err = mdiobus_register(ds->slave_mii_bus);
                if (err < 0)
-                       goto teardown;
+                       goto free_slave_mii_bus;
        }
 
        ds->setup = true;
 
        return 0;
 
+free_slave_mii_bus:
+       if (ds->slave_mii_bus && ds->ops->phy_read)
+               mdiobus_free(ds->slave_mii_bus);
 teardown:
        if (ds->ops->teardown)
                ds->ops->teardown(ds);
@@ -877,8 +926,11 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
        if (!ds->setup)
                return;
 
-       if (ds->slave_mii_bus && ds->ops->phy_read)
+       if (ds->slave_mii_bus && ds->ops->phy_read) {
                mdiobus_unregister(ds->slave_mii_bus);
+               mdiobus_free(ds->slave_mii_bus);
+               ds->slave_mii_bus = NULL;
+       }
 
        dsa_switch_unregister_notifier(ds);
 
@@ -897,6 +949,33 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
        ds->setup = false;
 }
 
+/* First tear down the non-shared, then the shared ports. This ensures that
+ * all work items scheduled by our switchdev handlers for user ports have
+ * completed before we destroy the refcounting kept on the shared ports.
+ */
+static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
+{
+       struct dsa_port *dp;
+
+       list_for_each_entry(dp, &dst->ports, list)
+               if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
+                       dsa_port_teardown(dp);
+
+       dsa_flush_workqueue();
+
+       list_for_each_entry(dp, &dst->ports, list)
+               if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
+                       dsa_port_teardown(dp);
+}
+
+static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
+{
+       struct dsa_port *dp;
+
+       list_for_each_entry(dp, &dst->ports, list)
+               dsa_switch_teardown(dp->ds);
+}
+
 static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
 {
        struct dsa_port *dp;
@@ -911,38 +990,22 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_port_setup(dp);
                if (err) {
-                       dsa_port_devlink_teardown(dp);
-                       dp->type = DSA_PORT_TYPE_UNUSED;
-                       err = dsa_port_devlink_setup(dp);
+                       err = dsa_port_reinit_as_unused(dp);
                        if (err)
                                goto teardown;
-                       continue;
                }
        }
 
        return 0;
 
 teardown:
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_port_teardown(dp);
+       dsa_tree_teardown_ports(dst);
 
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_switch_teardown(dp->ds);
+       dsa_tree_teardown_switches(dst);
 
        return err;
 }
 
-static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
-{
-       struct dsa_port *dp;
-
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_port_teardown(dp);
-
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_switch_teardown(dp->ds);
-}
-
 static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
 {
        struct dsa_port *dp;
@@ -1034,6 +1097,7 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
 teardown_master:
        dsa_tree_teardown_master(dst);
 teardown_switches:
+       dsa_tree_teardown_ports(dst);
        dsa_tree_teardown_switches(dst);
 teardown_cpu_ports:
        dsa_tree_teardown_cpu_ports(dst);
@@ -1052,6 +1116,8 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
 
        dsa_tree_teardown_master(dst);
 
+       dsa_tree_teardown_ports(dst);
+
        dsa_tree_teardown_switches(dst);
 
        dsa_tree_teardown_cpu_ports(dst);
@@ -1546,3 +1612,53 @@ void dsa_unregister_switch(struct dsa_switch *ds)
        mutex_unlock(&dsa2_mutex);
 }
 EXPORT_SYMBOL_GPL(dsa_unregister_switch);
+
+/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
+ * blocking that operation from completion, due to the dev_hold taken inside
+ * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
+ * the DSA master, so that the system can reboot successfully.
+ */
+void dsa_switch_shutdown(struct dsa_switch *ds)
+{
+       struct net_device *master, *slave_dev;
+       LIST_HEAD(unregister_list);
+       struct dsa_port *dp;
+
+       mutex_lock(&dsa2_mutex);
+       rtnl_lock();
+
+       list_for_each_entry(dp, &ds->dst->ports, list) {
+               if (dp->ds != ds)
+                       continue;
+
+               if (!dsa_port_is_user(dp))
+                       continue;
+
+               master = dp->cpu_dp->master;
+               slave_dev = dp->slave;
+
+               netdev_upper_dev_unlink(master, slave_dev);
+               /* Just unlinking ourselves as uppers of the master is not
+                * sufficient. When the master net device unregisters, that will
+                * also call dev_close, which we will catch as NETDEV_GOING_DOWN
+                * and trigger a dev_close on our own devices (dsa_slave_close).
+                * In turn, that will call dev_mc_unsync on the master's net
+                * device. If the master is also a DSA switch port, this will
+                * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
+                * its own master. Lockdep will complain about the fact that
+                * all cascaded masters have the same dsa_master_addr_list_lock_key,
+                * which it normally would not do if the cascaded masters were
+                * in a proper upper/lower relationship, which we've just
+                * destroyed.
+                * To suppress the lockdep warnings, let's actually unregister
+                * the DSA slave interfaces too, to avoid the nonsensical
+                * multicast address list synchronization on shutdown.
+                */
+               unregister_netdevice_queue(slave_dev, &unregister_list);
+       }
+       unregister_netdevice_many(&unregister_list);
+
+       rtnl_unlock();
+       mutex_unlock(&dsa2_mutex);
+}
+EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
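Several of the dsa2.c hunks extend the usual goto-based error unwinding: each allocation or registration that succeeds gets a matching label that undoes it, and a failure jumps to the label that rolls back only what has been done so far, in reverse order (the new free_slave_mii_bus label around mdiobus_alloc()/mdiobus_register() is one instance). A generic sketch of that shape; the helpers are stand-ins, not DSA or MDIO APIs:

    #include <stdlib.h>

    static int register_bus(void *bus)      /* stand-in for mdiobus_register() */
    {
            return bus ? 0 : -1;
    }

    static int setup_switch(void **out_regs, void **out_bus)
    {
            void *regs, *bus;
            int err;

            regs = malloc(64);              /* first resource */
            if (!regs)
                    return -1;

            bus = malloc(64);               /* stand-in for mdiobus_alloc() */
            if (!bus) {
                    err = -1;
                    goto free_regs;
            }

            err = register_bus(bus);
            if (err)
                    goto free_bus;

            *out_regs = regs;               /* success: caller now owns both */
            *out_bus = bus;
            return 0;

    free_bus:                               /* unwind in reverse order of setup */
            free(bus);
    free_regs:
            free(regs);
            return err;
    }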
index 33ab7d7af9eb4dc4e18ab99988459da1bae36ec7..a5c9bc7b66c6eb37c7552b55a9d56302a71266d2 100644 (file)
@@ -170,6 +170,7 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
 const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
 
 bool dsa_schedule_work(struct work_struct *work);
+void dsa_flush_workqueue(void);
 const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
 
 static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
index 662ff531d4e29874f9d64e6b91d96a1e7e0e959c..a2bf2d8ac65b7db8bc27ebbe1114f94a8da94149 100644 (file)
@@ -1854,13 +1854,11 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
                 * use the switch internal MDIO bus instead
                 */
                ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
-               if (ret) {
-                       netdev_err(slave_dev,
-                                  "failed to connect to port %d: %d\n",
-                                  dp->index, ret);
-                       phylink_destroy(dp->pl);
-                       return ret;
-               }
+       }
+       if (ret) {
+               netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
+                          ERR_PTR(ret));
+               phylink_destroy(dp->pl);
        }
 
        return ret;
index 1c797ec8e2c2b805a890df560c80b4f0eb45d4da..6466d0539af9ffea73dda13dfc1e656fe607cf4d 100644 (file)
@@ -168,7 +168,7 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
                if (extack._msg)
                        dev_err(ds->dev, "port %d: %s\n", info->port,
                                extack._msg);
-               if (err && err != EOPNOTSUPP)
+               if (err && err != -EOPNOTSUPP)
                        return err;
        }
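The dsa_switch_bridge_leave() fix is a reminder that kernel call sites return negative errno values, so an "ignore EOPNOTSUPP" check has to compare against -EOPNOTSUPP; the positive constant can never match. A tiny standalone illustration of the corrected check:

    #include <errno.h>

    /* Pass real errors through, but treat "operation not supported" as
     * success. err is expected to be 0 or a negative errno, as in the kernel. */
    static int ignore_not_supported(int err)
    {
            if (err && err != -EOPNOTSUPP)
                    return err;
            return 0;
    }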
 
index 77d0ce89ab77abb174aae90c5094f7a1a3ec5ae2..b3da4b2ea11cf693e391f5beb28f2586593d188d 100644 (file)
@@ -45,6 +45,7 @@
  *   6    6       2        2      4    2       N
  */
 
+#include <linux/dsa/mv88e6xxx.h>
 #include <linux/etherdevice.h>
 #include <linux/list.h>
 #include <linux/slab.h>
@@ -129,12 +130,9 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
        u8 tag_dev, tag_port;
        enum dsa_cmd cmd;
        u8 *dsa_header;
-       u16 pvid = 0;
-       int err;
 
        if (skb->offload_fwd_mark) {
                struct dsa_switch_tree *dst = dp->ds->dst;
-               struct net_device *br = dp->bridge_dev;
 
                cmd = DSA_CMD_FORWARD;
 
@@ -144,19 +142,6 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
                 */
                tag_dev = dst->last_switch + 1 + dp->bridge_num;
                tag_port = 0;
-
-               /* If we are offloading forwarding for a VLAN-unaware bridge,
-                * inject packets to hardware using the bridge's pvid, since
-                * that's where the packets ingressed from.
-                */
-               if (!br_vlan_enabled(br)) {
-                       /* Safe because __dev_queue_xmit() runs under
-                        * rcu_read_lock_bh()
-                        */
-                       err = br_vlan_get_pvid_rcu(br, &pvid);
-                       if (err)
-                               return NULL;
-               }
        } else {
                cmd = DSA_CMD_FROM_CPU;
                tag_dev = dp->ds->index;
@@ -180,16 +165,21 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
                        dsa_header[2] &= ~0x10;
                }
        } else {
+               struct net_device *br = dp->bridge_dev;
+               u16 vid;
+
+               vid = br ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;
+
                skb_push(skb, DSA_HLEN + extra);
                dsa_alloc_etype_header(skb, DSA_HLEN + extra);
 
-               /* Construct untagged DSA tag. */
+               /* Construct DSA header from untagged frame. */
                dsa_header = dsa_etype_header_pos_tx(skb) + extra;
 
                dsa_header[0] = (cmd << 6) | tag_dev;
                dsa_header[1] = tag_port << 3;
-               dsa_header[2] = pvid >> 8;
-               dsa_header[3] = pvid & 0xff;
+               dsa_header[2] = vid >> 8;
+               dsa_header[3] = vid & 0xff;
        }
 
        return skb;
@@ -210,7 +200,7 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
        cmd = dsa_header[0] >> 6;
        switch (cmd) {
        case DSA_CMD_FORWARD:
-               trunk = !!(dsa_header[1] & 7);
+               trunk = !!(dsa_header[1] & 4);
                break;
 
        case DSA_CMD_TO_CPU:
index d37ab98e7fe18f4f1258c7417f7742c6edde556f..605b51ca692105628a1b5108d7d596bf2b8c223b 100644 (file)
@@ -1,8 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2019 NXP Semiconductors
+/* Copyright 2019 NXP
  */
 #include <linux/dsa/ocelot.h>
-#include <soc/mscc/ocelot.h>
 #include "dsa_priv.h"
 
 static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
index 3038a257ba05c778316e03d54b8bae9ec205cd64..3412051981d7bd99c6e58e2aeb0a82d62439bbe9 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020-2021 NXP Semiconductors
+/* Copyright 2020-2021 NXP
  *
  * An implementation of the software-defined tag_8021q.c tagger format, which
  * also preserves full functionality under a vlan_filtering bridge. It does
@@ -9,10 +9,32 @@
  *   that on egress
  */
 #include <linux/dsa/8021q.h>
-#include <soc/mscc/ocelot.h>
-#include <soc/mscc/ocelot_ptp.h>
+#include <linux/dsa/ocelot.h>
 #include "dsa_priv.h"
 
+static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
+                                        struct sk_buff *skb)
+{
+       struct felix_deferred_xmit_work *xmit_work;
+       struct felix_port *felix_port = dp->priv;
+
+       xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
+       if (!xmit_work)
+               return NULL;
+
+       /* Calls felix_port_deferred_xmit in felix.c */
+       kthread_init_work(&xmit_work->work, felix_port->xmit_work_fn);
+       /* Increase refcount so the kfree_skb in dsa_slave_xmit
+        * won't really free the packet.
+        */
+       xmit_work->dp = dp;
+       xmit_work->skb = skb_get(skb);
+
+       kthread_queue_work(felix_port->xmit_worker, &xmit_work->work);
+
+       return NULL;
+}
+
 static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
                                   struct net_device *netdev)
 {
@@ -20,18 +42,10 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
        u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
        u16 queue_mapping = skb_get_queue_mapping(skb);
        u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
-       struct ocelot *ocelot = dp->ds->priv;
-       int port = dp->index;
-       u32 rew_op = 0;
+       struct ethhdr *hdr = eth_hdr(skb);
 
-       rew_op = ocelot_ptp_rew_op(skb);
-       if (rew_op) {
-               if (!ocelot_can_inject(ocelot, 0))
-                       return NULL;
-
-               ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
-               return NULL;
-       }
+       if (ocelot_ptp_rew_op(skb) || is_link_local_ether_addr(hdr->h_dest))
+               return ocelot_defer_xmit(dp, skb);
 
        return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
                              ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
index c054f48541c8d621ca018cb94016193789e66c37..2edede9ddac93b388508cd1ccee6f4c6103cee27 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/if_vlan.h>
 #include <linux/dsa/sja1105.h>
 #include <linux/dsa/8021q.h>
+#include <linux/skbuff.h>
 #include <linux/packing.h>
 #include "dsa_priv.h"
 
 #define SJA1110_TX_TRAILER_LEN                 4
 #define SJA1110_MAX_PADDING_LEN                        15
 
+enum sja1110_meta_tstamp {
+       SJA1110_META_TSTAMP_TX = 0,
+       SJA1110_META_TSTAMP_RX = 1,
+};
+
 /* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
 static inline bool sja1105_is_link_local(const struct sk_buff *skb)
 {
@@ -520,6 +526,43 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
                                              is_meta);
 }
 
+static void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
+                                       u8 ts_id, enum sja1110_meta_tstamp dir,
+                                       u64 tstamp)
+{
+       struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+       struct dsa_port *dp = dsa_to_port(ds, port);
+       struct skb_shared_hwtstamps shwt = {0};
+       struct sja1105_port *sp = dp->priv;
+
+       if (!dsa_port_is_sja1105(dp))
+               return;
+
+       /* We don't care about RX timestamps on the CPU port */
+       if (dir == SJA1110_META_TSTAMP_RX)
+               return;
+
+       spin_lock(&sp->data->skb_txtstamp_queue.lock);
+
+       skb_queue_walk_safe(&sp->data->skb_txtstamp_queue, skb, skb_tmp) {
+               if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
+                       continue;
+
+               __skb_unlink(skb, &sp->data->skb_txtstamp_queue);
+               skb_match = skb;
+
+               break;
+       }
+
+       spin_unlock(&sp->data->skb_txtstamp_queue.lock);
+
+       if (WARN_ON(!skb_match))
+               return;
+
+       shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
+       skb_complete_tx_timestamp(skb_match, &shwt);
+}
+
 static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
 {
        u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
index b42c429cebbe805beabd41233b24203fb82fc0cf..3364cb9c67e018fea2b2e370046de5252581b996 100644 (file)
@@ -1661,7 +1661,7 @@ EXPORT_SYMBOL_GPL(fib_nexthop_info);
 
 #if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
 int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
-                   int nh_weight, u8 rt_family)
+                   int nh_weight, u8 rt_family, u32 nh_tclassid)
 {
        const struct net_device *dev = nhc->nhc_dev;
        struct rtnexthop *rtnh;
@@ -1679,6 +1679,9 @@ int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
 
        rtnh->rtnh_flags = flags;
 
+       if (nh_tclassid && nla_put_u32(skb, RTA_FLOW, nh_tclassid))
+               goto nla_put_failure;
+
        /* length of rtnetlink header + attributes */
        rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
 
@@ -1706,14 +1709,13 @@ static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
        }
 
        for_nexthops(fi) {
-               if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
-                                   AF_INET) < 0)
-                       goto nla_put_failure;
+               u32 nh_tclassid = 0;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-               if (nh->nh_tclassid &&
-                   nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
-                       goto nla_put_failure;
+               nh_tclassid = nh->nh_tclassid;
 #endif
+               if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
+                                   AF_INET, nh_tclassid) < 0)
+                       goto nla_put_failure;
        } endfor_nexthops(fi);
 
 mp_end:
index 8b30cadff708f96f4acf4a459c8e9c0397f8d305..b7e277d8a84d224cb9c034321e688d765d01c07f 100644 (file)
@@ -1054,14 +1054,19 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
        iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr), &_iio);
        if (!ext_hdr || !iio)
                goto send_mal_query;
-       if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr))
+       if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr) ||
+           ntohs(iio->extobj_hdr.length) > sizeof(_iio))
                goto send_mal_query;
        ident_len = ntohs(iio->extobj_hdr.length) - sizeof(iio->extobj_hdr);
+       iio = skb_header_pointer(skb, sizeof(_ext_hdr),
+                                sizeof(iio->extobj_hdr) + ident_len, &_iio);
+       if (!iio)
+               goto send_mal_query;
+
        status = 0;
        dev = NULL;
        switch (iio->extobj_hdr.class_type) {
        case ICMP_EXT_ECHO_CTYPE_NAME:
-               iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(_iio), &_iio);
                if (ident_len >= IFNAMSIZ)
                        goto send_mal_query;
                memset(buff, 0, sizeof(buff));
@@ -1069,30 +1074,24 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
                dev = dev_get_by_name(net, buff);
                break;
        case ICMP_EXT_ECHO_CTYPE_INDEX:
-               iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr) +
-                                        sizeof(iio->ident.ifindex), &_iio);
                if (ident_len != sizeof(iio->ident.ifindex))
                        goto send_mal_query;
                dev = dev_get_by_index(net, ntohl(iio->ident.ifindex));
                break;
        case ICMP_EXT_ECHO_CTYPE_ADDR:
-               if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
+               if (ident_len < sizeof(iio->ident.addr.ctype3_hdr) ||
+                   ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
                                 iio->ident.addr.ctype3_hdr.addrlen)
                        goto send_mal_query;
                switch (ntohs(iio->ident.addr.ctype3_hdr.afi)) {
                case ICMP_AFI_IP:
-                       iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr) +
-                                                sizeof(struct in_addr), &_iio);
-                       if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
-                                        sizeof(struct in_addr))
+                       if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in_addr))
                                goto send_mal_query;
                        dev = ip_dev_find(net, iio->ident.addr.ip_addr.ipv4_addr);
                        break;
 #if IS_ENABLED(CONFIG_IPV6)
                case ICMP_AFI_IP6:
-                       iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(_iio), &_iio);
-                       if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
-                                        sizeof(struct in6_addr))
+                       if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in6_addr))
                                goto send_mal_query;
                        dev = ipv6_stub->ipv6_dev_find(net, &iio->ident.addr.ip_addr.ipv6_addr, dev);
                        dev_hold(dev);
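The icmp_build_probe() changes re-read the extension object once, with a length validated against both the object header and the local buffer, instead of re-fetching it per address family with assorted sizes. The underlying pattern, bounding an attacker-controlled length before copying a variable-length identifier, in standalone C; the structure layout is invented for the sketch and byte order is ignored:

    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>

    struct ext_obj_hdr {            /* invented layout, loosely mirroring the ICMP ext header */
            uint16_t length;        /* claimed total object length, header included */
            uint8_t  class_num;
            uint8_t  class_type;
    };

    /* Copy the variable-length identifier only after the claimed length has
     * been checked against the header size, the received packet, and the
     * caller's output buffer. */
    static bool copy_ident(const uint8_t *pkt, size_t pktlen,
                           uint8_t *out, size_t outlen, size_t *ident_len)
    {
            struct ext_obj_hdr hdr;

            if (pktlen < sizeof(hdr))
                    return false;
            memcpy(&hdr, pkt, sizeof(hdr));

            if (hdr.length <= sizeof(hdr) ||        /* must carry some identifier */
                hdr.length > pktlen ||              /* must fit in what we received */
                hdr.length - sizeof(hdr) > outlen)  /* must fit in the caller's buffer */
                    return false;

            *ident_len = hdr.length - sizeof(hdr);
            memcpy(out, pkt + sizeof(hdr), *ident_len);
            return true;
    }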
index 80aeaf9e6e16e004011301c471e4dbf17a89e30d..bfb522e513461a92cbd19c0c2c14b2dda33bb4f7 100644 (file)
@@ -242,8 +242,10 @@ static inline int compute_score(struct sock *sk, struct net *net,
 
                if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
                        return -1;
+               score =  sk->sk_bound_dev_if ? 2 : 1;
 
-               score = sk->sk_family == PF_INET ? 2 : 1;
+               if (sk->sk_family == PF_INET)
+                       score++;
                if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                        score++;
        }
index b88e0f36cd05ae067be12599e00a896c7e93dc9c..8265c676570533cf3f752562dbdd54bfb0508688 100644 (file)
@@ -42,7 +42,7 @@ iptable_raw_hook(void *priv, struct sk_buff *skb,
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
 
-static int __net_init iptable_raw_table_init(struct net *net)
+static int iptable_raw_table_init(struct net *net)
 {
        struct ipt_replace *repl;
        const struct xt_table *table = &packet_raw;
index 613432a36f0a701afc993d23053bfc03c4a8ed0a..e61ea428ea1872077ec6c2917b8c511518a692d8 100644 (file)
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
 
-static unsigned int defrag4_pernet_id __read_mostly;
 static DEFINE_MUTEX(defrag4_mutex);
 
-struct defrag4_pernet {
-       unsigned int users;
-};
-
 static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
                                   u_int32_t user)
 {
@@ -111,19 +106,15 @@ static const struct nf_hook_ops ipv4_defrag_ops[] = {
 
 static void __net_exit defrag4_net_exit(struct net *net)
 {
-       struct defrag4_pernet *nf_defrag = net_generic(net, defrag4_pernet_id);
-
-       if (nf_defrag->users) {
+       if (net->nf.defrag_ipv4_users) {
                nf_unregister_net_hooks(net, ipv4_defrag_ops,
                                        ARRAY_SIZE(ipv4_defrag_ops));
-               nf_defrag->users = 0;
+               net->nf.defrag_ipv4_users = 0;
        }
 }
 
 static struct pernet_operations defrag4_net_ops = {
        .exit = defrag4_net_exit,
-       .id   = &defrag4_pernet_id,
-       .size = sizeof(struct defrag4_pernet),
 };
 
 static int __init nf_defrag_init(void)
@@ -138,24 +129,23 @@ static void __exit nf_defrag_fini(void)
 
 int nf_defrag_ipv4_enable(struct net *net)
 {
-       struct defrag4_pernet *nf_defrag = net_generic(net, defrag4_pernet_id);
        int err = 0;
 
        mutex_lock(&defrag4_mutex);
-       if (nf_defrag->users == UINT_MAX) {
+       if (net->nf.defrag_ipv4_users == UINT_MAX) {
                err = -EOVERFLOW;
                goto out_unlock;
        }
 
-       if (nf_defrag->users) {
-               nf_defrag->users++;
+       if (net->nf.defrag_ipv4_users) {
+               net->nf.defrag_ipv4_users++;
                goto out_unlock;
        }
 
        err = nf_register_net_hooks(net, ipv4_defrag_ops,
                                    ARRAY_SIZE(ipv4_defrag_ops));
        if (err == 0)
-               nf_defrag->users = 1;
+               net->nf.defrag_ipv4_users = 1;
 
  out_unlock:
        mutex_unlock(&defrag4_mutex);
@@ -165,12 +155,10 @@ EXPORT_SYMBOL_GPL(nf_defrag_ipv4_enable);
 
 void nf_defrag_ipv4_disable(struct net *net)
 {
-       struct defrag4_pernet *nf_defrag = net_generic(net, defrag4_pernet_id);
-
        mutex_lock(&defrag4_mutex);
-       if (nf_defrag->users) {
-               nf_defrag->users--;
-               if (nf_defrag->users == 0)
+       if (net->nf.defrag_ipv4_users) {
+               net->nf.defrag_ipv4_users--;
+               if (net->nf.defrag_ipv4_users == 0)
                        nf_unregister_net_hooks(net, ipv4_defrag_ops,
                                                ARRAY_SIZE(ipv4_defrag_ops));
        }
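
The defrag hunks above drop the per-netns generic allocation in favour of a plain users counter in struct net, still serialized by a mutex so that the first enable registers the hooks and the last disable removes them. A small userspace sketch of that enable/disable counting pattern, with pthread primitives standing in for the kernel mutex; register_hooks()/unregister_hooks() are stand-ins, not kernel functions:

#include <limits.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int users;

static int register_hooks(void) { return 0; }   /* stand-in for nf_register_net_hooks() */
static void unregister_hooks(void) { }          /* stand-in for nf_unregister_net_hooks() */

int feature_enable(void)
{
	int err = 0;

	pthread_mutex_lock(&lock);
	if (users == UINT_MAX) {
		err = -1;                       /* counter would overflow */
	} else if (users) {
		users++;                        /* hooks already registered */
	} else {
		err = register_hooks();
		if (err == 0)
			users = 1;
	}
	pthread_mutex_unlock(&lock);
	return err;
}

void feature_disable(void)
{
	pthread_mutex_lock(&lock);
	if (users && --users == 0)              /* last user tears down */
		unregister_hooks();
	pthread_mutex_unlock(&lock);
}
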
index 75ca4b6e484f49f156a91df89d7e0e67b0c142eb..9e8100728d464dd4b51e407d0b973ec878ada8d5 100644 (file)
@@ -1982,6 +1982,8 @@ static int replace_nexthop_grp(struct net *net, struct nexthop *old,
        rcu_assign_pointer(old->nh_grp, newg);
 
        if (newg->resilient) {
+               /* Make sure concurrent readers are not using 'oldg' anymore. */
+               synchronize_net();
                rcu_assign_pointer(oldg->res_table, tmp_table);
                rcu_assign_pointer(oldg->spare->res_table, tmp_table);
        }
@@ -3565,6 +3567,7 @@ static struct notifier_block nh_netdev_notifier = {
 };
 
 static int nexthops_dump(struct net *net, struct notifier_block *nb,
+                        enum nexthop_event_type event_type,
                         struct netlink_ext_ack *extack)
 {
        struct rb_root *root = &net->nexthop.rb_root;
@@ -3575,8 +3578,7 @@ static int nexthops_dump(struct net *net, struct notifier_block *nb,
                struct nexthop *nh;
 
                nh = rb_entry(node, struct nexthop, rb_node);
-               err = call_nexthop_notifier(nb, net, NEXTHOP_EVENT_REPLACE, nh,
-                                           extack);
+               err = call_nexthop_notifier(nb, net, event_type, nh, extack);
                if (err)
                        break;
        }
@@ -3590,7 +3592,7 @@ int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
        int err;
 
        rtnl_lock();
-       err = nexthops_dump(net, nb, extack);
+       err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
        if (err)
                goto unlock;
        err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
@@ -3603,8 +3605,17 @@ EXPORT_SYMBOL(register_nexthop_notifier);
 
 int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
 {
-       return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
-                                                 nb);
+       int err;
+
+       rtnl_lock();
+       err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
+                                                nb);
+       if (err)
+               goto unlock;
+       nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
+unlock:
+       rtnl_unlock();
+       return err;
 }
 EXPORT_SYMBOL(unregister_nexthop_notifier);
 
index 3f7bd7ae7d7af8628b06c124fd8795cbf30e4ce0..141e85e6422b1f567b89d554024ee92aa5fa2d2f 100644 (file)
@@ -1346,7 +1346,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
        if (dup_sack && (sacked & TCPCB_RETRANS)) {
                if (tp->undo_marker && tp->undo_retrans > 0 &&
                    after(end_seq, tp->undo_marker))
-                       tp->undo_retrans--;
+                       tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
                if ((sacked & TCPCB_SACKED_ACKED) &&
                    before(start_seq, state->reord))
                                state->reord = start_seq;
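
The tcp_sacktag_one() change above decrements undo_retrans by the number of SACKed segments (pcount) instead of by one, clamped at zero. A tiny sketch of that saturating decrement:

static int undo_retrans_dec(int undo_retrans, int pcount)
{
	int v = undo_retrans - pcount;

	return v > 0 ? v : 0;    /* equivalent to max_t(int, 0, v) */
}
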
index 8851c9463b4b62c9017565f545250c4ffe22927c..8536b2a7210b2ee88443d7bdfe21d22fd1433997 100644 (file)
@@ -390,7 +390,8 @@ static int compute_score(struct sock *sk, struct net *net,
                                        dif, sdif);
        if (!dev_match)
                return -1;
-       score += 4;
+       if (sk->sk_bound_dev_if)
+               score += 4;
 
        if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                score++;
@@ -1053,7 +1054,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        __be16 dport;
        u8  tos;
        int err, is_udplite = IS_UDPLITE(sk);
-       int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
+       int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
        struct sk_buff *skb;
        struct ip_options_data opt_copy;
@@ -1361,7 +1362,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
        }
 
        up->len += size;
-       if (!(up->corkflag || (flags&MSG_MORE)))
+       if (!(READ_ONCE(up->corkflag) || (flags&MSG_MORE)))
                ret = udp_push_pending_frames(sk);
        if (!ret)
                ret = size;
@@ -2662,9 +2663,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
        switch (optname) {
        case UDP_CORK:
                if (val != 0) {
-                       up->corkflag = 1;
+                       WRITE_ONCE(up->corkflag, 1);
                } else {
-                       up->corkflag = 0;
+                       WRITE_ONCE(up->corkflag, 0);
                        lock_sock(sk);
                        push_pending_frames(sk);
                        release_sock(sk);
@@ -2787,7 +2788,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
 
        switch (optname) {
        case UDP_CORK:
-               val = up->corkflag;
+               val = READ_ONCE(up->corkflag);
                break;
 
        case UDP_ENCAP:
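
The udp.c hunks above wrap every corkflag access in READ_ONCE()/WRITE_ONCE() because the flag is read and written without the socket lock held. A userspace model of the same single-access discipline, with C11 relaxed atomics standing in for the kernel macros; the struct is illustrative, not struct udp_sock:

#include <stdatomic.h>
#include <stdbool.h>

struct udp_state {
	atomic_int corkflag;              /* toggled without any lock */
};

static void set_cork(struct udp_state *up, bool on)
{
	atomic_store_explicit(&up->corkflag, on ? 1 : 0, memory_order_relaxed);
}

static bool should_cork(struct udp_state *up, bool msg_more)
{
	/* one untorn load; the value is then stable for the rest of the call */
	return atomic_load_explicit(&up->corkflag, memory_order_relaxed) ||
	       msg_more;
}
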
index 0d122edc368dd045effa1288246e3ebe7c65496b..b91003538d87a03855df9bd35b751017d37fe031 100644 (file)
@@ -935,7 +935,7 @@ static int __init udp_tunnel_nic_init_module(void)
 {
        int err;
 
-       udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0);
+       udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0);
        if (!udp_tunnel_nic_workqueue)
                return -ENOMEM;
 
index 55c290d556059a989cebc6d26cb77e8b01edb5a5..67c9114835c84864a3353c6f1c16853ea62a21c5 100644 (file)
@@ -106,7 +106,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
                if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
                        return -1;
 
-               score = 1;
+               score =  sk->sk_bound_dev_if ? 2 : 1;
                if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                        score++;
        }
index 5e8961004832e3e9be341c5c5d203d29ae05be3c..d128172bb54976bac370449c2204d42cd5b84e69 100644 (file)
@@ -770,6 +770,66 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
                data += sizeof(__be32);
        }
 
+       /* bit12 undefined: filled with empty value */
+       if (trace->type.bit12) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit13 undefined: filled with empty value */
+       if (trace->type.bit13) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit14 undefined: filled with empty value */
+       if (trace->type.bit14) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit15 undefined: filled with empty value */
+       if (trace->type.bit15) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit16 undefined: filled with empty value */
+       if (trace->type.bit16) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit17 undefined: filled with empty value */
+       if (trace->type.bit17) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit18 undefined: filled with empty value */
+       if (trace->type.bit18) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit19 undefined: filled with empty value */
+       if (trace->type.bit19) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit20 undefined: filled with empty value */
+       if (trace->type.bit20) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
+       /* bit21 undefined: filled with empty value */
+       if (trace->type.bit21) {
+               *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+               data += sizeof(__be32);
+       }
+
        /* opaque state snapshot */
        if (trace->type.bit22) {
                if (!sc) {
@@ -791,16 +851,10 @@ void ioam6_fill_trace_data(struct sk_buff *skb,
        struct ioam6_schema *sc;
        u8 sclen = 0;
 
-       /* Skip if Overflow flag is set OR
-        * if an unknown type (bit 12-21) is set
+       /* Skip if Overflow flag is set
         */
-       if (trace->overflow ||
-           trace->type.bit12 | trace->type.bit13 | trace->type.bit14 |
-           trace->type.bit15 | trace->type.bit16 | trace->type.bit17 |
-           trace->type.bit18 | trace->type.bit19 | trace->type.bit20 |
-           trace->type.bit21) {
+       if (trace->overflow)
                return;
-       }
 
        /* NodeLen does not include Opaque State Snapshot length. We need to
         * take it into account if the corresponding bit is set (bit 22) and
index f9ee04541c17fcfc52a2c5843d62bbdc72a2a815..9b7b726f8f45f2f0fb65b0792d2ae29e8042b7ab 100644 (file)
@@ -75,7 +75,11 @@ static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr *trace)
        u32 fields;
 
        if (!trace->type_be32 || !trace->remlen ||
-           trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4)
+           trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4 ||
+           trace->type.bit12 | trace->type.bit13 | trace->type.bit14 |
+           trace->type.bit15 | trace->type.bit16 | trace->type.bit17 |
+           trace->type.bit18 | trace->type.bit19 | trace->type.bit20 |
+           trace->type.bit21)
                return false;
 
        trace->nodelen = 0;
index 1bec5b22f80d5dc5766d0a667e783fb16c6d0ba9..0371d2c1414555f56024321f1fc8510c477ea680 100644 (file)
@@ -1378,7 +1378,6 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
        int err = -ENOMEM;
        int allow_create = 1;
        int replace_required = 0;
-       int sernum = fib6_new_sernum(info->nl_net);
 
        if (info->nlh) {
                if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
@@ -1478,7 +1477,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
        if (!err) {
                if (rt->nh)
                        list_add(&rt->nh_list, &rt->nh->f6i_list);
-               __fib6_update_sernum_upto_root(rt, sernum);
+               __fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
                fib6_start_gc(info->nl_net, rt);
        }
 
index de2cf3943b91e4eed16937dc865b009eb451f76e..a579ea14a69b67cdd641e1bf1dc943b8b8468007 100644 (file)
@@ -273,6 +273,7 @@ ip6t_do_table(struct sk_buff *skb,
         * things we don't know, ie. tcp syn flag or ports).  If the
         * rule is also a fragment-specific rule, non-fragments won't
         * match it. */
+       acpar.fragoff = 0;
        acpar.hotdrop = false;
        acpar.state   = state;
 
index a0108415275fe02a643efb307743b503985ae3f2..5c47be29b9ee9832069aa4b9e97852e22dd4278b 100644 (file)
@@ -33,7 +33,7 @@
 
 static const char nf_frags_cache_name[] = "nf-frags";
 
-unsigned int nf_frag_pernet_id __read_mostly;
+static unsigned int nf_frag_pernet_id __read_mostly;
 static struct inet_frags nf_frags;
 
 static struct nft_ct_frag6_pernet *nf_frag_pernet(struct net *net)
index e8a59d8bf2adf705824586f6f60b35b53d2be9e6..cb4eb1d2c620b96f23e516d17835a2aada6f0fa8 100644 (file)
@@ -25,8 +25,6 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 
-extern unsigned int nf_frag_pernet_id;
-
 static DEFINE_MUTEX(defrag6_mutex);
 
 static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
@@ -91,12 +89,10 @@ static const struct nf_hook_ops ipv6_defrag_ops[] = {
 
 static void __net_exit defrag6_net_exit(struct net *net)
 {
-       struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);
-
-       if (nf_frag->users) {
+       if (net->nf.defrag_ipv6_users) {
                nf_unregister_net_hooks(net, ipv6_defrag_ops,
                                        ARRAY_SIZE(ipv6_defrag_ops));
-               nf_frag->users = 0;
+               net->nf.defrag_ipv6_users = 0;
        }
 }
 
@@ -134,24 +130,23 @@ static void __exit nf_defrag_fini(void)
 
 int nf_defrag_ipv6_enable(struct net *net)
 {
-       struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);
        int err = 0;
 
        mutex_lock(&defrag6_mutex);
-       if (nf_frag->users == UINT_MAX) {
+       if (net->nf.defrag_ipv6_users == UINT_MAX) {
                err = -EOVERFLOW;
                goto out_unlock;
        }
 
-       if (nf_frag->users) {
-               nf_frag->users++;
+       if (net->nf.defrag_ipv6_users) {
+               net->nf.defrag_ipv6_users++;
                goto out_unlock;
        }
 
        err = nf_register_net_hooks(net, ipv6_defrag_ops,
                                    ARRAY_SIZE(ipv6_defrag_ops));
        if (err == 0)
-               nf_frag->users = 1;
+               net->nf.defrag_ipv6_users = 1;
 
  out_unlock:
        mutex_unlock(&defrag6_mutex);
@@ -161,12 +156,10 @@ EXPORT_SYMBOL_GPL(nf_defrag_ipv6_enable);
 
 void nf_defrag_ipv6_disable(struct net *net)
 {
-       struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id);
-
        mutex_lock(&defrag6_mutex);
-       if (nf_frag->users) {
-               nf_frag->users--;
-               if (nf_frag->users == 0)
+       if (net->nf.defrag_ipv6_users) {
+               net->nf.defrag_ipv6_users--;
+               if (net->nf.defrag_ipv6_users == 0)
                        nf_unregister_net_hooks(net, ipv6_defrag_ops,
                                                ARRAY_SIZE(ipv6_defrag_ops));
        }
index dbc2240239777b5fb82ebf40841541ae60813d14..9b9ef09382ab911b25e03b2ed7000c24411bf5cb 100644 (file)
@@ -5681,14 +5681,15 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                        goto nla_put_failure;
 
                if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
-                                   rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
+                                   rt->fib6_nh->fib_nh_weight, AF_INET6,
+                                   0) < 0)
                        goto nla_put_failure;
 
                list_for_each_entry_safe(sibling, next_sibling,
                                         &rt->fib6_siblings, fib6_siblings) {
                        if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
                                            sibling->fib6_nh->fib_nh_weight,
-                                           AF_INET6) < 0)
+                                           AF_INET6, 0) < 0)
                                goto nla_put_failure;
                }
 
index ea53847b5b7e8b82f1898fa442327a9ce060085f..8d785232b4796b7cafe14a35dedcbb0aaa2c37c2 100644 (file)
@@ -133,7 +133,8 @@ static int compute_score(struct sock *sk, struct net *net,
        dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
        if (!dev_match)
                return -1;
-       score++;
+       if (sk->sk_bound_dev_if)
+               score++;
 
        if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                score++;
@@ -1303,7 +1304,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        int addr_len = msg->msg_namelen;
        bool connected = false;
        int ulen = len;
-       int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
+       int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
        int err;
        int is_udplite = IS_UDPLITE(sk);
        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
index 53486b162f01c176ec86c7bee4b1be5c87a7aa49..93271a2632b8ef10437a52ec931b66b631331a51 100644 (file)
@@ -869,8 +869,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
        }
 
        if (tunnel->version == L2TP_HDR_VER_3 &&
-           l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+           l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
+               l2tp_session_dec_refcount(session);
                goto invalid;
+       }
 
        l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
        l2tp_session_dec_refcount(session);
index efbefcbac3ac6407d8a56e53270c2d566ba0c5f9..7cab1cf09bf1ad7ca59931f85cdf022ce2052524 100644 (file)
@@ -60,7 +60,10 @@ static struct mesh_table *mesh_table_alloc(void)
        atomic_set(&newtbl->entries,  0);
        spin_lock_init(&newtbl->gates_lock);
        spin_lock_init(&newtbl->walk_lock);
-       rhashtable_init(&newtbl->rhead, &mesh_rht_params);
+       if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
+               kfree(newtbl);
+               return NULL;
+       }
 
        return newtbl;
 }
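
The mesh_table_alloc() fix above checks the rhashtable_init() return value and frees the half-built table on failure instead of returning it. A small userspace sketch of that allocate/init/cleanup shape; index_init() is a stand-in for the fallible second initialisation step:

#include <stdlib.h>

struct table {
	void *index;           /* stand-in for the rhashtable */
	int entries;
};

static int index_init(struct table *t)
{
	t->index = malloc(64); /* stand-in for rhashtable_init() */
	return t->index ? 0 : -1;
}

static struct table *table_alloc(void)
{
	struct table *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;
	if (index_init(t)) {   /* init can fail: don't leak, don't return junk */
		free(t);
		return NULL;
	}
	return t;
}
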
index 204830a55240b4829991f4d815a6f1a10538c0f5..3fbd0b9ff9135474cdc8d900c52daa1958b1cadd 100644 (file)
@@ -2,6 +2,7 @@
 /*
  * Copyright 2012-2013, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
  * Copyright 2012-2013, cozybit Inc.
+ * Copyright (C) 2021 Intel Corporation
  */
 
 #include "mesh.h"
@@ -588,7 +589,7 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
 
        /* only transmit to PS STA with announced, non-zero awake window */
        if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
-           (!elems->awake_window || !le16_to_cpu(*elems->awake_window)))
+           (!elems->awake_window || !get_unaligned_le16(elems->awake_window)))
                return;
 
        if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
index e5935e3d7a078ff7e3e87f4a845e959712459b71..8c6416129d5bed9669a77419b1e6f230cdc24063 100644 (file)
@@ -392,10 +392,6 @@ static bool rate_control_send_low(struct ieee80211_sta *pubsta,
        int mcast_rate;
        bool use_basicrate = false;
 
-       if (ieee80211_is_tx_data(txrc->skb) &&
-           info->flags & IEEE80211_TX_CTL_NO_ACK)
-               return false;
-
        if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) {
                __rate_control_send_low(txrc->hw, sband, pubsta, info,
                                        txrc->rate_idx_mask);
index 99ed68f7dc365ebc526338d883789a0fd1aa1616..c4071b015c18848ba5cbbc019d12890b3ad2640f 100644 (file)
@@ -4131,7 +4131,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
                if (!bssid)
                        return false;
                if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
-                   ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
+                   ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
+                   !is_valid_ether_addr(hdr->addr2))
                        return false;
                if (ieee80211_is_beacon(hdr->frame_control))
                        return true;
index 2d1193ed3eb524219bd1df328db6531e2ac0d419..8921088a5df65f1e837e046413e669704955526d 100644 (file)
@@ -2209,7 +2209,11 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
                        }
 
                        vht_mcs = iterator.this_arg[4] >> 4;
+                       if (vht_mcs > 11)
+                               vht_mcs = 0;
                        vht_nss = iterator.this_arg[4] & 0xF;
+                       if (!vht_nss || vht_nss > 8)
+                               vht_nss = 1;
                        break;
 
                /*
@@ -3380,6 +3384,14 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
                goto out;
 
+       /* If n == 2, the "while (*frag_tail)" loop above didn't execute
+        * and  frag_tail should be &skb_shinfo(head)->frag_list.
+        * However, ieee80211_amsdu_prepare_head() can reallocate it.
+        * Reload frag_tail to have it pointing to the correct place.
+        */
+       if (n == 2)
+               frag_tail = &skb_shinfo(head)->frag_list;
+
        /*
         * Pad out the previous subframe to a multiple of 4 by adding the
         * padding to the next one, that's being added. Note that head->len
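
The A-MSDU hunk above reloads frag_tail because ieee80211_amsdu_prepare_head() may reallocate the head skb, leaving the previously computed pointer dangling. A userspace illustration of the same rule with realloc(): any pointer derived from a buffer must be recomputed after a call that can move it.

#include <stdlib.h>
#include <string.h>

struct buf {
	size_t len, cap;
	char *data;
};

/* May reallocate b->data (model of a "prepare" step that can grow the head). */
static int ensure_cap(struct buf *b, size_t need)
{
	char *nd;

	if (b->cap >= need)
		return 0;
	nd = realloc(b->data, need);
	if (!nd)
		return -1;
	b->data = nd;          /* any pointer previously derived from b->data is stale */
	b->cap = need;
	return 0;
}

static int append(struct buf *b, const char *s, size_t n)
{
	char *tail;

	if (!n)
		return 0;
	/* Computing 'tail' before ensure_cap() and using it afterwards would be
	 * a use-after-free whenever realloc() moved the buffer. */
	if (ensure_cap(b, b->len + n))
		return -1;
	tail = b->data + b->len;          /* derive the pointer after the call */
	memcpy(tail, s, n);
	b->len += n;
	return 0;
}
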
index bca47fad5a16280b808bef4c8832d6f0e0626b37..4eed23e27610439b316e8d846bd22af83399b8bf 100644 (file)
@@ -520,6 +520,9 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
                        return RX_DROP_UNUSABLE;
        }
 
+       /* reload hdr - skb might have been reallocated */
+       hdr = (void *)rx->skb->data;
+
        data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
        if (!rx->sta || data_len < 0)
                return RX_DROP_UNUSABLE;
@@ -749,6 +752,9 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
                        return RX_DROP_UNUSABLE;
        }
 
+       /* reload hdr - skb might have been reallocated */
+       hdr = (void *)rx->skb->data;
+
        data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
        if (!rx->sta || data_len < 0)
                return RX_DROP_UNUSABLE;
index 5265525011ad2a2945bbb4090c8b06cf154d0511..5ca186d53cb0f14eb6b14da352ced8eab1891de7 100644 (file)
@@ -1083,8 +1083,10 @@ static void __net_exit mctp_routes_net_exit(struct net *net)
 {
        struct mctp_route *rt;
 
+       rcu_read_lock();
        list_for_each_entry_rcu(rt, &net->mctp.routes, list)
                mctp_route_release(rt);
+       rcu_read_unlock();
 }
 
 static struct pernet_operations mctp_net_ops = {
index f48eb6315bbb4b3f8a08101ee8624b4c49471500..292374fb077928cbf361e04d8910a545677df075 100644 (file)
@@ -36,7 +36,7 @@ static int mptcp_diag_dump_one(struct netlink_callback *cb,
        struct sock *sk;
 
        net = sock_net(in_skb->sk);
-       msk = mptcp_token_get_sock(req->id.idiag_cookie[0]);
+       msk = mptcp_token_get_sock(net, req->id.idiag_cookie[0]);
        if (!msk)
                goto out_nosk;
 
index c4f9a5ce3815307a6b54d5ac318d3eac98927d00..050eea231528bc762226bbd86d43447011428921 100644 (file)
@@ -1718,9 +1718,7 @@ static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
 
        list_for_each_entry(entry, &pernet->local_addr_list, list) {
                if (addresses_equal(&entry->addr, &addr.addr, true)) {
-                       ret = mptcp_nl_addr_backup(net, &entry->addr, bkup);
-                       if (ret)
-                               return ret;
+                       mptcp_nl_addr_backup(net, &entry->addr, bkup);
 
                        if (bkup)
                                entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
index 2602f1386160d45398e60520fd459b7c79da6411..d073b211138287342cf6c2faf6b0fc299c203205 100644 (file)
@@ -528,7 +528,6 @@ static bool mptcp_check_data_fin(struct sock *sk)
 
                sk->sk_shutdown |= RCV_SHUTDOWN;
                smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
-               set_bit(MPTCP_DATA_READY, &msk->flags);
 
                switch (sk->sk_state) {
                case TCP_ESTABLISHED:
@@ -742,10 +741,9 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
 
        /* Wake-up the reader only for in-sequence data */
        mptcp_data_lock(sk);
-       if (move_skbs_to_msk(msk, ssk)) {
-               set_bit(MPTCP_DATA_READY, &msk->flags);
+       if (move_skbs_to_msk(msk, ssk))
                sk->sk_data_ready(sk);
-       }
+
        mptcp_data_unlock(sk);
 }
 
@@ -847,7 +845,6 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk)
                sk->sk_shutdown |= RCV_SHUTDOWN;
 
                smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
-               set_bit(MPTCP_DATA_READY, &msk->flags);
                sk->sk_data_ready(sk);
        }
 
@@ -1316,7 +1313,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
                        goto alloc_skb;
                }
 
-               must_collapse = (info->size_goal - skb->len > 0) &&
+               must_collapse = (info->size_goal > skb->len) &&
                                (skb_shinfo(skb)->nr_frags < sysctl_max_skb_frags);
                if (must_collapse) {
                        size_bias = skb->len;
@@ -1325,7 +1322,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
        }
 
 alloc_skb:
-       if (!must_collapse && !ssk->sk_tx_skb_cache &&
+       if (!must_collapse &&
            !mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held))
                return 0;
 
@@ -1759,21 +1756,6 @@ out:
        return copied ? : ret;
 }
 
-static void mptcp_wait_data(struct sock *sk, long *timeo)
-{
-       DEFINE_WAIT_FUNC(wait, woken_wake_function);
-       struct mptcp_sock *msk = mptcp_sk(sk);
-
-       add_wait_queue(sk_sleep(sk), &wait);
-       sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-
-       sk_wait_event(sk, timeo,
-                     test_bit(MPTCP_DATA_READY, &msk->flags), &wait);
-
-       sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-       remove_wait_queue(sk_sleep(sk), &wait);
-}
-
 static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
                                struct msghdr *msg,
                                size_t len, int flags,
@@ -2077,19 +2059,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                }
 
                pr_debug("block timeout %ld", timeo);
-               mptcp_wait_data(sk, &timeo);
-       }
-
-       if (skb_queue_empty_lockless(&sk->sk_receive_queue) &&
-           skb_queue_empty(&msk->receive_queue)) {
-               /* entire backlog drained, clear DATA_READY. */
-               clear_bit(MPTCP_DATA_READY, &msk->flags);
-
-               /* .. race-breaker: ssk might have gotten new data
-                * after last __mptcp_move_skbs() returned false.
-                */
-               if (unlikely(__mptcp_move_skbs(msk)))
-                       set_bit(MPTCP_DATA_READY, &msk->flags);
+               sk_wait_data(sk, &timeo, NULL);
        }
 
 out_err:
@@ -2098,9 +2068,9 @@ out_err:
                        tcp_recv_timestamp(msg, sk, &tss);
        }
 
-       pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
-                msk, test_bit(MPTCP_DATA_READY, &msk->flags),
-                skb_queue_empty_lockless(&sk->sk_receive_queue), copied);
+       pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
+                msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
+                skb_queue_empty(&msk->receive_queue), copied);
        if (!(flags & MSG_PEEK))
                mptcp_rcv_space_adjust(msk, copied);
 
@@ -2368,7 +2338,6 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
        inet_sk_state_store(sk, TCP_CLOSE);
        sk->sk_shutdown = SHUTDOWN_MASK;
        smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
-       set_bit(MPTCP_DATA_READY, &msk->flags);
        set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
 
        mptcp_close_wake_up(sk);
@@ -2735,7 +2704,7 @@ cleanup:
        inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
        mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-               bool slow = lock_sock_fast(ssk);
+               bool slow = lock_sock_fast_nested(ssk);
 
                sock_orphan(ssk);
                unlock_sock_fast(ssk, slow);
@@ -3385,8 +3354,14 @@ unlock_fail:
 
 static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
 {
-       return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
-              0;
+       /* Concurrent splices from sk_receive_queue into receive_queue will
+        * always show at least one non-empty queue when checked in this order.
+        */
+       if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) &&
+           skb_queue_empty_lockless(&msk->receive_queue))
+               return 0;
+
+       return EPOLLIN | EPOLLRDNORM;
 }
 
 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
@@ -3421,7 +3396,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
        state = inet_sk_state_load(sk);
        pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
        if (state == TCP_LISTEN)
-               return mptcp_check_readable(msk);
+               return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM : 0;
 
        if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
                mask |= mptcp_check_readable(msk);
index d3e6fd1615f1f728b4ad51e861afee7037d23b7d..dc984676c5eb1582271645e8363f5115c0560dc4 100644 (file)
@@ -709,7 +709,7 @@ int mptcp_token_new_connect(struct sock *sk);
 void mptcp_token_accept(struct mptcp_subflow_request_sock *r,
                        struct mptcp_sock *msk);
 bool mptcp_token_exists(u32 token);
-struct mptcp_sock *mptcp_token_get_sock(u32 token);
+struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token);
 struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
                                         long *s_num);
 void mptcp_token_destroy(struct mptcp_sock *msk);
index 1de7ce883c3776fe9345a45b3b6ec59d5e3644f7..6172f380dfb763b43c6d996b4896215cad9c7d7b 100644 (file)
@@ -86,7 +86,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
        struct mptcp_sock *msk;
        int local_id;
 
-       msk = mptcp_token_get_sock(subflow_req->token);
+       msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
        if (!msk) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
                return NULL;
index 37127781aee987055acf380f8696a2326e50971f..7f22526346a7e6667b73599ce27771fc18824cfd 100644 (file)
@@ -108,18 +108,12 @@ bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subfl
 
        e->valid = 0;
 
-       msk = mptcp_token_get_sock(e->token);
+       msk = mptcp_token_get_sock(net, e->token);
        if (!msk) {
                spin_unlock_bh(&join_entry_locks[i]);
                return false;
        }
 
-       /* If this fails, the token got re-used in the mean time by another
-        * mptcp socket in a different netns, i.e. entry is outdated.
-        */
-       if (!net_eq(sock_net((struct sock *)msk), net))
-               goto err_put;
-
        subflow_req->remote_nonce = e->remote_nonce;
        subflow_req->local_nonce = e->local_nonce;
        subflow_req->backup = e->backup;
@@ -128,11 +122,6 @@ bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subfl
        subflow_req->msk = msk;
        spin_unlock_bh(&join_entry_locks[i]);
        return true;
-
-err_put:
-       spin_unlock_bh(&join_entry_locks[i]);
-       sock_put((struct sock *)msk);
-       return false;
 }
 
 void __init mptcp_join_cookie_init(void)
index a98e554b034fe712d99ac5d9dbba1cbc9c9568b3..e581b341c5beb508e73fc5d7afea05f8cb6c390b 100644 (file)
@@ -231,6 +231,7 @@ found:
 
 /**
  * mptcp_token_get_sock - retrieve mptcp connection sock using its token
+ * @net: restrict to this namespace
  * @token: token of the mptcp connection to retrieve
  *
  * This function returns the mptcp connection structure with the given token.
@@ -238,7 +239,7 @@ found:
  *
  * returns NULL if no connection with the given token value exists.
  */
-struct mptcp_sock *mptcp_token_get_sock(u32 token)
+struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token)
 {
        struct hlist_nulls_node *pos;
        struct token_bucket *bucket;
@@ -251,11 +252,15 @@ struct mptcp_sock *mptcp_token_get_sock(u32 token)
 again:
        sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) {
                msk = mptcp_sk(sk);
-               if (READ_ONCE(msk->token) != token)
+               if (READ_ONCE(msk->token) != token ||
+                   !net_eq(sock_net(sk), net))
                        continue;
+
                if (!refcount_inc_not_zero(&sk->sk_refcnt))
                        goto not_found;
-               if (READ_ONCE(msk->token) != token) {
+
+               if (READ_ONCE(msk->token) != token ||
+                   !net_eq(sock_net(sk), net)) {
                        sock_put(sk);
                        goto again;
                }
index e1bd6f0a0676fa1f6ecec42dc9e24f97247b33fe..5d984bec1cd865b78ee19adfe248bac57dc1d24e 100644 (file)
@@ -11,6 +11,7 @@ static struct mptcp_subflow_request_sock *build_req_sock(struct kunit *test)
                            GFP_USER);
        KUNIT_EXPECT_NOT_ERR_OR_NULL(test, req);
        mptcp_token_init_request((struct request_sock *)req);
+       sock_net_set((struct sock *)req, &init_net);
        return req;
 }
 
@@ -22,7 +23,7 @@ static void mptcp_token_test_req_basic(struct kunit *test)
        KUNIT_ASSERT_EQ(test, 0,
                        mptcp_token_new_request((struct request_sock *)req));
        KUNIT_EXPECT_NE(test, 0, (int)req->token);
-       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(req->token));
+       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, req->token));
 
        /* cleanup */
        mptcp_token_destroy_request((struct request_sock *)req);
@@ -55,6 +56,7 @@ static struct mptcp_sock *build_msk(struct kunit *test)
        msk = kunit_kzalloc(test, sizeof(struct mptcp_sock), GFP_USER);
        KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk);
        refcount_set(&((struct sock *)msk)->sk_refcnt, 1);
+       sock_net_set((struct sock *)msk, &init_net);
        return msk;
 }
 
@@ -74,11 +76,11 @@ static void mptcp_token_test_msk_basic(struct kunit *test)
                        mptcp_token_new_connect((struct sock *)icsk));
        KUNIT_EXPECT_NE(test, 0, (int)ctx->token);
        KUNIT_EXPECT_EQ(test, ctx->token, msk->token);
-       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(ctx->token));
+       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, ctx->token));
        KUNIT_EXPECT_EQ(test, 2, (int)refcount_read(&sk->sk_refcnt));
 
        mptcp_token_destroy(msk);
-       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(ctx->token));
+       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, ctx->token));
 }
 
 static void mptcp_token_test_accept(struct kunit *test)
@@ -90,11 +92,11 @@ static void mptcp_token_test_accept(struct kunit *test)
                        mptcp_token_new_request((struct request_sock *)req));
        msk->token = req->token;
        mptcp_token_accept(req, msk);
-       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
+       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token));
 
        /* this is now a no-op */
        mptcp_token_destroy_request((struct request_sock *)req);
-       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
+       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token));
 
        /* cleanup */
        mptcp_token_destroy(msk);
@@ -116,7 +118,7 @@ static void mptcp_token_test_destroyed(struct kunit *test)
 
        /* simulate race on removal */
        refcount_set(&sk->sk_refcnt, 0);
-       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(msk->token));
+       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, msk->token));
 
        /* cleanup */
        mptcp_token_destroy(msk);
index 6186358eac7c5a255e48eb9670311694f0d920cd..6e391308431da0279317bb2c2ac807c0c8b3a78d 100644 (file)
@@ -130,11 +130,11 @@ htable_size(u8 hbits)
 {
        size_t hsize;
 
-       /* We must fit both into u32 in jhash and size_t */
+       /* We must fit both into u32 in jhash and INT_MAX in kvmalloc_node() */
        if (hbits > 31)
                return 0;
        hsize = jhash_size(hbits);
-       if ((((size_t)-1) - sizeof(struct htable)) / sizeof(struct hbucket *)
+       if ((INT_MAX - sizeof(struct htable)) / sizeof(struct hbucket *)
            < hsize)
                return 0;
 
index c100c6b112c81ee5cb0eccda5421372756c305ac..2c467c422dc6343a296ccfff9097069440fd66cd 100644 (file)
@@ -1468,6 +1468,10 @@ int __init ip_vs_conn_init(void)
        int idx;
 
        /* Compute size and mask */
+       if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 20) {
+               pr_info("conn_tab_bits not in [8, 20]. Using default value\n");
+               ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
+       }
        ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
        ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
 
index 94e18fb9690dd44a32829894ed286be66b561d16..770a63103c7a4240b8559a97f707588d569beba8 100644 (file)
@@ -74,10 +74,14 @@ static __read_mostly struct kmem_cache *nf_conntrack_cachep;
 static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;
 
+/* serialize hash resizes and nf_ct_iterate_cleanup */
+static DEFINE_MUTEX(nf_conntrack_mutex);
+
 #define GC_SCAN_INTERVAL       (120u * HZ)
 #define GC_SCAN_MAX_DURATION   msecs_to_jiffies(10)
 
-#define MAX_CHAINLEN   64u
+#define MIN_CHAINLEN   8u
+#define MAX_CHAINLEN   (32u - MIN_CHAINLEN)
 
 static struct conntrack_gc_work conntrack_gc_work;
 
@@ -188,11 +192,13 @@ seqcount_spinlock_t nf_conntrack_generation __read_mostly;
 static siphash_key_t nf_conntrack_hash_rnd __read_mostly;
 
 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
+                             unsigned int zoneid,
                              const struct net *net)
 {
        struct {
                struct nf_conntrack_man src;
                union nf_inet_addr dst_addr;
+               unsigned int zone;
                u32 net_mix;
                u16 dport;
                u16 proto;
@@ -205,6 +211,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
        /* The direction must be ignored, so handle usable members manually. */
        combined.src = tuple->src;
        combined.dst_addr = tuple->dst.u3;
+       combined.zone = zoneid;
        combined.net_mix = net_hash_mix(net);
        combined.dport = (__force __u16)tuple->dst.u.all;
        combined.proto = tuple->dst.protonum;
@@ -219,15 +226,17 @@ static u32 scale_hash(u32 hash)
 
 static u32 __hash_conntrack(const struct net *net,
                            const struct nf_conntrack_tuple *tuple,
+                           unsigned int zoneid,
                            unsigned int size)
 {
-       return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
+       return reciprocal_scale(hash_conntrack_raw(tuple, zoneid, net), size);
 }
 
 static u32 hash_conntrack(const struct net *net,
-                         const struct nf_conntrack_tuple *tuple)
+                         const struct nf_conntrack_tuple *tuple,
+                         unsigned int zoneid)
 {
-       return scale_hash(hash_conntrack_raw(tuple, net));
+       return scale_hash(hash_conntrack_raw(tuple, zoneid, net));
 }
 
 static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
@@ -650,9 +659,11 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
-                                     &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+                                     &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+                                     nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
                reply_hash = hash_conntrack(net,
-                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+                                          nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
        clean_from_lists(ct);
@@ -819,8 +830,20 @@ struct nf_conntrack_tuple_hash *
 nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
 {
-       return __nf_conntrack_find_get(net, zone, tuple,
-                                      hash_conntrack_raw(tuple, net));
+       unsigned int rid, zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
+       struct nf_conntrack_tuple_hash *thash;
+
+       thash = __nf_conntrack_find_get(net, zone, tuple,
+                                       hash_conntrack_raw(tuple, zone_id, net));
+
+       if (thash)
+               return thash;
+
+       rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
+       if (rid != zone_id)
+               return __nf_conntrack_find_get(net, zone, tuple,
+                                              hash_conntrack_raw(tuple, rid, net));
+       return thash;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
@@ -842,6 +865,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
+       unsigned int max_chainlen;
        unsigned int chainlen = 0;
        unsigned int sequence;
        int err = -EEXIST;
@@ -852,18 +876,22 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
-                                     &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+                                     &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+                                     nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
                reply_hash = hash_conntrack(net,
-                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+                                          nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
+       max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
+
        /* See if there's one in the list already, including reverse */
        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                    zone, net))
                        goto out;
 
-               if (chainlen++ > MAX_CHAINLEN)
+               if (chainlen++ > max_chainlen)
                        goto chaintoolong;
        }
 
@@ -873,7 +901,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                    zone, net))
                        goto out;
-               if (chainlen++ > MAX_CHAINLEN)
+               if (chainlen++ > max_chainlen)
                        goto chaintoolong;
        }
 
@@ -1103,8 +1131,8 @@ drop:
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
 {
+       unsigned int chainlen = 0, sequence, max_chainlen;
        const struct nf_conntrack_zone *zone;
-       unsigned int chainlen = 0, sequence;
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
@@ -1133,8 +1161,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
                hash = scale_hash(hash);
                reply_hash = hash_conntrack(net,
-                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-
+                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+                                          nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
        /* We're not in hash table, and we refuse to set up related
@@ -1168,6 +1196,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                goto dying;
        }
 
+       max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost race. */
@@ -1175,7 +1204,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                    zone, net))
                        goto out;
-               if (chainlen++ > MAX_CHAINLEN)
+               if (chainlen++ > max_chainlen)
                        goto chaintoolong;
        }
 
@@ -1184,7 +1213,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                    zone, net))
                        goto out;
-               if (chainlen++ > MAX_CHAINLEN) {
+               if (chainlen++ > max_chainlen) {
 chaintoolong:
                        nf_ct_add_to_dying_list(ct);
                        NF_CT_STAT_INC(net, chaintoolong);
@@ -1246,7 +1275,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
        rcu_read_lock();
  begin:
        nf_conntrack_get_ht(&ct_hash, &hsize);
-       hash = __hash_conntrack(net, tuple, hsize);
+       hash = __hash_conntrack(net, tuple, nf_ct_zone_id(zone, IP_CT_DIR_REPLY), hsize);
 
        hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);
@@ -1687,8 +1716,8 @@ resolve_normal_ct(struct nf_conn *tmpl,
        struct nf_conntrack_tuple_hash *h;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_zone tmp;
+       u32 hash, zone_id, rid;
        struct nf_conn *ct;
-       u32 hash;
 
        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, state->pf, protonum, state->net,
@@ -1699,8 +1728,20 @@ resolve_normal_ct(struct nf_conn *tmpl,
 
        /* look for tuple match */
        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
-       hash = hash_conntrack_raw(&tuple, state->net);
+
+       zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
+       hash = hash_conntrack_raw(&tuple, zone_id, state->net);
        h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
+
+       if (!h) {
+               rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
+               if (zone_id != rid) {
+                       u32 tmp = hash_conntrack_raw(&tuple, rid, state->net);
+
+                       h = __nf_conntrack_find_get(state->net, zone, &tuple, tmp);
+               }
+       }
+
        if (!h) {
                h = init_conntrack(state->net, tmpl, &tuple,
                                   skb, dataoff, hash);
@@ -2225,28 +2266,31 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
        spinlock_t *lockp;
 
        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+               struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];
+
+               if (hlist_nulls_empty(hslot))
+                       continue;
+
                lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
                local_bh_disable();
                nf_conntrack_lock(lockp);
-               if (*bucket < nf_conntrack_htable_size) {
-                       hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
-                               if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
-                                       continue;
-                               /* All nf_conn objects are added to hash table twice, one
-                                * for original direction tuple, once for the reply tuple.
-                                *
-                                * Exception: In the IPS_NAT_CLASH case, only the reply
-                                * tuple is added (the original tuple already existed for
-                                * a different object).
-                                *
-                                * We only need to call the iterator once for each
-                                * conntrack, so we just use the 'reply' direction
-                                * tuple while iterating.
-                                */
-                               ct = nf_ct_tuplehash_to_ctrack(h);
-                               if (iter(ct, data))
-                                       goto found;
-                       }
+               hlist_nulls_for_each_entry(h, n, hslot, hnnode) {
+                       if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
+                               continue;
+                       /* All nf_conn objects are added to hash table twice, one
+                        * for original direction tuple, once for the reply tuple.
+                        *
+                        * Exception: In the IPS_NAT_CLASH case, only the reply
+                        * tuple is added (the original tuple already existed for
+                        * a different object).
+                        *
+                        * We only need to call the iterator once for each
+                        * conntrack, so we just use the 'reply' direction
+                        * tuple while iterating.
+                        */
+                       ct = nf_ct_tuplehash_to_ctrack(h);
+                       if (iter(ct, data))
+                               goto found;
                }
                spin_unlock(lockp);
                local_bh_enable();
@@ -2264,26 +2308,20 @@ found:
 static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
                                  void *data, u32 portid, int report)
 {
-       unsigned int bucket = 0, sequence;
+       unsigned int bucket = 0;
        struct nf_conn *ct;
 
        might_sleep();
 
-       for (;;) {
-               sequence = read_seqcount_begin(&nf_conntrack_generation);
-
-               while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
-                       /* Time to push up daises... */
+       mutex_lock(&nf_conntrack_mutex);
+       while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
+               /* Time to push up daises... */
 
-                       nf_ct_delete(ct, portid, report);
-                       nf_ct_put(ct);
-                       cond_resched();
-               }
-
-               if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
-                       break;
-               bucket = 0;
+               nf_ct_delete(ct, portid, report);
+               nf_ct_put(ct);
+               cond_resched();
        }
+       mutex_unlock(&nf_conntrack_mutex);
 }
 
 struct iter_data {
@@ -2519,8 +2557,10 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
        if (!hash)
                return -ENOMEM;
 
+       mutex_lock(&nf_conntrack_mutex);
        old_size = nf_conntrack_htable_size;
        if (old_size == hashsize) {
+               mutex_unlock(&nf_conntrack_mutex);
                kvfree(hash);
                return 0;
        }
@@ -2537,12 +2577,16 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
 
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
+                       unsigned int zone_id;
+
                        h = hlist_nulls_entry(nf_conntrack_hash[i].first,
                                              struct nf_conntrack_tuple_hash, hnnode);
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        hlist_nulls_del_rcu(&h->hnnode);
+
+                       zone_id = nf_ct_zone_id(nf_ct_zone(ct), NF_CT_DIRECTION(h));
                        bucket = __hash_conntrack(nf_ct_net(ct),
-                                                 &h->tuple, hashsize);
+                                                 &h->tuple, zone_id, hashsize);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
        }
@@ -2556,6 +2600,8 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
        nf_conntrack_all_unlock();
        local_bh_enable();
 
+       mutex_unlock(&nf_conntrack_mutex);
+
        synchronize_net();
        kvfree(old_hash);
        return 0;
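
Several hunks in the conntrack diff above (nf_conntrack_find_get(), resolve_normal_ct(), the resize loop) feed the zone id into the tuple hash and, on a miss, retry with the reply-direction zone id when it differs from the original-direction one. A userspace sketch of that two-pass lookup policy; the hash mix and the lookup callback are toy assumptions, not the kernel's siphash-based code:

#include <stdint.h>

struct zone { uint16_t id_orig, id_reply; };

typedef const void *(*lookup_fn)(uint32_t key_hash);

static uint32_t hash_tuple(uint32_t tuple_hash, uint16_t zone_id)
{
	return tuple_hash ^ ((uint32_t)zone_id * 0x9e3779b1u);  /* toy mix */
}

static const void *find_conn(lookup_fn lookup, uint32_t tuple_hash,
			     const struct zone *z)
{
	/* first pass: hash with the ORIGINAL-direction zone id */
	const void *h = lookup(hash_tuple(tuple_hash, z->id_orig));

	/* second pass only if the REPLY direction maps to a different id */
	if (!h && z->id_reply != z->id_orig)
		h = lookup(hash_tuple(tuple_hash, z->id_reply));
	return h;
}
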
index 7008961f5cb08b828530bcc23601694362d5a57c..273117683922858eca82f7192f022533422de590 100644 (file)
@@ -150,13 +150,16 @@ static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static unsigned int
-hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net,
+           const struct nf_conntrack_zone *zone,
+           const struct nf_conntrack_tuple *tuple)
 {
        unsigned int hash;
        struct {
                struct nf_conntrack_man src;
                u32 net_mix;
                u32 protonum;
+               u32 zone;
        } __aligned(SIPHASH_ALIGNMENT) combined;
 
        get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
@@ -165,9 +168,13 @@ hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
 
        /* Original src, to ensure we map it consistently if poss. */
        combined.src = tuple->src;
-       combined.net_mix = net_hash_mix(n);
+       combined.net_mix = net_hash_mix(net);
        combined.protonum = tuple->dst.protonum;
 
+       /* Zone ID can be used provided its valid for both directions */
+       if (zone->dir == NF_CT_DEFAULT_ZONE_DIR)
+               combined.zone = zone->id;
+
        hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd);
 
        return reciprocal_scale(hash, nf_nat_htable_size);
@@ -272,7 +279,7 @@ find_appropriate_src(struct net *net,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range2 *range)
 {
-       unsigned int h = hash_by_src(net, tuple);
+       unsigned int h = hash_by_src(net, zone, tuple);
        const struct nf_conn *ct;
 
        hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
@@ -619,7 +626,7 @@ nf_nat_setup_info(struct nf_conn *ct,
                unsigned int srchash;
                spinlock_t *lock;
 
-               srchash = hash_by_src(net,
+               srchash = hash_by_src(net, nf_ct_zone(ct),
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
                spin_lock_bh(lock);
@@ -788,7 +795,7 @@ static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
 {
        unsigned int h;
 
-       h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+       h = hash_by_src(nf_ct_net(ct), nf_ct_zone(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
        hlist_del_rcu(&ct->nat_bysource);
        spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
index 8e8a65d46345b292c8fcac2fc7fcdfd1be1c5557..acd73f717a0883d791fc351851a98bac4144705f 100644 (file)
@@ -9,8 +9,19 @@
 
 #include <net/netfilter/nf_nat_masquerade.h>
 
+struct masq_dev_work {
+       struct work_struct work;
+       struct net *net;
+       union nf_inet_addr addr;
+       int ifindex;
+       int (*iter)(struct nf_conn *i, void *data);
+};
+
+#define MAX_MASQ_WORKER_COUNT  16
+
 static DEFINE_MUTEX(masq_mutex);
 static unsigned int masq_refcnt __read_mostly;
+static atomic_t masq_worker_count __read_mostly;
 
 unsigned int
 nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
@@ -63,13 +74,71 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
 
-static int device_cmp(struct nf_conn *i, void *ifindex)
+static void iterate_cleanup_work(struct work_struct *work)
+{
+       struct masq_dev_work *w;
+
+       w = container_of(work, struct masq_dev_work, work);
+
+       nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0);
+
+       put_net(w->net);
+       kfree(w);
+       atomic_dec(&masq_worker_count);
+       module_put(THIS_MODULE);
+}
+
+/* Iterate conntrack table in the background and remove conntrack entries
+ * that use the device/address being removed.
+ *
+ * In case too many work items have been queued already or memory allocation
+ * fails, iteration is skipped; conntrack entries will time out eventually.
+ */
+static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,
+                                int ifindex,
+                                int (*iter)(struct nf_conn *i, void *data),
+                                gfp_t gfp_flags)
+{
+       struct masq_dev_work *w;
+
+       if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT)
+               return;
+
+       net = maybe_get_net(net);
+       if (!net)
+               return;
+
+       if (!try_module_get(THIS_MODULE))
+               goto err_module;
+
+       w = kzalloc(sizeof(*w), gfp_flags);
+       if (w) {
+               /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */
+               atomic_inc(&masq_worker_count);
+
+               INIT_WORK(&w->work, iterate_cleanup_work);
+               w->ifindex = ifindex;
+               w->net = net;
+               w->iter = iter;
+               if (addr)
+                       w->addr = *addr;
+               schedule_work(&w->work);
+               return;
+       }
+
+       module_put(THIS_MODULE);
+ err_module:
+       put_net(net);
+}
+
+static int device_cmp(struct nf_conn *i, void *arg)
 {
        const struct nf_conn_nat *nat = nfct_nat(i);
+       const struct masq_dev_work *w = arg;
 
        if (!nat)
                return 0;
-       return nat->masq_index == (int)(long)ifindex;
+       return nat->masq_index == w->ifindex;
 }
 
 static int masq_device_event(struct notifier_block *this,
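
Editor's note: the comment above describes the pattern this hunk introduces. The notifier context must not perform the slow, sleepable conntrack walk itself, so the arguments are packaged into a masq_dev_work item, an atomic counter caps how many walks may be in flight, and overload or allocation failure simply skips the walk. A minimal user-space analogue of that bounded-deferral shape follows; it is a hedged sketch, and the names (cleanup_job, MAX_JOBS, scan_table, schedule_cleanup) are invented, not kernel code.

/* Hypothetical sketch of "bounded deferred cleanup": a handler that must not
 * block queues a job, caps the number of outstanding jobs, and lets a worker
 * thread do the slow table walk. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_JOBS 16

static atomic_int job_count;

struct cleanup_job {
        int ifindex;                    /* which device's entries to drop */
};

static void scan_table(int ifindex)
{
        /* stand-in for the slow, sleepable table walk */
        printf("scanning table for ifindex %d\n", ifindex);
}

static void *job_worker(void *arg)
{
        struct cleanup_job *job = arg;

        scan_table(job->ifindex);
        free(job);
        atomic_fetch_sub(&job_count, 1);        /* job done, release the slot */
        return NULL;
}

/* Called from a context that must not block: either queue a job or skip. */
static void schedule_cleanup(int ifindex)
{
        struct cleanup_job *job;
        pthread_t tid;

        if (atomic_load(&job_count) > MAX_JOBS)
                return;                 /* too much queued, entries will age out */

        job = malloc(sizeof(*job));
        if (!job)
                return;                 /* allocation failure: also just skip */

        job->ifindex = ifindex;
        atomic_fetch_add(&job_count, 1);        /* may briefly overshoot, as in the patch */
        if (pthread_create(&tid, NULL, job_worker, job) != 0) {
                atomic_fetch_sub(&job_count, 1);
                free(job);
                return;
        }
        pthread_detach(tid);
}

int main(void)
{
        schedule_cleanup(3);
        pthread_exit(NULL);             /* let detached workers finish */
}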
@@ -85,8 +154,8 @@ static int masq_device_event(struct notifier_block *this,
                 * and forget them.
                 */
 
-               nf_ct_iterate_cleanup_net(net, device_cmp,
-                                         (void *)(long)dev->ifindex, 0, 0);
+               nf_nat_masq_schedule(net, NULL, dev->ifindex,
+                                    device_cmp, GFP_KERNEL);
        }
 
        return NOTIFY_DONE;
@@ -94,35 +163,45 @@ static int masq_device_event(struct notifier_block *this,
 
 static int inet_cmp(struct nf_conn *ct, void *ptr)
 {
-       struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
-       struct net_device *dev = ifa->ifa_dev->dev;
        struct nf_conntrack_tuple *tuple;
+       struct masq_dev_work *w = ptr;
 
-       if (!device_cmp(ct, (void *)(long)dev->ifindex))
+       if (!device_cmp(ct, ptr))
                return 0;
 
        tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
 
-       return ifa->ifa_address == tuple->dst.u3.ip;
+       return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);
 }
 
 static int masq_inet_event(struct notifier_block *this,
                           unsigned long event,
                           void *ptr)
 {
-       struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
-       struct net *net = dev_net(idev->dev);
+       const struct in_ifaddr *ifa = ptr;
+       const struct in_device *idev;
+       const struct net_device *dev;
+       union nf_inet_addr addr;
+
+       if (event != NETDEV_DOWN)
+               return NOTIFY_DONE;
 
        /* The masq_dev_notifier will catch the case of the device going
         * down.  So if the inetdev is dead and being destroyed we have
         * no work to do.  Otherwise this is an individual address removal
         * and we have to perform the flush.
         */
+       idev = ifa->ifa_dev;
        if (idev->dead)
                return NOTIFY_DONE;
 
-       if (event == NETDEV_DOWN)
-               nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);
+       memset(&addr, 0, sizeof(addr));
+
+       addr.ip = ifa->ifa_address;
+
+       dev = idev->dev;
+       nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex,
+                            inet_cmp, GFP_KERNEL);
 
        return NOTIFY_DONE;
 }
@@ -136,8 +215,6 @@ static struct notifier_block masq_inet_notifier = {
 };
 
 #if IS_ENABLED(CONFIG_IPV6)
-static atomic_t v6_worker_count __read_mostly;
-
 static int
 nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
                       const struct in6_addr *daddr, unsigned int srcprefs,
@@ -187,40 +264,6 @@ nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
 
-struct masq_dev_work {
-       struct work_struct work;
-       struct net *net;
-       struct in6_addr addr;
-       int ifindex;
-};
-
-static int inet6_cmp(struct nf_conn *ct, void *work)
-{
-       struct masq_dev_work *w = (struct masq_dev_work *)work;
-       struct nf_conntrack_tuple *tuple;
-
-       if (!device_cmp(ct, (void *)(long)w->ifindex))
-               return 0;
-
-       tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-
-       return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);
-}
-
-static void iterate_cleanup_work(struct work_struct *work)
-{
-       struct masq_dev_work *w;
-
-       w = container_of(work, struct masq_dev_work, work);
-
-       nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0);
-
-       put_net(w->net);
-       kfree(w);
-       atomic_dec(&v6_worker_count);
-       module_put(THIS_MODULE);
-}
-
 /* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
  *
  * Defer it to the system workqueue.
@@ -233,36 +276,19 @@ static int masq_inet6_event(struct notifier_block *this,
 {
        struct inet6_ifaddr *ifa = ptr;
        const struct net_device *dev;
-       struct masq_dev_work *w;
-       struct net *net;
+       union nf_inet_addr addr;
 
-       if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16)
+       if (event != NETDEV_DOWN)
                return NOTIFY_DONE;
 
        dev = ifa->idev->dev;
-       net = maybe_get_net(dev_net(dev));
-       if (!net)
-               return NOTIFY_DONE;
 
-       if (!try_module_get(THIS_MODULE))
-               goto err_module;
+       memset(&addr, 0, sizeof(addr));
 
-       w = kmalloc(sizeof(*w), GFP_ATOMIC);
-       if (w) {
-               atomic_inc(&v6_worker_count);
-
-               INIT_WORK(&w->work, iterate_cleanup_work);
-               w->ifindex = dev->ifindex;
-               w->net = net;
-               w->addr = ifa->addr;
-               schedule_work(&w->work);
+       addr.in6 = ifa->addr;
 
-               return NOTIFY_DONE;
-       }
-
-       module_put(THIS_MODULE);
- err_module:
-       put_net(net);
+       nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,
+                            GFP_ATOMIC);
        return NOTIFY_DONE;
 }
 
index 081437dd75b7eba8bb71ac4b9ee46a6ab5c7fd40..c0851fec11d46532f87423e1ab38988bf4418172 100644 (file)
@@ -780,6 +780,7 @@ static void nf_tables_table_notify(const struct nft_ctx *ctx, int event)
 {
        struct nftables_pernet *nft_net;
        struct sk_buff *skb;
+       u16 flags = 0;
        int err;
 
        if (!ctx->report &&
@@ -790,8 +791,11 @@ static void nf_tables_table_notify(const struct nft_ctx *ctx, int event)
        if (skb == NULL)
                goto err;
 
+       if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+               flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
        err = nf_tables_fill_table_info(skb, ctx->net, ctx->portid, ctx->seq,
-                                       event, 0, ctx->family, ctx->table);
+                                       event, flags, ctx->family, ctx->table);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
@@ -1563,6 +1567,7 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
 {
        struct nftables_pernet *nft_net;
        struct sk_buff *skb;
+       u16 flags = 0;
        int err;
 
        if (!ctx->report &&
@@ -1573,8 +1578,11 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
        if (skb == NULL)
                goto err;
 
+       if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+               flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
        err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq,
-                                       event, 0, ctx->family, ctx->table,
+                                       event, flags, ctx->family, ctx->table,
                                        ctx->chain);
        if (err < 0) {
                kfree_skb(skb);
@@ -2866,8 +2874,7 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
                                    u32 flags, int family,
                                    const struct nft_table *table,
                                    const struct nft_chain *chain,
-                                   const struct nft_rule *rule,
-                                   const struct nft_rule *prule)
+                                   const struct nft_rule *rule, u64 handle)
 {
        struct nlmsghdr *nlh;
        const struct nft_expr *expr, *next;
@@ -2887,9 +2894,8 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
                         NFTA_RULE_PAD))
                goto nla_put_failure;
 
-       if (event != NFT_MSG_DELRULE && prule) {
-               if (nla_put_be64(skb, NFTA_RULE_POSITION,
-                                cpu_to_be64(prule->handle),
+       if (event != NFT_MSG_DELRULE && handle) {
+               if (nla_put_be64(skb, NFTA_RULE_POSITION, cpu_to_be64(handle),
                                 NFTA_RULE_PAD))
                        goto nla_put_failure;
        }
@@ -2925,7 +2931,10 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx,
                                  const struct nft_rule *rule, int event)
 {
        struct nftables_pernet *nft_net = nft_pernet(ctx->net);
+       const struct nft_rule *prule;
        struct sk_buff *skb;
+       u64 handle = 0;
+       u16 flags = 0;
        int err;
 
        if (!ctx->report &&
@@ -2936,9 +2945,20 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx,
        if (skb == NULL)
                goto err;
 
+       if (event == NFT_MSG_NEWRULE &&
+           !list_is_first(&rule->list, &ctx->chain->rules) &&
+           !list_is_last(&rule->list, &ctx->chain->rules)) {
+               prule = list_prev_entry(rule, list);
+               handle = prule->handle;
+       }
+       if (ctx->flags & (NLM_F_APPEND | NLM_F_REPLACE))
+               flags |= NLM_F_APPEND;
+       if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+               flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
        err = nf_tables_fill_rule_info(skb, ctx->net, ctx->portid, ctx->seq,
-                                      event, 0, ctx->family, ctx->table,
-                                      ctx->chain, rule, NULL);
+                                      event, flags, ctx->family, ctx->table,
+                                      ctx->chain, rule, handle);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
@@ -2964,6 +2984,7 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
        struct net *net = sock_net(skb->sk);
        const struct nft_rule *rule, *prule;
        unsigned int s_idx = cb->args[0];
+       u64 handle;
 
        prule = NULL;
        list_for_each_entry_rcu(rule, &chain->rules, list) {
@@ -2975,12 +2996,17 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
                        memset(&cb->args[1], 0,
                                        sizeof(cb->args) - sizeof(cb->args[0]));
                }
+               if (prule)
+                       handle = prule->handle;
+               else
+                       handle = 0;
+
                if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq,
                                        NFT_MSG_NEWRULE,
                                        NLM_F_MULTI | NLM_F_APPEND,
                                        table->family,
-                                       table, chain, rule, prule) < 0)
+                                       table, chain, rule, handle) < 0)
                        return 1;
 
                nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -3143,7 +3169,7 @@ static int nf_tables_getrule(struct sk_buff *skb, const struct nfnl_info *info,
 
        err = nf_tables_fill_rule_info(skb2, net, NETLINK_CB(skb).portid,
                                       info->nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
-                                      family, table, chain, rule, NULL);
+                                      family, table, chain, rule, 0);
        if (err < 0)
                goto err_fill_rule_info;
 
@@ -3403,17 +3429,15 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
        }
 
        if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
+               err = nft_delrule(&ctx, old_rule);
+               if (err < 0)
+                       goto err_destroy_flow_rule;
+
                trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
                if (trans == NULL) {
                        err = -ENOMEM;
                        goto err_destroy_flow_rule;
                }
-               err = nft_delrule(&ctx, old_rule);
-               if (err < 0) {
-                       nft_trans_destroy(trans);
-                       goto err_destroy_flow_rule;
-               }
-
                list_add_tail_rcu(&rule->list, &old_rule->list);
        } else {
                trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
@@ -3943,8 +3967,9 @@ static void nf_tables_set_notify(const struct nft_ctx *ctx,
                                 gfp_t gfp_flags)
 {
        struct nftables_pernet *nft_net = nft_pernet(ctx->net);
-       struct sk_buff *skb;
        u32 portid = ctx->portid;
+       struct sk_buff *skb;
+       u16 flags = 0;
        int err;
 
        if (!ctx->report &&
@@ -3955,7 +3980,10 @@ static void nf_tables_set_notify(const struct nft_ctx *ctx,
        if (skb == NULL)
                goto err;
 
-       err = nf_tables_fill_set(skb, ctx, set, event, 0);
+       if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+               flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
+       err = nf_tables_fill_set(skb, ctx, set, event, flags);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
@@ -4336,7 +4364,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
        if (ops->privsize != NULL)
                size = ops->privsize(nla, &desc);
        alloc_size = sizeof(*set) + size + udlen;
-       if (alloc_size < size)
+       if (alloc_size < size || alloc_size > INT_MAX)
                return -ENOMEM;
        set = kvzalloc(alloc_size, GFP_KERNEL);
        if (!set)
@@ -5231,12 +5259,13 @@ static int nf_tables_getsetelem(struct sk_buff *skb,
 static void nf_tables_setelem_notify(const struct nft_ctx *ctx,
                                     const struct nft_set *set,
                                     const struct nft_set_elem *elem,
-                                    int event, u16 flags)
+                                    int event)
 {
        struct nftables_pernet *nft_net;
        struct net *net = ctx->net;
        u32 portid = ctx->portid;
        struct sk_buff *skb;
+       u16 flags = 0;
        int err;
 
        if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
@@ -5246,6 +5275,9 @@ static void nf_tables_setelem_notify(const struct nft_ctx *ctx,
        if (skb == NULL)
                goto err;
 
+       if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+               flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
        err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags,
                                          set, elem);
        if (err < 0) {
@@ -6921,7 +6953,7 @@ static int nf_tables_delobj(struct sk_buff *skb, const struct nfnl_info *info,
 
 void nft_obj_notify(struct net *net, const struct nft_table *table,
                    struct nft_object *obj, u32 portid, u32 seq, int event,
-                   int family, int report, gfp_t gfp)
+                   u16 flags, int family, int report, gfp_t gfp)
 {
        struct nftables_pernet *nft_net = nft_pernet(net);
        struct sk_buff *skb;
@@ -6946,8 +6978,9 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
        if (skb == NULL)
                goto err;
 
-       err = nf_tables_fill_obj_info(skb, net, portid, seq, event, 0, family,
-                                     table, obj, false);
+       err = nf_tables_fill_obj_info(skb, net, portid, seq, event,
+                                     flags & (NLM_F_CREATE | NLM_F_EXCL),
+                                     family, table, obj, false);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
@@ -6964,7 +6997,7 @@ static void nf_tables_obj_notify(const struct nft_ctx *ctx,
                                 struct nft_object *obj, int event)
 {
        nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event,
-                      ctx->family, ctx->report, GFP_KERNEL);
+                      ctx->flags, ctx->family, ctx->report, GFP_KERNEL);
 }
 
 /*
@@ -7745,6 +7778,7 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
 {
        struct nftables_pernet *nft_net = nft_pernet(ctx->net);
        struct sk_buff *skb;
+       u16 flags = 0;
        int err;
 
        if (!ctx->report &&
@@ -7755,8 +7789,11 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
        if (skb == NULL)
                goto err;
 
+       if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL))
+               flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL);
+
        err = nf_tables_fill_flowtable_info(skb, ctx->net, ctx->portid,
-                                           ctx->seq, event, 0,
+                                           ctx->seq, event, flags,
                                            ctx->family, flowtable, hook_list);
        if (err < 0) {
                kfree_skb(skb);
@@ -8634,7 +8671,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                        nft_setelem_activate(net, te->set, &te->elem);
                        nf_tables_setelem_notify(&trans->ctx, te->set,
                                                 &te->elem,
-                                                NFT_MSG_NEWSETELEM, 0);
+                                                NFT_MSG_NEWSETELEM);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_DELSETELEM:
@@ -8642,7 +8679,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 
                        nf_tables_setelem_notify(&trans->ctx, te->set,
                                                 &te->elem,
-                                                NFT_MSG_DELSETELEM, 0);
+                                                NFT_MSG_DELSETELEM);
                        nft_setelem_remove(net, te->set, &te->elem);
                        if (!nft_setelem_is_catchall(te->set, &te->elem)) {
                                atomic_dec(&te->set->nelems);
@@ -9599,7 +9636,6 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
                table->use--;
                nf_tables_chain_destroy(&ctx);
        }
-       list_del(&table->list);
        nf_tables_table_destroy(&ctx);
 }
 
@@ -9612,6 +9648,8 @@ static void __nft_release_tables(struct net *net)
                if (nft_table_has_owner(table))
                        continue;
 
+               list_del(&table->list);
+
                __nft_release_table(net, table);
        }
 }
@@ -9619,31 +9657,38 @@ static void __nft_release_tables(struct net *net)
 static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
                            void *ptr)
 {
+       struct nft_table *table, *to_delete[8];
        struct nftables_pernet *nft_net;
        struct netlink_notify *n = ptr;
-       struct nft_table *table, *nt;
        struct net *net = n->net;
-       bool release = false;
+       unsigned int deleted;
+       bool restart = false;
 
        if (event != NETLINK_URELEASE || n->protocol != NETLINK_NETFILTER)
                return NOTIFY_DONE;
 
        nft_net = nft_pernet(net);
+       deleted = 0;
        mutex_lock(&nft_net->commit_mutex);
+again:
        list_for_each_entry(table, &nft_net->tables, list) {
                if (nft_table_has_owner(table) &&
                    n->portid == table->nlpid) {
                        __nft_release_hook(net, table);
-                       release = true;
+                       list_del_rcu(&table->list);
+                       to_delete[deleted++] = table;
+                       if (deleted >= ARRAY_SIZE(to_delete))
+                               break;
                }
        }
-       if (release) {
+       if (deleted) {
+               restart = deleted >= ARRAY_SIZE(to_delete);
                synchronize_rcu();
-               list_for_each_entry_safe(table, nt, &nft_net->tables, list) {
-                       if (nft_table_has_owner(table) &&
-                           n->portid == table->nlpid)
-                               __nft_release_table(net, table);
-               }
+               while (deleted)
+                       __nft_release_table(net, to_delete[--deleted]);
+
+               if (restart)
+                       goto again;
        }
        mutex_unlock(&nft_net->commit_mutex);
 
index 272bcdb1392dfc830e9bdd3b0a6f54d7b19ca954..f69cc73c581305dfb59963ce15d8716ea7bb52fb 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/netfilter_bridge/ebtables.h>
 #include <linux/netfilter_arp/arp_tables.h>
 #include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_log.h>
 
 /* Used for matches where *info is larger than X byte */
 #define NFT_MATCH_LARGE_THRESH 192
@@ -257,8 +258,22 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        nft_compat_wait_for_destructors();
 
        ret = xt_check_target(&par, size, proto, inv);
-       if (ret < 0)
+       if (ret < 0) {
+               if (ret == -ENOENT) {
+                       const char *modname = NULL;
+
+                       if (strcmp(target->name, "LOG") == 0)
+                               modname = "nf_log_syslog";
+                       else if (strcmp(target->name, "NFLOG") == 0)
+                               modname = "nfnetlink_log";
+
+                       if (modname &&
+                           nft_request_module(ctx->net, "%s", modname) == -EAGAIN)
+                               return -EAGAIN;
+               }
+
                return ret;
+       }
 
        /* The standard target cannot be used */
        if (!target->target)
index 0363f533a42b8e8ac8b19bd10c197cabc206ea54..c4d1389f7185aef1429f2864eec8a673bb49b57e 100644 (file)
@@ -60,7 +60,7 @@ static void nft_quota_obj_eval(struct nft_object *obj,
        if (overquota &&
            !test_and_set_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
                nft_obj_notify(nft_net(pkt), obj->key.table, obj, 0, 0,
-                              NFT_MSG_NEWOBJ, nft_pf(pkt), 0, GFP_ATOMIC);
+                              NFT_MSG_NEWOBJ, 0, nft_pf(pkt), 0, GFP_ATOMIC);
 }
 
 static int nft_quota_do_init(const struct nlattr * const tb[],
index 2ff75f7637b09dddf0e9b91ccd22b9b41dc6d05d..f39244f9c0ed94348b1d16ff468c1f78da1a0766 100644 (file)
@@ -44,6 +44,7 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par)
 static int log_tg_check(const struct xt_tgchk_param *par)
 {
        const struct xt_log_info *loginfo = par->targinfo;
+       int ret;
 
        if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6)
                return -EINVAL;
@@ -58,7 +59,14 @@ static int log_tg_check(const struct xt_tgchk_param *par)
                return -EINVAL;
        }
 
-       return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+       ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+       if (ret != 0 && !par->nft_compat) {
+               request_module("%s", "nf_log_syslog");
+
+               ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+       }
+
+       return ret;
 }
 
 static void log_tg_destroy(const struct xt_tgdtor_param *par)
index fb579320805984d6c51d2d24bbcfd71f737bea33..e660c3710a10968909b98b5236536996cbb8462b 100644 (file)
@@ -42,13 +42,21 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
 static int nflog_tg_check(const struct xt_tgchk_param *par)
 {
        const struct xt_nflog_info *info = par->targinfo;
+       int ret;
 
        if (info->flags & ~XT_NFLOG_MASK)
                return -EINVAL;
        if (info->prefix[sizeof(info->prefix) - 1] != '\0')
                return -EINVAL;
 
-       return nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
+       ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
+       if (ret != 0 && !par->nft_compat) {
+               request_module("%s", "nfnetlink_log");
+
+               ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
+       }
+
+       return ret;
 }
 
 static void nflog_tg_destroy(const struct xt_tgdtor_param *par)
index 24b7cf447bc55b21e74cd82ba19528a575956981..ada47e59647a03e69f64a2ea935b5e8ec9785fe5 100644 (file)
@@ -594,7 +594,10 @@ static int netlink_insert(struct sock *sk, u32 portid)
 
        /* We need to ensure that the socket is hashed and visible. */
        smp_wmb();
-       nlk_sk(sk)->bound = portid;
+       /* Paired with lockless reads from netlink_bind(),
+        * netlink_connect() and netlink_sendmsg().
+        */
+       WRITE_ONCE(nlk_sk(sk)->bound, portid);
 
 err:
        release_sock(sk);
@@ -1012,7 +1015,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
        if (nlk->ngroups < BITS_PER_LONG)
                groups &= (1UL << nlk->ngroups) - 1;
 
-       bound = nlk->bound;
+       /* Paired with WRITE_ONCE() in netlink_insert() */
+       bound = READ_ONCE(nlk->bound);
        if (bound) {
                /* Ensure nlk->portid is up-to-date. */
                smp_rmb();
@@ -1098,8 +1102,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
 
        /* No need for barriers here as we return to user-space without
         * using any of the bound attributes.
+        * Paired with WRITE_ONCE() in netlink_insert().
         */
-       if (!nlk->bound)
+       if (!READ_ONCE(nlk->bound))
                err = netlink_autobind(sock);
 
        if (err == 0) {
@@ -1888,7 +1893,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                dst_group = nlk->dst_group;
        }
 
-       if (!nlk->bound) {
+       /* Paired with WRITE_ONCE() in netlink_insert() */
+       if (!READ_ONCE(nlk->bound)) {
                err = netlink_autobind(sock);
                if (err)
                        goto out;
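
Editor's note: these hunks pair the existing smp_wmb()/smp_rmb() barriers with WRITE_ONCE()/READ_ONCE() so the lockless readers of nlk->bound are well-defined. The same publish-then-read shape can be written in portable C11 with release/acquire atomics, as in the hedged sketch below; the struct and function names (sock_slot, publish_bind, try_use) are invented for illustration and the single-threaded main only demonstrates the calls.

/* Sketch of "publish a flag after the data it guards", the pattern expressed
 * by the WRITE_ONCE/READ_ONCE + barrier pairing in netlink_insert() and its
 * readers. C11 release/acquire atomics stand in for the kernel macros. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct sock_slot {
        unsigned int portid;            /* payload written before publication */
        atomic_bool bound;              /* publication flag, read locklessly */
};

/* Writer: fill in the payload, then publish with release semantics. */
static void publish_bind(struct sock_slot *s, unsigned int portid)
{
        s->portid = portid;
        atomic_store_explicit(&s->bound, true, memory_order_release);
}

/* Reader: acquire-load the flag; if set, the payload is guaranteed visible. */
static bool try_use(struct sock_slot *s)
{
        if (!atomic_load_explicit(&s->bound, memory_order_acquire))
                return false;           /* caller would fall back to autobind here */

        printf("using portid %u\n", s->portid);
        return true;
}

int main(void)
{
        struct sock_slot s = { .portid = 0, .bound = false };

        /* In real use the writer and reader run on different threads. */
        if (!try_use(&s))
                publish_bind(&s, 1234);
        try_use(&s);
        return 0;
}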
index 6024fad905ff04f98d3788efe15815c2ea9d7526..dda323e0a4730b2eca2a7c5516ee01eb1cdd7c6e 100644 (file)
@@ -60,6 +60,9 @@ int nfc_proto_register(const struct nfc_protocol *nfc_proto)
                proto_tab[nfc_proto->id] = nfc_proto;
        write_unlock(&proto_tab_lock);
 
+       if (rc)
+               proto_unregister(nfc_proto->proto);
+
        return rc;
 }
 EXPORT_SYMBOL(nfc_proto_register);
index fefc03674f4f8333bd657d102409f5f2161ccbc2..d63d2e5dc60c97e46ae977674e113b019169682b 100644 (file)
@@ -277,6 +277,7 @@ int digital_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param)
 static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech)
 {
        struct digital_tg_mdaa_params *params;
+       int rc;
 
        params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (!params)
@@ -291,8 +292,12 @@ static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech)
        get_random_bytes(params->nfcid2 + 2, NFC_NFCID2_MAXSIZE - 2);
        params->sc = DIGITAL_SENSF_FELICA_SC;
 
-       return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params,
-                               500, digital_tg_recv_atr_req, NULL);
+       rc = digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params,
+                             500, digital_tg_recv_atr_req, NULL);
+       if (rc)
+               kfree(params);
+
+       return rc;
 }
 
 static int digital_tg_listen_md(struct nfc_digital_dev *ddev, u8 rf_tech)
index 84d2345c75a3f2a9e9b89dade25421b1d4a8acab..3adf4589852affc6cd3665857ac0eba0ff21b641 100644 (file)
@@ -465,8 +465,12 @@ static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev,
        skb_put_u8(skb, sel_cmd);
        skb_put_u8(skb, DIGITAL_SDD_REQ_SEL_PAR);
 
-       return digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res,
-                                  target);
+       rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res,
+                                target);
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
 }
 
 static void digital_in_recv_sens_res(struct nfc_digital_dev *ddev, void *arg,
index a2e72c0038050dee96fbb865b3df504d31338249..b911ab78bed9aa14e30ea670e978987d7f827bd1 100644 (file)
@@ -334,6 +334,8 @@ static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev,
                                                         ndev->cur_conn_id);
                if (conn_info) {
                        list_del(&conn_info->list);
+                       if (conn_info == ndev->rf_conn_info)
+                               ndev->rf_conn_info = NULL;
                        devm_kfree(&ndev->nfc_dev->dev, conn_info);
                }
        }
index 543365f58e9735338b16d5e8f4c824fbaec92c9d..2a2bc64f75cfd86af81aed0cc9841503775f96d4 100644 (file)
@@ -46,6 +46,8 @@
  *                                     Copyright (C) 2011, <lokec@ccs.neu.edu>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/ethtool.h>
 #include <linux/types.h>
 #include <linux/mm.h>
index 23b21253b3c3306a28f49ef19df77d48f265dbf6..eb6345a027e1302ee4ee9407a87ddc7db9636e86 100644 (file)
@@ -2188,18 +2188,24 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 
        arg->count = arg->skip;
 
+       rcu_read_lock();
        idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
                /* don't return filters that are being deleted */
                if (!refcount_inc_not_zero(&f->refcnt))
                        continue;
+               rcu_read_unlock();
+
                if (arg->fn(tp, f, arg) < 0) {
                        __fl_put(f);
                        arg->stop = 1;
+                       rcu_read_lock();
                        break;
                }
                __fl_put(f);
                arg->count++;
+               rcu_read_lock();
        }
+       rcu_read_unlock();
        arg->cookie = id;
 }
 
index 5e90e9b160e3d04033594cebde5e2cf3733317b9..12f39a2dffd47542dd699d1b04b97d85c113adab 100644 (file)
@@ -513,6 +513,12 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
                return stab;
        }
 
+       if (s->size_log > STAB_SIZE_LOG_MAX ||
+           s->cell_log > STAB_SIZE_LOG_MAX) {
+               NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
+               return ERR_PTR(-EINVAL);
+       }
+
        stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
        if (!stab)
                return ERR_PTR(-ENOMEM);
index a579a4131d22d771c9766f5ad6cdb16ece3034c0..e1040421b79797fefaa26b8d7d3f44b91896e1de 100644 (file)
@@ -233,6 +233,9 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
        if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
                return 0;
 
+       if (!q->ops->change)
+               return 0;
+
        nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
        if (nla) {
                nla->nla_type = RTM_NEWQDISC;
index 8766ab5b8788042df03b0b8cdb9e35734ef1a8a7..5eb3b1b7ae5e7886789c010548e43bcff6cf4172 100644 (file)
@@ -529,22 +529,28 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                for (i = tc.offset; i < tc.offset + tc.count; i++) {
                        struct netdev_queue *q = netdev_get_tx_queue(dev, i);
                        struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
-                       struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
-                       struct gnet_stats_queue __percpu *cpu_qstats = NULL;
 
                        spin_lock_bh(qdisc_lock(qdisc));
+
                        if (qdisc_is_percpu_stats(qdisc)) {
-                               cpu_bstats = qdisc->cpu_bstats;
-                               cpu_qstats = qdisc->cpu_qstats;
+                               qlen = qdisc_qlen_sum(qdisc);
+
+                               __gnet_stats_copy_basic(NULL, &bstats,
+                                                       qdisc->cpu_bstats,
+                                                       &qdisc->bstats);
+                               __gnet_stats_copy_queue(&qstats,
+                                                       qdisc->cpu_qstats,
+                                                       &qdisc->qstats,
+                                                       qlen);
+                       } else {
+                               qlen            += qdisc->q.qlen;
+                               bstats.bytes    += qdisc->bstats.bytes;
+                               bstats.packets  += qdisc->bstats.packets;
+                               qstats.backlog  += qdisc->qstats.backlog;
+                               qstats.drops    += qdisc->qstats.drops;
+                               qstats.requeues += qdisc->qstats.requeues;
+                               qstats.overlimits += qdisc->qstats.overlimits;
                        }
-
-                       qlen = qdisc_qlen_sum(qdisc);
-                       __gnet_stats_copy_basic(NULL, &sch->bstats,
-                                               cpu_bstats, &qdisc->bstats);
-                       __gnet_stats_copy_queue(&sch->qstats,
-                                               cpu_qstats,
-                                               &qdisc->qstats,
-                                               qlen);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
 
index 1ab2fc933a214d04dfff763d2c5de65f4a67374a..b9fd18d986464f317a9fb7ce709a9728ffb75751 100644 (file)
@@ -1641,6 +1641,10 @@ static void taprio_destroy(struct Qdisc *sch)
        list_del(&q->taprio_list);
        spin_unlock(&taprio_list_lock);
 
+       /* Note that taprio_reset() might not be called if an error
+        * happens in qdisc_create(), after taprio_init() has been called.
+        */
+       hrtimer_cancel(&q->advance_timer);
 
        taprio_disable_offload(dev, q, NULL);
 
index 5ef86fdb11769d9c8a32219c5c7361fc34217b02..1f1786021d9c81fb0ebd27b027fc479faf1e72f5 100644 (file)
@@ -702,7 +702,7 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
                ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
 
                /* Break out if chunk length is less than minimal. */
-               if (ntohs(ch->length) < sizeof(_ch))
+               if (!ch || ntohs(ch->length) < sizeof(_ch))
                        break;
 
                ch_end = offset + SCTP_PAD4(ntohs(ch->length));
index b8fa8f1a727704df07e78c0b72e77a4ecdd76b76..c7503fd6491593fc749b9af182c646a39ed2ddbc 100644 (file)
@@ -3697,7 +3697,7 @@ struct sctp_chunk *sctp_make_strreset_req(
        outlen = (sizeof(outreq) + stream_len) * out;
        inlen = (sizeof(inreq) + stream_len) * in;
 
-       retval = sctp_make_reconf(asoc, outlen + inlen);
+       retval = sctp_make_reconf(asoc, SCTP_PAD4(outlen) + SCTP_PAD4(inlen));
        if (!retval)
                return NULL;
 
index f23f558054a7cba4fa29e430bbd6729074a55611..99acd337ba90d828c6f11099fe2a54ac91055eaa 100644 (file)
@@ -150,9 +150,11 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
 
 again:
        link = conn->lnk;
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_cdc_get_free_slot(conn, link, &wr_buf, NULL, &pend);
        if (rc)
-               return rc;
+               goto put_out;
 
        spin_lock_bh(&conn->send_lock);
        if (link != conn->lnk) {
@@ -160,6 +162,7 @@ again:
                spin_unlock_bh(&conn->send_lock);
                smc_wr_tx_put_slot(link,
                                   (struct smc_wr_tx_pend_priv *)pend);
+               smc_wr_tx_link_put(link);
                if (again)
                        return -ENOLINK;
                again = true;
@@ -167,6 +170,8 @@ again:
        }
        rc = smc_cdc_msg_send(conn, wr_buf, pend);
        spin_unlock_bh(&conn->send_lock);
+put_out:
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
index e286dafd6e886b53162ea121b5d86878b9f283c6..6ec1ebe878ae0ee967da04f76757261854012372 100644 (file)
@@ -230,7 +230,8 @@ static int smc_clc_prfx_set(struct socket *clcsock,
                goto out_rel;
        }
        /* get address to which the internal TCP socket is bound */
-       kernel_getsockname(clcsock, (struct sockaddr *)&addrs);
+       if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)
+               goto out_rel;
        /* analyze IP specific data of net_device belonging to TCP socket */
        addr6 = (struct sockaddr_in6 *)&addrs;
        rcu_read_lock();
index af227b65669eb60bb5c2c87d8c020991d498b21b..d2206743dc714863472f23a0cf70166569491ca0 100644 (file)
@@ -949,7 +949,7 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
                to_lnk = &lgr->lnk[i];
                break;
        }
-       if (!to_lnk) {
+       if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
                smc_lgr_terminate_sched(lgr);
                return NULL;
        }
@@ -981,24 +981,26 @@ again:
                read_unlock_bh(&lgr->conns_lock);
                /* pre-fetch buffer outside of send_lock, might sleep */
                rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
-               if (rc) {
-                       smcr_link_down_cond_sched(to_lnk);
-                       return NULL;
-               }
+               if (rc)
+                       goto err_out;
                /* avoid race with smcr_tx_sndbuf_nonempty() */
                spin_lock_bh(&conn->send_lock);
                smc_switch_link_and_count(conn, to_lnk);
                rc = smc_switch_cursor(smc, pend, wr_buf);
                spin_unlock_bh(&conn->send_lock);
                sock_put(&smc->sk);
-               if (rc) {
-                       smcr_link_down_cond_sched(to_lnk);
-                       return NULL;
-               }
+               if (rc)
+                       goto err_out;
                goto again;
        }
        read_unlock_bh(&lgr->conns_lock);
+       smc_wr_tx_link_put(to_lnk);
        return to_lnk;
+
+err_out:
+       smcr_link_down_cond_sched(to_lnk);
+       smc_wr_tx_link_put(to_lnk);
+       return NULL;
 }
 
 static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
@@ -1474,7 +1476,9 @@ static void smc_conn_abort_work(struct work_struct *work)
                                                   abort_work);
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
 
+       lock_sock(&smc->sk);
        smc_conn_kill(conn, true);
+       release_sock(&smc->sk);
        sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
 }
 
index 2e7560eba9812635c22f18f41aafc38b063dd02a..72f4b72eb1753a899f164e10aa1e75620c7e5bfa 100644 (file)
@@ -383,9 +383,11 @@ int smc_llc_send_confirm_link(struct smc_link *link,
        struct smc_wr_buf *wr_buf;
        int rc;
 
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
        memset(confllc, 0, sizeof(*confllc));
        confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
@@ -402,6 +404,8 @@ int smc_llc_send_confirm_link(struct smc_link *link,
        confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
        /* send llc message */
        rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
@@ -415,9 +419,11 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
        struct smc_link *link;
        int i, rc, rtok_ix;
 
+       if (!smc_wr_tx_link_hold(send_link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
        memset(rkeyllc, 0, sizeof(*rkeyllc));
        rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
@@ -444,6 +450,8 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
                (u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
        /* send llc message */
        rc = smc_wr_tx_send(send_link, pend);
+put_out:
+       smc_wr_tx_link_put(send_link);
        return rc;
 }
 
@@ -456,9 +464,11 @@ static int smc_llc_send_delete_rkey(struct smc_link *link,
        struct smc_wr_buf *wr_buf;
        int rc;
 
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
        memset(rkeyllc, 0, sizeof(*rkeyllc));
        rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
@@ -467,6 +477,8 @@ static int smc_llc_send_delete_rkey(struct smc_link *link,
        rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
        /* send llc message */
        rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
@@ -480,9 +492,11 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
        struct smc_wr_buf *wr_buf;
        int rc;
 
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        addllc = (struct smc_llc_msg_add_link *)wr_buf;
 
        memset(addllc, 0, sizeof(*addllc));
@@ -504,6 +518,8 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
        }
        /* send llc message */
        rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
@@ -517,9 +533,11 @@ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
        struct smc_wr_buf *wr_buf;
        int rc;
 
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        delllc = (struct smc_llc_msg_del_link *)wr_buf;
 
        memset(delllc, 0, sizeof(*delllc));
@@ -536,6 +554,8 @@ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
        delllc->reason = htonl(reason);
        /* send llc message */
        rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
@@ -547,9 +567,11 @@ static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
        struct smc_wr_buf *wr_buf;
        int rc;
 
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        testllc = (struct smc_llc_msg_test_link *)wr_buf;
        memset(testllc, 0, sizeof(*testllc));
        testllc->hd.common.type = SMC_LLC_TEST_LINK;
@@ -557,6 +579,8 @@ static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
        memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
        /* send llc message */
        rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
@@ -567,13 +591,16 @@ static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
        struct smc_wr_buf *wr_buf;
        int rc;
 
-       if (!smc_link_usable(link))
+       if (!smc_wr_tx_link_hold(link))
                return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
-       return smc_wr_tx_send(link, pend);
+       rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
+       return rc;
 }
 
 /* schedule an llc send on link, may wait for buffers,
@@ -586,13 +613,16 @@ static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
        struct smc_wr_buf *wr_buf;
        int rc;
 
-       if (!smc_link_usable(link))
+       if (!smc_wr_tx_link_hold(link))
                return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
-       return smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
+       rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
+put_out:
+       smc_wr_tx_link_put(link);
+       return rc;
 }
 
 /********************************* receive ***********************************/
@@ -672,9 +702,11 @@ static int smc_llc_add_link_cont(struct smc_link *link,
        struct smc_buf_desc *rmb;
        u8 n;
 
+       if (!smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
        if (rc)
-               return rc;
+               goto put_out;
        addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
        memset(addc_llc, 0, sizeof(*addc_llc));
 
@@ -706,7 +738,10 @@ static int smc_llc_add_link_cont(struct smc_link *link,
        addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
        if (lgr->role == SMC_CLNT)
                addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
-       return smc_wr_tx_send(link, pend);
+       rc = smc_wr_tx_send(link, pend);
+put_out:
+       smc_wr_tx_link_put(link);
+       return rc;
 }
 
 static int smc_llc_cli_rkey_exchange(struct smc_link *link,
index c79361dfcdfb9f21d9541a7249b84227ca603b38..738a4a99c82797b7b5d28afaba5208135e1983ad 100644 (file)
@@ -496,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
 /* Wakeup sndbuf consumers from any context (IRQ or process)
  * since there is more data to transmit; usable snd_wnd as max transmit
  */
-static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
 {
        struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
        struct smc_link *link = conn->lnk;
@@ -505,8 +505,11 @@ static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
        struct smc_wr_buf *wr_buf;
        int rc;
 
+       if (!link || !smc_wr_tx_link_hold(link))
+               return -ENOLINK;
        rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend);
        if (rc < 0) {
+               smc_wr_tx_link_put(link);
                if (rc == -EBUSY) {
                        struct smc_sock *smc =
                                container_of(conn, struct smc_sock, conn);
@@ -547,22 +550,7 @@ static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
 
 out_unlock:
        spin_unlock_bh(&conn->send_lock);
-       return rc;
-}
-
-static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
-{
-       struct smc_link *link = conn->lnk;
-       int rc = -ENOLINK;
-
-       if (!link)
-               return rc;
-
-       atomic_inc(&link->wr_tx_refcnt);
-       if (smc_link_usable(link))
-               rc = _smcr_tx_sndbuf_nonempty(conn);
-       if (atomic_dec_and_test(&link->wr_tx_refcnt))
-               wake_up_all(&link->wr_tx_wait);
+       smc_wr_tx_link_put(link);
        return rc;
 }
 
index 423b8709f1c9e4fd2462dc2bb929ebc24e01f454..2bc626f230a56dca33fb5617972443c8aecf98ee 100644 (file)
@@ -60,6 +60,20 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
        atomic_long_set(wr_tx_id, val);
 }
 
+static inline bool smc_wr_tx_link_hold(struct smc_link *link)
+{
+       if (!smc_link_usable(link))
+               return false;
+       atomic_inc(&link->wr_tx_refcnt);
+       return true;
+}
+
+static inline void smc_wr_tx_link_put(struct smc_link *link)
+{
+       if (atomic_dec_and_test(&link->wr_tx_refcnt))
+               wake_up_all(&link->wr_tx_wait);
+}
+
 static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
 {
        wake_up_all(&lnk->wr_tx_wait);
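
Editor's note: smc_wr_tx_link_hold()/smc_wr_tx_link_put() above wrap every send path in a usability check plus a reference count, and the last put wakes anyone waiting for the link to drain. The compact user-space analogue below shows that hold/put-with-wakeup shape with a mutex/condvar pair; the names (link_ref, link_hold, link_put, send_on_link) are invented and this is not the SMC code itself.

/* Sketch of a hold/put reference with "wake waiters on last put". */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct link_ref {
        pthread_mutex_t lock;
        pthread_cond_t  drained;
        int             refcnt;
        bool            usable;         /* stand-in for smc_link_usable() */
};

static bool link_hold(struct link_ref *l)
{
        bool ok;

        pthread_mutex_lock(&l->lock);
        ok = l->usable;
        if (ok)
                l->refcnt++;
        pthread_mutex_unlock(&l->lock);
        return ok;                      /* caller bails out with -ENOLINK if false */
}

static void link_put(struct link_ref *l)
{
        pthread_mutex_lock(&l->lock);
        if (--l->refcnt == 0)
                pthread_cond_broadcast(&l->drained);    /* like wake_up_all() */
        pthread_mutex_unlock(&l->lock);
}

static void send_on_link(struct link_ref *l)
{
        if (!link_hold(l))
                return;                 /* link already going away */
        puts("sending while the link is pinned");
        link_put(l);
}

int main(void)
{
        struct link_ref l = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .drained = PTHREAD_COND_INITIALIZER,
                .refcnt = 0,
                .usable = true,
        };

        send_on_link(&l);
        return 0;
}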
index 3e776e3dff913f6a723ecbae6419ed2a595674ab..1f2817195549be6c39ce13cf8c6bfb100fd556f0 100644 (file)
@@ -645,7 +645,7 @@ static bool gss_check_seq_num(const struct svc_rqst *rqstp, struct rsc *rsci,
                }
                __set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
                goto ok;
-       } else if (seq_num <= sd->sd_max - GSS_SEQ_WIN) {
+       } else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) {
                goto toolow;
        }
        if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
index a0a27d87f6312d95942f93846852eebb448e066a..ad570c2450be8bf2106710d3ec40ad88e21d6b17 100644 (file)
@@ -2423,7 +2423,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
                            u32 dport, struct sk_buff_head *xmitq)
 {
-       unsigned long time_limit = jiffies + 2;
+       unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
        struct sk_buff *skb;
        unsigned int lim;
        atomic_t *dcnt;
index eb47b9de23809ac9216aae46c2fb1eae4543c890..89f9e85ae970d410774e720faa0987c069aafdab 100644 (file)
@@ -608,20 +608,42 @@ static void unix_release_sock(struct sock *sk, int embrion)
 
 static void init_peercred(struct sock *sk)
 {
-       put_pid(sk->sk_peer_pid);
-       if (sk->sk_peer_cred)
-               put_cred(sk->sk_peer_cred);
+       const struct cred *old_cred;
+       struct pid *old_pid;
+
+       spin_lock(&sk->sk_peer_lock);
+       old_pid = sk->sk_peer_pid;
+       old_cred = sk->sk_peer_cred;
        sk->sk_peer_pid  = get_pid(task_tgid(current));
        sk->sk_peer_cred = get_current_cred();
+       spin_unlock(&sk->sk_peer_lock);
+
+       put_pid(old_pid);
+       put_cred(old_cred);
 }
 
 static void copy_peercred(struct sock *sk, struct sock *peersk)
 {
-       put_pid(sk->sk_peer_pid);
-       if (sk->sk_peer_cred)
-               put_cred(sk->sk_peer_cred);
+       const struct cred *old_cred;
+       struct pid *old_pid;
+
+       if (sk < peersk) {
+               spin_lock(&sk->sk_peer_lock);
+               spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+       } else {
+               spin_lock(&peersk->sk_peer_lock);
+               spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+       }
+       old_pid = sk->sk_peer_pid;
+       old_cred = sk->sk_peer_cred;
        sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
        sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
+
+       spin_unlock(&sk->sk_peer_lock);
+       spin_unlock(&peersk->sk_peer_lock);
+
+       put_pid(old_pid);
+       put_cred(old_cred);
 }
 
 static int unix_listen(struct socket *sock, int backlog)
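
Editor's note: copy_peercred() above now takes both sockets' sk_peer_lock, choosing the locking order by comparing the socket pointers so two tasks copying in opposite directions cannot deadlock against each other. The small user-space sketch below shows that address-ordered locking idiom with pthread mutexes; the names (peer, lock_pair, copy_cred) are invented and this is not the af_unix code.

/* Sketch of "lock two objects in a global order derived from their addresses"
 * to avoid an ABBA deadlock, as copy_peercred() does with sk_peer_lock. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct peer {
        pthread_mutex_t lock;
        int cred;
};

static void lock_pair(struct peer *a, struct peer *b)
{
        /* Always take the lower-addressed lock first so every caller agrees
         * on the order, no matter which peer it starts from. */
        if ((uintptr_t)a < (uintptr_t)b) {
                pthread_mutex_lock(&a->lock);
                pthread_mutex_lock(&b->lock);
        } else {
                pthread_mutex_lock(&b->lock);
                pthread_mutex_lock(&a->lock);
        }
}

static void unlock_pair(struct peer *a, struct peer *b)
{
        pthread_mutex_unlock(&a->lock);
        pthread_mutex_unlock(&b->lock);
}

static void copy_cred(struct peer *dst, struct peer *src)
{
        lock_pair(dst, src);
        dst->cred = src->cred;          /* both sides are stable while held */
        unlock_pair(dst, src);
}

int main(void)
{
        struct peer a = { PTHREAD_MUTEX_INITIALIZER, 1 };
        struct peer b = { PTHREAD_MUTEX_INITIALIZER, 2 };

        copy_cred(&a, &b);
        printf("a.cred = %d\n", a.cred);
        return 0;
}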
@@ -806,7 +828,7 @@ static void unix_unhash(struct sock *sk)
 }
 
 struct proto unix_dgram_proto = {
-       .name                   = "UNIX-DGRAM",
+       .name                   = "UNIX",
        .owner                  = THIS_MODULE,
        .obj_size               = sizeof(struct unix_sock),
        .close                  = unix_close,
@@ -828,20 +850,25 @@ struct proto unix_stream_proto = {
 
 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
 {
-       struct sock *sk = NULL;
        struct unix_sock *u;
+       struct sock *sk;
+       int err;
 
        atomic_long_inc(&unix_nr_socks);
-       if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
-               goto out;
+       if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
+               err = -ENFILE;
+               goto err;
+       }
 
        if (type == SOCK_STREAM)
                sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
        else /*dgram and  seqpacket */
                sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
 
-       if (!sk)
-               goto out;
+       if (!sk) {
+               err = -ENOMEM;
+               goto err;
+       }
 
        sock_init_data(sock, sk);
 
@@ -861,20 +888,23 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
        init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
        memset(&u->scm_stat, 0, sizeof(struct scm_stat));
        unix_insert_socket(unix_sockets_unbound(sk), sk);
-out:
-       if (sk == NULL)
-               atomic_long_dec(&unix_nr_socks);
-       else {
-               local_bh_disable();
-               sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-               local_bh_enable();
-       }
+
+       local_bh_disable();
+       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+       local_bh_enable();
+
        return sk;
+
+err:
+       atomic_long_dec(&unix_nr_socks);
+       return ERR_PTR(err);
 }
 
 static int unix_create(struct net *net, struct socket *sock, int protocol,
                       int kern)
 {
+       struct sock *sk;
+
        if (protocol && protocol != PF_UNIX)
                return -EPROTONOSUPPORT;
 
@@ -901,7 +931,11 @@ static int unix_create(struct net *net, struct socket *sock, int protocol,
                return -ESOCKTNOSUPPORT;
        }
 
-       return unix_create1(net, sock, kern, sock->type) ? 0 : -ENOMEM;
+       sk = unix_create1(net, sock, kern, sock->type);
+       if (IS_ERR(sk))
+               return PTR_ERR(sk);
+
+       return 0;
 }
 
 static int unix_release(struct socket *sock)
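The unix_create1() changes above switch from returning NULL on any failure to the ERR_PTR convention, so callers such as unix_create() and unix_stream_connect() can propagate the exact errno. A minimal sketch of that convention, assuming placeholder names (make_thing(), use_thing()) rather than real kernel helpers:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct thing { int id; };

static struct thing *make_thing(int id)
{
	struct thing *t;

	if (id < 0)
		return ERR_PTR(-EINVAL);	/* encode the errno in the pointer */

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	t->id = id;
	return t;
}

static int use_thing(int id)
{
	struct thing *t = make_thing(id);

	if (IS_ERR(t))
		return PTR_ERR(t);		/* propagate the precise error code */

	kfree(t);
	return 0;
}
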
@@ -1314,12 +1348,15 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
           we will have to recheck all again in any case.
         */
 
-       err = -ENOMEM;
-
        /* create new sock for complete connection */
        newsk = unix_create1(sock_net(sk), NULL, 0, sock->type);
-       if (newsk == NULL)
+       if (IS_ERR(newsk)) {
+               err = PTR_ERR(newsk);
+               newsk = NULL;
                goto out;
+       }
+
+       err = -ENOMEM;
 
        /* Allocate skb for sending to listening sock */
        skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
@@ -2845,6 +2882,9 @@ static int unix_shutdown(struct socket *sock, int mode)
 
        unix_state_lock(sk);
        sk->sk_shutdown |= mode;
+       if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
+           mode == SHUTDOWN_MASK)
+               sk->sk_state = TCP_CLOSE;
        other = unix_peer(sk);
        if (other)
                sock_hold(other);
@@ -2867,12 +2907,10 @@ static int unix_shutdown(struct socket *sock, int mode)
                other->sk_shutdown |= peer_mode;
                unix_state_unlock(other);
                other->sk_state_change(other);
-               if (peer_mode == SHUTDOWN_MASK) {
+               if (peer_mode == SHUTDOWN_MASK)
                        sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
-                       other->sk_state = TCP_CLOSE;
-               } else if (peer_mode & RCV_SHUTDOWN) {
+               else if (peer_mode & RCV_SHUTDOWN)
                        sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
-               }
        }
        if (other)
                sock_put(other);
@@ -3073,7 +3111,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 
                other = unix_peer(sk);
                if (other && unix_peer(other) != sk &&
-                   unix_recvq_full(other) &&
+                   unix_recvq_full_lockless(other) &&
                    unix_dgram_peer_wake_me(sk, other))
                        writable = 0;
 
index 03b66d154b2bfdc5ae63017532787f85350b56e8..3a3cb09eec1228f994fb23ec378879652dec9a6d 100644 (file)
@@ -1961,24 +1961,65 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
        return skb;
 }
 
+static int xfrm_notify_userpolicy(struct net *net)
+{
+       struct xfrm_userpolicy_default *up;
+       int len = NLMSG_ALIGN(sizeof(*up));
+       struct nlmsghdr *nlh;
+       struct sk_buff *skb;
+       int err;
+
+       skb = nlmsg_new(len, GFP_ATOMIC);
+       if (skb == NULL)
+               return -ENOMEM;
+
+       nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0);
+       if (nlh == NULL) {
+               kfree_skb(skb);
+               return -EMSGSIZE;
+       }
+
+       up = nlmsg_data(nlh);
+       up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ?
+                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+       up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ?
+                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+       up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ?
+                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+
+       nlmsg_end(skb, nlh);
+
+       rcu_read_lock();
+       err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
+       rcu_read_unlock();
+
+       return err;
+}
+
 static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
                            struct nlattr **attrs)
 {
        struct net *net = sock_net(skb->sk);
        struct xfrm_userpolicy_default *up = nlmsg_data(nlh);
-       u8 dirmask;
-       u8 old_default = net->xfrm.policy_default;
 
-       if (up->dirmask >= XFRM_USERPOLICY_DIRMASK_MAX)
-               return -EINVAL;
+       if (up->in == XFRM_USERPOLICY_BLOCK)
+               net->xfrm.policy_default |= XFRM_POL_DEFAULT_IN;
+       else if (up->in == XFRM_USERPOLICY_ACCEPT)
+               net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_IN;
 
-       dirmask = (1 << up->dirmask) & XFRM_POL_DEFAULT_MASK;
+       if (up->fwd == XFRM_USERPOLICY_BLOCK)
+               net->xfrm.policy_default |= XFRM_POL_DEFAULT_FWD;
+       else if (up->fwd == XFRM_USERPOLICY_ACCEPT)
+               net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_FWD;
 
-       net->xfrm.policy_default = (old_default & (0xff ^ dirmask))
-                                   | (up->action << up->dirmask);
+       if (up->out == XFRM_USERPOLICY_BLOCK)
+               net->xfrm.policy_default |= XFRM_POL_DEFAULT_OUT;
+       else if (up->out == XFRM_USERPOLICY_ACCEPT)
+               net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_OUT;
 
        rt_genid_bump_all(net);
 
+       xfrm_notify_userpolicy(net);
        return 0;
 }
 
@@ -1988,13 +2029,11 @@ static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct sk_buff *r_skb;
        struct nlmsghdr *r_nlh;
        struct net *net = sock_net(skb->sk);
-       struct xfrm_userpolicy_default *r_up, *up;
+       struct xfrm_userpolicy_default *r_up;
        int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default));
        u32 portid = NETLINK_CB(skb).portid;
        u32 seq = nlh->nlmsg_seq;
 
-       up = nlmsg_data(nlh);
-
        r_skb = nlmsg_new(len, GFP_ATOMIC);
        if (!r_skb)
                return -ENOMEM;
@@ -2007,8 +2046,12 @@ static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        r_up = nlmsg_data(r_nlh);
 
-       r_up->action = ((net->xfrm.policy_default & (1 << up->dirmask)) >> up->dirmask);
-       r_up->dirmask = up->dirmask;
+       r_up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ?
+                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+       r_up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ?
+                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+       r_up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ?
+                       XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
        nlmsg_end(r_skb, r_nlh);
 
        return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
index 4dc20be5fb96a63ccf01b19550518a51ba878b3b..5fd48a8d4f10a550278bb35483a73e9feff9a93e 100644 (file)
@@ -322,17 +322,11 @@ $(obj)/hbm_edt_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
 
 -include $(BPF_SAMPLES_PATH)/Makefile.target
 
-VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux)                           \
-                    $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux)    \
-                    ../../../../vmlinux                                \
-                    /sys/kernel/btf/vmlinux                            \
-                    /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF_PATHS ?= $(abspath $(if $(O),$(O)/vmlinux))                                \
+                    $(abspath $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux)) \
+                    $(abspath ./vmlinux)
 VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
 
-ifeq ($(VMLINUX_BTF),)
-$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
-endif
-
 $(obj)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL)
 ifeq ($(VMLINUX_H),)
        $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
@@ -340,6 +334,11 @@ else
        $(Q)cp "$(VMLINUX_H)" $@
 endif
 
+ifeq ($(VMLINUX_BTF),)
+       $(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)",\
+               build the kernel or set VMLINUX_BTF variable)
+endif
+
 clean-files += vmlinux.h
 
 # Get Clang's default includes on this system, as opposed to those seen by
index aee04534483a8ca17e1af2d23fc2ebb850cf193a..29c3bb6ad1cd8a82983bfd634119eca30853f0c2 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
 /* eBPF instruction mini library */
 #ifndef __BPF_INSN_H
 #define __BPF_INSN_H
index 8f59d430cb64c8ca0bcd5acfc84274d4bb8f77c4..bb0a5a3bfcf01961cdb6db2bc1e472a5c15247b1 100644 (file)
@@ -5,11 +5,6 @@
 #include "xdp_sample.bpf.h"
 #include "xdp_sample_shared.h"
 
-enum {
-       BPF_F_BROADCAST         = (1ULL << 3),
-       BPF_F_EXCLUDE_INGRESS   = (1ULL << 4),
-};
-
 struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
        __uint(key_size, sizeof(int));
index 4cce8fd0779c7c52edc5eea076297e31b8a31dbd..51fc23e2e9e50ca2ca6d3fb9ccd1e4e6d1b8f283 100644 (file)
@@ -29,7 +29,12 @@ CLANG_FLAGS  += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
 else
 CLANG_FLAGS    += -fintegrated-as
 endif
+# By default, clang only warns when it encounters an unknown warning flag or
+# certain optimization flags it knows it has not implemented.
+# Make it behave more like gcc by erroring when these flags are encountered
+# so they can be implemented or wrapped in cc-option.
 CLANG_FLAGS    += -Werror=unknown-warning-option
+CLANG_FLAGS    += -Werror=ignored-optimization-argument
 KBUILD_CFLAGS  += $(CLANG_FLAGS)
 KBUILD_AFLAGS  += $(CLANG_FLAGS)
 export CLANG_FLAGS
index 952e46876329a78a104ee91ef539c7e63d77c68c..4aad28480035582f0c4b99a973826597774d054e 100644 (file)
@@ -19,6 +19,10 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)              \
                += -fplugin-arg-structleak_plugin-byref
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL)    \
                += -fplugin-arg-structleak_plugin-byref-all
+ifdef CONFIG_GCC_PLUGIN_STRUCTLEAK
+    DISABLE_STRUCTLEAK_PLUGIN += -fplugin-arg-structleak_plugin-disable
+endif
+export DISABLE_STRUCTLEAK_PLUGIN
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK)              \
                += -DSTRUCTLEAK_PLUGIN
 
index 801c415bac59d014d96a8d34cd587e212d0487b2..b9e94c5e7097090317275d2a543d06debbce36bc 100644 (file)
@@ -33,10 +33,11 @@ else
        CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
         $(call cc-param,asan-globals=1) \
         $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
-        $(call cc-param,asan-stack=$(stack_enable)) \
         $(call cc-param,asan-instrument-allocas=1)
 endif
 
+CFLAGS_KASAN += $(call cc-param,asan-stack=$(stack_enable))
+
 endif # CONFIG_KASAN_GENERIC
 
 ifdef CONFIG_KASAN_SW_TAGS
index eef56d629799a0e5e5d91196dd2b6ce07785a985..48585c4d04ade8488c27b6e0501d433219b66141 100644 (file)
@@ -13,7 +13,7 @@
 # Stage 2 is handled by this file and does the following
 # 1) Find all modules listed in modules.order
 # 2) modpost is then used to
-# 3)  create one <module>.mod.c file pr. module
+# 3)  create one <module>.mod.c file per module
 # 4)  create one Module.symvers file with CRC for all exported symbols
 
 # Step 3 is used to place certain information in the module's ELF
index b9b0f15e588026952e4b3f9dd8f9b77cfa9dac67..217d21abc86e8d4e04e17ef8fdc7108b6656066f 100755 (executable)
@@ -34,7 +34,6 @@ REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL)
 REGEX_KCONFIG_DEF = re.compile(DEF)
 REGEX_KCONFIG_EXPR = re.compile(EXPR)
 REGEX_KCONFIG_STMT = re.compile(STMT)
-REGEX_KCONFIG_HELP = re.compile(r"^\s+help\s*$")
 REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$")
 REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
 REGEX_QUOTES = re.compile("(\"(.*?)\")")
@@ -102,6 +101,9 @@ def parse_options():
                      "continue.")
 
     if args.commit:
+        if args.commit.startswith('HEAD'):
+            sys.exit("The --commit option can't use the HEAD ref")
+
         args.find = False
 
     if args.ignore:
@@ -432,7 +434,6 @@ def parse_kconfig_file(kfile):
     lines = []
     defined = []
     references = []
-    skip = False
 
     if not os.path.exists(kfile):
         return defined, references
@@ -448,12 +449,6 @@ def parse_kconfig_file(kfile):
         if REGEX_KCONFIG_DEF.match(line):
             symbol_def = REGEX_KCONFIG_DEF.findall(line)
             defined.append(symbol_def[0])
-            skip = False
-        elif REGEX_KCONFIG_HELP.match(line):
-            skip = True
-        elif skip:
-            # ignore content of help messages
-            pass
         elif REGEX_KCONFIG_STMT.match(line):
             line = REGEX_QUOTES.sub("", line)
             symbols = get_symbols_in_line(line)
index fd9777f63f14703d25664d5dd601e85bf635b0bb..9dbab13329fa986f0a2d6e955b8b5e1c105634f3 100755 (executable)
@@ -82,10 +82,8 @@ cat << EOF
 #define __IGNORE_truncate64
 #define __IGNORE_stat64
 #define __IGNORE_lstat64
-#define __IGNORE_fstat64
 #define __IGNORE_fcntl64
 #define __IGNORE_fadvise64_64
-#define __IGNORE_fstatat64
 #define __IGNORE_fstatfs64
 #define __IGNORE_statfs64
 #define __IGNORE_llseek
@@ -253,6 +251,10 @@ cat << EOF
 #define __IGNORE_getpmsg
 #define __IGNORE_putpmsg
 #define __IGNORE_vserver
+
+/* 64-bit ports never needed these, and new 32-bit ports can use statx */
+#define __IGNORE_fstat64
+#define __IGNORE_fstatat64
 EOF
 }
 
index 0033eedce003eefdb017d965fdc833901c53eb0a..1d1bde1fd45eb76b4639f8c720619f6712e25dcd 100755 (executable)
@@ -13,6 +13,7 @@ import logging
 import os
 import re
 import subprocess
+import sys
 
 _DEFAULT_OUTPUT = 'compile_commands.json'
 _DEFAULT_LOG_LEVEL = 'WARNING'
index 319f92104f5652f0c00a8ffba8259a10a8dce121..4edc708baa6356c49c8cabbb3de41ba2b0fb7457 100755 (executable)
@@ -17,13 +17,7 @@ binutils)
        echo 2.23.0
        ;;
 gcc)
-       # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293
-       # https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk
-       if [ "$SRCARCH" = arm64 ]; then
-               echo 5.1.0
-       else
-               echo 4.9.0
-       fi
+       echo 5.1.0
        ;;
 icc)
        # temporary
index 8f6b13ae46bfc4855f0a31375a4ce7d42262b53c..7d631aaa0ae118bc41e9649d811beaf4ee4fe6ec 100755 (executable)
@@ -189,7 +189,7 @@ if ($arch =~ /(x86(_64)?)|(i386)/) {
 $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)";
 $weak_regex = "^[0-9a-fA-F]+\\s+([wW])\\s+(\\S+)";
 $section_regex = "Disassembly of section\\s+(\\S+):";
-$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
+$function_regex = "^([0-9a-fA-F]+)\\s+<([^^]*?)>:";
 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s(mcount|__fentry__)\$";
 $section_type = '@progbits';
 $mcount_adjust = 0;
index f355869c65cd10ef77c2faff6c90b3ca09692c21..6ee4fa882919c61e7847da79f16708c88a8898e2 100644 (file)
 #define EM_ARCV2       195
 #endif
 
+#ifndef EM_RISCV
+#define EM_RISCV       243
+#endif
+
 static uint32_t (*r)(const uint32_t *);
 static uint16_t (*r2)(const uint16_t *);
 static uint64_t (*r8)(const uint64_t *);
index 6517f221d52cd2154b7f3db8edc3834c150a0ccd..e7ebd45ca34573e16ade281ef5349a953238e3a9 100644 (file)
@@ -2157,7 +2157,7 @@ static int selinux_ptrace_access_check(struct task_struct *child,
 static int selinux_ptrace_traceme(struct task_struct *parent)
 {
        return avc_has_perm(&selinux_state,
-                           task_sid_subj(parent), task_sid_obj(current),
+                           task_sid_obj(parent), task_sid_obj(current),
                            SECCLASS_PROCESS, PROCESS__PTRACE, NULL);
 }
 
@@ -6222,7 +6222,7 @@ static int selinux_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *m
        struct ipc_security_struct *isec;
        struct msg_security_struct *msec;
        struct common_audit_data ad;
-       u32 sid = task_sid_subj(target);
+       u32 sid = task_sid_obj(target);
        int rc;
 
        isec = selinux_ipc(msq);
index d59276f48d4fc12a46b93c535c014336695fb6e3..94ea2a8b2bb7366b0ab153ca21cc225ee655a78c 100644 (file)
@@ -126,6 +126,8 @@ static const struct nlmsg_perm nlmsg_xfrm_perms[] =
        { XFRM_MSG_NEWSPDINFO,  NETLINK_XFRM_SOCKET__NLMSG_WRITE },
        { XFRM_MSG_GETSPDINFO,  NETLINK_XFRM_SOCKET__NLMSG_READ  },
        { XFRM_MSG_MAPPING,     NETLINK_XFRM_SOCKET__NLMSG_READ  },
+       { XFRM_MSG_SETDEFAULT,  NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+       { XFRM_MSG_GETDEFAULT,  NETLINK_XFRM_SOCKET__NLMSG_READ  },
 };
 
 static const struct nlmsg_perm nlmsg_audit_perms[] =
@@ -189,7 +191,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
                 * structures at the top of this file with the new mappings
                 * before updating the BUILD_BUG_ON() macro!
                 */
-               BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_MAPPING);
+               BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_GETDEFAULT);
                err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms,
                                 sizeof(nlmsg_xfrm_perms));
                break;
index cacbe751851943ea022ffa9054befe9102b77fb5..21a0e7c3b8dee5639a016f03d694dfd1a35e7b0e 100644 (file)
@@ -2016,7 +2016,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access,
                                const char *caller)
 {
        struct smk_audit_info ad;
-       struct smack_known *skp = smk_of_task_struct_subj(p);
+       struct smack_known *skp = smk_of_task_struct_obj(p);
        int rc;
 
        smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
@@ -3480,7 +3480,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
  */
 static int smack_getprocattr(struct task_struct *p, char *name, char **value)
 {
-       struct smack_known *skp = smk_of_task_struct_subj(p);
+       struct smack_known *skp = smk_of_task_struct_obj(p);
        char *cp;
        int slen;
 
index a59de24695ec9b0ea7d20dc309bdf05ef7574286..dfe5a64e19d2e0c6eed5d3d8e3b4cf73c1415d3d 100644 (file)
@@ -468,6 +468,76 @@ static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
 }
 #endif /* CONFIG_X86_X32 */
 
+#ifdef __BIG_ENDIAN
+typedef char __pad_before_u32[4];
+typedef char __pad_after_u32[0];
+#else
+typedef char __pad_before_u32[0];
+typedef char __pad_after_u32[4];
+#endif
+
+/* PCM 2.0.15 API definition had a bug in mmap control; it puts the avail_min
+ * at the wrong offset due to a typo in padding type.
+ * The bug hits only 32bit.
+ * A workaround for incorrect read/write is needed only in 32bit compat mode.
+ */
+struct __snd_pcm_mmap_control64_buggy {
+       __pad_before_u32 __pad1;
+       __u32 appl_ptr;
+       __pad_before_u32 __pad2;        /* SiC! here is the bug */
+       __pad_before_u32 __pad3;
+       __u32 avail_min;
+       __pad_after_uframe __pad4;
+};
+
+static int snd_pcm_ioctl_sync_ptr_buggy(struct snd_pcm_substream *substream,
+                                       struct snd_pcm_sync_ptr __user *_sync_ptr)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_pcm_sync_ptr sync_ptr;
+       struct __snd_pcm_mmap_control64_buggy *sync_cp;
+       volatile struct snd_pcm_mmap_status *status;
+       volatile struct snd_pcm_mmap_control *control;
+       int err;
+
+       memset(&sync_ptr, 0, sizeof(sync_ptr));
+       sync_cp = (struct __snd_pcm_mmap_control64_buggy *)&sync_ptr.c.control;
+       if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
+               return -EFAULT;
+       if (copy_from_user(sync_cp, &(_sync_ptr->c.control), sizeof(*sync_cp)))
+               return -EFAULT;
+       status = runtime->status;
+       control = runtime->control;
+       if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
+               err = snd_pcm_hwsync(substream);
+               if (err < 0)
+                       return err;
+       }
+       snd_pcm_stream_lock_irq(substream);
+       if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
+               err = pcm_lib_apply_appl_ptr(substream, sync_cp->appl_ptr);
+               if (err < 0) {
+                       snd_pcm_stream_unlock_irq(substream);
+                       return err;
+               }
+       } else {
+               sync_cp->appl_ptr = control->appl_ptr;
+       }
+       if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
+               control->avail_min = sync_cp->avail_min;
+       else
+               sync_cp->avail_min = control->avail_min;
+       sync_ptr.s.status.state = status->state;
+       sync_ptr.s.status.hw_ptr = status->hw_ptr;
+       sync_ptr.s.status.tstamp = status->tstamp;
+       sync_ptr.s.status.suspended_state = status->suspended_state;
+       sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
+       snd_pcm_stream_unlock_irq(substream);
+       if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
+               return -EFAULT;
+       return 0;
+}
+
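+/* For illustration only (not part of the patch): a standalone sketch of why
+ * the typo matters on 32-bit little-endian builds, where __pad_before_u32 is
+ * zero-sized and __pad_after_u32 is 4 bytes. With the correct padding,
+ * avail_min lands at offset 8 as a 64-bit kernel expects; with the buggy
+ * "before" padding it slides down to offset 4, which is what the compat
+ * handler above has to keep emulating. Compiles with GCC's zero-length
+ * array extension; the struct and pad names are stand-ins, not the uapi ones.
+ *
+ *	#include <stdio.h>
+ *	#include <stddef.h>
+ *	#include <stdint.h>
+ *
+ *	typedef char pad_before_u32[0];		// little-endian, 32-bit
+ *	typedef char pad_after_u32[4];
+ *
+ *	struct control64_intended {		// layout a 64-bit kernel expects
+ *		pad_before_u32 pad1;
+ *		uint32_t appl_ptr;
+ *		pad_after_u32 pad2;		// correct: pad *after* the value
+ *		pad_before_u32 pad3;
+ *		uint32_t avail_min;
+ *		pad_after_u32 pad4;
+ *	};
+ *
+ *	struct control64_buggy {		// layout the old 32-bit header produced
+ *		pad_before_u32 pad1;
+ *		uint32_t appl_ptr;
+ *		pad_before_u32 pad2;		// typo: zero-sized, avail_min moves up
+ *		pad_before_u32 pad3;
+ *		uint32_t avail_min;
+ *		pad_after_u32 pad4;
+ *	};
+ *
+ *	int main(void)
+ *	{
+ *		printf("intended avail_min offset: %zu\n",
+ *		       offsetof(struct control64_intended, avail_min));	// 8
+ *		printf("buggy    avail_min offset: %zu\n",
+ *		       offsetof(struct control64_buggy, avail_min));	// 4
+ *		return 0;
+ *	}
+ */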
 /*
  */
 enum {
@@ -537,7 +607,7 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
                if (in_x32_syscall())
                        return snd_pcm_ioctl_sync_ptr_x32(substream, argp);
 #endif /* CONFIG_X86_X32 */
-               return snd_pcm_common_ioctl(file, substream, cmd, argp);
+               return snd_pcm_ioctl_sync_ptr_buggy(substream, argp);
        case SNDRV_PCM_IOCTL_HW_REFINE32:
                return snd_pcm_ioctl_hw_params_compat(substream, 1, argp);
        case SNDRV_PCM_IOCTL_HW_PARAMS32:
index 6c0a4a67ad2e39082a12090b7c22acc0f185d936..6f30231bdb88454c0d6c279f557965c1083608c4 100644 (file)
@@ -873,12 +873,21 @@ static long snd_rawmidi_ioctl(struct file *file, unsigned int cmd, unsigned long
                        return -EINVAL;
                }
        }
+       case SNDRV_RAWMIDI_IOCTL_USER_PVERSION:
+               if (get_user(rfile->user_pversion, (unsigned int __user *)arg))
+                       return -EFAULT;
+               return 0;
+
        case SNDRV_RAWMIDI_IOCTL_PARAMS:
        {
                struct snd_rawmidi_params params;
 
                if (copy_from_user(&params, argp, sizeof(struct snd_rawmidi_params)))
                        return -EFAULT;
+               if (rfile->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 2)) {
+                       params.mode = 0;
+                       memset(params.reserved, 0, sizeof(params.reserved));
+               }
                switch (params.stream) {
                case SNDRV_RAWMIDI_STREAM_OUTPUT:
                        if (rfile->output == NULL)
index 382275c5b1937e14e8b8d65187948eb847606a63..7f3fd8eb016fe825e13394576f803f0f27603fe0 100644 (file)
@@ -156,6 +156,8 @@ static int snd_seq_device_dev_free(struct snd_device *device)
        struct snd_seq_device *dev = device->device_data;
 
        cancel_autoload_drivers();
+       if (dev->private_free)
+               dev->private_free(dev);
        put_device(&dev->dev);
        return 0;
 }
@@ -183,11 +185,7 @@ static int snd_seq_device_dev_disconnect(struct snd_device *device)
 
 static void snd_seq_dev_release(struct device *dev)
 {
-       struct snd_seq_device *sdev = to_seq_dev(dev);
-
-       if (sdev->private_free)
-               sdev->private_free(sdev);
-       kfree(sdev);
+       kfree(to_seq_dev(dev));
 }
 
 /*
index ed40d0f7432c8c771c90116aee0761a56c0f21f5..773db4bf087693d995c19df9b98d0a889e84f538 100644 (file)
@@ -143,7 +143,7 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle)
        if (pointer_update)
                pcsp_pointer_update(chip);
 
-       hrtimer_forward(handle, hrtimer_get_expires(handle), ns_to_ktime(ns));
+       hrtimer_forward_now(handle, ns_to_ktime(ns));
 
        return HRTIMER_RESTART;
 }
index 5388b85fb60e5a8e887380108031f4f0774d8ba6..a18c2c033e8362e4297183a7be28598ac98143ac 100644 (file)
@@ -276,10 +276,11 @@ static void __maybe_unused copy_message(u64 *frames, __be32 *buffer,
 
        /* This is just for v2/v3 protocol. */
        for (i = 0; i < data_blocks; ++i) {
-               *frames = (be32_to_cpu(buffer[1]) << 16) |
-                         (be32_to_cpu(buffer[2]) >> 16);
+               *frames = be32_to_cpu(buffer[1]);
+               *frames <<= 16;
+               *frames |= be32_to_cpu(buffer[2]) >> 16;
+               ++frames;
                buffer += data_block_quadlets;
-               frames++;
        }
 }
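The copy_message() hunk above fixes a classic truncation bug: shifting a u32 intermediate left by 16 drops its upper bits before the value ever reaches the u64 destination, so the fix widens first and shifts in 64-bit. A tiny sketch of the pitfall, using hypothetical compose48_*() helpers rather than the driver's real code:

#include <linux/types.h>

static u64 compose48_buggy(u32 hi, u32 lo)
{
	/* The shift happens in 32-bit arithmetic, so hi's top 16 bits
	 * are lost before the implicit widening on return.
	 */
	return (hi << 16) | (lo >> 16);
}

static u64 compose48_fixed(u32 hi, u32 lo)
{
	u64 v = hi;	/* widen first ...              */

	v <<= 16;	/* ... then shift in 64-bit     */
	v |= lo >> 16;
	return v;
}
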
 
index cb5b5e3a481b9ba3149fdbee7fb896c4bf7f7d46..daf731364695b1531c29258e8775fc54dbed78b6 100644 (file)
@@ -184,13 +184,16 @@ static int detect_quirks(struct snd_oxfw *oxfw, const struct ieee1394_device_id
                        model = val;
        }
 
-       /*
-        * Mackie Onyx Satellite with base station has a quirk to report a wrong
-        * value in 'dbs' field of CIP header against its format information.
-        */
-       if (vendor == VENDOR_LOUD && model == MODEL_SATELLITE)
+       if (vendor == VENDOR_LOUD) {
+               // Mackie Onyx Satellite with base station has a quirk to report a wrong
+               // value in 'dbs' field of CIP header against its format information.
                oxfw->quirks |= SND_OXFW_QUIRK_WRONG_DBS;
 
+               // OXFW971-based models may transfer events by blocking method.
+               if (!(oxfw->quirks & SND_OXFW_QUIRK_JUMBO_PAYLOAD))
+                       oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION;
+       }
+
        return 0;
 }
 
index 062da7a7a5861f994da1236a62b6138b158eb514..f7bd6e2db085bb666bcba9618cfdeb15541455e8 100644 (file)
@@ -421,8 +421,9 @@ int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
        if (!full_reset)
                goto skip_reset;
 
-       /* clear STATESTS */
-       snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
+       /* clear STATESTS if not in reset */
+       if (snd_hdac_chip_readb(bus, GCTL) & AZX_GCTL_RESET)
+               snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
 
        /* reset controller */
        snd_hdac_bus_enter_link_reset(bus);
index 2523b23389e97dba45ff600b88535c47c6b13d56..1c8bffc3eec6e674b36065bf447400db71aa0835 100644 (file)
@@ -298,29 +298,31 @@ int snd_hda_codec_configure(struct hda_codec *codec)
 {
        int err;
 
+       if (codec->configured)
+               return 0;
+
        if (is_generic_config(codec))
                codec->probe_id = HDA_CODEC_ID_GENERIC;
        else
                codec->probe_id = 0;
 
-       err = snd_hdac_device_register(&codec->core);
-       if (err < 0)
-               return err;
+       if (!device_is_registered(&codec->core.dev)) {
+               err = snd_hdac_device_register(&codec->core);
+               if (err < 0)
+                       return err;
+       }
 
        if (!codec->preset)
                codec_bind_module(codec);
        if (!codec->preset) {
                err = codec_bind_generic(codec);
                if (err < 0) {
-                       codec_err(codec, "Unable to bind the codec\n");
-                       goto error;
+                       codec_dbg(codec, "Unable to bind the codec\n");
+                       return err;
                }
        }
 
+       codec->configured = 1;
        return 0;
-
- error:
-       snd_hdac_device_unregister(&codec->core);
-       return err;
 }
 EXPORT_SYMBOL_GPL(snd_hda_codec_configure);
index a9ebefd60cf68a0e773af8fdad276226566456ea..0c4a337c9fc0d266db5afd4d37210a33c889e61e 100644 (file)
@@ -791,6 +791,7 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
        snd_array_free(&codec->nids);
        remove_conn_list(codec);
        snd_hdac_regmap_exit(&codec->core);
+       codec->configured = 0;
 }
 EXPORT_SYMBOL_GPL(snd_hda_codec_cleanup_for_unbind);
 
index 7cd452831fd30956fcf6fff20e989f684b94fe95..930ae4002a8187addcc97defc7b75573c804fd88 100644 (file)
@@ -25,6 +25,7 @@
 #include <sound/core.h>
 #include <sound/initval.h>
 #include "hda_controller.h"
+#include "hda_local.h"
 
 #define CREATE_TRACE_POINTS
 #include "hda_controller_trace.h"
@@ -1248,17 +1249,24 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
 int azx_codec_configure(struct azx *chip)
 {
        struct hda_codec *codec, *next;
+       int success = 0;
 
-       /* use _safe version here since snd_hda_codec_configure() deregisters
-        * the device upon error and deletes itself from the bus list.
-        */
-       list_for_each_codec_safe(codec, next, &chip->bus) {
-               snd_hda_codec_configure(codec);
+       list_for_each_codec(codec, &chip->bus) {
+               if (!snd_hda_codec_configure(codec))
+                       success++;
        }
 
-       if (!azx_bus(chip)->num_codecs)
-               return -ENODEV;
-       return 0;
+       if (success) {
+               /* unregister failed codecs if any codec has been probed */
+               list_for_each_codec_safe(codec, next, &chip->bus) {
+                       if (!codec->configured) {
+                               codec_err(codec, "Unable to configure, disabling\n");
+                               snd_hdac_device_unregister(&codec->core);
+                       }
+               }
+       }
+
+       return success ? 0 : -ENODEV;
 }
 EXPORT_SYMBOL_GPL(azx_codec_configure);
 
index 3062f87380b1b41f5f273f4c26b2ceda0aed3575..f5bf295eb83078d2586a5985aaa1af4eb2840116 100644 (file)
@@ -41,7 +41,7 @@
 /* 24 unused */
 #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)  /* Take LPIB as delay */
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
-/* 27 unused */
+#define AZX_DCAPS_RETRY_PROBE  (1 << 27)       /* retry probe if no codec is configured */
 #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)  /* CORBRP clears itself after reset */
 #define AZX_DCAPS_NO_MSI64      (1 << 29)      /* Stick to 32-bit MSIs */
 #define AZX_DCAPS_SEPARATE_STREAM_TAG  (1 << 30) /* capture and playback use separate stream tag */
index 3aa432d814a24a1e1affe311054005e5cb057006..4d22e7adeee8ecf11006c951f18ed10cf8e220f9 100644 (file)
@@ -307,7 +307,8 @@ enum {
 /* quirks for AMD SB */
 #define AZX_DCAPS_PRESET_AMD_SB \
        (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_AMD_WORKAROUND |\
-        AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
+        AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME |\
+        AZX_DCAPS_RETRY_PROBE)
 
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
@@ -883,10 +884,11 @@ static unsigned int azx_get_pos_skl(struct azx *chip, struct azx_dev *azx_dev)
        return azx_get_pos_posbuf(chip, azx_dev);
 }
 
-static void azx_shutdown_chip(struct azx *chip)
+static void __azx_shutdown_chip(struct azx *chip, bool skip_link_reset)
 {
        azx_stop_chip(chip);
-       azx_enter_link_reset(chip);
+       if (!skip_link_reset)
+               azx_enter_link_reset(chip);
        azx_clear_irq_pending(chip);
        display_power(chip, false);
 }
@@ -895,6 +897,11 @@ static void azx_shutdown_chip(struct azx *chip)
 static DEFINE_MUTEX(card_list_lock);
 static LIST_HEAD(card_list);
 
+static void azx_shutdown_chip(struct azx *chip)
+{
+       __azx_shutdown_chip(chip, false);
+}
+
 static void azx_add_card_list(struct azx *chip)
 {
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -1717,7 +1724,7 @@ static void azx_check_snoop_available(struct azx *chip)
 
 static void azx_probe_work(struct work_struct *work)
 {
-       struct hda_intel *hda = container_of(work, struct hda_intel, probe_work);
+       struct hda_intel *hda = container_of(work, struct hda_intel, probe_work.work);
        azx_probe_continue(&hda->chip);
 }
 
@@ -1822,7 +1829,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
        }
 
        /* continue probing in work context as may trigger request module */
-       INIT_WORK(&hda->probe_work, azx_probe_work);
+       INIT_DELAYED_WORK(&hda->probe_work, azx_probe_work);
 
        *rchip = chip;
 
@@ -2136,7 +2143,7 @@ static int azx_probe(struct pci_dev *pci,
 #endif
 
        if (schedule_probe)
-               schedule_work(&hda->probe_work);
+               schedule_delayed_work(&hda->probe_work, 0);
 
        dev++;
        if (chip->disabled)
@@ -2222,6 +2229,11 @@ static int azx_probe_continue(struct azx *chip)
        int dev = chip->dev_index;
        int err;
 
+       if (chip->disabled || hda->init_failed)
+               return -EIO;
+       if (hda->probe_retry)
+               goto probe_retry;
+
        to_hda_bus(bus)->bus_probing = 1;
        hda->probe_continued = 1;
 
@@ -2283,10 +2295,20 @@ static int azx_probe_continue(struct azx *chip)
 #endif
        }
 #endif
+
+ probe_retry:
        if (bus->codec_mask && !(probe_only[dev] & 1)) {
                err = azx_codec_configure(chip);
-               if (err < 0)
+               if (err) {
+                       if ((chip->driver_caps & AZX_DCAPS_RETRY_PROBE) &&
+                           ++hda->probe_retry < 60) {
+                               schedule_delayed_work(&hda->probe_work,
+                                                     msecs_to_jiffies(1000));
+                               return 0; /* keep things up */
+                       }
+                       dev_err(chip->card->dev, "Cannot probe codecs, giving up\n");
                        goto out_free;
+               }
        }
 
        err = snd_card_register(chip->card);
@@ -2316,6 +2338,7 @@ out_free:
                display_power(chip, false);
        complete_all(&hda->probe_wait);
        to_hda_bus(bus)->bus_probing = 0;
+       hda->probe_retry = 0;
        return 0;
 }
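The azx_probe_continue() changes above convert the probe into a delayed work item so a failed codec configuration can be retried once per second, up to 60 times, before giving up. A minimal sketch of that self-rescheduling retry pattern, assuming placeholder names (struct my_dev, my_try_configure()) rather than the hda_intel driver API:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct my_dev {
	struct delayed_work probe_work;
	int probe_retry;
};

static int my_try_configure(struct my_dev *dev)
{
	return -EAGAIN;		/* stub: pretend the hardware is not ready yet */
}

static void my_probe_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, probe_work.work);

	if (!my_try_configure(dev))
		return;				/* configured, done */

	/* Not ready yet: reschedule ourselves until the retry budget runs out. */
	if (++dev->probe_retry < 60)
		schedule_delayed_work(&dev->probe_work, msecs_to_jiffies(1000));
	else
		pr_err("giving up after %d retries\n", dev->probe_retry);
}

static void my_start_probe(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->probe_work, my_probe_work);
	schedule_delayed_work(&dev->probe_work, 0);
}
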
 
@@ -2341,7 +2364,7 @@ static void azx_remove(struct pci_dev *pci)
                 * device during cancel_work_sync() call.
                 */
                device_unlock(&pci->dev);
-               cancel_work_sync(&hda->probe_work);
+               cancel_delayed_work_sync(&hda->probe_work);
                device_lock(&pci->dev);
 
                snd_card_free(card);
@@ -2357,7 +2380,7 @@ static void azx_shutdown(struct pci_dev *pci)
                return;
        chip = card->private_data;
        if (chip && chip->running)
-               azx_shutdown_chip(chip);
+               __azx_shutdown_chip(chip, true);
 }
 
 /* PCI IDs */
index 3fb119f090408865e2a380292807469b9bd8d369..0f39418f9328b08e7c34c2db540eed30f734100c 100644 (file)
@@ -14,7 +14,7 @@ struct hda_intel {
 
        /* sync probing */
        struct completion probe_wait;
-       struct work_struct probe_work;
+       struct delayed_work probe_work;
 
        /* card list (for power_save trigger) */
        struct list_head list;
@@ -30,6 +30,8 @@ struct hda_intel {
        unsigned int freed:1; /* resources already released */
 
        bool need_i915_power:1; /* the hda controller needs i915 power */
+
+       int probe_retry;        /* being probe-retry */
 };
 
 #endif
index 3c7ef55d016e9b65bc976dcd12c6c591f22496b8..31ff11ab868e1ac3c1aff15d20a52728884e2788 100644 (file)
@@ -1207,6 +1207,9 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
                snd_hda_jack_add_kctl(codec, DOLPHIN_LO_PIN_NID, "Line Out", true,
                                      SND_JACK_HEADPHONE, NULL);
 
+               snd_hda_jack_add_kctl(codec, DOLPHIN_AMIC_PIN_NID, "Microphone", true,
+                                     SND_JACK_MICROPHONE, NULL);
+
                cs8409_fix_caps(codec, DOLPHIN_HP_PIN_NID);
                cs8409_fix_caps(codec, DOLPHIN_LO_PIN_NID);
                cs8409_fix_caps(codec, DOLPHIN_AMIC_PIN_NID);
index 8b7a389b6aedb830639df9d3b4604f2493eda5aa..22d27b12c4e756cacb569202f029e30d98452d47 100644 (file)
@@ -526,6 +526,8 @@ static void alc_shutup_pins(struct hda_codec *codec)
        struct alc_spec *spec = codec->spec;
 
        switch (codec->core.vendor_id) {
+       case 0x10ec0236:
+       case 0x10ec0256:
        case 0x10ec0283:
        case 0x10ec0286:
        case 0x10ec0288:
@@ -2537,7 +2539,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
-       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170SM", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x7715, "Clevo X170KM-G", ALC1220_FIXUP_CLEVO_PB51ED),
        SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
@@ -3528,7 +3531,8 @@ static void alc256_shutup(struct hda_codec *codec)
        /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly
         * when booting with headset plugged. So skip setting it for the codec alc257
         */
-       if (codec->core.vendor_id != 0x10ec0257)
+       if (spec->codec_variant != ALC269_TYPE_ALC257 &&
+           spec->codec_variant != ALC269_TYPE_ALC256)
                alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
 
        if (!spec->no_shutup_pins)
@@ -6429,12 +6433,44 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
        hda_fixup_thinkpad_acpi(codec, fix, action);
 }
 
+/* Fixup for Lenovo Legion 15IMHg05 speaker output on headset removal. */
+static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
+                                                 const struct hda_fixup *fix,
+                                                 int action)
+{
+       struct alc_spec *spec = codec->spec;
+
+       switch (action) {
+       case HDA_FIXUP_ACT_PRE_PROBE:
+               spec->gen.suppress_auto_mute = 1;
+               break;
+       }
+}
+
 /* for alc295_fixup_hp_top_speakers */
 #include "hp_x360_helper.c"
 
 /* for alc285_fixup_ideapad_s740_coef() */
 #include "ideapad_s740_helper.c"
 
+static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *codec,
+                                                           const struct hda_fixup *fix,
+                                                           int action)
+{
+       /*
+       * A certain other OS sets these coeffs to different values. On at least one TongFang
+       * barebone these settings might survive even a cold reboot. So to restore a clean slate the
+       * values are explicitly reset to default here. Without this, the external microphone is
+       * always in a plugged-in state, while the internal microphone is always in an unplugged
+       * state, breaking the ability to use the internal microphone.
+       */
+       alc_write_coef_idx(codec, 0x24, 0x0000);
+       alc_write_coef_idx(codec, 0x26, 0x0000);
+       alc_write_coef_idx(codec, 0x29, 0x3000);
+       alc_write_coef_idx(codec, 0x37, 0xfe05);
+       alc_write_coef_idx(codec, 0x45, 0x5089);
+}
+
 enum {
        ALC269_FIXUP_GPIO2,
        ALC269_FIXUP_SONY_VAIO,
@@ -6646,6 +6682,11 @@ enum {
        ALC623_FIXUP_LENOVO_THINKSTATION_P340,
        ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
        ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST,
+       ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS,
+       ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
+       ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
+       ALC287_FIXUP_13S_GEN2_SPEAKERS,
+       ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -8236,6 +8277,117 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
        },
+       [ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS] = {
+               .type = HDA_FIXUP_VERBS,
+               //.v.verbs = legion_15imhg05_coefs,
+               .v.verbs = (const struct hda_verb[]) {
+                        // set left speaker Legion 7i.
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+                        // set right speaker Legion 7i.
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+                        {}
+               },
+               .chained = true,
+               .chain_id = ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
+       },
+       [ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc287_fixup_legion_15imhg05_speakers,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE,
+       },
+       [ALC287_FIXUP_YOGA7_14ITL_SPEAKERS] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                        // set left speaker Yoga 7i.
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+                        // set right speaker Yoga 7i.
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x46 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                        { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+                        {}
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE,
+       },
+       [ALC287_FIXUP_13S_GEN2_SPEAKERS] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+                       {}
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE,
+       },
+       [ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc256_fixup_tongfang_reset_persistent_settings,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8327,6 +8479,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK),
+       SND_PCI_QUIRK(0x1028, 0x0a62, "Dell Precision 5560", ALC289_FIXUP_DUAL_SPK),
+       SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -8630,6 +8785,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
        SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
        SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
+       SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
+       SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+       SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+       SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8660,6 +8819,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
        SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
        SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+       SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS),
        SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
        SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -10037,6 +10197,9 @@ enum {
        ALC671_FIXUP_HP_HEADSET_MIC2,
        ALC662_FIXUP_ACER_X2660G_HEADSET_MODE,
        ALC662_FIXUP_ACER_NITRO_HEADSET_MODE,
+       ALC668_FIXUP_ASUS_NO_HEADSET_MIC,
+       ALC668_FIXUP_HEADSET_MIC,
+       ALC668_FIXUP_MIC_DET_COEF,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -10420,6 +10583,29 @@ static const struct hda_fixup alc662_fixups[] = {
                .chained = true,
                .chain_id = ALC662_FIXUP_USI_FUNC
        },
+       [ALC668_FIXUP_ASUS_NO_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x04a1112c },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC668_FIXUP_HEADSET_MIC
+       },
+       [ALC668_FIXUP_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_headset_mic,
+               .chained = true,
+               .chain_id = ALC668_FIXUP_MIC_DET_COEF
+       },
+       [ALC668_FIXUP_MIC_DET_COEF] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x15 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x0d60 },
+                       {}
+               },
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -10455,6 +10641,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
        SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
        SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
+       SND_PCI_QUIRK(0x1043, 0x185d, "ASUS G551JW", ALC668_FIXUP_ASUS_NO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8),
        SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
        SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
index 87d24224c042e22ef3b4dcc466042c999e9992eb..23f253effb4faa928af41c545a2d26e53f5ea427 100644 (file)
@@ -52,7 +52,7 @@
 #define PCXHR_DSP 2
 
 #if (PCXHR_DSP_OFFSET_MAX > PCXHR_PLX_OFFSET_MIN)
-#undef  PCXHR_REG_TO_PORT(x)
+#error  PCXHR_REG_TO_PORT(x)
 #else
 #define PCXHR_REG_TO_PORT(x)   ((x)>PCXHR_DSP_OFFSET_MAX ? PCXHR_PLX : PCXHR_DSP)
 #endif
index a961f837cd094e0b6b963cb4665e8a2ea3b1c40d..bda66b30e063c24f4eb982288cdb5654dc701985 100644 (file)
@@ -1073,6 +1073,16 @@ static int fsl_esai_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_pm_get_sync;
 
+       /*
+        * Register platform component before registering cpu dai for there
+        * is not defer probe for platform component in snd_soc_add_pcm_runtime().
+        */
+       ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
+               goto err_pm_get_sync;
+       }
+
        ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
                                              &fsl_esai_dai, 1);
        if (ret) {
@@ -1082,12 +1092,6 @@ static int fsl_esai_probe(struct platform_device *pdev)
 
        INIT_WORK(&esai_priv->work, fsl_esai_hw_reset);
 
-       ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
-               goto err_pm_get_sync;
-       }
-
        return ret;
 
 err_pm_get_sync:
index 8c0c75ce9490faf79c8b43b961b6907f976ab911..9f90989ac59a6f48b38d6e3d4ab2d1f3e63ce92b 100644 (file)
@@ -737,18 +737,23 @@ static int fsl_micfil_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
        regcache_cache_only(micfil->regmap, true);
 
+       /*
+        * Register platform component before registering cpu dai for there
+        * is not defer probe for platform component in snd_soc_add_pcm_runtime().
+        */
+       ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to pcm register\n");
+               return ret;
+       }
+
        ret = devm_snd_soc_register_component(&pdev->dev, &fsl_micfil_component,
                                              &fsl_micfil_dai, 1);
        if (ret) {
                dev_err(&pdev->dev, "failed to register component %s\n",
                        fsl_micfil_component.name);
-               return ret;
        }
 
-       ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
-       if (ret)
-               dev_err(&pdev->dev, "failed to pcm register\n");
-
        return ret;
 }
 
index 223fcd15bfcccf44ebb680b3fd3b33902693328b..38f6362099d587ab97d78aeb1ec79913917d411f 100644 (file)
@@ -1152,11 +1152,10 @@ static int fsl_sai_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_pm_get_sync;
 
-       ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
-                                             &sai->cpu_dai_drv, 1);
-       if (ret)
-               goto err_pm_get_sync;
-
+       /*
+        * Register platform component before registering cpu dai for there
+        * is not defer probe for platform component in snd_soc_add_pcm_runtime().
+        */
        if (sai->soc_data->use_imx_pcm) {
                ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
                if (ret)
@@ -1167,6 +1166,11 @@ static int fsl_sai_probe(struct platform_device *pdev)
                        goto err_pm_get_sync;
        }
 
+       ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
+                                             &sai->cpu_dai_drv, 1);
+       if (ret)
+               goto err_pm_get_sync;
+
        return ret;
 
 err_pm_get_sync:
index 8ffb1a6048d63f06f2c676c216069eef1e6eac06..1c53719bb61e234d5456ec4cc7d9be234bd86403 100644 (file)
@@ -1434,16 +1434,20 @@ static int fsl_spdif_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
        regcache_cache_only(spdif_priv->regmap, true);
 
-       ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
-                                             &spdif_priv->cpu_dai_drv, 1);
+       /*
+        * Register platform component before registering cpu dai for there
+        * is not defer probe for platform component in snd_soc_add_pcm_runtime().
+        */
+       ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
        if (ret) {
-               dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
+               dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
                goto err_pm_disable;
        }
 
-       ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
+       ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
+                                             &spdif_priv->cpu_dai_drv, 1);
        if (ret) {
-               dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
+               dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
                goto err_pm_disable;
        }
 
index 31c5ee641fe76268ce5e16d38d71d36372d024f0..7ba2fd15132d9b966e4b86d5459c707e880aac26 100644 (file)
@@ -1215,18 +1215,23 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
        pm_runtime_enable(dev);
        regcache_cache_only(xcvr->regmap, true);
 
+       /*
+        * Register platform component before registering cpu dai for there
+        * is not defer probe for platform component in snd_soc_add_pcm_runtime().
+        */
+       ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);
+       if (ret) {
+               dev_err(dev, "failed to pcm register\n");
+               return ret;
+       }
+
        ret = devm_snd_soc_register_component(dev, &fsl_xcvr_comp,
                                              &fsl_xcvr_dai, 1);
        if (ret) {
                dev_err(dev, "failed to register component %s\n",
                        fsl_xcvr_comp.name);
-               return ret;
        }
 
-       ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);
-       if (ret)
-               dev_err(dev, "failed to pcm register\n");
-
        return ret;
 }
 
index 6602eda89e8efaf9c3ad2bea5f9e90c80a1ce514..6b06248a9327a51a4229b6b69771d70f47e0772e 100644 (file)
@@ -929,6 +929,11 @@ static int create_sdw_dailink(struct snd_soc_card *card,
                              cpus + *cpu_id, cpu_dai_num,
                              codecs, codec_num,
                              NULL, &sdw_ops);
+               /*
+                * SoundWire DAILINKs use 'stream' functions and Bank Switch operations
+                * based on wait_for_completion(), tag them as 'nonatomic'.
+                */
+               dai_links[*be_index].nonatomic = true;
 
                ret = set_codec_init_func(card, link, dai_links + (*be_index)++,
                                          playback, group_id);
index 5a2f4667d50b398d76c138db91bb1debe566d6b8..81ad2dcee9ebc0e6025c7411d292216a5b9f956c 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config SND_SOC_MEDIATEK
        tristate
+       select REGMAP_MMIO
 
 config SND_SOC_MT2701
        tristate "ASoC support for Mediatek MT2701 chip"
@@ -188,7 +189,9 @@ config SND_SOC_MT8192_MT6359_RT1015_RT5682
 config SND_SOC_MT8195
        tristate "ASoC support for Mediatek MT8195 chip"
        depends on ARCH_MEDIATEK || COMPILE_TEST
+       depends on COMMON_CLK
        select SND_SOC_MEDIATEK
+       select MFD_SYSCON if SND_SOC_MT6359
        help
          This adds ASoC platform driver support for Mediatek MT8195 chip
          that can be used with other codecs.
index baaa5881b1d4849e789b2eff3c834a9b0a927f2c..e95c7c018e7d4c028f26755c7a950da9f160c638 100644 (file)
@@ -334,9 +334,11 @@ int mtk_afe_suspend(struct snd_soc_component *component)
                        devm_kcalloc(dev, afe->reg_back_up_list_num,
                                     sizeof(unsigned int), GFP_KERNEL);
 
-       for (i = 0; i < afe->reg_back_up_list_num; i++)
-               regmap_read(regmap, afe->reg_back_up_list[i],
-                           &afe->reg_back_up[i]);
+       if (afe->reg_back_up) {
+               for (i = 0; i < afe->reg_back_up_list_num; i++)
+                       regmap_read(regmap, afe->reg_back_up_list[i],
+                                   &afe->reg_back_up[i]);
+       }
 
        afe->suspended = true;
        afe->runtime_suspend(dev);
@@ -356,12 +358,13 @@ int mtk_afe_resume(struct snd_soc_component *component)
 
        afe->runtime_resume(dev);
 
-       if (!afe->reg_back_up)
+       if (!afe->reg_back_up) {
                dev_dbg(dev, "%s no reg_backup\n", __func__);
-
-       for (i = 0; i < afe->reg_back_up_list_num; i++)
-               mtk_regmap_write(regmap, afe->reg_back_up_list[i],
-                                afe->reg_back_up[i]);
+       } else {
+               for (i = 0; i < afe->reg_back_up_list_num; i++)
+                       mtk_regmap_write(regmap, afe->reg_back_up_list[i],
+                                        afe->reg_back_up[i]);
+       }
 
        afe->suspended = false;
        return 0;
index c97ace7387b4cdce5f506f17789a8c67ecb6cede..de09f67c04502207a878f22d8348da9a1940877f 100644 (file)
@@ -424,8 +424,8 @@ static int mt8195_hdmi_codec_init(struct snd_soc_pcm_runtime *rtd)
        return snd_soc_component_set_jack(cmpnt_codec, &priv->hdmi_jack, NULL);
 }
 
-static int mt8195_hdmitx_dptx_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
-                                             struct snd_pcm_hw_params *params)
+static int mt8195_dptx_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+                                      struct snd_pcm_hw_params *params)
 
 {
        /* fix BE i2s format to 32bit, clean param mask first */
@@ -902,7 +902,7 @@ static struct snd_soc_dai_link mt8195_mt6359_rt1019_rt5682_dai_links[] = {
                .no_pcm = 1,
                .dpcm_playback = 1,
                .ops = &mt8195_dptx_ops,
-               .be_hw_params_fixup = mt8195_hdmitx_dptx_hw_params_fixup,
+               .be_hw_params_fixup = mt8195_dptx_hw_params_fixup,
                SND_SOC_DAILINK_REG(DPTX_BE),
        },
        [DAI_LINK_ETDM1_IN_BE] = {
@@ -953,7 +953,6 @@ static struct snd_soc_dai_link mt8195_mt6359_rt1019_rt5682_dai_links[] = {
                        SND_SOC_DAIFMT_NB_NF |
                        SND_SOC_DAIFMT_CBS_CFS,
                .dpcm_playback = 1,
-               .be_hw_params_fixup = mt8195_hdmitx_dptx_hw_params_fixup,
                SND_SOC_DAILINK_REG(ETDM3_OUT_BE),
        },
        [DAI_LINK_PCM1_BE] = {
index 3e4dd4a86363b822e8b1037adcd3490a81caf57f..59d0d7b2b55c82efff7b77a1126b03bff363f4ed 100644 (file)
@@ -371,7 +371,6 @@ int snd_sof_device_remove(struct device *dev)
                        dev_warn(dev, "error: %d failed to prepare DSP for device removal",
                                 ret);
 
-               snd_sof_fw_unload(sdev);
                snd_sof_ipc_free(sdev);
                snd_sof_free_debug(sdev);
                snd_sof_free_trace(sdev);
@@ -394,8 +393,7 @@ int snd_sof_device_remove(struct device *dev)
                snd_sof_remove(sdev);
 
        /* release firmware */
-       release_firmware(pdata->fw);
-       pdata->fw = NULL;
+       snd_sof_fw_unload(sdev);
 
        return 0;
 }
index 12fedf0984bd90437142c1e959b9376ecf9411f7..7e9723a10d02e015ed6c356a78ba71973e7aa9c9 100644 (file)
@@ -365,7 +365,14 @@ static int imx8_remove(struct snd_sof_dev *sdev)
 /* on i.MX8 there is 1 to 1 match between type and BAR idx */
 static int imx8_get_bar_index(struct snd_sof_dev *sdev, u32 type)
 {
-       return type;
+       /* Only IRAM and SRAM bars are valid */
+       switch (type) {
+       case SOF_FW_BLK_TYPE_IRAM:
+       case SOF_FW_BLK_TYPE_SRAM:
+               return type;
+       default:
+               return -EINVAL;
+       }
 }
 
 static void imx8_ipc_msg_data(struct snd_sof_dev *sdev,
index cb822d9537678d0e9e9972bd47375054cfdefaa0..892e1482f97faaf0cf4f407ee76f8b34372afdad 100644 (file)
@@ -228,7 +228,14 @@ static int imx8m_remove(struct snd_sof_dev *sdev)
 /* on i.MX8 there is 1 to 1 match between type and BAR idx */
 static int imx8m_get_bar_index(struct snd_sof_dev *sdev, u32 type)
 {
-       return type;
+       /* Only IRAM and SRAM bars are valid */
+       switch (type) {
+       case SOF_FW_BLK_TYPE_IRAM:
+       case SOF_FW_BLK_TYPE_SRAM:
+               return type;
+       default:
+               return -EINVAL;
+       }
 }
 
 static void imx8m_ipc_msg_data(struct snd_sof_dev *sdev,
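
Both i.MX8 hunks replace the blind "type equals BAR index" mapping with an explicit allow-list, so firmware that references any block type other than IRAM or SRAM is rejected with -EINVAL instead of being turned into an out-of-range BAR index. A standalone sketch of the same validate-and-map shape; the enum values and the caller are illustrative, not the SOF definitions:

#include <errno.h>
#include <stdio.h>

enum blk_type { BLK_IRAM = 3, BLK_SRAM = 4 };   /* illustrative values only */

static int get_bar_index(unsigned int type)
{
        switch (type) {
        case BLK_IRAM:
        case BLK_SRAM:
                return (int)type;       /* 1:1 mapping for the known block types */
        default:
                return -EINVAL;         /* reject everything else */
        }
}

int main(void)
{
        printf("IRAM -> %d, bogus -> %d\n", get_bar_index(BLK_IRAM), get_bar_index(99));
        return 0;
}
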
index 2b38a77cd594f56f658ad130f60170bd47c6f96b..bb79c77775b3df968402d348d1233659e0d74170 100644 (file)
@@ -729,10 +729,10 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
        ret = request_firmware(&plat_data->fw, fw_filename, sdev->dev);
 
        if (ret < 0) {
-               dev_err(sdev->dev, "error: request firmware %s failed err: %d\n",
-                       fw_filename, ret);
                dev_err(sdev->dev,
-                       "you may need to download the firmware from https://github.com/thesofproject/sof-bin/\n");
+                       "error: sof firmware file is missing, you might need to\n");
+               dev_err(sdev->dev,
+                       "       download it from https://github.com/thesofproject/sof-bin/\n");
                goto err;
        } else {
                dev_dbg(sdev->dev, "request_firmware %s successful\n",
@@ -880,5 +880,7 @@ EXPORT_SYMBOL(snd_sof_run_firmware);
 void snd_sof_fw_unload(struct snd_sof_dev *sdev)
 {
        /* TODO: support module unloading at runtime */
+       release_firmware(sdev->pdata->fw);
+       sdev->pdata->fw = NULL;
 }
 EXPORT_SYMBOL(snd_sof_fw_unload);
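
Together with the snd_sof_device_remove() hunk further up, the release_firmware()/clear-pointer pair now lives only in snd_sof_fw_unload(), giving the firmware reference a single owner and removing the extra release from the remove path. A generic, standalone sketch of the release-and-clear idiom (the types and names are illustrative):

#include <stdlib.h>

struct fw_holder {
        void *fw;                       /* stands in for the firmware reference */
};

/* Drop the resource in exactly one place and leave no dangling pointer. */
static void holder_unload(struct fw_holder *h)
{
        free(h->fw);
        h->fw = NULL;                   /* a repeated unload becomes a harmless no-op */
}

int main(void)
{
        struct fw_holder h = { .fw = malloc(16) };

        holder_unload(&h);
        holder_unload(&h);              /* safe: free(NULL) does nothing */
        return 0;
}
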
index f72a6e83e6af20c2758cc4e382d1c95266e0c3e7..58f6ca5cf491a6cec698a4e0ac3b14d0740023b4 100644 (file)
@@ -530,7 +530,6 @@ void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
                return;
 
        if (sdev->dtrace_is_enabled) {
-               dev_err(sdev->dev, "error: waking up any trace sleepers\n");
                sdev->dtrace_error = true;
                wake_up(&sdev->trace_sleep);
        }
index bbb9a2282ed9ea4f10d0fcc028ae9447b6cced04..f6e3411b33cf125fb10da389dab4a00e795764a4 100644 (file)
@@ -122,9 +122,9 @@ static void xtensa_stack(struct snd_sof_dev *sdev, void *oops, u32 *stack,
         * 0x0049fbb0: 8000f2d0 0049fc00 6f6c6c61 00632e63
         */
        for (i = 0; i < stack_words; i += 4) {
-               hex_dump_to_buffer(stack + i * 4, 16, 16, 4,
+               hex_dump_to_buffer(stack + i, 16, 16, 4,
                                   buf, sizeof(buf), false);
-               dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i, buf);
+               dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i * 4, buf);
        }
 }
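
The fix keeps the two units in the loop consistent: 'stack' is a u32 pointer, so 'stack + i' already advances by i 32-bit words, while the printed address has to advance by i * 4 bytes; the old code scaled the pointer and left the address unscaled. A standalone sketch of dumping four words per row with both offsets matching (hex_dump_to_buffer() replaced by a plain printf() loop for brevity):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void dump_words(uint32_t base_addr, const uint32_t *stack, unsigned int stack_words)
{
        unsigned int i, j;

        for (i = 0; i < stack_words; i += 4) {
                /* pointer arithmetic counts words, the printed address counts bytes */
                printf("0x%08" PRIx32 ":", base_addr + i * 4);
                for (j = 0; j < 4 && i + j < stack_words; j++)
                        printf(" %08" PRIx32, stack[i + j]);
                printf("\n");
        }
}

int main(void)
{
        uint32_t words[8] = { 0xdeadbeef, 1, 2, 3, 4, 5, 6, 7 };

        dump_words(0x0049fbb0, words, 8);
        return 0;
}
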
 
index fd570a42f043198f1b7068132334b44c40bc22b6..1764b9302d467956e401d405f30b7e593be5e2ff 100644 (file)
@@ -1054,7 +1054,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
        return 0;
 }
 
-static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
+static int usb_audio_resume(struct usb_interface *intf)
 {
        struct snd_usb_audio *chip = usb_get_intfdata(intf);
        struct snd_usb_stream *as;
@@ -1080,7 +1080,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
         * we just notify and restart the mixers
         */
        list_for_each_entry(mixer, &chip->mixer_list, list) {
-               err = snd_usb_mixer_resume(mixer, reset_resume);
+               err = snd_usb_mixer_resume(mixer);
                if (err < 0)
                        goto err_out;
        }
@@ -1100,20 +1100,10 @@ err_out:
        atomic_dec(&chip->active); /* allow autopm after this point */
        return err;
 }
-
-static int usb_audio_resume(struct usb_interface *intf)
-{
-       return __usb_audio_resume(intf, false);
-}
-
-static int usb_audio_reset_resume(struct usb_interface *intf)
-{
-       return __usb_audio_resume(intf, true);
-}
 #else
 #define usb_audio_suspend      NULL
 #define usb_audio_resume       NULL
-#define usb_audio_reset_resume NULL
+#define usb_audio_resume       NULL
 #endif         /* CONFIG_PM */
 
 static const struct usb_device_id usb_audio_ids [] = {
@@ -1135,7 +1125,7 @@ static struct usb_driver usb_audio_driver = {
        .disconnect =   usb_audio_disconnect,
        .suspend =      usb_audio_suspend,
        .resume =       usb_audio_resume,
-       .reset_resume = usb_audio_reset_resume,
+       .reset_resume = usb_audio_resume,
        .id_table =     usb_audio_ids,
        .supports_autosuspend = 1,
 };
index 43bc59575a6e360b0984a41792e5ab4018f5745e..a2ce535df14b7f8e3787b188982b2b70d2aba1bc 100644 (file)
@@ -3653,33 +3653,16 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
        return 0;
 }
 
-static int default_mixer_reset_resume(struct usb_mixer_elem_list *list)
-{
-       int err;
-
-       if (list->resume) {
-               err = list->resume(list);
-               if (err < 0)
-                       return err;
-       }
-       return restore_mixer_value(list);
-}
-
-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume)
+int snd_usb_mixer_resume(struct usb_mixer_interface *mixer)
 {
        struct usb_mixer_elem_list *list;
-       usb_mixer_elem_resume_func_t f;
        int id, err;
 
        /* restore cached mixer values */
        for (id = 0; id < MAX_ID_ELEMS; id++) {
                for_each_mixer_elem(list, mixer, id) {
-                       if (reset_resume)
-                               f = list->reset_resume;
-                       else
-                               f = list->resume;
-                       if (f) {
-                               err = f(list);
+                       if (list->resume) {
+                               err = list->resume(list);
                                if (err < 0)
                                        return err;
                        }
@@ -3700,7 +3683,6 @@ void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
        list->id = unitid;
        list->dump = snd_usb_mixer_dump_cval;
 #ifdef CONFIG_PM
-       list->resume = NULL;
-       list->reset_resume = default_mixer_reset_resume;
+       list->resume = restore_mixer_value;
 #endif
 }
index 876bbc9a71ad7812c14bb32529e3da7e14a3fecd..98ea24d91d803a527c9bd7981ab266edc40934aa 100644 (file)
@@ -70,7 +70,6 @@ struct usb_mixer_elem_list {
        bool is_std_info;
        usb_mixer_elem_dump_func_t dump;
        usb_mixer_elem_resume_func_t resume;
-       usb_mixer_elem_resume_func_t reset_resume;
 };
 
 /* iterate over mixer element list of the given unit id */
@@ -121,7 +120,7 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
 
 #ifdef CONFIG_PM
 int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer);
-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume);
+int snd_usb_mixer_resume(struct usb_mixer_interface *mixer);
 #endif
 
 int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
index a66ce0375fd97ca2377e04150ce3b51fa899e7f3..46082dc57be09e7d4d3cf8969da314681a594a2f 100644 (file)
@@ -151,7 +151,7 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer,
                *listp = list;
        list->mixer = mixer;
        list->id = id;
-       list->reset_resume = resume;
+       list->resume = resume;
        kctl = snd_ctl_new1(knew, list);
        if (!kctl) {
                kfree(list);
index 3d5848d5481be93156792793f92575f3bba850c8..53ebabf42472224ee1dee0eeea5e8f7879c4739c 100644 (file)
@@ -2450,6 +2450,8 @@ static int scarlett2_update_monitor_other(struct usb_mixer_interface *mixer)
                err = scarlett2_usb_get_config(mixer,
                                               SCARLETT2_CONFIG_TALKBACK_MAP,
                                               1, &bitmap);
+               if (err < 0)
+                       return err;
                for (i = 0; i < num_mixes; i++, bitmap >>= 1)
                        private->talkback_map[i] = bitmap & 1;
        }
index e03043f7dad3fe00fa0a0576f0f32fc620127dfc..de18fff6928037fbd4ae4356f320b8c78662fd24 100644 (file)
 /* E-Mu 0204 USB */
 { USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f19) },
 
+/*
+ * Creative Technology, Ltd Live! Cam Sync HD [VF0770]
+ * The device advertises 8 formats, but only a rate of 48kHz is honored by the
+ * hardware and 24 bits give chopped audio, so only report the one working
+ * combination.
+ */
+{
+       USB_DEVICE(0x041e, 0x4095),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = &(const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_MIXER,
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                                       .channels = 2,
+                                       .fmt_bits = 16,
+                                       .iface = 3,
+                                       .altsetting = 4,
+                                       .altset_idx = 4,
+                                       .endpoint = 0x82,
+                                       .ep_attr = 0x05,
+                                       .rates = SNDRV_PCM_RATE_48000,
+                                       .rate_min = 48000,
+                                       .rate_max = 48000,
+                                       .nr_rates = 1,
+                                       .rate_table = (unsigned int[]) { 48000 },
+                               },
+                       },
+                       {
+                               .ifnum = -1
+                       },
+               },
+       },
+},
+
 /*
  * HP Wireless Audio
  * When not ignored, causes instability issues for some users, forcing them to
index 6ee6d24c847fd51e968b1960f4a0fcd9363fac7f..889c855addfc81219a6571018bdb01c5df68ea98 100644 (file)
@@ -1900,6 +1900,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
                   QUIRK_FLAG_CTL_MSG_DELAY | QUIRK_FLAG_IFACE_DELAY),
        VENDOR_FLG(0x07fd, /* MOTU */
                   QUIRK_FLAG_VALIDATE_RATES),
+       VENDOR_FLG(0x1235, /* Focusrite Novation */
+                  QUIRK_FLAG_VALIDATE_RATES),
        VENDOR_FLG(0x152a, /* Thesycon devices */
                   QUIRK_FLAG_DSD_RAW),
        VENDOR_FLG(0x1de7, /* Phoenix Audio */
diff --git a/tools/arch/x86/include/asm/unistd_32.h b/tools/arch/x86/include/asm/unistd_32.h
deleted file mode 100644 (file)
index 60a89db..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NR_perf_event_open
-# define __NR_perf_event_open 336
-#endif
-#ifndef __NR_futex
-# define __NR_futex 240
-#endif
-#ifndef __NR_gettid
-# define __NR_gettid 224
-#endif
-#ifndef __NR_getcpu
-# define __NR_getcpu 318
-#endif
-#ifndef __NR_setns
-# define __NR_setns 346
-#endif
diff --git a/tools/arch/x86/include/asm/unistd_64.h b/tools/arch/x86/include/asm/unistd_64.h
deleted file mode 100644 (file)
index 4205ed4..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NR_userfaultfd
-#define __NR_userfaultfd 282
-#endif
-#ifndef __NR_perf_event_open
-# define __NR_perf_event_open 298
-#endif
-#ifndef __NR_futex
-# define __NR_futex 202
-#endif
-#ifndef __NR_gettid
-# define __NR_gettid 186
-#endif
-#ifndef __NR_getcpu
-# define __NR_getcpu 309
-#endif
-#ifndef __NR_setns
-#define __NR_setns 308
-#endif
diff --git a/tools/arch/x86/include/uapi/asm/unistd_32.h b/tools/arch/x86/include/uapi/asm/unistd_32.h
new file mode 100644 (file)
index 0000000..60a89db
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NR_perf_event_open
+# define __NR_perf_event_open 336
+#endif
+#ifndef __NR_futex
+# define __NR_futex 240
+#endif
+#ifndef __NR_gettid
+# define __NR_gettid 224
+#endif
+#ifndef __NR_getcpu
+# define __NR_getcpu 318
+#endif
+#ifndef __NR_setns
+# define __NR_setns 346
+#endif
diff --git a/tools/arch/x86/include/uapi/asm/unistd_64.h b/tools/arch/x86/include/uapi/asm/unistd_64.h
new file mode 100644 (file)
index 0000000..cb52a3a
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NR_perf_event_open
+# define __NR_perf_event_open 298
+#endif
+#ifndef __NR_futex
+# define __NR_futex 202
+#endif
+#ifndef __NR_gettid
+# define __NR_gettid 186
+#endif
+#ifndef __NR_getcpu
+# define __NR_getcpu 309
+#endif
+#ifndef __NR_setns
+#define __NR_setns 308
+#endif
index c41f95815480da3f357a16b9a0e9a2ed913d0f86..797699462cd8ea22823a6cef922a6ab5723995a1 100644 (file)
        ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
 
 #define __get_next(t, insn)    \
-       ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
+       ({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
 
 #define __peek_nbyte_next(t, insn, n)  \
-       ({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })
+       ({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); })
 
 #define get_next(t, insn)      \
        ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
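
Replacing the '*(t *)p' dereference with memcpy() avoids an unaligned access and any strict-aliasing trouble while the decoder walks an arbitrarily aligned instruction byte stream; for a fixed, small size compilers typically lower the memcpy() to a single load, so nothing changes at run time. A standalone sketch of the same idiom for one 32-bit read (the leXX_to_cpu() byte-swapping step of the kernel macro is omitted and a little-endian host is assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a 32-bit value from a possibly unaligned position in a byte buffer. */
static uint32_t read_u32(const unsigned char *p)
{
        uint32_t v;

        memcpy(&v, p, sizeof(v));       /* well-defined for any alignment */
        return v;                       /* host endianness; the kernel macro byte-swaps */
}

int main(void)
{
        unsigned char buf[] = { 0x90, 0x78, 0x56, 0x34, 0x12 };

        printf("0x%08x\n", (unsigned int)read_u32(buf + 1));    /* unaligned offset */
        return 0;
}
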
index 7862f217d85d37d234d4679c32ebe7e988a0580b..f2e506f7d57f92e59f933643c197e31bfe889dcd 100644 (file)
@@ -4,9 +4,8 @@
 
 #include <stdlib.h>
 
-#define __pa(addr)     (addr)
 #define SMP_CACHE_BYTES        0
 #define memblock_alloc(size, align)    malloc(size)
-#define memblock_free(paddr, size)     free(paddr)
+#define memblock_free_ptr(paddr, size) free(paddr)
 
 #endif
index 95c072b70d0e832f70a4ac07470bcd0d1ed46deb..8816f06fc6c7623ede69136c32ab79ccac862aee 100644 (file)
@@ -16,9 +16,9 @@
 # define __fallthrough __attribute__ ((fallthrough))
 #endif
 
-#if GCC_VERSION >= 40300
+#if __has_attribute(__error__)
 # define __compiletime_error(message) __attribute__((error(message)))
-#endif /* GCC_VERSION >= 40300 */
+#endif
 
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)     BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
@@ -38,7 +38,3 @@
 #endif
 #define __printf(a, b) __attribute__((format(printf, a, b)))
 #define __scanf(a, b)  __attribute__((format(scanf, a, b)))
-
-#if GCC_VERSION >= 50100
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
-#endif
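
Probing for the attribute with __has_attribute() is more robust than a GCC_VERSION threshold: it also does the right thing under clang and any other compiler that implements the attribute. A minimal sketch of the guarded definition; the extra fallback define is a precaution for preprocessors without __has_attribute() and is not part of the hunk above:

#ifndef __has_attribute
# define __has_attribute(x) 0                   /* very old compilers */
#endif

#if __has_attribute(__error__)
# define __compiletime_error(message) __attribute__((__error__(message)))
#else
# define __compiletime_error(message)
#endif
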
index 8712ff70995f42e4357eddb8bd9601bc42b246c0..dcb0c1bf686605ea68ab4701340d537b111a746b 100644 (file)
@@ -5,12 +5,9 @@
 #include <linux/compiler.h>
 
 /*
- * In the fallback code below, we need to compute the minimum and
- * maximum values representable in a given type. These macros may also
- * be useful elsewhere, so we provide them outside the
- * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
- *
- * It would seem more obvious to do something like
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
  *
  * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
  * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
@@ -36,8 +33,6 @@
 #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
 #define type_min(T) ((T)((T)-type_max(T)-(T)1))
 
-
-#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
 /*
  * For simplicity and code hygiene, the fallback code below insists on
  * a, b and *d having the same type (similar to the min() and max()
        __builtin_mul_overflow(__a, __b, __d);  \
 })
 
-#else
-
-
-/* Checking for unsigned overflow is relatively easy without causing UB. */
-#define __unsigned_add_overflow(a, b, d) ({    \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = __a + __b;                       \
-       *__d < __a;                             \
-})
-#define __unsigned_sub_overflow(a, b, d) ({    \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = __a - __b;                       \
-       __a < __b;                              \
-})
-/*
- * If one of a or b is a compile-time constant, this avoids a division.
- */
-#define __unsigned_mul_overflow(a, b, d) ({            \
-       typeof(a) __a = (a);                            \
-       typeof(b) __b = (b);                            \
-       typeof(d) __d = (d);                            \
-       (void) (&__a == &__b);                          \
-       (void) (&__a == __d);                           \
-       *__d = __a * __b;                               \
-       __builtin_constant_p(__b) ?                     \
-         __b > 0 && __a > type_max(typeof(__a)) / __b : \
-         __a > 0 && __b > type_max(typeof(__b)) / __a;  \
-})
-
-/*
- * For signed types, detecting overflow is much harder, especially if
- * we want to avoid UB. But the interface of these macros is such that
- * we must provide a result in *d, and in fact we must produce the
- * result promised by gcc's builtins, which is simply the possibly
- * wrapped-around value. Fortunately, we can just formally do the
- * operations in the widest relevant unsigned type (u64) and then
- * truncate the result - gcc is smart enough to generate the same code
- * with and without the (u64) casts.
- */
-
-/*
- * Adding two signed integers can overflow only if they have the same
- * sign, and overflow has happened iff the result has the opposite
- * sign.
- */
-#define __signed_add_overflow(a, b, d) ({      \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = (u64)__a + (u64)__b;             \
-       (((~(__a ^ __b)) & (*__d ^ __a))        \
-               & type_min(typeof(__a))) != 0;  \
-})
-
-/*
- * Subtraction is similar, except that overflow can now happen only
- * when the signs are opposite. In this case, overflow has happened if
- * the result has the opposite sign of a.
- */
-#define __signed_sub_overflow(a, b, d) ({      \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = (u64)__a - (u64)__b;             \
-       ((((__a ^ __b)) & (*__d ^ __a))         \
-               & type_min(typeof(__a))) != 0;  \
-})
-
-/*
- * Signed multiplication is rather hard. gcc always follows C99, so
- * division is truncated towards 0. This means that we can write the
- * overflow check like this:
- *
- * (a > 0 && (b > MAX/a || b < MIN/a)) ||
- * (a < -1 && (b > MIN/a || b < MAX/a) ||
- * (a == -1 && b == MIN)
- *
- * The redundant casts of -1 are to silence an annoying -Wtype-limits
- * (included in -Wextra) warning: When the type is u8 or u16, the
- * __b_c_e in check_mul_overflow obviously selects
- * __unsigned_mul_overflow, but unfortunately gcc still parses this
- * code and warns about the limited range of __b.
- */
-
-#define __signed_mul_overflow(a, b, d) ({                              \
-       typeof(a) __a = (a);                                            \
-       typeof(b) __b = (b);                                            \
-       typeof(d) __d = (d);                                            \
-       typeof(a) __tmax = type_max(typeof(a));                         \
-       typeof(a) __tmin = type_min(typeof(a));                         \
-       (void) (&__a == &__b);                                          \
-       (void) (&__a == __d);                                           \
-       *__d = (u64)__a * (u64)__b;                                     \
-       (__b > 0   && (__a > __tmax/__b || __a < __tmin/__b)) ||        \
-       (__b < (typeof(__b))-1  && (__a > __tmin/__b || __a < __tmax/__b)) || \
-       (__b == (typeof(__b))-1 && __a == __tmin);                      \
-})
-
-
-#define check_add_overflow(a, b, d)                                    \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_add_overflow(a, b, d),                 \
-                       __unsigned_add_overflow(a, b, d))
-
-#define check_sub_overflow(a, b, d)                                    \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_sub_overflow(a, b, d),                 \
-                       __unsigned_sub_overflow(a, b, d))
-
-#define check_mul_overflow(a, b, d)                                    \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_mul_overflow(a, b, d),                 \
-                       __unsigned_mul_overflow(a, b, d))
-
-
-#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
-
 /**
  * array_size() - Calculate size of 2-dimensional array.
  *
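
With the open-coded fallbacks removed, check_add_overflow() and friends are thin wrappers around the compiler builtins, which GCC 5.1+ and recent clang both provide: the builtin stores the possibly wrapped result through its third argument and returns true when the mathematically exact result does not fit the destination type. A standalone example using the multiply variant to validate an allocation size, a typical kernel use case:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Compute n * size, reporting overflow instead of silently wrapping. */
static bool checked_array_bytes(size_t n, size_t size, size_t *bytes)
{
        /* true means overflow, matching check_mul_overflow() semantics */
        return __builtin_mul_overflow(n, size, bytes);
}

int main(void)
{
        size_t bytes;

        if (!checked_array_bytes(1000, 16, &bytes))
                printf("ok: %zu bytes\n", bytes);
        if (checked_array_bytes((size_t)-1, 16, &bytes))
                printf("overflow detected, allocation refused\n");
        return 0;
}
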
index 1d84ec9db93bd34e77ab31643cd8b5534480f1a1..5859ca0a1439be4cc3276fd54d9cc20c3c28fa25 100644 (file)
@@ -784,6 +784,7 @@ struct snd_rawmidi_status {
 
 #define SNDRV_RAWMIDI_IOCTL_PVERSION   _IOR('W', 0x00, int)
 #define SNDRV_RAWMIDI_IOCTL_INFO       _IOR('W', 0x01, struct snd_rawmidi_info)
+#define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int)
 #define SNDRV_RAWMIDI_IOCTL_PARAMS     _IOWR('W', 0x10, struct snd_rawmidi_params)
 #define SNDRV_RAWMIDI_IOCTL_STATUS     _IOWR('W', 0x20, struct snd_rawmidi_status)
 #define SNDRV_RAWMIDI_IOCTL_DROP       _IOW('W', 0x30, int)
index 88d8825fc6f61fb91a954656090d73288dac693a..e4f83c304ec9240580bbeda3b50df34e081b61c0 100644 (file)
@@ -6894,7 +6894,8 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 
        if (obj->gen_loader) {
                /* reset FDs */
-               btf__set_fd(obj->btf, -1);
+               if (obj->btf)
+                       btf__set_fd(obj->btf, -1);
                for (i = 0; i < obj->nr_maps; i++)
                        obj->maps[i].fd = -1;
                if (!err)
index 10911a8cad0f2b7e64abd5ed45f9954971004d99..2df880cefdaeea1ff93b00134a7fcc6971939656 100644 (file)
@@ -1649,11 +1649,17 @@ static bool btf_is_non_static(const struct btf_type *t)
 static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sym_name,
                             int *out_btf_sec_id, int *out_btf_id)
 {
-       int i, j, n = btf__get_nr_types(obj->btf), m, btf_id = 0;
+       int i, j, n, m, btf_id = 0;
        const struct btf_type *t;
        const struct btf_var_secinfo *vi;
        const char *name;
 
+       if (!obj->btf) {
+               pr_warn("failed to find BTF info for object '%s'\n", obj->filename);
+               return -EINVAL;
+       }
+
+       n = btf__get_nr_types(obj->btf);
        for (i = 1; i <= n; i++) {
                t = btf__type_by_id(obj->btf, i);
 
index 1fb8b49de1d628b2ac6749754114cfee0576c468..ea655318153f26b926c4e1ee7a6e2e9f55e35eee 100644 (file)
@@ -88,6 +88,7 @@ void strset__free(struct strset *set)
 
        hashmap__free(set->strs_hash);
        free(set->strs_data);
+       free(set);
 }
 
 size_t strset__data_size(const struct strset *set)
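
The added free(set) closes a leak: hashmap__free() and free(set->strs_data) release the members, but the strset container itself was allocated too and has to be freed last. A generic standalone sketch of the members-then-container order (the struct and names are illustrative):

#include <stdlib.h>
#include <string.h>

struct strset_like {
        char *data;                     /* stands in for strs_data / the hash map */
};

static void strset_like_free(struct strset_like *set)
{
        if (!set)
                return;
        free(set->data);                /* members first */
        free(set);                      /* then the container itself */
}

int main(void)
{
        struct strset_like *set = calloc(1, sizeof(*set));

        if (!set)
                return 1;
        set->data = strdup("payload");
        strset_like_free(set);
        return 0;
}
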
index d8886720e83d8dfe4c6c50eef746fcf867224852..8441e3e1aaac3ece838985c076738f99a30f1087 100644 (file)
@@ -43,7 +43,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
        free(evsel);
 }
 
-#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
 #define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -54,7 +54,10 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
                int cpu, thread;
                for (cpu = 0; cpu < ncpus; cpu++) {
                        for (thread = 0; thread < nthreads; thread++) {
-                               FD(evsel, cpu, thread) = -1;
+                               int *fd = FD(evsel, cpu, thread);
+
+                               if (fd)
+                                       *fd = -1;
                        }
                }
        }
@@ -80,7 +83,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
 static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
 {
        struct perf_evsel *leader = evsel->leader;
-       int fd;
+       int *fd;
 
        if (evsel == leader) {
                *group_fd = -1;
@@ -95,10 +98,10 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou
                return -ENOTCONN;
 
        fd = FD(leader, cpu, thread);
-       if (fd == -1)
+       if (fd == NULL || *fd == -1)
                return -EBADF;
 
-       *group_fd = fd;
+       *group_fd = *fd;
 
        return 0;
 }
@@ -138,7 +141,11 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
 
        for (cpu = 0; cpu < cpus->nr; cpu++) {
                for (thread = 0; thread < threads->nr; thread++) {
-                       int fd, group_fd;
+                       int fd, group_fd, *evsel_fd;
+
+                       evsel_fd = FD(evsel, cpu, thread);
+                       if (evsel_fd == NULL)
+                               return -EINVAL;
 
                        err = get_group_fd(evsel, cpu, thread, &group_fd);
                        if (err < 0)
@@ -151,7 +158,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
                        if (fd < 0)
                                return -errno;
 
-                       FD(evsel, cpu, thread) = fd;
+                       *evsel_fd = fd;
                }
        }
 
@@ -163,9 +170,12 @@ static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
        int thread;
 
        for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
-               if (FD(evsel, cpu, thread) >= 0)
-                       close(FD(evsel, cpu, thread));
-               FD(evsel, cpu, thread) = -1;
+               int *fd = FD(evsel, cpu, thread);
+
+               if (fd && *fd >= 0) {
+                       close(*fd);
+                       *fd = -1;
+               }
        }
 }
 
@@ -209,13 +219,12 @@ void perf_evsel__munmap(struct perf_evsel *evsel)
 
        for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
                for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-                       int fd = FD(evsel, cpu, thread);
-                       struct perf_mmap *map = MMAP(evsel, cpu, thread);
+                       int *fd = FD(evsel, cpu, thread);
 
-                       if (fd < 0)
+                       if (fd == NULL || *fd < 0)
                                continue;
 
-                       perf_mmap__munmap(map);
+                       perf_mmap__munmap(MMAP(evsel, cpu, thread));
                }
        }
 
@@ -239,15 +248,16 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
 
        for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
                for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-                       int fd = FD(evsel, cpu, thread);
-                       struct perf_mmap *map = MMAP(evsel, cpu, thread);
+                       int *fd = FD(evsel, cpu, thread);
+                       struct perf_mmap *map;
 
-                       if (fd < 0)
+                       if (fd == NULL || *fd < 0)
                                continue;
 
+                       map = MMAP(evsel, cpu, thread);
                        perf_mmap__init(map, NULL, false, NULL);
 
-                       ret = perf_mmap__mmap(map, &mp, fd, cpu);
+                       ret = perf_mmap__mmap(map, &mp, *fd, cpu);
                        if (ret) {
                                perf_evsel__munmap(evsel);
                                return ret;
@@ -260,7 +270,9 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
 
 void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
 {
-       if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
+       int *fd = FD(evsel, cpu, thread);
+
+       if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
                return NULL;
 
        return MMAP(evsel, cpu, thread)->base;
@@ -295,17 +307,18 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
                     struct perf_counts_values *count)
 {
        size_t size = perf_evsel__read_size(evsel);
+       int *fd = FD(evsel, cpu, thread);
 
        memset(count, 0, sizeof(*count));
 
-       if (FD(evsel, cpu, thread) < 0)
+       if (fd == NULL || *fd < 0)
                return -EINVAL;
 
        if (MMAP(evsel, cpu, thread) &&
            !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
                return 0;
 
-       if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
+       if (readn(*fd, count->values, size) <= 0)
                return -errno;
 
        return 0;
@@ -318,8 +331,13 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
        int thread;
 
        for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-               int fd = FD(evsel, cpu, thread),
-                   err = ioctl(fd, ioc, arg);
+               int err;
+               int *fd = FD(evsel, cpu, thread);
+
+               if (fd == NULL || *fd < 0)
+                       return -1;
+
+               err = ioctl(*fd, ioc, arg);
 
                if (err)
                        return err;
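
Throughout this file FD() now yields the address of the descriptor slot ('int *', or NULL when the lookup fails), so each caller validates the pointer before reading or updating the stored fd rather than dereferencing blindly. A standalone sketch of the same accessor shape on a plain 2-D table; the table type and bounds are illustrative, not libperf's xyarray:

#include <stdio.h>

#define NCPUS    2
#define NTHREADS 2

static int fds[NCPUS][NTHREADS];

/* Return the address of the slot, or NULL if the indices are out of range. */
static int *fd_slot(int cpu, int thread)
{
        if (cpu < 0 || cpu >= NCPUS || thread < 0 || thread >= NTHREADS)
                return NULL;
        return &fds[cpu][thread];
}

int main(void)
{
        int *fd = fd_slot(1, 1);

        if (fd)
                *fd = -1;               /* initialise the slot, as perf_evsel__alloc_fd() does */

        fd = fd_slot(5, 0);             /* out of range */
        if (!fd)
                printf("no slot: the caller bails out instead of crashing\n");
        return 0;
}
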
index c67c833991708547dc06f9d843bde8d7965be908..ce91a582f0e4114195cf6006d53639accf1703bd 100644 (file)
@@ -40,7 +40,7 @@ static int test_stat_cpu(void)
                .type   = PERF_TYPE_SOFTWARE,
                .config = PERF_COUNT_SW_TASK_CLOCK,
        };
-       int err, cpu, tmp;
+       int err, idx;
 
        cpus = perf_cpu_map__new(NULL);
        __T("failed to create cpus", cpus);
@@ -70,10 +70,10 @@ static int test_stat_cpu(void)
        perf_evlist__for_each_evsel(evlist, evsel) {
                cpus = perf_evsel__cpus(evsel);
 
-               perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
+               for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
                        struct perf_counts_values counts = { .val = 0 };
 
-                       perf_evsel__read(evsel, cpu, 0, &counts);
+                       perf_evsel__read(evsel, idx, 0, &counts);
                        __T("failed to read value for evsel", counts.val != 0);
                }
        }
index a184e4861627e73999c03ec50073fd6c92203891..33ae9334861a242ab59a68979d32ca3b7ea8aa68 100644 (file)
@@ -22,7 +22,7 @@ static int test_stat_cpu(void)
                .type   = PERF_TYPE_SOFTWARE,
                .config = PERF_COUNT_SW_CPU_CLOCK,
        };
-       int err, cpu, tmp;
+       int err, idx;
 
        cpus = perf_cpu_map__new(NULL);
        __T("failed to create cpus", cpus);
@@ -33,10 +33,10 @@ static int test_stat_cpu(void)
        err = perf_evsel__open(evsel, cpus, NULL);
        __T("failed to open evsel", err == 0);
 
-       perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
+       for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
                struct perf_counts_values counts = { .val = 0 };
 
-               perf_evsel__read(evsel, cpu, 0, &counts);
+               perf_evsel__read(evsel, idx, 0, &counts);
                __T("failed to read value for evsel", counts.val != 0);
        }
 
@@ -148,6 +148,7 @@ static int test_stat_user_read(int event)
        __T("failed to mmap evsel", err == 0);
 
        pc = perf_evsel__mmap_base(evsel, 0, 0);
+       __T("failed to get mmapped address", pc);
 
 #if defined(__i386__) || defined(__x86_64__)
        __T("userspace counter access not supported", pc->cap_user_rdpmc);
index bc821056aba902e5630076f61273fdc5fb1c83cb..0893436cc09f84c835bb044c6dd78cfaa14920d7 100644 (file)
@@ -684,7 +684,7 @@ static int elf_add_alternative(struct elf *elf,
        sec = find_section_by_name(elf, ".altinstructions");
        if (!sec) {
                sec = elf_create_section(elf, ".altinstructions",
-                                        SHF_ALLOC, size, 0);
+                                        SHF_ALLOC, 0, 0);
 
                if (!sec) {
                        WARN_ELF("elf_create_section");
index e5947fbb9e7a6d602686af947c5caf1f2158157f..06b5c164ae9318150aacdfd23a9a0fd90df49e39 100644 (file)
@@ -292,7 +292,7 @@ static int decode_instructions(struct objtool_file *file)
                    !strcmp(sec->name, ".entry.text"))
                        sec->noinstr = true;
 
-               for (offset = 0; offset < sec->len; offset += insn->len) {
+               for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
                        insn = malloc(sizeof(*insn));
                        if (!insn) {
                                WARN("malloc failed");
@@ -307,7 +307,7 @@ static int decode_instructions(struct objtool_file *file)
                        insn->offset = offset;
 
                        ret = arch_decode_instruction(file->elf, sec, offset,
-                                                     sec->len - offset,
+                                                     sec->sh.sh_size - offset,
                                                      &insn->len, &insn->type,
                                                      &insn->immediate,
                                                      &insn->stack_ops);
@@ -349,9 +349,9 @@ static struct instruction *find_last_insn(struct objtool_file *file,
 {
        struct instruction *insn = NULL;
        unsigned int offset;
-       unsigned int end = (sec->len > 10) ? sec->len - 10 : 0;
+       unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
 
-       for (offset = sec->len - 1; offset >= end && !insn; offset--)
+       for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
                insn = find_insn(file, sec, offset);
 
        return insn;
@@ -389,7 +389,7 @@ static int add_dead_ends(struct objtool_file *file)
                insn = find_insn(file, reloc->sym->sec, reloc->addend);
                if (insn)
                        insn = list_prev_entry(insn, list);
-               else if (reloc->addend == reloc->sym->sec->len) {
+               else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
                        insn = find_last_insn(file, reloc->sym->sec);
                        if (!insn) {
                                WARN("can't find unreachable insn at %s+0x%x",
@@ -424,7 +424,7 @@ reachable:
                insn = find_insn(file, reloc->sym->sec, reloc->addend);
                if (insn)
                        insn = list_prev_entry(insn, list);
-               else if (reloc->addend == reloc->sym->sec->len) {
+               else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
                        insn = find_last_insn(file, reloc->sym->sec);
                        if (!insn) {
                                WARN("can't find reachable insn at %s+0x%x",
@@ -1561,14 +1561,14 @@ static int read_unwind_hints(struct objtool_file *file)
                return -1;
        }
 
-       if (sec->len % sizeof(struct unwind_hint)) {
+       if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
                WARN("struct unwind_hint size mismatch");
                return -1;
        }
 
        file->hints = true;
 
-       for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
+       for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
                hint = (struct unwind_hint *)sec->data->d_buf + i;
 
                reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
index 8676c759872810d1e7cff3f7e2c5adc061164700..b18f0055b50b8018624331bbae9181b0d2eb730c 100644 (file)
@@ -286,10 +286,9 @@ static int read_sections(struct elf *elf)
                                return -1;
                        }
                }
-               sec->len = sec->sh.sh_size;
 
                if (sec->sh.sh_flags & SHF_EXECINSTR)
-                       elf->text_size += sec->len;
+                       elf->text_size += sec->sh.sh_size;
 
                list_add_tail(&sec->list, &elf->sections);
                elf_hash_add(section, &sec->hash, sec->idx);
@@ -734,8 +733,8 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
        data->d_size = strlen(str) + 1;
        data->d_align = 1;
 
-       len = strtab->len;
-       strtab->len += data->d_size;
+       len = strtab->sh.sh_size;
+       strtab->sh.sh_size += data->d_size;
        strtab->changed = true;
 
        return len;
@@ -790,9 +789,9 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
        data->d_align = 1;
        data->d_type = ELF_T_SYM;
 
-       sym->idx = symtab->len / sizeof(sym->sym);
+       sym->idx = symtab->sh.sh_size / sizeof(sym->sym);
 
-       symtab->len += data->d_size;
+       symtab->sh.sh_size += data->d_size;
        symtab->changed = true;
 
        symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
@@ -814,7 +813,7 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
                data->d_align = 4;
                data->d_type = ELF_T_WORD;
 
-               symtab_shndx->len += 4;
+               symtab_shndx->sh.sh_size += 4;
                symtab_shndx->changed = true;
        }
 
@@ -855,7 +854,6 @@ struct section *elf_create_section(struct elf *elf, const char *name,
        }
 
        sec->idx = elf_ndxscn(s);
-       sec->len = size;
        sec->changed = true;
 
        sec->data = elf_newdata(s);
index e34395047530996207733546dbb38b03ed413510..075d8291b8546bda92af6e735cf58ad202c27069 100644 (file)
@@ -38,7 +38,6 @@ struct section {
        Elf_Data *data;
        char *name;
        int idx;
-       unsigned int len;
        bool changed, text, rodata, noinstr;
 };
 
index dc9b7dd314b05fbc0780e5699931476cdfdbe6c6..b5865e2450cbb5568446e4a4aad53d14f4d724d6 100644 (file)
@@ -204,7 +204,7 @@ int orc_create(struct objtool_file *file)
 
                /* Add a section terminator */
                if (!empty) {
-                       orc_list_add(&orc_list, &null, sec, sec->len);
+                       orc_list_add(&orc_list, &null, sec, sec->sh.sh_size);
                        nr++;
                }
        }
index bc925cf19e2dee7a3087829f4800ac71e0c6f6ca..06c3eacab3d53453706e795b01a02b781838a189 100644 (file)
@@ -58,6 +58,13 @@ void __weak arch_handle_alternative(unsigned short feature, struct special_alt *
 {
 }
 
+static void reloc_to_sec_off(struct reloc *reloc, struct section **sec,
+                            unsigned long *off)
+{
+       *sec = reloc->sym->sec;
+       *off = reloc->sym->offset + reloc->addend;
+}
+
 static int get_alt_entry(struct elf *elf, struct special_entry *entry,
                         struct section *sec, int idx,
                         struct special_alt *alt)
@@ -91,14 +98,8 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
                WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
                return -1;
        }
-       if (orig_reloc->sym->type != STT_SECTION) {
-               WARN_FUNC("don't know how to handle non-section reloc symbol %s",
-                          sec, offset + entry->orig, orig_reloc->sym->name);
-               return -1;
-       }
 
-       alt->orig_sec = orig_reloc->sym->sec;
-       alt->orig_off = orig_reloc->addend;
+       reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
 
        if (!entry->group || alt->new_len) {
                new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
@@ -116,8 +117,7 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
                if (arch_is_retpoline(new_reloc->sym))
                        return 1;
 
-               alt->new_sec = new_reloc->sym->sec;
-               alt->new_off = (unsigned int)new_reloc->addend;
+               reloc_to_sec_off(new_reloc, &alt->new_sec, &alt->new_off);
 
                /* _ASM_EXTABLE_EX hack */
                if (alt->new_off >= 0x7ffffff0)
@@ -159,13 +159,13 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
                if (!sec)
                        continue;
 
-               if (sec->len % entry->size != 0) {
+               if (sec->sh.sh_size % entry->size != 0) {
                        WARN("%s size not a multiple of %d",
                             sec->name, entry->size);
                        return -1;
                }
 
-               nr_entries = sec->len / entry->size;
+               nr_entries = sec->sh.sh_size / entry->size;
 
                for (idx = 0; idx < nr_entries; idx++) {
                        alt = malloc(sizeof(*alt));
index 52152d156ad907e22c430e7ac48ef64c42f6032f..79936355d819a66969b90adcbd6e427a4c26b0a5 100644 (file)
@@ -164,7 +164,7 @@ const char unwinding_data[n]: an array of unwinding data, consisting of the EH F
 The EH Frame header follows the Linux Standard Base (LSB) specification as described in the document at https://refspecs.linuxfoundation.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html
 
 
-The EH Frame follows the LSB specicfication as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
+The EH Frame follows the LSB specification as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
 
 
 NOTE: The mapped_size is generally either the same as unwind_data_size (if the unwinding data was mapped in memory by the running process) or zero (if the unwinding data is not mapped by the process). If the unwinding data was not mapped, then only the EH Frame Header will be read, which can be used to specify FP based unwinding for a function which does not have unwinding information.
index de6beedb72834b45e29175b674d6e49a2ab0a49d..3b6a2c84ea027e955ba6cb7e5ba290f0d876a5b6 100644 (file)
@@ -261,7 +261,7 @@ COALESCE
 User can specify how to sort offsets for cacheline.
 
 Following fields are available and governs the final
-output fields set for caheline offsets output:
+output fields set for cacheline offsets output:
 
   tid   - coalesced by process TIDs
   pid   - coalesced by process PIDs
index 184ba62420f099707370e7db674caf7900fe3254..db465fa7ee918a19f39c2fbd03b451f3f9adffa8 100644 (file)
@@ -883,7 +883,7 @@ and "r" can be combined to get calls and returns.
 
 "Transactions" events correspond to the start or end of transactions. The
 'flags' field can be used in perf script to determine whether the event is a
-tranasaction start, commit or abort.
+transaction start, commit or abort.
 
 Note that "instructions", "branches" and "transactions" events depend on code
 flow packets which can be disabled by using the config term "branch=0".  Refer
index 74d7745921968b3a7286311f740d3f5aab0315e2..1b4d452923d7e2327f378b2922cdc7ae4856a69d 100644 (file)
@@ -44,7 +44,7 @@ COMMON OPTIONS
 
 -f::
 --force::
-       Don't complan, do it.
+       Don't complain, do it.
 
 REPORT OPTIONS
 --------------
index 5a1f68122f50aaaa13baa1d8491a40dbdce92f47..fa4f39d305a7d576107bd49f38d1a6c91819f98c 100644 (file)
@@ -54,7 +54,7 @@ all sched_wakeup events in the system:
 Traces meant to be processed using a script should be recorded with
 the above option: -a to enable system-wide collection.
 
-The format file for the sched_wakep event defines the following fields
+The format file for the sched_wakeup event defines the following fields
 (see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
 
 ----
index 0250dc61cf98266898b9444db332402608f93447..cf4b7f4b625adc7a3c1bc58076f395bbc308bf2f 100644 (file)
@@ -448,7 +448,7 @@ all sched_wakeup events in the system:
 Traces meant to be processed using a script should be recorded with
 the above option: -a to enable system-wide collection.
 
-The format file for the sched_wakep event defines the following fields
+The format file for the sched_wakeup event defines the following fields
 (see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
 
 ----
index 4c9310be6acc9089dae38c45f5f1ce42ed058b7e..7e6fb7cbc0f423b09bdb1fb3f5dd243bbe2cd973 100644 (file)
@@ -385,7 +385,7 @@ Aggregate counts per physical processor for system-wide mode measurements.
 Print metrics or metricgroups specified in a comma separated list.
 For a group all metrics from the group are added.
 The events from the metrics are automatically measured.
-See perf list output for the possble metrics and metricgroups.
+See perf list output for the possible metrics and metricgroups.
 
 -A::
 --no-aggr::
index c6302df4cf29b9afbad34cc6165a29d15b8aa51d..a15b93fdcf50f910d596ec00dac712837154859d 100644 (file)
@@ -2,7 +2,7 @@ Using TopDown metrics in user space
 -----------------------------------
 
 Intel CPUs (since Sandy Bridge and Silvermont) support a TopDown
-methology to break down CPU pipeline execution into 4 bottlenecks:
+methodology to break down CPU pipeline execution into 4 bottlenecks:
 frontend bound, backend bound, bad speculation, retiring.
 
 For more details on Topdown see [1][5]
index 446180401e267f27cc35852572b77131e3353fe5..14e3e8d702a023df558d14d57def0aa37db83d42 100644 (file)
@@ -143,7 +143,7 @@ FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
 ifdef CSINCLUDES
   LIBOPENCSD_CFLAGS := -I$(CSINCLUDES)
 endif
-OPENCSDLIBS := -lopencsd_c_api -lopencsd
+OPENCSDLIBS := -lopencsd_c_api -lopencsd -lstdc++
 ifdef CSLIBS
   LIBOPENCSD_LDFLAGS := -L$(CSLIBS)
 endif
index e04313c4d8409a9429fcd7d8080610d73441b783..5cd702062a042630bbd244cf9d188e4f9118d81d 100644 (file)
@@ -802,7 +802,7 @@ endif
 
 $(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
 
-LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
+LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(filter-out -static,$(LDFLAGS))'
 
 $(LIBTRACEEVENT): FORCE
        $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
index c7c7ec0812d5aef4c4399ca3e86ecdf69d0c9815..5fc6a2a3dbc5fd77215047d7d2c5e8df92f165c4 100644 (file)
@@ -8,10 +8,10 @@
 #include <linux/coresight-pmu.h>
 #include <linux/zalloc.h>
 
-#include "../../util/auxtrace.h"
-#include "../../util/debug.h"
-#include "../../util/evlist.h"
-#include "../../util/pmu.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/debug.h"
+#include "../../../util/evlist.h"
+#include "../../../util/pmu.h"
 #include "cs-etm.h"
 #include "arm-spe.h"
 
index 515aae470e23b0cf601d0711ecb355adcbcda043..293a23bf8be39a25a6a3813f60e082e62062bbb3 100644 (file)
 #include <linux/zalloc.h>
 
 #include "cs-etm.h"
-#include "../../util/debug.h"
-#include "../../util/record.h"
-#include "../../util/auxtrace.h"
-#include "../../util/cpumap.h"
-#include "../../util/event.h"
-#include "../../util/evlist.h"
-#include "../../util/evsel.h"
-#include "../../util/perf_api_probe.h"
-#include "../../util/evsel_config.h"
-#include "../../util/pmu.h"
-#include "../../util/cs-etm.h"
+#include "../../../util/debug.h"
+#include "../../../util/record.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/event.h"
+#include "../../../util/evlist.h"
+#include "../../../util/evsel.h"
+#include "../../../util/perf_api_probe.h"
+#include "../../../util/evsel_config.h"
+#include "../../../util/pmu.h"
+#include "../../../util/cs-etm.h"
 #include <internal/lib.h> // page_size
-#include "../../util/session.h"
+#include "../../../util/session.h"
 
 #include <errno.h>
 #include <stdlib.h>
index 2864e2e3776d5105d39f83f4de94d306df4a07ac..2833e101a7c6407263130e9948a06a2caa32bc4b 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-#include "../../util/perf_regs.h"
+#include "../../../util/perf_regs.h"
 
 const struct sample_reg sample_reg_masks[] = {
        SMPL_REG_END
index bbc297a7e2e3517059f9c3b33816deb69074ac20..b8b23b9dc5987a4dc118ee7bc45b162d08a57111 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/string.h>
 
 #include "arm-spe.h"
-#include "../../util/pmu.h"
+#include "../../../util/pmu.h"
 
 struct perf_event_attr
 *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
index 36ba4c69c3c55cfec6c33a4b5cd52ee31d36f538..b7692cb0c73398e4f8c6bc6df4ebd7c83359fd3f 100644 (file)
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <elfutils/libdwfl.h>
-#include "../../util/unwind-libdw.h"
-#include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../../util/unwind-libdw.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/event.h"
 
 bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
 {
index 3a550225dfafd37d579ba0a464e9ec067485660d..438906bf0014abb75d16db98ac304a20e7e5fb1b 100644 (file)
@@ -3,8 +3,8 @@
 #include <errno.h>
 #include <libunwind.h>
 #include "perf_regs.h"
-#include "../../util/unwind.h"
-#include "../../util/debug.h"
+#include "../../../util/unwind.h"
+#include "../../../util/debug.h"
 
 int libunwind__arch_reg_id(int regnum)
 {
index eeafe97b8105bbd0b1c36cf0b8393624d88eb48e..792cd75ade33d8707dd9dcab36962d5244c0977b 100644 (file)
@@ -432,7 +432,7 @@ void iostat_print_metric(struct perf_stat_config *config, struct evsel *evsel,
        u8 die = ((struct iio_root_port *)evsel->priv)->die;
        struct perf_counts_values *count = perf_counts(evsel->counts, die, 0);
 
-       if (count->run && count->ena) {
+       if (count && count->run && count->ena) {
                if (evsel->prev_raw_counts && !out->force_header) {
                        struct perf_counts_values *prev_count =
                                perf_counts(evsel->prev_raw_counts, die, 0);
index 0e824f7d8b19cd7b7b4dddcf200ae14f569cde94..6211d0b84b7a63edb843429ac1c2e6f8390f3887 100644 (file)
@@ -368,16 +368,6 @@ static inline int output_type(unsigned int type)
        return OUTPUT_TYPE_OTHER;
 }
 
-static inline unsigned int attr_type(unsigned int type)
-{
-       switch (type) {
-       case OUTPUT_TYPE_SYNTH:
-               return PERF_TYPE_SYNTH;
-       default:
-               return type;
-       }
-}
-
 static bool output_set_by_user(void)
 {
        int j;
@@ -556,6 +546,18 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
                output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
 }
 
+static struct evsel *find_first_output_type(struct evlist *evlist,
+                                           unsigned int type)
+{
+       struct evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel) {
+               if (output_type(evsel->core.attr.type) == (int)type)
+                       return evsel;
+       }
+       return NULL;
+}
+
 /*
  * verify all user requested events exist and the samples
  * have the expected data
@@ -567,7 +569,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
        struct evsel *evsel;
 
        for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
-               evsel = perf_session__find_first_evtype(session, attr_type(j));
+               evsel = find_first_output_type(session->evlist, j);
 
                /*
                 * even if fields is set to 0 (ie., show nothing) event must
index f6e87b7be5fa22dd1c628b0c76df923bb981eb3b..f0ecfda34ecebaccf8e85e029f9fce50379bdf29 100644 (file)
@@ -2408,6 +2408,8 @@ int cmd_stat(int argc, const char **argv)
                        goto out;
                } else if (verbose)
                        iostat_list(evsel_list, &stat_config);
+               if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
+                       target.system_wide = true;
        }
 
        if (add_default_attributes())
index 84a0cedf1fd9bcfb7bf648a48640320a578fe18b..f1f2965f6775cad71bb0d47f3b15a9064d9c3b75 100644 (file)
   {
     "EventCode": "0x4e010",
     "EventName": "PM_GCT_NOSLOT_IC_L3MISS",
-    "BriefDescription": "Gct empty for this thread due to icach l3 miss",
+    "BriefDescription": "Gct empty for this thread due to icache l3 miss",
     "PublicDescription": ""
   },
   {
index 6731b3cf0c2fc9b7c353f582c06fb186dfb847f4..7c887d37b893466ae84fa7d82c646cd1c63f0192 100644 (file)
@@ -1285,6 +1285,7 @@ int main(int argc, char *argv[])
        }
 
        free_arch_std_events();
+       free_sys_event_tables();
        free(mapfile);
        return 0;
 
@@ -1306,6 +1307,7 @@ err_close_eventsfp:
                create_empty_mapping(output_file);
 err_out:
        free_arch_std_events();
+       free_sys_event_tables();
        free(mapfile);
        return ret;
 }
index d9e99b3f77e66e707cca6c8e115120b0105c84b3..d8ea6a88163fbd1405cd072ad6cd94d4b68e029e 100644 (file)
@@ -68,3 +68,100 @@ fd=10
 type=0
 config=5
 optional=1
+
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
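
The comments in this expected-attr file give the raw topdown encodings in hex while the config= lines use decimal; the following mapping is an illustrative sketch only (it is not part of the test files) showing that the two notations describe the same events.

/* Illustrative mapping: decimal config= values vs. the hex encodings
 * named in the comments above. */
static const struct { const char *event; unsigned int config; } topdown_raw[] = {
        { "slots",                 0x0400 },  /* 1024  */
        { "topdown-retiring",      0x8000 },  /* 32768 */
        { "topdown-bad-spec",      0x8100 },  /* 33024 */
        { "topdown-fe-bound",      0x8200 },  /* 33280 */
        { "topdown-be-bound",      0x8300 },  /* 33536 */
        { "topdown-heavy-ops",     0x8400 },  /* 33792 */
        { "topdown-br-mispredict", 0x8500 },  /* 34048 */
        { "topdown-fetch-lat",     0x8600 },  /* 34304 */
        { "topdown-mem-bound",     0x8700 },  /* 34560 */
};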
index 8b04a055d1548d03a84c53ad719e249eeb807197..b656ab93c5bf910ded52eebca2fa7f985d7b6221 100644 (file)
@@ -70,12 +70,109 @@ type=0
 config=5
 optional=1
 
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
 # PERF_TYPE_HW_CACHE /
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event11:base-stat]
-fd=11
+[event20:base-stat]
+fd=20
 type=3
 config=0
 optional=1
@@ -84,8 +181,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event12:base-stat]
-fd=12
+[event21:base-stat]
+fd=21
 type=3
 config=65536
 optional=1
@@ -94,8 +191,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event13:base-stat]
-fd=13
+[event22:base-stat]
+fd=22
 type=3
 config=2
 optional=1
@@ -104,8 +201,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event14:base-stat]
-fd=14
+[event23:base-stat]
+fd=23
 type=3
 config=65538
 optional=1
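
The PERF_TYPE_HW_CACHE comments above build config as cache_id | (op_id << 8) | (result_id << 16). Using the enum values from the perf_event UAPI header (L1D=0, LL=2; OP_READ=0; RESULT_ACCESS=0, RESULT_MISS=1), a quick worked example shows where the decimal values in these files come from:

  L1D read access : 0 | (0 << 8) | (0 << 16) = 0
  L1D read miss   : 0 | (0 << 8) | (1 << 16) = 65536
  LL  read access : 2 | (0 << 8) | (0 << 16) = 2
  LL  read miss   : 2 | (0 << 8) | (1 << 16) = 2 + 65536 = 65538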
index 4fca9f1bfbf8d36932b814d70750c6e826c9cc05..97625090a1c4c299b3ee528b4c084b862c3cfab0 100644 (file)
@@ -70,12 +70,109 @@ type=0
 config=5
 optional=1
 
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
 # PERF_TYPE_HW_CACHE /
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event11:base-stat]
-fd=11
+[event20:base-stat]
+fd=20
 type=3
 config=0
 optional=1
@@ -84,8 +181,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event12:base-stat]
-fd=12
+[event21:base-stat]
+fd=21
 type=3
 config=65536
 optional=1
@@ -94,8 +191,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event13:base-stat]
-fd=13
+[event22:base-stat]
+fd=22
 type=3
 config=2
 optional=1
@@ -104,8 +201,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event14:base-stat]
-fd=14
+[event23:base-stat]
+fd=23
 type=3
 config=65538
 optional=1
@@ -114,8 +211,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event15:base-stat]
-fd=15
+[event24:base-stat]
+fd=24
 type=3
 config=1
 optional=1
@@ -124,8 +221,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event16:base-stat]
-fd=16
+[event25:base-stat]
+fd=25
 type=3
 config=65537
 optional=1
@@ -134,8 +231,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event17:base-stat]
-fd=17
+[event26:base-stat]
+fd=26
 type=3
 config=3
 optional=1
@@ -144,8 +241,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event18:base-stat]
-fd=18
+[event27:base-stat]
+fd=27
 type=3
 config=65539
 optional=1
@@ -154,8 +251,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event19:base-stat]
-fd=19
+[event28:base-stat]
+fd=28
 type=3
 config=4
 optional=1
@@ -164,8 +261,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event20:base-stat]
-fd=20
+[event29:base-stat]
+fd=29
 type=3
 config=65540
 optional=1
index 4bb58e1c82a676c7b5b7c7ab411f6d93f424864e..d555042e3fbfe6f978a074d073f35adbce333c6b 100644 (file)
@@ -70,12 +70,109 @@ type=0
 config=5
 optional=1
 
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
 # PERF_TYPE_HW_CACHE /
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event11:base-stat]
-fd=11
+[event20:base-stat]
+fd=20
 type=3
 config=0
 optional=1
@@ -84,8 +181,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event12:base-stat]
-fd=12
+[event21:base-stat]
+fd=21
 type=3
 config=65536
 optional=1
@@ -94,8 +191,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event13:base-stat]
-fd=13
+[event22:base-stat]
+fd=22
 type=3
 config=2
 optional=1
@@ -104,8 +201,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event14:base-stat]
-fd=14
+[event23:base-stat]
+fd=23
 type=3
 config=65538
 optional=1
@@ -114,8 +211,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event15:base-stat]
-fd=15
+[event24:base-stat]
+fd=24
 type=3
 config=1
 optional=1
@@ -124,8 +221,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event16:base-stat]
-fd=16
+[event25:base-stat]
+fd=25
 type=3
 config=65537
 optional=1
@@ -134,8 +231,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event17:base-stat]
-fd=17
+[event26:base-stat]
+fd=26
 type=3
 config=3
 optional=1
@@ -144,8 +241,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event18:base-stat]
-fd=18
+[event27:base-stat]
+fd=27
 type=3
 config=65539
 optional=1
@@ -154,8 +251,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event19:base-stat]
-fd=19
+[event28:base-stat]
+fd=28
 type=3
 config=4
 optional=1
@@ -164,8 +261,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event20:base-stat]
-fd=20
+[event29:base-stat]
+fd=29
 type=3
 config=65540
 optional=1
@@ -174,8 +271,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event21:base-stat]
-fd=21
+[event30:base-stat]
+fd=30
 type=3
 config=512
 optional=1
@@ -184,8 +281,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event22:base-stat]
-fd=22
+[event31:base-stat]
+fd=31
 type=3
 config=66048
 optional=1
index 9866cddebf237c2fdf8b9c1d28501e9a4b277962..9b4a765e4b7340d8a58ef6a2afd26584a6759a8c 100644 (file)
@@ -229,8 +229,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
                            struct thread *thread, struct state *state)
 {
        struct addr_location al;
-       unsigned char buf1[BUFSZ];
-       unsigned char buf2[BUFSZ];
+       unsigned char buf1[BUFSZ] = {0};
+       unsigned char buf2[BUFSZ] = {0};
        size_t ret_len;
        u64 objdump_addr;
        const char *objdump_name;
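
Zero-initializing the two buffers matters because the readers may fill fewer than BUFSZ bytes before the later comparison. A standalone sketch of the idiom (the BUFSZ value here is assumed, not taken from the test):

#include <string.h>

#define BUFSZ 1024      /* assumed size for this sketch */

int buffers_compare_cleanly(void)
{
        unsigned char buf1[BUFSZ] = {0};        /* every byte is zero... */
        unsigned char buf2[BUFSZ] = {0};        /* ...not indeterminate */

        /* even if a reader later fills fewer than BUFSZ bytes of each buffer,
         * comparing the full length never touches uninitialized memory */
        return memcmp(buf1, buf2, BUFSZ) == 0;
}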
index a288035eb36269bcb9d062df3e648faf7ed7bb2c..c756284b3b1353292b677d7f32b923877515c56e 100644 (file)
 /* For bsearch. We try to unwind functions in shared object. */
 #include <stdlib.h>
 
+/*
+ * The test will assert frames are on the stack but tail call optimizations lose
+ * the frame of the caller. Clang can disable this optimization on a called
+ * function but GCC currently (11/2020) lacks this attribute. The barrier is
+ * used to inhibit tail calls in these cases.
+ */
+#ifdef __has_attribute
+#if __has_attribute(disable_tail_calls)
+#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))
+#define NO_TAIL_CALL_BARRIER
+#endif
+#endif
+#ifndef NO_TAIL_CALL_ATTRIBUTE
+#define NO_TAIL_CALL_ATTRIBUTE
+#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
+#endif
+
 static int mmap_handler(struct perf_tool *tool __maybe_unused,
                        union perf_event *event,
                        struct perf_sample *sample,
@@ -91,7 +108,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
        return strcmp((const char *) symbol, funcs[idx]);
 }
 
-noinline int test_dwarf_unwind__thread(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)
 {
        struct perf_sample sample;
        unsigned long cnt = 0;
@@ -122,7 +139,7 @@ noinline int test_dwarf_unwind__thread(struct thread *thread)
 
 static int global_unwind_retval = -INT_MAX;
 
-noinline int test_dwarf_unwind__compare(void *p1, void *p2)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2)
 {
        /* Any possible value should be 'thread' */
        struct thread *thread = *(struct thread **)p1;
@@ -141,7 +158,7 @@ noinline int test_dwarf_unwind__compare(void *p1, void *p2)
        return p1 - p2;
 }
 
-noinline int test_dwarf_unwind__krava_3(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)
 {
        struct thread *array[2] = {thread, thread};
        void *fp = &bsearch;
@@ -160,14 +177,22 @@ noinline int test_dwarf_unwind__krava_3(struct thread *thread)
        return global_unwind_retval;
 }
 
-noinline int test_dwarf_unwind__krava_2(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread)
 {
-       return test_dwarf_unwind__krava_3(thread);
+       int ret;
+
+       ret =  test_dwarf_unwind__krava_3(thread);
+       NO_TAIL_CALL_BARRIER;
+       return ret;
 }
 
-noinline int test_dwarf_unwind__krava_1(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)
 {
-       return test_dwarf_unwind__krava_2(thread);
+       int ret;
+
+       ret =  test_dwarf_unwind__krava_2(thread);
+       NO_TAIL_CALL_BARRIER;
+       return ret;
 }
 
 int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
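
The macro pair introduced above is a general way to keep intermediate frames visible to an unwinder: Clang gets the disable_tail_calls attribute, GCC gets an asm memory barrier after the call. A minimal standalone sketch of the same technique, with invented function names, is:

#include <stdio.h>

#ifdef __has_attribute
#if __has_attribute(disable_tail_calls)
#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))
#define NO_TAIL_CALL_BARRIER
#endif
#endif
#ifndef NO_TAIL_CALL_ATTRIBUTE
#define NO_TAIL_CALL_ATTRIBUTE
#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
#endif

static __attribute__((noinline)) int leaf(int x)
{
        return x + 1;
}

static NO_TAIL_CALL_ATTRIBUTE __attribute__((noinline)) int caller(int x)
{
        int ret;

        ret = leaf(x);          /* without the attribute, "return leaf(x);"
                                 * could become a jump and caller() would
                                 * vanish from the call stack */
        NO_TAIL_CALL_BARRIER;   /* GCC fallback: inhibit the tail call */
        return ret;
}

int main(void)
{
        printf("%d\n", caller(41));
        return 0;
}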
index 781afe42e90e0455be687c577a84665d9ee36cbc..fa5bd5c20e96b011320d0b0dfdf99c095c98c4d2 100644 (file)
@@ -757,25 +757,40 @@ void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
 }
 
 void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
-                           unsigned int row, bool arrow_down)
+                           unsigned int row, int diff, bool arrow_down)
 {
-       unsigned int end_row;
+       int end_row;
 
-       if (row >= browser->top_idx)
-               end_row = row - browser->top_idx;
-       else
+       if (diff <= 0)
                return;
 
        SLsmg_set_char_set(1);
 
        if (arrow_down) {
+               if (row + diff <= browser->top_idx)
+                       return;
+
+               end_row = row + diff - browser->top_idx;
                ui_browser__gotorc(browser, end_row, column - 1);
-               SLsmg_write_char(SLSMG_ULCORN_CHAR);
-               ui_browser__gotorc(browser, end_row, column);
-               SLsmg_draw_hline(2);
-               ui_browser__gotorc(browser, end_row + 1, column - 1);
                SLsmg_write_char(SLSMG_LTEE_CHAR);
+
+               while (--end_row >= 0 && end_row > (int)(row - browser->top_idx)) {
+                       ui_browser__gotorc(browser, end_row, column - 1);
+                       SLsmg_draw_vline(1);
+               }
+
+               end_row = (int)(row - browser->top_idx);
+               if (end_row >= 0) {
+                       ui_browser__gotorc(browser, end_row, column - 1);
+                       SLsmg_write_char(SLSMG_ULCORN_CHAR);
+                       ui_browser__gotorc(browser, end_row, column);
+                       SLsmg_draw_hline(2);
+               }
        } else {
+               if (row < browser->top_idx)
+                       return;
+
+               end_row = row - browser->top_idx;
                ui_browser__gotorc(browser, end_row, column - 1);
                SLsmg_write_char(SLSMG_LTEE_CHAR);
                ui_browser__gotorc(browser, end_row, column);
index 3678eb88f119cf5f09db2708c1ff74254fe85962..510ce45540501972942c0c3a34f6429ca97de690 100644 (file)
@@ -51,7 +51,7 @@ void ui_browser__write_graph(struct ui_browser *browser, int graph);
 void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
                              u64 start, u64 end);
 void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
-                           unsigned int row, bool arrow_down);
+                           unsigned int row, int diff, bool arrow_down);
 void __ui_browser__show_title(struct ui_browser *browser, const char *title);
 void ui_browser__show_title(struct ui_browser *browser, const char *title);
 int ui_browser__show(struct ui_browser *browser, const char *title,
index ef4da4295bf7f774635b72b0f8cb602246446f84..e81c2493efdf9eb1884a7471821876e370387ecc 100644 (file)
@@ -125,13 +125,20 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
                ab->selection = al;
 }
 
-static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
+static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
 {
        struct disasm_line *pos = list_prev_entry(cursor, al.node);
        const char *name;
+       int diff = 1;
+
+       while (pos && pos->al.offset == -1) {
+               pos = list_prev_entry(pos, al.node);
+               if (!ab->opts->hide_src_code)
+                       diff++;
+       }
 
        if (!pos)
-               return false;
+               return 0;
 
        if (ins__is_lock(&pos->ins))
                name = pos->ops.locked.ins.name;
@@ -139,9 +146,11 @@ static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
                name = pos->ins.name;
 
        if (!name || !cursor->ins.name)
-               return false;
+               return 0;
 
-       return ins__is_fused(ab->arch, name, cursor->ins.name);
+       if (ins__is_fused(ab->arch, name, cursor->ins.name))
+               return diff;
+       return 0;
 }
 
 static void annotate_browser__draw_current_jump(struct ui_browser *browser)
@@ -155,6 +164,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
        struct annotation *notes = symbol__annotation(sym);
        u8 pcnt_width = annotation__pcnt_width(notes);
        int width;
+       int diff = 0;
 
        /* PLT symbols contain external offsets */
        if (strstr(sym->name, "@plt"))
@@ -205,11 +215,11 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
                                 pcnt_width + 2 + notes->widths.addr + width,
                                 from, to);
 
-       if (is_fused(ab, cursor)) {
+       diff = is_fused(ab, cursor);
+       if (diff > 0) {
                ui_browser__mark_fused(browser,
                                       pcnt_width + 3 + notes->widths.addr + width,
-                                      from - 1,
-                                      to > from);
+                                      from - diff, diff, to > from);
        }
 }
 
index 683f6d63560ed2794a1b7d0293be0cbf87536f8e..1a7112a87736ab5899b1f1d77f1b2044e1356cf0 100644 (file)
 struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
 {
        struct btf *btf;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
        int err = btf__get_from_id(id, &btf);
+#pragma GCC diagnostic pop
 
        return err ? ERR_PTR(err) : btf;
 }
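
The push/ignore/pop sequence above is the standard way to call a single deprecated symbol without turning off -Wdeprecated-declarations for the whole file. A self-contained sketch of the pattern, where old_api() is a made-up stand-in for btf__get_from_id():

#include <stdio.h>

__attribute__((deprecated("use new_api() instead")))
static int old_api(int x)
{
        return x * 2;
}

int compat_wrapper(int x)
{
        int ret;

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
        ret = old_api(x);       /* no deprecation warning for this call only */
#pragma GCC diagnostic pop

        return ret;
}

int main(void)
{
        printf("%d\n", compat_wrapper(21));
        return 0;
}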
index 4fb5e90d7a57ae481873b2d632dc60a992ea165d..60ce5908c6640022c472acc6d0611195a4c93284 100644 (file)
@@ -801,7 +801,7 @@ int perf_config_set(struct perf_config_set *set,
                                  section->name, item->name);
                        ret = fn(key, value, data);
                        if (ret < 0) {
-                               pr_err("Error: wrong config key-value pair %s=%s\n",
+                               pr_err("Error in the given config file: wrong config key-value pair %s=%s\n",
                                       key, value);
                                /*
                                 * Can't be just a 'break', as perf_config_set__for_each_entry()
index da19be7da284c250e5cff7b84e87a47de5ec7b3e..44e40bad0e336ab7d45c35065d4e1c37346da543 100644 (file)
@@ -2149,6 +2149,7 @@ static int add_callchain_ip(struct thread *thread,
 
        al.filtered = 0;
        al.sym = NULL;
+       al.srcline = NULL;
        if (!cpumode) {
                thread__find_cpumode_addr_location(thread, ip, &al);
        } else {
index 069c2cfdd3be6a7405196f697de057ea5377f9fa..352f16076e01f70ef91ebebdd7809d8cf4c0c05f 100644 (file)
@@ -2116,7 +2116,7 @@ fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
 static int __perf_session__process_decomp_events(struct perf_session *session)
 {
        s64 skip;
-       u64 size, file_pos = 0;
+       u64 size;
        struct decomp *decomp = session->decomp_last;
 
        if (!decomp)
@@ -2132,7 +2132,7 @@ static int __perf_session__process_decomp_events(struct perf_session *session)
                size = event->header.size;
 
                if (size < sizeof(struct perf_event_header) ||
-                   (skip = perf_session__process_event(session, event, file_pos)) < 0) {
+                   (skip = perf_session__process_event(session, event, decomp->file_pos)) < 0) {
                        pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                                decomp->file_pos + decomp->head, event->header.size, event->header.type);
                        return -EINVAL;
index 5a931456e71851c3a799e2a348e968ec666d484e..ac35c61f65f5fd43457957f178f3ca72721bfedc 100755 (executable)
@@ -16,7 +16,7 @@ assert sys.version_info >= (3, 7), "Python version is too old"
 
 from collections import namedtuple
 from enum import Enum, auto
-from typing import Iterable
+from typing import Iterable, Sequence
 
 import kunit_config
 import kunit_json
@@ -186,6 +186,26 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
                                exec_result.elapsed_time))
        return parse_result
 
+# Problem:
+# $ kunit.py run --json
+# works as one would expect and prints the parsed test results as JSON.
+# $ kunit.py run --json suite_name
+# would *not* pass suite_name as the filter_glob and print as json.
+# argparse will consider it to be another way of writing
+# $ kunit.py run --json=suite_name
+# i.e. it would run all tests, and dump the json to a `suite_name` file.
+# So we hackily automatically rewrite --json => --json=stdout
+pseudo_bool_flag_defaults = {
+               '--json': 'stdout',
+               '--raw_output': 'kunit',
+}
+def massage_argv(argv: Sequence[str]) -> Sequence[str]:
+       def massage_arg(arg: str) -> str:
+               if arg not in pseudo_bool_flag_defaults:
+                       return arg
+               return  f'{arg}={pseudo_bool_flag_defaults[arg]}'
+       return list(map(massage_arg, argv))
+
 def add_common_opts(parser) -> None:
        parser.add_argument('--build_dir',
                            help='As in the make command, it specifies the build '
@@ -303,7 +323,7 @@ def main(argv, linux=None):
                                  help='Specifies the file to read results from.',
                                  type=str, nargs='?', metavar='input_file')
 
-       cli_args = parser.parse_args(argv)
+       cli_args = parser.parse_args(massage_argv(argv))
 
        if get_kernel_root_path():
                os.chdir(get_kernel_root_path())
index 619c4554cbff247bc1f2c9c6cb61f01c61d6b747..1edcc8373b4e3195a5b88634bcb45964381abcc2 100755 (executable)
@@ -408,6 +408,14 @@ class KUnitMainTest(unittest.TestCase):
                        self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
                        self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
 
+       def test_run_raw_output_does_not_take_positional_args(self):
+               # --raw_output is a string flag, but we don't want it to consume
+               # any positional arguments, only ones after an '='
+               self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+               kunit.main(['run', '--raw_output', 'filter_glob'], self.linux_source_mock)
+               self.linux_source_mock.run_kernel.assert_called_once_with(
+                       args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
+
        def test_exec_timeout(self):
                timeout = 3453
                kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
index 6836510a522f7391f943489c3d0519945ce07cbe..22722abc9dfa9ec71b45280b7f09b946cbd61963 100644 (file)
@@ -266,16 +266,19 @@ int test_init(struct tdescr *td)
                        td->feats_supported |= FEAT_SSBS;
                if (getauxval(AT_HWCAP) & HWCAP_SVE)
                        td->feats_supported |= FEAT_SVE;
-               if (feats_ok(td))
+               if (feats_ok(td)) {
                        fprintf(stderr,
                                "Required Features: [%s] supported\n",
                                feats_to_string(td->feats_required &
                                                td->feats_supported));
-               else
+               } else {
                        fprintf(stderr,
                                "Required Features: [%s] NOT supported\n",
                                feats_to_string(td->feats_required &
                                                ~td->feats_supported));
+                       td->result = KSFT_SKIP;
+                       return 0;
+               }
        }
 
        /* Perform test specific additional initialization */
index 866531c08e4f9da1e892b1c7cd15e24552774b81..799b88152e9e8d95fbd89da55f77a7b3247aba65 100644 (file)
@@ -375,7 +375,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o:                         \
                     $(TRUNNER_BPF_PROGS_DIR)/%.c                       \
                     $(TRUNNER_BPF_PROGS_DIR)/*.h                       \
                     $$(INCLUDE_DIR)/vmlinux.h                          \
-                    $(wildcard $(BPFDIR)/bpf_*.h) | $(TRUNNER_OUTPUT)
+                    $(wildcard $(BPFDIR)/bpf_*.h)                      \
+                    | $(TRUNNER_OUTPUT) $$(BPFOBJ)
        $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@,                      \
                                          $(TRUNNER_BPF_CFLAGS))
 
index 033051717ba58da3c2a27a182884fb59c4282a60..f3daa44a82660a704888705cdec15234688d0c5b 100644 (file)
 #include <unistd.h>
 #include <ftw.h>
 
-
 #include "cgroup_helpers.h"
 
 /*
  * To avoid relying on the system setup, when setup_cgroup_env is called
- * we create a new mount namespace, and cgroup namespace. The cgroup2
- * root is mounted at CGROUP_MOUNT_PATH
- *
- * Unfortunately, most people don't have cgroupv2 enabled at this point in time.
- * It's easier to create our own mount namespace and manage it ourselves.
+ * we create a new mount namespace, and cgroup namespace. The cgroupv2
+ * root is mounted at CGROUP_MOUNT_PATH. Unfortunately, most people don't
+ * have cgroupv2 enabled at this point in time. It's easier to create our
+ * own mount namespace and manage it ourselves. We assume /mnt exists.
  *
- * We assume /mnt exists.
+ * Related cgroupv1 helpers are named *classid*(), since we only use the
+ * net_cls controller for tagging net_cls.classid. We assume the default
+ * mount under /sys/fs/cgroup/net_cls, which should be the case for the
+ * vast majority of users.
  */
 
 #define WALK_FD_LIMIT                  16
+
 #define CGROUP_MOUNT_PATH              "/mnt"
+#define CGROUP_MOUNT_DFLT              "/sys/fs/cgroup"
+#define NETCLS_MOUNT_PATH              CGROUP_MOUNT_DFLT "/net_cls"
 #define CGROUP_WORK_DIR                        "/cgroup-test-work-dir"
+
 #define format_cgroup_path(buf, path) \
        snprintf(buf, sizeof(buf), "%s%s%s", CGROUP_MOUNT_PATH, \
                 CGROUP_WORK_DIR, path)
 
+#define format_classid_path(buf)                               \
+       snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH,   \
+                CGROUP_WORK_DIR)
+
 /**
  * enable_all_controllers() - Enable all available cgroup v2 controllers
  *
@@ -139,8 +148,7 @@ static int nftwfunc(const char *filename, const struct stat *statptr,
        return 0;
 }
 
-
-static int join_cgroup_from_top(char *cgroup_path)
+static int join_cgroup_from_top(const char *cgroup_path)
 {
        char cgroup_procs_path[PATH_MAX + 1];
        pid_t pid = getpid();
@@ -313,3 +321,114 @@ int cgroup_setup_and_join(const char *path) {
        }
        return cg_fd;
 }
+
+/**
+ * setup_classid_environment() - Setup the cgroupv1 net_cls environment
+ *
+ * After calling this function, cleanup_classid_environment should be called
+ * once testing is complete.
+ *
+ * This function will print an error to stderr and return 1 if it is unable
+ * to setup the cgroup environment. If setup is successful, 0 is returned.
+ */
+int setup_classid_environment(void)
+{
+       char cgroup_workdir[PATH_MAX + 1];
+
+       format_classid_path(cgroup_workdir);
+
+       if (mount("tmpfs", CGROUP_MOUNT_DFLT, "tmpfs", 0, NULL) &&
+           errno != EBUSY) {
+               log_err("mount cgroup base");
+               return 1;
+       }
+
+       if (mkdir(NETCLS_MOUNT_PATH, 0777) && errno != EEXIST) {
+               log_err("mkdir cgroup net_cls");
+               return 1;
+       }
+
+       if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls") &&
+           errno != EBUSY) {
+               log_err("mount cgroup net_cls");
+               return 1;
+       }
+
+       cleanup_classid_environment();
+
+       if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) {
+               log_err("mkdir cgroup work dir");
+               return 1;
+       }
+
+       return 0;
+}
+
+/**
+ * set_classid() - Set a cgroupv1 net_cls classid
+ * @id: the numeric classid
+ *
+ * Writes the passed classid into the cgroup work dir's net_cls.classid
+ * file in order to later on trigger socket tagging.
+ *
+ * On success, it returns 0, otherwise on failure it returns 1. If there
+ * is a failure, it prints the error to stderr.
+ */
+int set_classid(unsigned int id)
+{
+       char cgroup_workdir[PATH_MAX - 42];
+       char cgroup_classid_path[PATH_MAX + 1];
+       int fd, rc = 0;
+
+       format_classid_path(cgroup_workdir);
+       snprintf(cgroup_classid_path, sizeof(cgroup_classid_path),
+                "%s/net_cls.classid", cgroup_workdir);
+
+       fd = open(cgroup_classid_path, O_WRONLY);
+       if (fd < 0) {
+               log_err("Opening cgroup classid: %s", cgroup_classid_path);
+               return 1;
+       }
+
+       if (dprintf(fd, "%u\n", id) < 0) {
+               log_err("Setting cgroup classid");
+               rc = 1;
+       }
+
+       close(fd);
+       return rc;
+}
+
+/**
+ * join_classid() - Join a cgroupv1 net_cls classid
+ *
+ * This function expects the cgroup work dir to be already created, as we
+ * join it here. This causes the process sockets to be tagged with the given
+ * net_cls classid.
+ *
+ * On success, it returns 0, otherwise on failure it returns 1.
+ */
+int join_classid(void)
+{
+       char cgroup_workdir[PATH_MAX + 1];
+
+       format_classid_path(cgroup_workdir);
+       return join_cgroup_from_top(cgroup_workdir);
+}
+
+/**
+ * cleanup_classid_environment() - Cleanup the cgroupv1 net_cls environment
+ *
+ * At call time, it moves the calling process to the root cgroup, and then
+ * runs the deletion process.
+ *
+ * On failure, it will print an error to stderr, and try to continue.
+ */
+void cleanup_classid_environment(void)
+{
+       char cgroup_workdir[PATH_MAX + 1];
+
+       format_classid_path(cgroup_workdir);
+       join_cgroup_from_top(NETCLS_MOUNT_PATH);
+       nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
+}
index 5fe3d88e4f0d271d77ad1dcec724147620d59c21..629da3854b3e7a56fa54cfbf93a2ad086ab35eae 100644 (file)
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __CGROUP_HELPERS_H
 #define __CGROUP_HELPERS_H
+
 #include <errno.h>
 #include <string.h>
 
@@ -8,12 +9,21 @@
 #define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
        __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
 
-
+/* cgroupv2 related */
 int cgroup_setup_and_join(const char *path);
 int create_and_get_cgroup(const char *path);
+unsigned long long get_cgroup_id(const char *path);
+
 int join_cgroup(const char *path);
+
 int setup_cgroup_environment(void);
 void cleanup_cgroup_environment(void);
-unsigned long long get_cgroup_id(const char *path);
 
-#endif
+/* cgroupv1 related */
+int set_classid(unsigned int id);
+int join_classid(void);
+
+int setup_classid_environment(void);
+void cleanup_classid_environment(void);
+
+#endif /* __CGROUP_HELPERS_H */
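
The new cgroupv1 entries are meant to be called in a fixed order: set up the net_cls hierarchy, write the classid, join the work directory, and clean up afterwards. A hypothetical caller, with error handling trimmed and everything outside the declared helpers assumed, might look like:

/* Sketch only: tag the calling process's sockets with a net_cls classid
 * using the helpers declared above, then tear the environment down. */
#include "cgroup_helpers.h"

int run_with_classid(unsigned int classid, int (*body)(void))
{
        int err = 1;

        if (setup_classid_environment())        /* mounts net_cls, creates work dir */
                return 1;
        if (set_classid(classid))               /* writes net_cls.classid */
                goto out;
        if (join_classid())                     /* moves this task into the work dir */
                goto out;
        err = body();                           /* sockets created here carry the classid */
out:
        cleanup_classid_environment();
        return err;
}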
index 7e9f6375757a9bfb23dfb7deb572ae306c52037c..6db1af8fdee788407fe55576cf20c57347159c7c 100644 (file)
@@ -208,11 +208,26 @@ error_close:
 
 static int connect_fd_to_addr(int fd,
                              const struct sockaddr_storage *addr,
-                             socklen_t addrlen)
+                             socklen_t addrlen, const bool must_fail)
 {
-       if (connect(fd, (const struct sockaddr *)addr, addrlen)) {
-               log_err("Failed to connect to server");
-               return -1;
+       int ret;
+
+       errno = 0;
+       ret = connect(fd, (const struct sockaddr *)addr, addrlen);
+       if (must_fail) {
+               if (!ret) {
+                       log_err("Unexpected success to connect to server");
+                       return -1;
+               }
+               if (errno != EPERM) {
+                       log_err("Unexpected error from connect to server");
+                       return -1;
+               }
+       } else {
+               if (ret) {
+                       log_err("Failed to connect to server");
+                       return -1;
+               }
        }
 
        return 0;
@@ -257,7 +272,7 @@ int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
                       strlen(opts->cc) + 1))
                goto error_close;
 
-       if (connect_fd_to_addr(fd, &addr, addrlen))
+       if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail))
                goto error_close;
 
        return fd;
@@ -289,7 +304,7 @@ int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
                return -1;
        }
 
-       if (connect_fd_to_addr(client_fd, &addr, len))
+       if (connect_fd_to_addr(client_fd, &addr, len, false))
                return -1;
 
        return 0;
index da7e132657d594663a38001f008777dea078c1f6..d198181a56487cbbc765ee26af871721af6e0187 100644 (file)
@@ -20,6 +20,7 @@ typedef __u16 __sum16;
 struct network_helper_opts {
        const char *cc;
        int timeout_ms;
+       bool must_fail;
 };
 
 /* ipv4 test vector */
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
new file mode 100644 (file)
index 0000000..ab3b9bc
--- /dev/null
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include "connect4_dropper.skel.h"
+
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+
+static int run_test(int cgroup_fd, int server_fd, bool classid)
+{
+       struct network_helper_opts opts = {
+               .must_fail = true,
+       };
+       struct connect4_dropper *skel;
+       int fd, err = 0;
+
+       skel = connect4_dropper__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "skel_open"))
+               return -1;
+
+       skel->links.connect_v4_dropper =
+               bpf_program__attach_cgroup(skel->progs.connect_v4_dropper,
+                                          cgroup_fd);
+       if (!ASSERT_OK_PTR(skel->links.connect_v4_dropper, "prog_attach")) {
+               err = -1;
+               goto out;
+       }
+
+       if (classid && !ASSERT_OK(join_classid(), "join_classid")) {
+               err = -1;
+               goto out;
+       }
+
+       fd = connect_to_fd_opts(server_fd, &opts);
+       if (fd < 0)
+               err = -1;
+       else
+               close(fd);
+out:
+       connect4_dropper__destroy(skel);
+       return err;
+}
+
+void test_cgroup_v1v2(void)
+{
+       struct network_helper_opts opts = {};
+       int server_fd, client_fd, cgroup_fd;
+       static const int port = 60123;
+
+       /* Step 1: Check base connectivity works without any BPF. */
+       server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
+       if (!ASSERT_GE(server_fd, 0, "server_fd"))
+               return;
+       client_fd = connect_to_fd_opts(server_fd, &opts);
+       if (!ASSERT_GE(client_fd, 0, "client_fd")) {
+               close(server_fd);
+               return;
+       }
+       close(client_fd);
+       close(server_fd);
+
+       /* Step 2: Check BPF policy prog attached to cgroups drops connectivity. */
+       cgroup_fd = test__join_cgroup("/connect_dropper");
+       if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
+               return;
+       server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
+       if (!ASSERT_GE(server_fd, 0, "server_fd")) {
+               close(cgroup_fd);
+               return;
+       }
+       ASSERT_OK(run_test(cgroup_fd, server_fd, false), "cgroup-v2-only");
+       setup_classid_environment();
+       set_classid(42);
+       ASSERT_OK(run_test(cgroup_fd, server_fd, true), "cgroup-v1v2");
+       cleanup_classid_environment();
+       close(server_fd);
+       close(cgroup_fd);
+}
index 53f0e0fa1a5370b8fbfe284197284c1f3b60b49b..37c20b5ffa70f40291c1a9f73f7cf11b8ff815f7 100644 (file)
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #define _GNU_SOURCE
 #include <test_progs.h>
-#include <linux/ptrace.h>
 #include "test_task_pt_regs.skel.h"
 
 void test_task_pt_regs(void)
diff --git a/tools/testing/selftests/bpf/progs/connect4_dropper.c b/tools/testing/selftests/bpf/progs/connect4_dropper.c
new file mode 100644 (file)
index 0000000..b565d99
--- /dev/null
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+
+#include <sys/socket.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define VERDICT_REJECT 0
+#define VERDICT_PROCEED        1
+
+SEC("cgroup/connect4")
+int connect_v4_dropper(struct bpf_sock_addr *ctx)
+{
+       if (ctx->type != SOCK_STREAM)
+               return VERDICT_PROCEED;
+       if (ctx->user_port == bpf_htons(60123))
+               return VERDICT_REJECT;
+       return VERDICT_PROCEED;
+}
+
+char _license[] SEC("license") = "GPL";
index 6c059f1cfa1bb27b84b7162f93786d2c4a9334d1..e6cb09259408f2fea7929a6fbb1b313c38e69894 100644 (file)
@@ -1,12 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0
 
-#include <linux/ptrace.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
-struct pt_regs current_regs = {};
-struct pt_regs ctx_regs = {};
+#define PT_REGS_SIZE sizeof(struct pt_regs)
+
+/*
+ * The kernel struct pt_regs isn't exported in its entirety to userspace.
+ * Pass it as an array to task_pt_regs.c
+ */
+char current_regs[PT_REGS_SIZE] = {};
+char ctx_regs[PT_REGS_SIZE] = {};
 int uprobe_res = 0;
 
 SEC("uprobe/trigger_func")
@@ -17,8 +22,10 @@ int handle_uprobe(struct pt_regs *ctx)
 
        current = bpf_get_current_task_btf();
        regs = (struct pt_regs *) bpf_task_pt_regs(current);
-       __builtin_memcpy(&current_regs, regs, sizeof(*regs));
-       __builtin_memcpy(&ctx_regs, ctx, sizeof(*ctx));
+       if (bpf_probe_read_kernel(current_regs, PT_REGS_SIZE, regs))
+               return 0;
+       if (bpf_probe_read_kernel(ctx_regs, PT_REGS_SIZE, ctx))
+               return 0;
 
        /* Prove that uprobe was run */
        uprobe_res = 1;
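
With the register snapshots exported as plain byte arrays, the userspace side of the test can compare them without knowing the kernel layout of struct pt_regs. A hedged sketch, assuming the usual libbpf skeleton layout generated as test_task_pt_regs.skel.h (globals exposed via skel->bss):

#include <string.h>
#include "test_task_pt_regs.skel.h"

/* Assumption: bpftool-generated skeleton places current_regs/ctx_regs in bss. */
static int uprobe_regs_match(struct test_task_pt_regs *skel)
{
        /* both snapshots were filled by handle_uprobe() above */
        return memcmp(skel->bss->current_regs, skel->bss->ctx_regs,
                      sizeof(skel->bss->current_regs)) == 0;
}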
index 59ea56945e6cd6819dc12fe337f324d784449e48..b497bb85b667f78f0bfa393e5ffaca0766b345f0 100755 (executable)
@@ -112,6 +112,14 @@ setup()
        ip netns add "${NS2}"
        ip netns add "${NS3}"
 
+       # rp_filter gets confused by what these tests are doing, so disable it
+       ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
+       ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
+       ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
+       ip netns exec ${NS1} sysctl -wq net.ipv4.conf.default.rp_filter=0
+       ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0
+       ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0
+
        ip link add veth1 type veth peer name veth2
        ip link add veth3 type veth peer name veth4
        ip link add veth5 type veth peer name veth6
@@ -236,11 +244,6 @@ setup()
        ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF}
        ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF}
 
-       # rp_filter gets confused by what these tests are doing, so disable it
-       ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
-       ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
-       ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
-
        TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)
 
        sleep 1  # reduce flakiness
index 4de902ea14d82be9c06908e62e5d9bdb771a6279..de1c4e6de0b2110ce6c91bf248ae8e946d4b6105 100644 (file)
@@ -1,10 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#define __EXPORTED_HEADERS__
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
 #include <string.h>
 #include <errno.h>
-#include <linux/fcntl.h>
+#include <fcntl.h>
 #include <malloc.h>
 
 #include <sys/ioctl.h>
index beee0d5646a6190897ea722b43a6adfc7deae38c..f7d84549cc3e35cfd62b82069ad777fc6868504e 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
-# Copyright 2020 NXP Semiconductors
+# Copyright 2020 NXP
 
 WAIT_TIME=1
 NUM_NETIFS=4
index 5f5b2ba3e5572a37eb2afe53f2196423978bb711..60c02b482be8341ce795cab1238cc4772ebb942e 100644 (file)
@@ -11,8 +11,8 @@ SYSTEM="syscalls"
 EVENT="sys_enter_openat"
 FIELD="filename"
 EPROBE="eprobe_open"
-
-echo "e:$EPROBE $SYSTEM/$EVENT file=+0(\$filename):ustring" >> dynamic_events
+OPTIONS="file=+0(\$filename):ustring"
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
 
 grep -q "$EPROBE" dynamic_events
 test -d events/eprobes/$EPROBE
@@ -37,4 +37,54 @@ echo "-:$EPROBE" >> dynamic_events
 ! grep -q "$EPROBE" dynamic_events
 ! test -d events/eprobes/$EPROBE
 
+# test various ways to remove the probe (already tested with just event name)
+
+# With group name
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:eprobes/$EPROBE" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# With group name and system/event
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:eprobes/$EPROBE $SYSTEM/$EVENT" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# With just event name and system/event
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:$EPROBE $SYSTEM/$EVENT" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# With just event name and system/event and options
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# With group name and system/event and options
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+grep -q "$EPROBE" dynamic_events
+test -d events/eprobes/$EPROBE
+echo "-:eprobes/$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
+# Finally make sure what is in the dynamic_events file clears it too
+echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+LINE=`sed -e '/$EPROBE/s/^e/-/' < dynamic_events`
+test -d events/eprobes/$EPROBE
+echo "-:eprobes/$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+! grep -q "$EPROBE" dynamic_events
+! test -d events/eprobes/$EPROBE
+
 clear_trace
index 98053d3afbda72ec80cfd377ab6b318202cc3648..b8dbabe24ac226eed4e9e96155b5390a1d3a88aa 100644 (file)
@@ -24,6 +24,7 @@
 /x86_64/smm_test
 /x86_64/state_test
 /x86_64/svm_vmcall_test
+/x86_64/svm_int_ctl_test
 /x86_64/sync_regs_test
 /x86_64/tsc_msrs_test
 /x86_64/userspace_msr_exit_test
@@ -48,6 +49,7 @@
 /kvm_page_table_test
 /memslot_modification_stress_test
 /memslot_perf_test
+/rseq_test
 /set_memory_region_test
 /steal_time
 /kvm_binary_stats_test
index 5d05801ab81676b830f178bb99a8c3d76687c4bc..d1774f4613939702723d6f898915c05484bd5d1b 100644 (file)
@@ -56,6 +56,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
+TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
 TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
 TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
@@ -80,6 +81,7 @@ TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
 TEST_GEN_PROGS_x86_64 += kvm_page_table_test
 TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test
 TEST_GEN_PROGS_x86_64 += memslot_perf_test
+TEST_GEN_PROGS_x86_64 += rseq_test
 TEST_GEN_PROGS_x86_64 += set_memory_region_test
 TEST_GEN_PROGS_x86_64 += steal_time
 TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test
@@ -93,6 +95,7 @@ TEST_GEN_PROGS_aarch64 += dirty_log_test
 TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
 TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
 TEST_GEN_PROGS_aarch64 += kvm_page_table_test
+TEST_GEN_PROGS_aarch64 += rseq_test
 TEST_GEN_PROGS_aarch64 += set_memory_region_test
 TEST_GEN_PROGS_aarch64 += steal_time
 TEST_GEN_PROGS_aarch64 += kvm_binary_stats_test
@@ -104,6 +107,7 @@ TEST_GEN_PROGS_s390x += demand_paging_test
 TEST_GEN_PROGS_s390x += dirty_log_test
 TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
 TEST_GEN_PROGS_s390x += kvm_page_table_test
+TEST_GEN_PROGS_s390x += rseq_test
 TEST_GEN_PROGS_s390x += set_memory_region_test
 TEST_GEN_PROGS_s390x += kvm_binary_stats_test
 
index 71e277c7c3f3338d11796022bda66394e170d0fd..5d95113c7b7c540842d5df93cfcef16658765e82 100644 (file)
@@ -371,9 +371,7 @@ static void help(char *name)
        printf(" -v: specify the number of vCPUs to run.\n");
        printf(" -o: Overlap guest memory accesses instead of partitioning\n"
               "     them into a separate region of memory for each vCPU.\n");
-       printf(" -s: specify the type of memory that should be used to\n"
-              "     back the guest data region.\n\n");
-       backing_src_help();
+       backing_src_help("-s");
        puts("");
        exit(0);
 }
@@ -381,7 +379,7 @@ static void help(char *name)
 int main(int argc, char *argv[])
 {
        struct test_params params = {
-               .backing_src = VM_MEM_SRC_ANONYMOUS,
+               .backing_src = DEFAULT_VM_MEM_SRC,
                .vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
                .vcpus = 1,
        };
index e79c1b64977f146465345020dad34424b69ba78a..1510b21e6306143b202581d76415451545332797 100644 (file)
@@ -179,7 +179,7 @@ static void *uffd_handler_thread_fn(void *arg)
                        return NULL;
                }
 
-               if (!pollfd[0].revents & POLLIN)
+               if (!(pollfd[0].revents & POLLIN))
                        continue;
 
                r = read(uffd, &msg, sizeof(msg));
@@ -416,7 +416,7 @@ static void help(char *name)
 {
        puts("");
        printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"
-              "          [-b memory] [-t type] [-v vcpus] [-o]\n", name);
+              "          [-b memory] [-s type] [-v vcpus] [-o]\n", name);
        guest_modes_help();
        printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
               "     UFFD registration mode: 'MISSING' or 'MINOR'.\n");
@@ -426,8 +426,7 @@ static void help(char *name)
        printf(" -b: specify the size of the memory region which should be\n"
               "     demand paged by each vCPU. e.g. 10M or 3G.\n"
               "     Default: 1G\n");
-       printf(" -t: The type of backing memory to use. Default: anonymous\n");
-       backing_src_help();
+       backing_src_help("-s");
        printf(" -v: specify the number of vCPUs to run.\n");
        printf(" -o: Overlap guest memory accesses instead of partitioning\n"
               "     them into a separate region of memory for each vCPU.\n");
@@ -439,14 +438,14 @@ int main(int argc, char *argv[])
 {
        int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
        struct test_params p = {
-               .src_type = VM_MEM_SRC_ANONYMOUS,
+               .src_type = DEFAULT_VM_MEM_SRC,
                .partition_vcpu_memory_access = true,
        };
        int opt;
 
        guest_modes_append_default();
 
-       while ((opt = getopt(argc, argv, "hm:u:d:b:t:v:o")) != -1) {
+       while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:o")) != -1) {
                switch (opt) {
                case 'm':
                        guest_modes_cmdline(optarg);
@@ -465,7 +464,7 @@ int main(int argc, char *argv[])
                case 'b':
                        guest_percpu_mem_size = parse_size(optarg);
                        break;
-               case 't':
+               case 's':
                        p.src_type = parse_backing_src_type(optarg);
                        break;
                case 'v':
@@ -485,7 +484,7 @@ int main(int argc, char *argv[])
 
        if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
            !backing_src_is_shared(p.src_type)) {
-               TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -t");
+               TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");
        }
 
        for_each_guest_mode(run_test, &p);
index 479868570d593745c07e13b28a96ac569c88e7f1..7ffab5bd5ce55a59da6e128e226847cf0c765353 100644 (file)
@@ -118,42 +118,64 @@ static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
        toggle_dirty_logging(vm, slots, false);
 }
 
-static void get_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,
-                         uint64_t nr_pages)
+static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
 {
-       uint64_t slot_pages = nr_pages / slots;
        int i;
 
        for (i = 0; i < slots; i++) {
                int slot = PERF_TEST_MEM_SLOT_INDEX + i;
-               unsigned long *slot_bitmap = bitmap + i * slot_pages;
 
-               kvm_vm_get_dirty_log(vm, slot, slot_bitmap);
+               kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
        }
 }
 
-static void clear_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,
-                           uint64_t nr_pages)
+static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
+                           int slots, uint64_t pages_per_slot)
 {
-       uint64_t slot_pages = nr_pages / slots;
        int i;
 
        for (i = 0; i < slots; i++) {
                int slot = PERF_TEST_MEM_SLOT_INDEX + i;
-               unsigned long *slot_bitmap = bitmap + i * slot_pages;
 
-               kvm_vm_clear_dirty_log(vm, slot, slot_bitmap, 0, slot_pages);
+               kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
        }
 }
 
+static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
+{
+       unsigned long **bitmaps;
+       int i;
+
+       bitmaps = malloc(slots * sizeof(bitmaps[0]));
+       TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");
+
+       for (i = 0; i < slots; i++) {
+               bitmaps[i] = bitmap_zalloc(pages_per_slot);
+               TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
+       }
+
+       return bitmaps;
+}
+
+static void free_bitmaps(unsigned long *bitmaps[], int slots)
+{
+       int i;
+
+       for (i = 0; i < slots; i++)
+               free(bitmaps[i]);
+
+       free(bitmaps);
+}
+
 static void run_test(enum vm_guest_mode mode, void *arg)
 {
        struct test_params *p = arg;
        pthread_t *vcpu_threads;
        struct kvm_vm *vm;
-       unsigned long *bmap;
+       unsigned long **bitmaps;
        uint64_t guest_num_pages;
        uint64_t host_num_pages;
+       uint64_t pages_per_slot;
        int vcpu_id;
        struct timespec start;
        struct timespec ts_diff;
@@ -171,7 +193,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
        guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
        host_num_pages = vm_num_host_pages(mode, guest_num_pages);
-       bmap = bitmap_zalloc(host_num_pages);
+       pages_per_slot = host_num_pages / p->slots;
+
+       bitmaps = alloc_bitmaps(p->slots, pages_per_slot);
 
        if (dirty_log_manual_caps) {
                cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
@@ -239,7 +263,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                        iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
 
                clock_gettime(CLOCK_MONOTONIC, &start);
-               get_dirty_log(vm, p->slots, bmap, host_num_pages);
+               get_dirty_log(vm, bitmaps, p->slots);
                ts_diff = timespec_elapsed(start);
                get_dirty_log_total = timespec_add(get_dirty_log_total,
                                                   ts_diff);
@@ -248,7 +272,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
                if (dirty_log_manual_caps) {
                        clock_gettime(CLOCK_MONOTONIC, &start);
-                       clear_dirty_log(vm, p->slots, bmap, host_num_pages);
+                       clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
                        ts_diff = timespec_elapsed(start);
                        clear_dirty_log_total = timespec_add(clear_dirty_log_total,
                                                             ts_diff);
@@ -281,7 +305,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                        clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
        }
 
-       free(bmap);
+       free_bitmaps(bitmaps, p->slots);
        free(vcpu_threads);
        perf_test_destroy_vm(vm);
 }
@@ -308,11 +332,9 @@ static void help(char *name)
        printf(" -v: specify the number of vCPUs to run.\n");
        printf(" -o: Overlap guest memory accesses instead of partitioning\n"
               "     them into a separate region of memory for each vCPU.\n");
-       printf(" -s: specify the type of memory that should be used to\n"
-              "     back the guest data region.\n\n");
+       backing_src_help("-s");
        printf(" -x: Split the memory region into this number of memslots.\n"
-              "     (default: 1)");
-       backing_src_help();
+              "     (default: 1)\n");
        puts("");
        exit(0);
 }
@@ -324,7 +346,7 @@ int main(int argc, char *argv[])
                .iterations = TEST_HOST_LOOP_N,
                .wr_fract = 1,
                .partition_vcpu_memory_access = true,
-               .backing_src = VM_MEM_SRC_ANONYMOUS,
+               .backing_src = DEFAULT_VM_MEM_SRC,
                .slots = 1,
        };
        int opt;
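
The refactor above gives each memslot its own dirty bitmap rather than carving slices out of one large allocation, so pages_per_slot is computed once and each bitmap can be sized and freed independently. Below is a standalone sketch of that allocation pattern; the slot and page counts are hypothetical and a plain calloc() stands in for the harness's bitmap_zalloc().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for bitmap_zalloc(): one zeroed bit per page. */
static unsigned long *alloc_page_bitmap(uint64_t pages)
{
	size_t longs = (pages + 8 * sizeof(unsigned long) - 1) /
		       (8 * sizeof(unsigned long));

	return calloc(longs, sizeof(unsigned long));
}

int main(void)
{
	int slots = 4;			/* assumed memslot count */
	uint64_t pages_per_slot = 1024;	/* assumed pages per slot */
	unsigned long **bitmaps;
	int i;

	/* One dirty bitmap per slot instead of one shared bitmap. */
	bitmaps = malloc(slots * sizeof(bitmaps[0]));
	if (!bitmaps)
		return 1;
	for (i = 0; i < slots; i++) {
		bitmaps[i] = alloc_page_bitmap(pages_per_slot);
		if (!bitmaps[i])
			return 1;
	}

	printf("allocated %d bitmaps of %lu pages each\n",
	       slots, (unsigned long)pages_per_slot);

	for (i = 0; i < slots; i++)
		free(bitmaps[i]);
	free(bitmaps);
	return 0;
}
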
index d79be15dd3d204d29ec66d3fc997373709077b11..f8fddc84c0d3b3b18e5a5d791ae8f5da5ed6f759 100644 (file)
@@ -90,18 +90,23 @@ enum vm_mem_backing_src_type {
        NUM_SRC_TYPES,
 };
 
+#define DEFAULT_VM_MEM_SRC VM_MEM_SRC_ANONYMOUS
+
 struct vm_mem_backing_src_alias {
        const char *name;
        uint32_t flag;
 };
 
+#define MIN_RUN_DELAY_NS       200000UL
+
 bool thp_configured(void);
 size_t get_trans_hugepagesz(void);
 size_t get_def_hugetlb_pagesz(void);
 const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
 size_t get_backing_src_pagesz(uint32_t i);
-void backing_src_help(void);
+void backing_src_help(const char *flag);
 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
+long get_run_delay(void);
 
 /*
  * Whether or not the given source type is shared memory (as opposed to
index 242ae8e09a653a8362959a03b099cd4f620ff864..05e65ca1c30cda841a66b9ee7886b99656c070bf 100644 (file)
@@ -312,37 +312,37 @@ static inline void set_xmm(int n, unsigned long val)
        }
 }
 
-typedef unsigned long v1di __attribute__ ((vector_size (8)));
+#define GET_XMM(__xmm)                                                 \
+({                                                                     \
+       unsigned long __val;                                            \
+       asm volatile("movq %%"#__xmm", %0" : "=r"(__val));              \
+       __val;                                                          \
+})
+
 static inline unsigned long get_xmm(int n)
 {
        assert(n >= 0 && n <= 7);
 
-       register v1di xmm0 __asm__("%xmm0");
-       register v1di xmm1 __asm__("%xmm1");
-       register v1di xmm2 __asm__("%xmm2");
-       register v1di xmm3 __asm__("%xmm3");
-       register v1di xmm4 __asm__("%xmm4");
-       register v1di xmm5 __asm__("%xmm5");
-       register v1di xmm6 __asm__("%xmm6");
-       register v1di xmm7 __asm__("%xmm7");
        switch (n) {
        case 0:
-               return (unsigned long)xmm0;
+               return GET_XMM(xmm0);
        case 1:
-               return (unsigned long)xmm1;
+               return GET_XMM(xmm1);
        case 2:
-               return (unsigned long)xmm2;
+               return GET_XMM(xmm2);
        case 3:
-               return (unsigned long)xmm3;
+               return GET_XMM(xmm3);
        case 4:
-               return (unsigned long)xmm4;
+               return GET_XMM(xmm4);
        case 5:
-               return (unsigned long)xmm5;
+               return GET_XMM(xmm5);
        case 6:
-               return (unsigned long)xmm6;
+               return GET_XMM(xmm6);
        case 7:
-               return (unsigned long)xmm7;
+               return GET_XMM(xmm7);
        }
+
+       /* never reached */
        return 0;
 }
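
GET_XMM() reads an XMM register with an explicit movq into a general-purpose register instead of relying on vector-typed register variables, which not every compiler keeps pinned to the named register. A self-contained x86-64 sketch of the same read, assuming GCC/Clang extended asm and SSE2; the round-trip through xmm0 is only for demonstration.

#include <stdio.h>

/* Same movq-based read as the macro above. */
#define GET_XMM(__xmm)                                                 \
({                                                                     \
	unsigned long __val;                                           \
	asm volatile("movq %%" #__xmm ", %0" : "=r"(__val));           \
	__val;                                                         \
})

int main(void)
{
	unsigned long in = 0x1234abcdUL, out;

	/* Load a known value into xmm0, then read it back the same way. */
	asm volatile("movq %0, %%xmm0" : : "r"(in) : "xmm0");
	out = GET_XMM(xmm0);

	printf("xmm0 = 0x%lx\n", out);
	return out == in ? 0 : 1;
}
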
 
index 0d04a7db7f249324857e8c41671060da9bb4099f..36407cb0ec85dcb62277f27215a19e2a285bb0f2 100644 (file)
@@ -456,10 +456,7 @@ static void help(char *name)
               "     (default: 1G)\n");
        printf(" -v: specify the number of vCPUs to run\n"
               "     (default: 1)\n");
-       printf(" -s: specify the type of memory that should be used to\n"
-              "     back the guest data region.\n"
-              "     (default: anonymous)\n\n");
-       backing_src_help();
+       backing_src_help("-s");
        puts("");
 }
 
@@ -468,7 +465,7 @@ int main(int argc, char *argv[])
        int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
        struct test_params p = {
                .test_mem_size = DEFAULT_TEST_MEM_SIZE,
-               .src_type = VM_MEM_SRC_ANONYMOUS,
+               .src_type = DEFAULT_VM_MEM_SRC,
        };
        int opt;
 
index af1031fed97f7d05741f0d1802ade4b5b0bbf544..b724291089939380bd0e8fa6093d8ef5ad339092 100644 (file)
@@ -11,6 +11,7 @@
 #include <stdlib.h>
 #include <time.h>
 #include <sys/stat.h>
+#include <sys/syscall.h>
 #include <linux/mman.h>
 #include "linux/kernel.h"
 
@@ -129,13 +130,16 @@ size_t get_trans_hugepagesz(void)
 {
        size_t size;
        FILE *f;
+       int ret;
 
        TEST_ASSERT(thp_configured(), "THP is not configured in host kernel");
 
        f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r");
        TEST_ASSERT(f != NULL, "Error in opening transparent_hugepage/hpage_pmd_size");
 
-       fscanf(f, "%ld", &size);
+       ret = fscanf(f, "%ld", &size);
+       TEST_ASSERT(ret == 1,
+                   "Error reading transparent_hugepage/hpage_pmd_size");
        fclose(f);
 
        return size;
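
For reference, hpage_pmd_size exposes the PMD-level huge page size in bytes as a single decimal value, so exactly one fscanf() conversion is expected. A standalone sketch of the same read with the return value checked, independent of the test harness:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r");
	long size;

	if (!f) {
		perror("hpage_pmd_size");
		return 1;
	}
	/* Exactly one decimal field is expected. */
	if (fscanf(f, "%ld", &size) != 1) {
		fprintf(stderr, "could not parse hpage_pmd_size\n");
		fclose(f);
		return 1;
	}
	fclose(f);

	printf("THP PMD size: %ld bytes\n", size);
	return 0;
}
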
@@ -279,13 +283,22 @@ size_t get_backing_src_pagesz(uint32_t i)
        }
 }
 
-void backing_src_help(void)
+static void print_available_backing_src_types(const char *prefix)
 {
        int i;
 
-       printf("Available backing src types:\n");
+       printf("%sAvailable backing src types:\n", prefix);
+
        for (i = 0; i < NUM_SRC_TYPES; i++)
-               printf("\t%s\n", vm_mem_backing_src_alias(i)->name);
+               printf("%s    %s\n", prefix, vm_mem_backing_src_alias(i)->name);
+}
+
+void backing_src_help(const char *flag)
+{
+       printf(" %s: specify the type of memory that should be used to\n"
+              "     back the guest data region. (default: %s)\n",
+              flag, vm_mem_backing_src_alias(DEFAULT_VM_MEM_SRC)->name);
+       print_available_backing_src_types("     ");
 }
 
 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)
@@ -296,7 +309,23 @@ enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)
                if (!strcmp(type_name, vm_mem_backing_src_alias(i)->name))
                        return i;
 
-       backing_src_help();
+       print_available_backing_src_types("");
        TEST_FAIL("Unknown backing src type: %s", type_name);
        return -1;
 }
+
+long get_run_delay(void)
+{
+       char path[64];
+       long val[2];
+       FILE *fp;
+
+       sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
+       fp = fopen(path, "r");
+       /* Return MIN_RUN_DELAY_NS upon failure just to be safe */
+       if (fscanf(fp, "%ld %ld ", &val[0], &val[1]) < 2)
+               val[1] = MIN_RUN_DELAY_NS;
+       fclose(fp);
+
+       return val[1];
+}
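
get_run_delay() becomes a shared helper: /proc/<tid>/schedstat carries three fields (time spent on the CPU, time spent waiting on a runqueue, number of timeslices), and the helper returns the second one in nanoseconds. A minimal standalone version of the same read, with a NULL check on fopen() added purely for illustration:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Return this task's runqueue wait time in ns, or -1 on error. */
static long read_run_delay(void)
{
	char path[64];
	long on_cpu = 0, wait_time = -1;
	FILE *fp;

	snprintf(path, sizeof(path), "/proc/%ld/schedstat",
		 (long)syscall(SYS_gettid));
	fp = fopen(path, "r");
	if (!fp)
		return -1;
	if (fscanf(fp, "%ld %ld", &on_cpu, &wait_time) != 2)
		wait_time = -1;
	fclose(fp);

	return wait_time;
}

int main(void)
{
	printf("run delay so far: %ld ns\n", read_run_delay());
	return 0;
}
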
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
new file mode 100644 (file)
index 0000000..4158da0
--- /dev/null
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <syscall.h>
+#include <sys/ioctl.h>
+#include <sys/sysinfo.h>
+#include <asm/barrier.h>
+#include <linux/atomic.h>
+#include <linux/rseq.h>
+#include <linux/unistd.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+
+#define VCPU_ID 0
+
+static __thread volatile struct rseq __rseq = {
+       .cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
+};
+
+/*
+ * Use an arbitrary, bogus signature for configuring rseq, this test does not
+ * actually enter an rseq critical section.
+ */
+#define RSEQ_SIG 0xdeadbeef
+
+/*
+ * Any bug related to task migration is likely to be timing-dependent; perform
+ * a large number of migrations to reduce the odds of a false negative.
+ */
+#define NR_TASK_MIGRATIONS 100000
+
+static pthread_t migration_thread;
+static cpu_set_t possible_mask;
+static int min_cpu, max_cpu;
+static bool done;
+
+static atomic_t seq_cnt;
+
+static void guest_code(void)
+{
+       for (;;)
+               GUEST_SYNC(0);
+}
+
+static void sys_rseq(int flags)
+{
+       int r;
+
+       r = syscall(__NR_rseq, &__rseq, sizeof(__rseq), flags, RSEQ_SIG);
+       TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
+}
+
+static int next_cpu(int cpu)
+{
+       /*
+        * Advance to the next CPU, skipping those that weren't in the original
+        * affinity set.  Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's
+        * data storage is considered opaque.  Note, if this task is pinned
+        * to a small set of discontiguous CPUs, e.g. 2 and 1023, this loop will
+        * burn a lot of cycles and the test will take longer than normal to
+        * complete.
+        */
+       do {
+               cpu++;
+               if (cpu > max_cpu) {
+                       cpu = min_cpu;
+                       TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
+                                   "Min CPU = %d must always be usable", cpu);
+                       break;
+               }
+       } while (!CPU_ISSET(cpu, &possible_mask));
+
+       return cpu;
+}
+
+static void *migration_worker(void *ign)
+{
+       cpu_set_t allowed_mask;
+       int r, i, cpu;
+
+       CPU_ZERO(&allowed_mask);
+
+       for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
+               CPU_SET(cpu, &allowed_mask);
+
+               /*
+                * Bump the sequence count twice to allow the reader to detect
+                * that a migration may have occurred in between rseq and sched
+                * CPU ID reads.  An odd sequence count indicates a migration
+                * is in-progress, while a completely different count indicates
+                * a migration occurred since the count was last read.
+                */
+               atomic_inc(&seq_cnt);
+
+               /*
+                * Ensure the odd count is visible while sched_getcpu() isn't
+                * stable, i.e. while changing affinity is in-progress.
+                */
+               smp_wmb();
+               r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask);
+               TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
+                           errno, strerror(errno));
+               smp_wmb();
+               atomic_inc(&seq_cnt);
+
+               CPU_CLR(cpu, &allowed_mask);
+
+               /*
+                * Wait 1-10us before proceeding to the next iteration and more
+                * specifically, before bumping seq_cnt again.  A delay is
+                * needed on three fronts:
+                *
+                *  1. To allow sched_setaffinity() to prompt migration before
+                *     ioctl(KVM_RUN) enters the guest so that TIF_NOTIFY_RESUME
+                *     (or TIF_NEED_RESCHED, which indirectly leads to handling
+                *     NOTIFY_RESUME) is handled in KVM context.
+                *
+                *     If NOTIFY_RESUME/NEED_RESCHED is set after KVM enters
+                *     the guest, the guest will trigger an IO/MMIO exit all the
+                *     way to userspace and the TIF flags will be handled by
+                *     the generic "exit to userspace" logic, not by KVM.  The
+                *     exit to userspace is necessary to give the test a chance
+                *     to check the rseq CPU ID (see #2).
+                *
+                *     Alternatively, guest_code() could include an instruction
+                *     to trigger an exit that is handled by KVM, but any such
+                *     exit requires architecture specific code.
+                *
+                *  2. To let ioctl(KVM_RUN) make its way back to the test
+                *     before the next round of migration.  The test's check on
+                *     the rseq CPU ID must wait for migration to complete in
+                *     order to avoid a false positive, thus any kernel rseq bug
+                *     will be missed if the next migration starts before the
+                *     check completes.
+                *
+                *  3. To ensure the read-side makes efficient forward progress,
+                *     e.g. if sched_getcpu() involves a syscall.  Stalling the
+                *     read-side means the test will spend more time waiting for
+                *     sched_getcpu() to stabilize and less time trying to hit
+                *     the timing-dependent bug.
+                *
+                * Because any bug in this area is likely to be timing-dependent,
+                * run with a range of delays at 1us intervals from 1us to 10us
+                * as a best effort to avoid tuning the test to the point where
+                * it can hit _only_ the original bug and not detect future
+                * regressions.
+                *
+                * The original bug can reproduce with a delay up to ~500us on
+                * x86-64, but starts to require more iterations to reproduce
+                * as the delay creeps above ~10us, and the average runtime of
+                * each iteration obviously increases as well.  Cap the delay
+                * at 10us to keep test runtime reasonable while minimizing
+                * potential coverage loss.
+                *
+                * The lower bound for reproducing the bug is likely below 1us,
+                * e.g. failures occur on x86-64 with nanosleep(0), but at that
+                * point the overhead of the syscall likely dominates the delay.
+                * Use usleep() for simplicity and to avoid unnecessary kernel
+                * dependencies.
+                */
+               usleep((i % 10) + 1);
+       }
+       done = true;
+       return NULL;
+}
+
+static int calc_min_max_cpu(void)
+{
+       int i, cnt, nproc;
+
+       if (CPU_COUNT(&possible_mask) < 2)
+               return -EINVAL;
+
+       /*
+        * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
+        * this task is affined to in order to reduce the time spent querying
+        * unusable CPUs, e.g. if this task is pinned to a small percentage of
+        * total CPUs.
+        */
+       nproc = get_nprocs_conf();
+       min_cpu = -1;
+       max_cpu = -1;
+       cnt = 0;
+
+       for (i = 0; i < nproc; i++) {
+               if (!CPU_ISSET(i, &possible_mask))
+                       continue;
+               if (min_cpu == -1)
+                       min_cpu = i;
+               max_cpu = i;
+               cnt++;
+       }
+
+       return (cnt < 2) ? -EINVAL : 0;
+}
+
+int main(int argc, char *argv[])
+{
+       int r, i, snapshot;
+       struct kvm_vm *vm;
+       u32 cpu, rseq_cpu;
+
+       /* Tell stdout not to buffer its content */
+       setbuf(stdout, NULL);
+
+       r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
+       TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
+                   strerror(errno));
+
+       if (calc_min_max_cpu()) {
+               print_skip("Only one usable CPU, task migration not possible");
+               exit(KSFT_SKIP);
+       }
+
+       sys_rseq(0);
+
+       /*
+        * Create and run a dummy VM that immediately exits to userspace via
+        * GUEST_SYNC, while concurrently migrating the process by setting its
+        * CPU affinity.
+        */
+       vm = vm_create_default(VCPU_ID, 0, guest_code);
+       ucall_init(vm, NULL);
+
+       pthread_create(&migration_thread, NULL, migration_worker, 0);
+
+       for (i = 0; !done; i++) {
+               vcpu_run(vm, VCPU_ID);
+               TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
+                           "Guest failed?");
+
+               /*
+                * Verify rseq's CPU matches sched's CPU.  Ensure migration
+                * doesn't occur between sched_getcpu() and reading the rseq
+                * cpu_id by rereading both if the sequence count changes, or
+                * if the count is odd (migration in-progress).
+                */
+               do {
+                       /*
+                        * Drop bit 0 to force a mismatch if the count is odd,
+                        * i.e. if a migration is in-progress.
+                        */
+                       snapshot = atomic_read(&seq_cnt) & ~1;
+
+                       /*
+                        * Ensure reading sched_getcpu() and rseq.cpu_id
+                        * complete in a single "no migration" window, i.e. are
+                        * not reordered across the seq_cnt reads.
+                        */
+                       smp_rmb();
+                       cpu = sched_getcpu();
+                       rseq_cpu = READ_ONCE(__rseq.cpu_id);
+                       smp_rmb();
+               } while (snapshot != atomic_read(&seq_cnt));
+
+               TEST_ASSERT(rseq_cpu == cpu,
+                           "rseq CPU = %d, sched CPU = %d\n", rseq_cpu, cpu);
+       }
+
+       /*
+        * Sanity check that the test was able to enter the guest a reasonable
+        * number of times, e.g. didn't get stalled too often/long waiting for
+        * sched_getcpu() to stabilize.  A 2:1 migration:KVM_RUN ratio is a
+        * fairly conservative ratio on x86-64, which can do _more_ KVM_RUNs
+        * than migrations given the 1us+ delay in the migration task.
+        */
+       TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
+                   "Only performed %d KVM_RUNs, task stalled too much?\n", i);
+
+       pthread_join(migration_thread, NULL);
+
+       kvm_vm_free(vm);
+
+       sys_rseq(RSEQ_FLAG_UNREGISTER);
+
+       return 0;
+}
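
The test above builds on an even/odd sequence count: the migration worker leaves the count odd while an affinity change is in flight, and the main loop rereads sched_getcpu() and rseq.cpu_id whenever the count is odd or has moved between its two reads. A compact sketch of that handshake using C11 atomics; the variable names and the single-threaded driver are illustrative, not part of the test.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;
static atomic_int shared_a, shared_b;

/* Writer: the pair is only allowed to change while seq is odd. */
static void writer_update(int a, int b)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);	/* odd: unstable */
	atomic_store_explicit(&shared_a, a, memory_order_relaxed);
	atomic_store_explicit(&shared_b, b, memory_order_relaxed);
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);	/* even: stable */
}

/* Reader: drop bit 0 so an odd (in-progress) count forces a retry. */
static void reader_snapshot(int *a, int *b)
{
	unsigned int snap;

	do {
		snap = atomic_load_explicit(&seq, memory_order_acquire) & ~1u;
		*a = atomic_load_explicit(&shared_a, memory_order_relaxed);
		*b = atomic_load_explicit(&shared_b, memory_order_relaxed);
	} while (snap != atomic_load_explicit(&seq, memory_order_acquire));
}

int main(void)
{
	int a, b;

	writer_update(1, 2);
	reader_snapshot(&a, &b);
	printf("a=%d b=%d\n", a, b);
	return 0;
}
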
index ecec30865a74f039ac23ea666e406ce51cb00bd1..62f2eb9ee3d5650ce825992f577ffb797e884f66 100644 (file)
@@ -10,7 +10,6 @@
 #include <sched.h>
 #include <pthread.h>
 #include <linux/kernel.h>
-#include <sys/syscall.h>
 #include <asm/kvm.h>
 #include <asm/kvm_para.h>
 
@@ -20,7 +19,6 @@
 
 #define NR_VCPUS               4
 #define ST_GPA_BASE            (1 << 30)
-#define MIN_RUN_DELAY_NS       200000UL
 
 static void *st_gva[NR_VCPUS];
 static uint64_t guest_stolen_time[NR_VCPUS];
@@ -118,12 +116,12 @@ struct st_time {
        uint64_t st_time;
 };
 
-static int64_t smccc(uint32_t func, uint32_t arg)
+static int64_t smccc(uint32_t func, uint64_t arg)
 {
        unsigned long ret;
 
        asm volatile(
-               "mov    x0, %1\n"
+               "mov    w0, %w1\n"
                "mov    x1, %2\n"
                "hvc    #0\n"
                "mov    %0, x0\n"
@@ -217,20 +215,6 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
 
 #endif
 
-static long get_run_delay(void)
-{
-       char path[64];
-       long val[2];
-       FILE *fp;
-
-       sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
-       fp = fopen(path, "r");
-       fscanf(fp, "%ld %ld ", &val[0], &val[1]);
-       fclose(fp);
-
-       return val[1];
-}
-
 static void *do_steal_time(void *arg)
 {
        struct timespec ts, stop;
index e6480fd5c4bdc44c3de0782aa3ed6441e43388b3..8039e1eff9388f994a0fb30ef46464def3469292 100644 (file)
@@ -82,7 +82,8 @@ int get_warnings_count(void)
        FILE *f;
 
        f = popen("dmesg | grep \"WARNING:\" | wc -l", "r");
-       fscanf(f, "%d", &warnings);
+       if (fscanf(f, "%d", &warnings) < 1)
+               warnings = 0;
        fclose(f);
 
        return warnings;
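
The hunk above stops ignoring the fscanf() return value when counting kernel warnings. A minimal standalone version of the same pattern; this sketch uses grep -c so the pipeline itself produces the count, and pclose() to close the popen() stream.

#include <stdio.h>

int main(void)
{
	int warnings = 0;
	FILE *f = popen("dmesg | grep -c 'WARNING:'", "r");

	if (f) {
		/* Treat any parse failure as "no warnings counted". */
		if (fscanf(f, "%d", &warnings) != 1)
			warnings = 0;
		pclose(f);
	}
	printf("warnings: %d\n", warnings);
	return 0;
}
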
diff --git a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
new file mode 100644 (file)
index 0000000..df04f56
--- /dev/null
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * svm_int_ctl_test
+ *
+ * Copyright (C) 2021, Red Hat, Inc.
+ *
+ * Nested SVM testing: test simultaneous use of V_IRQ from L1 and L0.
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+#include "apic.h"
+
+#define VCPU_ID                0
+
+static struct kvm_vm *vm;
+
+bool vintr_irq_called;
+bool intr_irq_called;
+
+#define VINTR_IRQ_NUMBER 0x20
+#define INTR_IRQ_NUMBER 0x30
+
+static void vintr_irq_handler(struct ex_regs *regs)
+{
+       vintr_irq_called = true;
+}
+
+static void intr_irq_handler(struct ex_regs *regs)
+{
+       x2apic_write_reg(APIC_EOI, 0x00);
+       intr_irq_called = true;
+}
+
+static void l2_guest_code(struct svm_test_data *svm)
+{
+       /* This code raises interrupt INTR_IRQ_NUMBER in L1's LAPIC,
+        * and since L1 didn't enable virtual interrupt masking,
+        * L2 should receive it and not L1.
+        *
+        * L2 also has virtual interrupt 'VINTR_IRQ_NUMBER' pending in V_IRQ
+        * so it should also receive it after the following 'sti'.
+        */
+       x2apic_write_reg(APIC_ICR,
+               APIC_DEST_SELF | APIC_INT_ASSERT | INTR_IRQ_NUMBER);
+
+       __asm__ __volatile__(
+               "sti\n"
+               "nop\n"
+       );
+
+       GUEST_ASSERT(vintr_irq_called);
+       GUEST_ASSERT(intr_irq_called);
+
+       __asm__ __volatile__(
+               "vmcall\n"
+       );
+}
+
+static void l1_guest_code(struct svm_test_data *svm)
+{
+       #define L2_GUEST_STACK_SIZE 64
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+       struct vmcb *vmcb = svm->vmcb;
+
+       x2apic_enable();
+
+       /* Prepare for L2 execution. */
+       generic_svm_setup(svm, l2_guest_code,
+                         &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+       /* No virtual interrupt masking */
+       vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
+
+       /* No intercepts for real and virtual interrupts */
+       vmcb->control.intercept &= ~((1ULL << INTERCEPT_INTR) | (1ULL << INTERCEPT_VINTR));
+
+       /* Make a virtual interrupt VINTR_IRQ_NUMBER pending */
+       vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);
+       vmcb->control.int_vector = VINTR_IRQ_NUMBER;
+
+       run_guest(vmcb, svm->vmcb_gpa);
+       GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+       GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+       vm_vaddr_t svm_gva;
+
+       nested_svm_check_supported();
+
+       vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+
+       vm_init_descriptor_tables(vm);
+       vcpu_init_descriptor_tables(vm, VCPU_ID);
+
+       vm_install_exception_handler(vm, VINTR_IRQ_NUMBER, vintr_irq_handler);
+       vm_install_exception_handler(vm, INTR_IRQ_NUMBER, intr_irq_handler);
+
+       vcpu_alloc_svm(vm, &svm_gva);
+       vcpu_args_set(vm, VCPU_ID, 1, svm_gva);
+
+       struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+       struct ucall uc;
+
+       vcpu_run(vm, VCPU_ID);
+       TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                   "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
+                   run->exit_reason,
+                   exit_reason_str(run->exit_reason));
+
+       switch (get_ucall(vm, VCPU_ID, &uc)) {
+       case UCALL_ABORT:
+               TEST_FAIL("%s", (const char *)uc.args[0]);
+               break;
+               /* NOT REACHED */
+       case UCALL_DONE:
+               goto done;
+       default:
+               TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
+       }
+done:
+       kvm_vm_free(vm);
+       return 0;
+}
index 117bf49a3d7958647abd82597759b30438c2f05a..eda0d2a51224bd1c709d9d52352a9e81be2a29bb 100644 (file)
@@ -14,7 +14,6 @@
 #include <stdint.h>
 #include <time.h>
 #include <sched.h>
-#include <sys/syscall.h>
 
 #define VCPU_ID                5
 
@@ -98,20 +97,6 @@ static void guest_code(void)
        GUEST_DONE();
 }
 
-static long get_run_delay(void)
-{
-        char path[64];
-        long val[2];
-        FILE *fp;
-
-        sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
-        fp = fopen(path, "r");
-        fscanf(fp, "%ld %ld ", &val[0], &val[1]);
-        fclose(fp);
-
-        return val[1];
-}
-
 static int cmp_timespec(struct timespec *a, struct timespec *b)
 {
        if (a->tv_sec > b->tv_sec)
index fa2ac0e56b43c3446823b30933298864e9bc421a..fe7ee2b0f29c2b77ac20b279db9219fa0c4f86b7 100644 (file)
@@ -48,6 +48,7 @@ ARCH          ?= $(SUBARCH)
 # When local build is done, headers are installed in the default
 # INSTALL_HDR_PATH usr/include.
 .PHONY: khdr
+.NOTPARALLEL:
 khdr:
 ifndef KSFT_KHDR_INSTALL_DONE
 ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
index e1bf55dabdf6569b9da744beeaedf7a57f617e17..162c41e9bcae876be1a503dba0e3d27d3d4b2beb 100644 (file)
@@ -746,7 +746,7 @@ int read_write_nci_cmd(int nfc_sock, int virtual_fd, const __u8 *cmd, __u32 cmd_
                       const __u8 *rsp, __u32 rsp_len)
 {
        char buf[256];
-       unsigned int len;
+       int len;
 
        send(nfc_sock, &cmd[3], cmd_len - 3, 0);
        len = read(virtual_fd, buf, cmd_len);
index cfc7f4f97fd17c22a25af89d1b97ea49b7b549bd..df341648f8180ae8378cefb7be034ea42559cf16 100644 (file)
@@ -1,5 +1,2 @@
-##TEST_GEN_FILES := test_unix_oob
-TEST_PROGS := test_unix_oob
+TEST_GEN_PROGS := test_unix_oob
 include ../../lib.mk
-
-all: $(TEST_PROGS)
index 0f3e3763f4f8cf5ca7c812da124128e3b3a1ddb3..3dece8b292536e242fcac5c901f353c6417b87e1 100644 (file)
@@ -271,8 +271,9 @@ main(int argc, char **argv)
        read_oob(pfd, &oob);
 
        if (!signal_recvd || len != 127 || oob != '%' || atmark != 1) {
-               fprintf(stderr, "Test 3 failed, sigurg %d len %d OOB %c ",
-               "atmark %d\n", signal_recvd, len, oob, atmark);
+               fprintf(stderr,
+                       "Test 3 failed, sigurg %d len %d OOB %c atmark %d\n",
+                       signal_recvd, len, oob, atmark);
                die(1);
        }
 
index 4254ddc3f70b560a85b64edcd9512eb7fb4bf621..1ef9e4159bba8e83b796f02b37c2b547d03db0f9 100755 (executable)
@@ -45,7 +45,7 @@ altnames_test()
        check_err $? "Got unexpected long alternative name from link show JSON"
 
        ip link property del $DUMMY_DEV altname $SHORT_NAME
-       check_err $? "Failed to add short alternative name"
+       check_err $? "Failed to delete short alternative name"
 
        ip -j -p link show $SHORT_NAME &>/dev/null
        check_fail $? "Unexpected success while trying to do link show with deleted short alternative name"
index 3caf72bb9c6a1241511bdb0b7f72c77ddce174b8..a2489ec398fe01325e23f17ac253e6c4002e27bc 100755 (executable)
@@ -468,10 +468,26 @@ out_bits()
   for i in {0..22}
   do
     ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace \
-           prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} dev veth0
-
-    run_test "out_bit$i" "${desc/<n>/$i}" ioam-node-alpha ioam-node-beta \
-           db01::2 db01::1 veth0 ${bit2type[$i]} 123
+           prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} \
+           dev veth0 &>/dev/null
+
+    local cmd_res=$?
+    local descr="${desc/<n>/$i}"
+
+    if [[ $i -ge 12 && $i -le 21 ]]
+    then
+      if [ $cmd_res != 0 ]
+      then
+        npassed=$((npassed+1))
+        log_test_passed "$descr"
+      else
+        nfailed=$((nfailed+1))
+        log_test_failed "$descr"
+      fi
+    else
+      run_test "out_bit$i" "$descr" ioam-node-alpha ioam-node-beta \
+             db01::2 db01::1 veth0 ${bit2type[$i]} 123
+    fi
   done
 
   bit2size[22]=$tmp
@@ -544,7 +560,7 @@ in_bits()
   local tmp=${bit2size[22]}
   bit2size[22]=$(( $tmp + ${#BETA[9]} + ((4 - (${#BETA[9]} % 4)) % 4) ))
 
-  for i in {0..22}
+  for i in {0..11} {22..22}
   do
     ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace \
            prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} dev veth0
index d376cb2c383c3821270fa657a6025ae5471857be..8f6997d3581612ba7379349594c79e00ebaa6e36 100644 (file)
@@ -94,16 +94,6 @@ enum {
        TEST_OUT_BIT9,
        TEST_OUT_BIT10,
        TEST_OUT_BIT11,
-       TEST_OUT_BIT12,
-       TEST_OUT_BIT13,
-       TEST_OUT_BIT14,
-       TEST_OUT_BIT15,
-       TEST_OUT_BIT16,
-       TEST_OUT_BIT17,
-       TEST_OUT_BIT18,
-       TEST_OUT_BIT19,
-       TEST_OUT_BIT20,
-       TEST_OUT_BIT21,
        TEST_OUT_BIT22,
        TEST_OUT_FULL_SUPP_TRACE,
 
@@ -125,16 +115,6 @@ enum {
        TEST_IN_BIT9,
        TEST_IN_BIT10,
        TEST_IN_BIT11,
-       TEST_IN_BIT12,
-       TEST_IN_BIT13,
-       TEST_IN_BIT14,
-       TEST_IN_BIT15,
-       TEST_IN_BIT16,
-       TEST_IN_BIT17,
-       TEST_IN_BIT18,
-       TEST_IN_BIT19,
-       TEST_IN_BIT20,
-       TEST_IN_BIT21,
        TEST_IN_BIT22,
        TEST_IN_FULL_SUPP_TRACE,
 
@@ -199,30 +179,6 @@ static int check_ioam_header(int tid, struct ioam6_trace_hdr *ioam6h,
                       ioam6h->nodelen != 2 ||
                       ioam6h->remlen;
 
-       case TEST_OUT_BIT12:
-       case TEST_IN_BIT12:
-       case TEST_OUT_BIT13:
-       case TEST_IN_BIT13:
-       case TEST_OUT_BIT14:
-       case TEST_IN_BIT14:
-       case TEST_OUT_BIT15:
-       case TEST_IN_BIT15:
-       case TEST_OUT_BIT16:
-       case TEST_IN_BIT16:
-       case TEST_OUT_BIT17:
-       case TEST_IN_BIT17:
-       case TEST_OUT_BIT18:
-       case TEST_IN_BIT18:
-       case TEST_OUT_BIT19:
-       case TEST_IN_BIT19:
-       case TEST_OUT_BIT20:
-       case TEST_IN_BIT20:
-       case TEST_OUT_BIT21:
-       case TEST_IN_BIT21:
-               return ioam6h->overflow ||
-                      ioam6h->nodelen ||
-                      ioam6h->remlen != 1;
-
        case TEST_OUT_BIT22:
        case TEST_IN_BIT22:
                return ioam6h->overflow ||
@@ -326,6 +282,66 @@ static int check_ioam6_data(__u8 **p, struct ioam6_trace_hdr *ioam6h,
                *p += sizeof(__u32);
        }
 
+       if (ioam6h->type.bit12) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit13) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit14) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit15) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit16) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit17) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit18) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit19) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit20) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
+       if (ioam6h->type.bit21) {
+               if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff)
+                       return 1;
+               *p += sizeof(__u32);
+       }
+
        if (ioam6h->type.bit22) {
                len = cnf.sc_data ? strlen(cnf.sc_data) : 0;
                aligned = cnf.sc_data ? __ALIGN_KERNEL(len, 4) : 0;
@@ -455,26 +471,6 @@ static int str2id(const char *tname)
                return TEST_OUT_BIT10;
        if (!strcmp("out_bit11", tname))
                return TEST_OUT_BIT11;
-       if (!strcmp("out_bit12", tname))
-               return TEST_OUT_BIT12;
-       if (!strcmp("out_bit13", tname))
-               return TEST_OUT_BIT13;
-       if (!strcmp("out_bit14", tname))
-               return TEST_OUT_BIT14;
-       if (!strcmp("out_bit15", tname))
-               return TEST_OUT_BIT15;
-       if (!strcmp("out_bit16", tname))
-               return TEST_OUT_BIT16;
-       if (!strcmp("out_bit17", tname))
-               return TEST_OUT_BIT17;
-       if (!strcmp("out_bit18", tname))
-               return TEST_OUT_BIT18;
-       if (!strcmp("out_bit19", tname))
-               return TEST_OUT_BIT19;
-       if (!strcmp("out_bit20", tname))
-               return TEST_OUT_BIT20;
-       if (!strcmp("out_bit21", tname))
-               return TEST_OUT_BIT21;
        if (!strcmp("out_bit22", tname))
                return TEST_OUT_BIT22;
        if (!strcmp("out_full_supp_trace", tname))
@@ -509,26 +505,6 @@ static int str2id(const char *tname)
                return TEST_IN_BIT10;
        if (!strcmp("in_bit11", tname))
                return TEST_IN_BIT11;
-       if (!strcmp("in_bit12", tname))
-               return TEST_IN_BIT12;
-       if (!strcmp("in_bit13", tname))
-               return TEST_IN_BIT13;
-       if (!strcmp("in_bit14", tname))
-               return TEST_IN_BIT14;
-       if (!strcmp("in_bit15", tname))
-               return TEST_IN_BIT15;
-       if (!strcmp("in_bit16", tname))
-               return TEST_IN_BIT16;
-       if (!strcmp("in_bit17", tname))
-               return TEST_IN_BIT17;
-       if (!strcmp("in_bit18", tname))
-               return TEST_IN_BIT18;
-       if (!strcmp("in_bit19", tname))
-               return TEST_IN_BIT19;
-       if (!strcmp("in_bit20", tname))
-               return TEST_IN_BIT20;
-       if (!strcmp("in_bit21", tname))
-               return TEST_IN_BIT21;
        if (!strcmp("in_bit22", tname))
                return TEST_IN_BIT22;
        if (!strcmp("in_full_supp_trace", tname))
@@ -606,16 +582,6 @@ static int (*func[__TEST_MAX])(int, struct ioam6_trace_hdr *, __u32, __u16) = {
        [TEST_OUT_BIT9]         = check_ioam_header_and_data,
        [TEST_OUT_BIT10]                = check_ioam_header_and_data,
        [TEST_OUT_BIT11]                = check_ioam_header_and_data,
-       [TEST_OUT_BIT12]                = check_ioam_header,
-       [TEST_OUT_BIT13]                = check_ioam_header,
-       [TEST_OUT_BIT14]                = check_ioam_header,
-       [TEST_OUT_BIT15]                = check_ioam_header,
-       [TEST_OUT_BIT16]                = check_ioam_header,
-       [TEST_OUT_BIT17]                = check_ioam_header,
-       [TEST_OUT_BIT18]                = check_ioam_header,
-       [TEST_OUT_BIT19]                = check_ioam_header,
-       [TEST_OUT_BIT20]                = check_ioam_header,
-       [TEST_OUT_BIT21]                = check_ioam_header,
        [TEST_OUT_BIT22]                = check_ioam_header_and_data,
        [TEST_OUT_FULL_SUPP_TRACE]      = check_ioam_header_and_data,
        [TEST_IN_UNDEF_NS]              = check_ioam_header,
@@ -633,16 +599,6 @@ static int (*func[__TEST_MAX])(int, struct ioam6_trace_hdr *, __u32, __u16) = {
        [TEST_IN_BIT9]                  = check_ioam_header_and_data,
        [TEST_IN_BIT10]         = check_ioam_header_and_data,
        [TEST_IN_BIT11]         = check_ioam_header_and_data,
-       [TEST_IN_BIT12]         = check_ioam_header,
-       [TEST_IN_BIT13]         = check_ioam_header,
-       [TEST_IN_BIT14]         = check_ioam_header,
-       [TEST_IN_BIT15]         = check_ioam_header,
-       [TEST_IN_BIT16]         = check_ioam_header,
-       [TEST_IN_BIT17]         = check_ioam_header,
-       [TEST_IN_BIT18]         = check_ioam_header,
-       [TEST_IN_BIT19]         = check_ioam_header,
-       [TEST_IN_BIT20]         = check_ioam_header,
-       [TEST_IN_BIT21]         = check_ioam_header,
        [TEST_IN_BIT22]         = check_ioam_header_and_data,
        [TEST_IN_FULL_SUPP_TRACE]       = check_ioam_header_and_data,
        [TEST_FWD_FULL_SUPP_TRACE]      = check_ioam_header_and_data,
diff --git a/tools/testing/selftests/netfilter/nft_nat_zones.sh b/tools/testing/selftests/netfilter/nft_nat_zones.sh
new file mode 100755 (executable)
index 0000000..b9ab373
--- /dev/null
@@ -0,0 +1,309 @@
+#!/bin/bash
+#
+# Test connection tracking zone and NAT source port reallocation support.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+# Don't increase too much, 2000 clients should work
+# just fine but script can then take several minutes with
+# KASAN/debug builds.
+maxclients=100
+
+have_iperf=1
+ret=0
+
+# client1---.
+#            veth1-.
+#                  |
+#               NAT Gateway --veth0--> Server
+#                  | |
+#            veth2-' |
+# client2---'        |
+#  ....              |
+# clientX----vethX---'
+
+# All clients share identical IP address.
+# NAT Gateway uses policy routing and conntrack zones to isolate client
+# namespaces.  Each client connects to Server, each with colliding tuples:
+#   clientsaddr:10000 -> serveraddr:dport
+#   NAT Gateway is supposed to do port reallocation for each of the
+#   connections.
+
+sfx=$(mktemp -u "XXXXXXXX")
+gw="ns-gw-$sfx"
+cl1="ns-cl1-$sfx"
+cl2="ns-cl2-$sfx"
+srv="ns-srv-$sfx"
+
+v4gc1=$(sysctl -n net.ipv4.neigh.default.gc_thresh1 2>/dev/null)
+v4gc2=$(sysctl -n net.ipv4.neigh.default.gc_thresh2 2>/dev/null)
+v4gc3=$(sysctl -n net.ipv4.neigh.default.gc_thresh3 2>/dev/null)
+v6gc1=$(sysctl -n net.ipv6.neigh.default.gc_thresh1 2>/dev/null)
+v6gc2=$(sysctl -n net.ipv6.neigh.default.gc_thresh2 2>/dev/null)
+v6gc3=$(sysctl -n net.ipv6.neigh.default.gc_thresh3 2>/dev/null)
+
+cleanup()
+{
+       ip netns del $gw
+       ip netns del $srv
+       for i in $(seq 1 $maxclients); do
+               ip netns del ns-cl$i-$sfx 2>/dev/null
+       done
+
+       sysctl -q net.ipv4.neigh.default.gc_thresh1=$v4gc1 2>/dev/null
+       sysctl -q net.ipv4.neigh.default.gc_thresh2=$v4gc2 2>/dev/null
+       sysctl -q net.ipv4.neigh.default.gc_thresh3=$v4gc3 2>/dev/null
+       sysctl -q net.ipv6.neigh.default.gc_thresh1=$v6gc1 2>/dev/null
+       sysctl -q net.ipv6.neigh.default.gc_thresh2=$v6gc2 2>/dev/null
+       sysctl -q net.ipv6.neigh.default.gc_thresh3=$v6gc3 2>/dev/null
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+conntrack -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without conntrack tool"
+       exit $ksft_skip
+fi
+
+iperf3 -v >/dev/null 2>&1
+if [ $? -ne 0 ];then
+       have_iperf=0
+fi
+
+ip netns add "$gw"
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not create net namespace $gw"
+       exit $ksft_skip
+fi
+ip -net "$gw" link set lo up
+
+trap cleanup EXIT
+
+ip netns add "$srv"
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not create server netns $srv"
+       exit $ksft_skip
+fi
+
+ip link add veth0 netns "$gw" type veth peer name eth0 netns "$srv"
+ip -net "$gw" link set veth0 up
+ip -net "$srv" link set lo up
+ip -net "$srv" link set eth0 up
+
+sysctl -q net.ipv6.neigh.default.gc_thresh1=512  2>/dev/null
+sysctl -q net.ipv6.neigh.default.gc_thresh2=1024 2>/dev/null
+sysctl -q net.ipv6.neigh.default.gc_thresh3=4096 2>/dev/null
+sysctl -q net.ipv4.neigh.default.gc_thresh1=512  2>/dev/null
+sysctl -q net.ipv4.neigh.default.gc_thresh2=1024 2>/dev/null
+sysctl -q net.ipv4.neigh.default.gc_thresh3=4096 2>/dev/null
+
+for i in $(seq 1 $maxclients);do
+  cl="ns-cl$i-$sfx"
+
+  ip netns add "$cl"
+  if [ $? -ne 0 ];then
+     echo "SKIP: Could not create client netns $cl"
+     exit $ksft_skip
+  fi
+  ip link add veth$i netns "$gw" type veth peer name eth0 netns "$cl" > /dev/null 2>&1
+  if [ $? -ne 0 ];then
+    echo "SKIP: No virtual ethernet pair device support in kernel"
+    exit $ksft_skip
+  fi
+done
+
+for i in $(seq 1 $maxclients);do
+  cl="ns-cl$i-$sfx"
+  echo netns exec "$cl" ip link set lo up
+  echo netns exec "$cl" ip link set eth0 up
+  echo netns exec "$cl" sysctl -q net.ipv4.tcp_syn_retries=2
+  echo netns exec "$gw" ip link set veth$i up
+  echo netns exec "$gw" sysctl -q net.ipv4.conf.veth$i.arp_ignore=2
+  echo netns exec "$gw" sysctl -q net.ipv4.conf.veth$i.rp_filter=0
+
+  # clients have same IP addresses.
+  echo netns exec "$cl" ip addr add 10.1.0.3/24 dev eth0
+  echo netns exec "$cl" ip addr add dead:1::3/64 dev eth0
+  echo netns exec "$cl" ip route add default via 10.1.0.2 dev eth0
+  echo netns exec "$cl" ip route add default via dead:1::2 dev eth0
+
+  # NB: same addresses on client-facing interfaces.
+  echo netns exec "$gw" ip addr add 10.1.0.2/24 dev veth$i
+  echo netns exec "$gw" ip addr add dead:1::2/64 dev veth$i
+
+  # gw: policy routing
+  echo netns exec "$gw" ip route add 10.1.0.0/24 dev veth$i table $((1000+i))
+  echo netns exec "$gw" ip route add dead:1::0/64 dev veth$i table $((1000+i))
+  echo netns exec "$gw" ip route add 10.3.0.0/24 dev veth0 table $((1000+i))
+  echo netns exec "$gw" ip route add dead:3::0/64 dev veth0 table $((1000+i))
+  echo netns exec "$gw" ip rule add fwmark $i lookup $((1000+i))
+done | ip -batch /dev/stdin
+
+ip -net "$gw" addr add 10.3.0.1/24 dev veth0
+ip -net "$gw" addr add dead:3::1/64 dev veth0
+
+ip -net "$srv" addr add 10.3.0.99/24 dev eth0
+ip -net "$srv" addr add dead:3::99/64 dev eth0
+
+ip netns exec $gw nft -f /dev/stdin<<EOF
+table inet raw {
+       map iiftomark {
+               type ifname : mark
+       }
+
+       map iiftozone {
+               typeof iifname : ct zone
+       }
+
+       set inicmp {
+               flags dynamic
+               type ipv4_addr . ifname . ipv4_addr
+       }
+       set inflows {
+               flags dynamic
+               type ipv4_addr . inet_service . ifname . ipv4_addr . inet_service
+       }
+
+       set inflows6 {
+               flags dynamic
+               type ipv6_addr . inet_service . ifname . ipv6_addr . inet_service
+       }
+
+       chain prerouting {
+               type filter hook prerouting priority -64000; policy accept;
+               ct original zone set meta iifname map @iiftozone
+               meta mark set meta iifname map @iiftomark
+
+               tcp flags & (syn|ack) == ack add @inflows { ip saddr . tcp sport . meta iifname . ip daddr . tcp dport counter }
+               add @inflows6 { ip6 saddr . tcp sport . meta iifname . ip6 daddr . tcp dport counter }
+               ip protocol icmp add @inicmp { ip saddr . meta iifname . ip daddr counter }
+       }
+
+       chain nat_postrouting {
+               type nat hook postrouting priority 0; policy accept;
+                ct mark set meta mark meta oifname veth0 masquerade
+       }
+
+       chain mangle_prerouting {
+               type filter hook prerouting priority -100; policy accept;
+               ct direction reply meta mark set ct mark
+       }
+}
+EOF
+
+( echo add element inet raw iiftomark \{
+       for i in $(seq 1 $((maxclients-1))); do
+               echo \"veth$i\" : $i,
+       done
+       echo \"veth$maxclients\" : $maxclients \}
+       echo add element inet raw iiftozone \{
+       for i in $(seq 1 $((maxclients-1))); do
+               echo \"veth$i\" : $i,
+       done
+       echo \"veth$maxclients\" : $maxclients \}
+) | ip netns exec $gw nft -f /dev/stdin
+
+ip netns exec "$gw" sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
+ip netns exec "$gw" sysctl -q net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec "$gw" sysctl -q net.ipv4.conf.all.rp_filter=0 >/dev/null
+
+# useful for debugging: allows to use 'ping' from clients to gateway.
+ip netns exec "$gw" sysctl -q net.ipv4.fwmark_reflect=1 > /dev/null
+ip netns exec "$gw" sysctl -q net.ipv6.fwmark_reflect=1 > /dev/null
+
+for i in $(seq 1 $maxclients); do
+  cl="ns-cl$i-$sfx"
+  ip netns exec $cl ping -i 0.5 -q -c 3 10.3.0.99 > /dev/null 2>&1 &
+  if [ $? -ne 0 ]; then
+     echo FAIL: Ping failure from $cl 1>&2
+     ret=1
+     break
+  fi
+done
+
+wait
+
+for i in $(seq 1 $maxclients); do
+   ip netns exec $gw nft get element inet raw inicmp "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 }" | grep -q "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 counter packets 3 bytes 252 }"
+   if [ $? -ne 0 ];then
+      ret=1
+      echo "FAIL: counter icmp mismatch for veth$i" 1>&2
+      ip netns exec $gw nft get element inet raw inicmp "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 }" 1>&2
+      break
+   fi
+done
+
+ip netns exec $gw nft get element inet raw inicmp "{ 10.3.0.99 . \"veth0\" . 10.3.0.1 }" | grep -q "{ 10.3.0.99 . \"veth0\" . 10.3.0.1 counter packets $((3 * $maxclients)) bytes $((252 * $maxclients)) }"
+if [ $? -ne 0 ];then
+    ret=1
+    echo "FAIL: counter icmp mismatch for veth0: { 10.3.0.99 . \"veth0\" . 10.3.0.1 counter packets $((3 * $maxclients)) bytes $((252 * $maxclients)) }"
+    ip netns exec $gw nft get element inet raw inicmp "{ 10.3.0.99 . \"veth0\" . 10.3.0.1 }" 1>&2
+fi
+
+if  [ $ret -eq 0 ]; then
+       echo "PASS: ping test from all $maxclients namespaces"
+fi
+
+if [ $have_iperf -eq 0 ];then
+       echo "SKIP: iperf3 not installed"
+       if [ $ret -ne 0 ];then
+           exit $ret
+       fi
+       exit $ksft_skip
+fi
+
+ip netns exec $srv iperf3 -s > /dev/null 2>&1 &
+iperfpid=$!
+sleep 1
+
+for i in $(seq 1 $maxclients); do
+  if [ $ret -ne 0 ]; then
+     break
+  fi
+  cl="ns-cl$i-$sfx"
+  ip netns exec $cl iperf3 -c 10.3.0.99 --cport 10000 -n 1 > /dev/null
+  if [ $? -ne 0 ]; then
+     echo FAIL: Failure to connect for $cl 1>&2
+     ip netns exec $gw conntrack -S 1>&2
+     ret=1
+  fi
+done
+if [ $ret -eq 0 ];then
+       echo "PASS: iperf3 connections for all $maxclients net namespaces"
+fi
+
+kill $iperfpid
+wait
+
+for i in $(seq 1 $maxclients); do
+   ip netns exec $gw nft get element inet raw inflows "{ 10.1.0.3 . 10000 . \"veth$i\" . 10.3.0.99 . 5201 }" > /dev/null
+   if [ $? -ne 0 ];then
+      ret=1
+      echo "FAIL: can't find expected tcp entry for veth$i" 1>&2
+      break
+   fi
+done
+if [ $ret -eq 0 ];then
+       echo "PASS: Found client connection for all $maxclients net namespaces"
+fi
+
+ip netns exec $gw nft get element inet raw inflows "{ 10.3.0.99 . 5201 . \"veth0\" . 10.3.0.1 . 10000 }" > /dev/null
+if [ $? -ne 0 ];then
+    ret=1
+    echo "FAIL: cannot find return entry on veth0" 1>&2
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_zones_many.sh b/tools/testing/selftests/netfilter/nft_zones_many.sh
new file mode 100755 (executable)
index 0000000..ac64637
--- /dev/null
@@ -0,0 +1,156 @@
+#!/bin/bash
+
+# Test insertion speed for packets with identical addresses/ports
+# that are all placed in distinct conntrack zones.
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns="ns-$sfx"
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+zones=20000
+have_ct_tool=0
+ret=0
+
+cleanup()
+{
+       ip netns del $ns
+}
+
+ip netns add $ns
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not create net namespace $gw"
+       exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+conntrack -V > /dev/null 2>&1
+if [ $? -eq 0 ];then
+       have_ct_tool=1
+fi
+
+ip -net "$ns" link set lo up
+
+test_zones() {
+       local max_zones=$1
+
+ip netns exec $ns sysctl -q net.netfilter.nf_conntrack_udp_timeout=3600
+ip netns exec $ns nft -f /dev/stdin<<EOF
+flush ruleset
+table inet raw {
+       map rndzone {
+               typeof numgen inc mod $max_zones : ct zone
+       }
+
+       chain output {
+               type filter hook output priority -64000; policy accept;
+               udp dport 12345  ct zone set numgen inc mod 65536 map @rndzone
+       }
+}
+EOF
+       (
+               echo "add element inet raw rndzone {"
+       for i in $(seq 1 $max_zones);do
+               echo -n "$i : $i"
+               if [ $i -lt $max_zones ]; then
+                       echo ","
+               else
+                       echo "}"
+               fi
+       done
+       ) | ip netns exec $ns nft -f /dev/stdin
+
+       local i=0
+       local j=0
+       local outerstart=$(date +%s%3N)
+       local stop=$outerstart
+
+       while [ $i -lt $max_zones ]; do
+               local start=$(date +%s%3N)
+               i=$((i + 10000))
+               j=$((j + 1))
+               dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null
+               if [ $? -ne 0 ] ;then
+                       ret=1
+                       break
+               fi
+
+               stop=$(date +%s%3N)
+               local duration=$((stop-start))
+               echo "PASS: added 10000 entries in $duration ms (now $i total, loop $j)"
+       done
+
+       if [ $have_ct_tool -eq 1 ]; then
+               local count=$(ip netns exec "$ns" conntrack -C)
+               local duration=$((stop-outerstart))
+
+               if [ $count -eq $max_zones ]; then
+                       echo "PASS: inserted $count entries from packet path in $duration ms total"
+               else
+                       ip netns exec $ns conntrack -S 1>&2
+                       echo "FAIL: inserted $count entries from packet path in $duration ms total, expected $max_zones entries"
+                       ret=1
+               fi
+       fi
+
+       if [ $ret -ne 0 ];then
+               echo "FAIL: insert $max_zones entries from packet path" 1>&2
+       fi
+}
+
+test_conntrack_tool() {
+       local max_zones=$1
+
+       ip netns exec $ns conntrack -F >/dev/null 2>/dev/null
+
+       local outerstart=$(date +%s%3N)
+       local start=$(date +%s%3N)
+       local stop=$start
+       local i=0
+       while [ $i -lt $max_zones ]; do
+               i=$((i + 1))
+               ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \
+                        --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i >/dev/null 2>&1
+               if [ $? -ne 0 ];then
+                       ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \
+                        --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i > /dev/null
+                       echo "FAIL: conntrack -I returned an error"
+                       ret=1
+                       break
+               fi
+
+               if [ $((i%10000)) -eq 0 ];then
+                       stop=$(date +%s%3N)
+
+                       local duration=$((stop-start))
+                       echo "PASS: added 10000 entries in $duration ms (now $i total)"
+                       start=$stop
+               fi
+       done
+
+       local count=$(ip netns exec "$ns" conntrack -C)
+       local duration=$((stop-outerstart))
+
+       if [ $count -eq $max_zones ]; then
+               echo "PASS: inserted $count entries via ctnetlink in $duration ms"
+       else
+               ip netns exec $ns conntrack -S 1>&2
+               echo "FAIL: inserted $count entries via ctnetlink in $duration ms, expected $max_zones entries ($duration ms)"
+               ret=1
+       fi
+}
+
+test_zones $zones
+
+if [ $have_ct_tool -eq 1 ];then
+       test_conntrack_tool $zones
+else
+       echo "SKIP: Could not run ctnetlink insertion test without conntrack tool"
+       if [ $ret -eq 0 ];then
+               exit $ksft_skip
+       fi
+fi
+
+exit $ret
index bd1ca25febe4c71543a646a901c0470118307063..aed632d29fff5273c69210a85bdaece582d91e9b 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#include <ppc-asm.h>
+#include <basic_asm.h>
 #include <asm/unistd.h>
 
        .text
@@ -26,3 +26,38 @@ FUNC_START(getppid_tm_suspended)
 1:
        li      r3, -1
        blr
+
+
+.macro scv level
+       .long (0x44000001 | (\level) << 5)
+.endm
+
+FUNC_START(getppid_scv_tm_active)
+       PUSH_BASIC_STACK(0)
+       tbegin.
+       beq 1f
+       li      r0, __NR_getppid
+       scv     0
+       tend.
+       POP_BASIC_STACK(0)
+       blr
+1:
+       li      r3, -1
+       POP_BASIC_STACK(0)
+       blr
+
+FUNC_START(getppid_scv_tm_suspended)
+       PUSH_BASIC_STACK(0)
+       tbegin.
+       beq 1f
+       li      r0, __NR_getppid
+       tsuspend.
+       scv     0
+       tresume.
+       tend.
+       POP_BASIC_STACK(0)
+       blr
+1:
+       li      r3, -1
+       POP_BASIC_STACK(0)
+       blr
index 467a6b3134b2a36341c078b92d66ddf5fe3b9189..b763354c2eb4768eb3fe2f95b1da73c4a796e613 100644 (file)
 #include "utils.h"
 #include "tm.h"
 
+#ifndef PPC_FEATURE2_SCV
+#define PPC_FEATURE2_SCV               0x00100000 /* scv syscall */
+#endif
+
 extern int getppid_tm_active(void);
 extern int getppid_tm_suspended(void);
+extern int getppid_scv_tm_active(void);
+extern int getppid_scv_tm_suspended(void);
 
 unsigned retries = 0;
 
 #define TEST_DURATION 10 /* seconds */
 
-pid_t getppid_tm(bool suspend)
+pid_t getppid_tm(bool scv, bool suspend)
 {
        int i;
        pid_t pid;
 
        for (i = 0; i < TM_RETRIES; i++) {
-               if (suspend)
-                       pid = getppid_tm_suspended();
-               else
-                       pid = getppid_tm_active();
+               if (suspend) {
+                       if (scv)
+                               pid = getppid_scv_tm_suspended();
+                       else
+                               pid = getppid_tm_suspended();
+               } else {
+                       if (scv)
+                               pid = getppid_scv_tm_active();
+                       else
+                               pid = getppid_tm_active();
+               }
 
                if (pid >= 0)
                        return pid;
@@ -82,15 +95,24 @@ int tm_syscall(void)
                 * Test a syscall within a suspended transaction and verify
                 * that it succeeds.
                 */
-               FAIL_IF(getppid_tm(true) == -1); /* Should succeed. */
+               FAIL_IF(getppid_tm(false, true) == -1); /* Should succeed. */
 
                /*
                 * Test a syscall within an active transaction and verify that
                 * it fails with the correct failure code.
                 */
-               FAIL_IF(getppid_tm(false) != -1);  /* Should fail... */
+               FAIL_IF(getppid_tm(false, false) != -1);  /* Should fail... */
                FAIL_IF(!failure_is_persistent()); /* ...persistently... */
                FAIL_IF(!failure_is_syscall());    /* ...with code syscall. */
+
+               /* Now do it all again with scv if it is available. */
+               if (have_hwcap2(PPC_FEATURE2_SCV)) {
+                       FAIL_IF(getppid_tm(true, true) == -1); /* Should succeed. */
+                       FAIL_IF(getppid_tm(true, false) != -1);  /* Should fail... */
+                       FAIL_IF(!failure_is_persistent()); /* ...persistently... */
+                       FAIL_IF(!failure_is_syscall());    /* ...with code syscall. */
+               }
+
                gettimeofday(&now, 0);
        }
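
The scv variants only run when have_hwcap2(PPC_FEATURE2_SCV) reports support. As a hedged sketch of what that gate boils down to outside the selftest harness, the same bit can be read from the ELF auxiliary vector with glibc's getauxval(); the fallback #define mirrors the one added in the hunk above:

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef PPC_FEATURE2_SCV
    #define PPC_FEATURE2_SCV 0x00100000 /* scv syscall, as defined in the patch */
    #endif

    int main(void)
    {
            unsigned long hwcap2 = getauxval(AT_HWCAP2);

            if (hwcap2 & PPC_FEATURE2_SCV)
                    printf("scv advertised: the scv TM variants would run\n");
            else
                    printf("no scv: only the plain sc variants would run\n");
            return 0;
    }
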
 
index ee8208b2f946024e28367faa3eff0b5f2425324c..69c3ead25313de00f4129c2a24ff7ebd7ee0d0b4 100644 (file)
@@ -265,12 +265,6 @@ nomem:
        }
 
        entry->ifnum = ifnum;
-
-       /* FIXME update USBDEVFS_CONNECTINFO so it tells about high speed etc */
-
-       fprintf(stderr, "%s speed\t%s\t%u\n",
-               speed(entry->speed), entry->name, entry->ifnum);
-
        entry->next = testdevs;
        testdevs = entry;
        return 0;
@@ -299,6 +293,14 @@ static void *handle_testdev (void *arg)
                return 0;
        }
 
+       status  =  ioctl(fd, USBDEVFS_GET_SPEED, NULL);
+       if (status < 0)
+               fprintf(stderr, "USBDEVFS_GET_SPEED failed %d\n", status);
+       else
+               dev->speed = status;
+       fprintf(stderr, "%s speed\t%s\t%u\n",
+                       speed(dev->speed), dev->name, dev->ifnum);
+
 restart:
        for (i = 0; i < TEST_CASES; i++) {
                if (dev->test != -1 && dev->test != i)
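
In place of the removed FIXME, the test now queries usbfs directly: USBDEVFS_GET_SPEED returns the connection speed as a small integer (the value testusb feeds to its speed() pretty-printer) or a negative error. A minimal sketch of the same query against a single device node; the path is a placeholder (testusb itself enumerates /dev/bus/usb), and building it needs a uapi header recent enough to define USBDEVFS_GET_SPEED:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/usbdevice_fs.h>

    int main(void)
    {
            /* Placeholder node; real code walks the device tree as testusb does. */
            int fd = open("/dev/bus/usb/001/002", O_RDWR);
            int speed;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            speed = ioctl(fd, USBDEVFS_GET_SPEED, NULL);
            if (speed < 0)
                    perror("USBDEVFS_GET_SPEED");
            else
                    printf("raw speed value: %d\n", speed);

            close(fd);
            return 0;
    }
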
index 0517c744b04e8d5d62c540d1c710591a12909b4b..f62f10c988db107c5aba893e31972f0e64a3b0f4 100644 (file)
@@ -1331,7 +1331,7 @@ int main(int argc, char *argv[])
        if (opt_list && opt_list_mapcnt)
                kpagecount_fd = checked_open(PROC_KPAGECOUNT, O_RDONLY);
 
-       if (opt_mark_idle && opt_file)
+       if (opt_mark_idle)
                page_idle_fd = checked_open(SYS_KERNEL_MM_PAGE_IDLE, O_RDWR);
 
        if (opt_list && opt_pid)
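
The one-line fix above opens the idle-tracking bitmap whenever idle marking is requested (opt_mark_idle), not only when a file argument (opt_file) is also given. For context, a hedged sketch of opening that interface directly; /sys/kernel/mm/page_idle/bitmap is the documented idle page tracking file, which is presumably what SYS_KERNEL_MM_PAGE_IDLE expands to:

    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            /* Needs root and CONFIG_IDLE_PAGE_TRACKING, which is why the tool
             * only opens it once idle marking is actually requested.
             */
            int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);

            if (fd < 0) {
                    perror("open /sys/kernel/mm/page_idle/bitmap");
                    return 1;
            }
            puts("page_idle bitmap opened read-write");
            return 0;
    }
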
index 439d3b4cd1a941ea8aca486ccc7e6c2b50241e48..7851f3a1b5f7c0c5ea59d808b40d39ef7cf0fd8a 100644 (file)
@@ -235,9 +235,13 @@ static void ack_flush(void *_completed)
 {
 }
 
-static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
+static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
 {
-       if (unlikely(!cpus))
+       const struct cpumask *cpus;
+
+       if (likely(cpumask_available(tmp)))
+               cpus = tmp;
+       else
                cpus = cpu_online_mask;
 
        if (cpumask_empty(cpus))
@@ -263,14 +267,34 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
                        continue;
 
                kvm_make_request(req, vcpu);
-               cpu = vcpu->cpu;
 
                if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
                        continue;
 
-               if (tmp != NULL && cpu != -1 && cpu != me &&
-                   kvm_request_needs_ipi(vcpu, req))
-                       __cpumask_set_cpu(cpu, tmp);
+               /*
+                * tmp can be "unavailable" if cpumasks are allocated off stack
+                * as allocation of the mask is deliberately not fatal and is
+                * handled by falling back to kicking all online CPUs.
+                */
+               if (!cpumask_available(tmp))
+                       continue;
+
+               /*
+                * Note, the vCPU could get migrated to a different pCPU at any
+                * point after kvm_request_needs_ipi(), which could result in
+                * sending an IPI to the previous pCPU.  But, that's ok because
+                * the purpose of the IPI is to ensure the vCPU returns to
+                * OUTSIDE_GUEST_MODE, which is satisfied if the vCPU migrates.
+                * Entering READING_SHADOW_PAGE_TABLES after this point is also
+                * ok, as the requirement is only that KVM wait for vCPUs that
+                * were reading SPTEs _before_ any changes were finalized.  See
+                * kvm_vcpu_kick() for more details on handling requests.
+                */
+               if (kvm_request_needs_ipi(vcpu, req)) {
+                       cpu = READ_ONCE(vcpu->cpu);
+                       if (cpu != -1 && cpu != me)
+                               __cpumask_set_cpu(cpu, tmp);
+               }
        }
 
        called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
@@ -302,13 +326,8 @@ EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-       /*
-        * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
-        * kvm_make_all_cpus_request.
-        */
-       long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
-
        ++kvm->stat.generic.remote_tlb_flush_requests;
+
        /*
         * We want to publish modifications to the page tables before reading
         * mode. Pairs with a memory barrier in arch-specific code.
@@ -323,7 +342,6 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
        if (!kvm_arch_flush_remote_tlb(kvm)
            || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.generic.remote_tlb_flush;
-       cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 #endif
@@ -528,7 +546,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
                }
        }
 
-       if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
+       if (range->flush_on_ret && ret)
                kvm_flush_remote_tlbs(kvm);
 
        if (locked)
@@ -3134,15 +3152,19 @@ out:
 
 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
-       unsigned int old, val, shrink;
+       unsigned int old, val, shrink, grow_start;
 
        old = val = vcpu->halt_poll_ns;
        shrink = READ_ONCE(halt_poll_ns_shrink);
+       grow_start = READ_ONCE(halt_poll_ns_grow_start);
        if (shrink == 0)
                val = 0;
        else
                val /= shrink;
 
+       if (val < grow_start)
+               val = 0;
+
        vcpu->halt_poll_ns = val;
        trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
 }
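
With this change the shrink path clamps straight to zero whenever the shrunk window would drop below halt_poll_ns_grow_start, so polling is disabled outright rather than left at a window smaller than the value growth would restart from. A user-space rendering of just that arithmetic, with the module parameters as plain arguments (the kernel reads them with READ_ONCE()); the function name here is illustrative, not the kernel's:

    #include <stdio.h>

    /* New shrink rule: divide by the shrink factor, then fall back to 0
     * if the result is below grow_start (halt_poll_ns_grow_start).
     */
    static unsigned int shrink_poll_window(unsigned int val, unsigned int shrink,
                                           unsigned int grow_start)
    {
            if (shrink == 0)
                    val = 0;
            else
                    val /= shrink;

            if (val < grow_start)
                    val = 0;

            return val;
    }

    int main(void)
    {
            printf("%u\n", shrink_poll_window(10000, 2, 10000)); /* 0: 5000 < grow_start */
            printf("%u\n", shrink_poll_window(40000, 2, 10000)); /* 20000 */
            return 0;
    }
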
@@ -3290,16 +3312,24 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
  */
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
-       int me;
-       int cpu = vcpu->cpu;
+       int me, cpu;
 
        if (kvm_vcpu_wake_up(vcpu))
                return;
 
+       /*
+        * Note, the vCPU could get migrated to a different pCPU at any point
+        * after kvm_arch_vcpu_should_kick(), which could result in sending an
+        * IPI to the previous pCPU.  But, that's ok because the purpose of the
+        * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
+        * vCPU also requires it to leave IN_GUEST_MODE.
+        */
        me = get_cpu();
-       if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
-               if (kvm_arch_vcpu_should_kick(vcpu))
+       if (kvm_arch_vcpu_should_kick(vcpu)) {
+               cpu = READ_ONCE(vcpu->cpu);
+               if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
                        smp_send_reschedule(cpu);
+       }
        put_cpu();
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
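
Both kvm_make_vcpus_request_mask() and kvm_vcpu_kick() now snapshot vcpu->cpu with READ_ONCE() only after deciding that a kick is actually needed, and the new comments explain why a concurrent migration is harmless. As a loose user-space analogue of that snapshot pattern (not KVM code; the struct and names below are illustrative), a single relaxed C11 atomic load into a local keeps the range check and the value that is finally used consistent with each other:

    #include <stdatomic.h>
    #include <stdio.h>

    struct vcpu {
            _Atomic int cpu;        /* -1 when not loaded on any pCPU */
    };

    static void kick(struct vcpu *vcpu, int me, int nr_cpu_ids)
    {
            /* One relaxed snapshot, akin to READ_ONCE(vcpu->cpu); both the
             * bounds check and the (pretend) IPI target use the same value
             * even if vcpu->cpu keeps changing underneath us.
             */
            int cpu = atomic_load_explicit(&vcpu->cpu, memory_order_relaxed);

            if (cpu != me && (unsigned int)cpu < (unsigned int)nr_cpu_ids)
                    printf("would send IPI to cpu %d\n", cpu);
    }

    int main(void)
    {
            struct vcpu v = { .cpu = 3 };

            kick(&v, 0, 8);         /* prints: would send IPI to cpu 3 */
            return 0;
    }

The cpu_online() check from the kernel version is intentionally omitted here, since the sketch has no notion of CPU hotplug.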