git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
Merge tag 'riscv/for-v5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 16 Sep 2019 22:29:34 +0000 (15:29 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 16 Sep 2019 22:29:34 +0000 (15:29 -0700)
Pull RISC-V updates from Paul Walmsley:
 "Add the following new features:

   - Generic CPU topology description support for DT-based platforms,
     including ARM64, ARM and RISC-V.

   - Sparsemem support

   - Perf callchain support

   - SiFive PLIC irqchip modifications, in preparation for M-mode Linux

  and clean up the code base:

   - Clean up control and status register (CSR) manipulation code, IPIs,
     TLB flushing, and the RISC-V CPU-local timer code

   - Kbuild cleanup from one of the Kbuild maintainers"

[ The CPU topology parts came in through the arm64 tree with a shared
  branch   - Linus ]

* tag 'riscv/for-v5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  irqchip/sifive-plic: set max threshold for ignored handlers
  riscv: move the TLB flush logic out of line
  riscv: don't use the rdtime(h) pseudo-instructions
  riscv: cleanup riscv_cpuid_to_hartid_mask
  riscv: optimize send_ipi_single
  riscv: cleanup send_ipi_mask
  riscv: refactor the IPI code
  riscv: Add support for libdw
  riscv: Add support for perf registers sampling
  riscv: Add perf callchain support
  riscv: add arch/riscv/Kbuild
  RISC-V: Implement sparsemem
  riscv: Using CSR numbers to access CSRs

937 files changed:
.clang-format
.mailmap
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/perf/imx-ddr.rst [new file with mode: 0644]
Documentation/arm64/index.rst
Documentation/arm64/kasan-offsets.sh [new file with mode: 0644]
Documentation/arm64/memory.rst
Documentation/arm64/tagged-address-abi.rst [new file with mode: 0644]
Documentation/arm64/tagged-pointers.rst
Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
Documentation/devicetree/bindings/gpio/gpio-davinci.txt
Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt [new file with mode: 0644]
Documentation/devicetree/bindings/hwmon/ads1015.txt [deleted file]
Documentation/devicetree/bindings/hwmon/as370.txt [new file with mode: 0644]
Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
Documentation/devicetree/bindings/hwmon/lm75.txt
Documentation/devicetree/bindings/iio/adc/ads1015.txt [new file with mode: 0644]
Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt
Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
Documentation/devicetree/bindings/net/dsa/ksz.txt
Documentation/devicetree/bindings/net/macb.txt
Documentation/devicetree/bindings/regulator/act8865-regulator.txt
Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
Documentation/devicetree/bindings/regulator/mt6358-regulator.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
Documentation/devicetree/bindings/regulator/sy8824x.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/twl-regulator.txt
Documentation/devicetree/bindings/regulator/uniphier-regulator.txt
Documentation/devicetree/bindings/spi/nuvoton,npcm-fiu.txt [new file with mode: 0644]
Documentation/devicetree/bindings/spi/spi-controller.yaml
Documentation/devicetree/bindings/spi/spi-fsl-qspi.txt
Documentation/devicetree/bindings/spi/spi-mt65xx.txt
Documentation/devicetree/bindings/spi/spi-sprd-adi.txt
Documentation/devicetree/bindings/trivial-devices.yaml
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/driver-api/gpio/driver.rst
Documentation/hwmon/ads1015.rst [deleted file]
Documentation/hwmon/index.rst
Documentation/hwmon/inspur-ipsps1.rst [new file with mode: 0644]
Documentation/hwmon/lm75.rst
Documentation/hwmon/pxe1610 [deleted file]
Documentation/hwmon/pxe1610.rst [new file with mode: 0644]
Documentation/hwmon/shtc1.rst
Documentation/hwmon/submitting-patches.rst
Documentation/process/embargoed-hardware-issues.rst [new file with mode: 0644]
Documentation/process/index.rst
Documentation/riscv/boot-image-header.txt
Documentation/security/tpm/index.rst
Documentation/security/tpm/tpm_ftpm_tee.rst [new file with mode: 0644]
MAINTAINERS
Makefile
arch/Kconfig
arch/arc/boot/dts/Makefile
arch/arc/include/asm/entry-arcv2.h
arch/arc/include/asm/linkage.h
arch/arc/include/asm/mach_desc.h
arch/arc/kernel/mcip.c
arch/arc/kernel/unwind.c
arch/arc/mm/dma.c
arch/arc/plat-hsdk/platform.c
arch/arm/boot/dts/am33xx-l4.dtsi
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am437x-l4.dtsi
arch/arm/boot/dts/am571x-idk.dts
arch/arm/boot/dts/am572x-idk.dts
arch/arm/boot/dts/am574x-idk.dts
arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
arch/arm/boot/dts/dra7-evm.dts
arch/arm/boot/dts/dra7-l4.dtsi
arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
arch/arm/boot/dts/vf610-bk4.dts
arch/arm/configs/lpc32xx_defconfig
arch/arm/lib/backtrace.S
arch/arm/mach-ep93xx/edb93xx.c
arch/arm/mach-ep93xx/simone.c
arch/arm/mach-ep93xx/ts72xx.c
arch/arm/mach-ep93xx/vision_ep9307.c
arch/arm/mach-omap1/ams-delta-fiq-handler.S
arch/arm/mach-omap1/ams-delta-fiq.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/omap-iommu.c [new file with mode: 0644]
arch/arm/mach-omap2/omap4-common.c
arch/arm/mach-omap2/omap_hwmod_7xx_data.c
arch/arm/mm/init.c
arch/arm64/Kbuild [new file with mode: 0644]
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
arch/arm64/boot/dts/renesas/hihope-common.dtsi
arch/arm64/boot/dts/renesas/r8a77995-draak.dts
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/atomic.h
arch/arm64/include/asm/atomic_ll_sc.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/cache.h
arch/arm64/include/asm/cmpxchg.h
arch/arm64/include/asm/compat.h
arch/arm64/include/asm/cpu_ops.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/debug-monitors.h
arch/arm64/include/asm/dma-mapping.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/esr.h
arch/arm64/include/asm/exception.h
arch/arm64/include/asm/fpsimd.h
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/hw_breakpoint.h
arch/arm64/include/asm/io.h
arch/arm64/include/asm/irqflags.h
arch/arm64/include/asm/kasan.h
arch/arm64/include/asm/lse.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/pci.h
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/pointer_auth.h
arch/arm64/include/asm/proc-fns.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/ptrace.h
arch/arm64/include/asm/signal32.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/thread_info.h
arch/arm64/include/asm/tlbflush.h
arch/arm64/include/asm/uaccess.h
arch/arm64/include/asm/vdso.h
arch/arm64/include/asm/vdso_datapage.h
arch/arm64/include/uapi/asm/stat.h [deleted file]
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuidle.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/hibernate-asm.S
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/image-vars.h [new file with mode: 0644]
arch/arm64/kernel/image.h
arch/arm64/kernel/insn.c
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/kexec_image.c
arch/arm64/kernel/machine_kexec_file.c
arch/arm64/kernel/module-plts.c
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/process.c
arch/arm64/kernel/psci.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/smp_spin_table.c
arch/arm64/kernel/topology.c
arch/arm64/kernel/traps.c
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/va_layout.c
arch/arm64/lib/Makefile
arch/arm64/lib/atomic_ll_sc.c [deleted file]
arch/arm64/lib/error-inject.c [new file with mode: 0644]
arch/arm64/mm/dump.c
arch/arm64/mm/fault.c
arch/arm64/mm/init.c
arch/arm64/mm/ioremap.c
arch/arm64/mm/kasan_init.c
arch/arm64/mm/mmu.c
arch/arm64/mm/numa.c
arch/arm64/mm/pageattr.c
arch/arm64/mm/proc.S
arch/ia64/include/asm/iommu.h
arch/ia64/kernel/pci-dma.c
arch/m68k/atari/config.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/atarihw.h
arch/m68k/include/asm/io_mm.h
arch/m68k/include/asm/kmap.h
arch/m68k/include/asm/macintosh.h
arch/m68k/mac/config.c
arch/nds32/kernel/signal.c
arch/powerpc/Makefile
arch/powerpc/include/asm/error-injection.h [deleted file]
arch/powerpc/kernel/process.c
arch/powerpc/kvm/book3s_64_vio.c
arch/powerpc/kvm/book3s_64_vio_hv.c
arch/powerpc/mm/nohash/tlb.c
arch/riscv/include/asm/image.h
arch/riscv/kernel/head.S
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/kernel/sys_sparc_64.c
arch/unicore32/kernel/irq.c
arch/x86/Makefile
arch/x86/boot/compressed/pgtable_64.c
arch/x86/events/amd/ibs.c
arch/x86/events/intel/core.c
arch/x86/hyperv/mmu.c
arch/x86/include/asm/bootparam_utils.h
arch/x86/include/asm/error-injection.h [deleted file]
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/iommu.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/uaccess.h
arch/x86/kernel/amd_nb.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/bigsmp_32.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/mce/severity.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/uprobes.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/pageattr.c
arch/x86/purgatory/Makefile
drivers/acpi/arm64/iort.c
drivers/acpi/pptt.c
drivers/atm/Kconfig
drivers/base/regmap/regmap-debugfs.c
drivers/base/regmap/regmap-irq.c
drivers/block/rbd.c
drivers/bluetooth/bpa10x.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_qca.c
drivers/bus/hisi_lpc.c
drivers/bus/ti-sysc.c
drivers/char/Kconfig
drivers/char/random.c
drivers/char/tpm/Kconfig
drivers/char/tpm/Makefile
drivers/char/tpm/tpm-chip.c
drivers/char/tpm/tpm-sysfs.c
drivers/char/tpm/tpm_ftpm_tee.c [new file with mode: 0644]
drivers/char/tpm/tpm_ftpm_tee.h [new file with mode: 0644]
drivers/char/tpm/tpm_tis_core.c
drivers/cpuidle/Kconfig.arm
drivers/cpuidle/Makefile
drivers/cpuidle/cpuidle-arm.c
drivers/cpuidle/cpuidle-psci.c [new file with mode: 0644]
drivers/crypto/ccp/ccp-dev.c
drivers/dma/bcm2835-dma.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/sprd-dma.c
drivers/dma/ti/dma-crossbar.c
drivers/dma/ti/omap-dma.c
drivers/edac/Kconfig
drivers/edac/Makefile
drivers/edac/altera_edac.c
drivers/edac/altera_edac.h
drivers/edac/amd64_edac.c
drivers/edac/amd64_edac.h
drivers/edac/bluefield_edac.c [new file with mode: 0644]
drivers/edac/edac_mc.c
drivers/edac/edac_mc.h
drivers/edac/edac_mc_sysfs.c
drivers/edac/ghes_edac.c
drivers/edac/i5100_edac.c
drivers/edac/pnd2_edac.c
drivers/firmware/psci/psci.c
drivers/firmware/psci/psci_checker.c
drivers/fpga/altera-ps-spi.c
drivers/fsi/fsi-scom.c
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/gpio-arizona.c
drivers/gpio/gpio-aspeed.c
drivers/gpio/gpio-bd70528.c
drivers/gpio/gpio-brcmstb.c
drivers/gpio/gpio-cadence.c
drivers/gpio/gpio-creg-snps.c
drivers/gpio/gpio-dwapb.c
drivers/gpio/gpio-eic-sprd.c
drivers/gpio/gpio-em.c
drivers/gpio/gpio-ep93xx.c
drivers/gpio/gpio-ftgpio010.c
drivers/gpio/gpio-grgpio.c
drivers/gpio/gpio-hlwd.c
drivers/gpio/gpio-htc-egpio.c
drivers/gpio/gpio-intel-mid.c
drivers/gpio/gpio-ixp4xx.c
drivers/gpio/gpio-ks8695.c [deleted file]
drivers/gpio/gpio-lpc32xx.c
drivers/gpio/gpio-lynxpoint.c
drivers/gpio/gpio-madera.c
drivers/gpio/gpio-max77620.c
drivers/gpio/gpio-max77650.c
drivers/gpio/gpio-mb86s7x.c
drivers/gpio/gpio-merrifield.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-mt7621.c
drivers/gpio/gpio-mxc.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-pch.c
drivers/gpio/gpio-pmic-eic-sprd.c
drivers/gpio/gpio-sprd.c
drivers/gpio/gpio-stmpe.c
drivers/gpio/gpio-tb10x.c
drivers/gpio/gpio-tegra.c
drivers/gpio/gpio-thunderx.c
drivers/gpio/gpio-tqmx86.c
drivers/gpio/gpio-vf610.c
drivers/gpio/gpio-viperboard.c
drivers/gpio/gpio-xgene-sb.c
drivers/gpio/gpio-xlp.c
drivers/gpio/gpio-zx.c
drivers/gpio/gpio-zynq.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib-acpi.h [new file with mode: 0644]
drivers/gpio/gpiolib-devres.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib-of.h [new file with mode: 0644]
drivers/gpio/gpiolib.c
drivers/gpio/gpiolib.h
drivers/gpio/sgpio-aspeed.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
drivers/gpu/drm/arm/display/komeda/komeda_dev.c
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_vdsc.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_vgpu.c
drivers/gpu/drm/ingenic/ingenic-drm.c
drivers/gpu/drm/lima/lima_gem.c
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
drivers/gpu/drm/omapdrm/dss/output.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/qxl/qxl_drv.c
drivers/gpu/drm/selftests/drm_cmdline_selftests.h
drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
drivers/gpu/drm/virtio/virtgpu_object.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
drivers/hwmon/Kconfig
drivers/hwmon/Makefile
drivers/hwmon/acpi_power_meter.c
drivers/hwmon/ads1015.c [deleted file]
drivers/hwmon/adt7475.c
drivers/hwmon/as370-hwmon.c [new file with mode: 0644]
drivers/hwmon/asb100.c
drivers/hwmon/coretemp.c
drivers/hwmon/iio_hwmon.c
drivers/hwmon/k10temp.c
drivers/hwmon/k8temp.c
drivers/hwmon/lm75.c
drivers/hwmon/ltc2990.c
drivers/hwmon/nct6775.c
drivers/hwmon/nct7904.c
drivers/hwmon/npcm750-pwm-fan.c
drivers/hwmon/pmbus/Kconfig
drivers/hwmon/pmbus/Makefile
drivers/hwmon/pmbus/ibm-cffps.c
drivers/hwmon/pmbus/inspur-ipsps.c [new file with mode: 0644]
drivers/hwmon/pmbus/max31785.c
drivers/hwmon/pmbus/ucd9000.c
drivers/hwmon/raspberrypi-hwmon.c
drivers/hwmon/shtc1.c
drivers/hwmon/smm665.c
drivers/hwmon/w83781d.c
drivers/hwmon/w83791d.c
drivers/hwmon/w83792d.c
drivers/hwmon/w83793.c
drivers/hwtracing/intel_th/pci.c
drivers/hwtracing/stm/core.c
drivers/i2c/busses/i2c-bcm-iproc.c
drivers/i2c/busses/i2c-designware-slave.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/i2c-core-base.c
drivers/i3c/device.c
drivers/i3c/master.c
drivers/i3c/master/dw-i3c-master.c
drivers/i3c/master/i3c-master-cdns.c
drivers/iio/adc/Kconfig
drivers/infiniband/sw/siw/siw_cm.c
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu.h [new file with mode: 0644]
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_quirks.c [new file with mode: 0644]
drivers/iommu/amd_iommu_types.h
drivers/iommu/arm-smmu-impl.c [new file with mode: 0644]
drivers/iommu/arm-smmu-regs.h [deleted file]
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/arm-smmu.h [new file with mode: 0644]
drivers/iommu/dma-iommu.c
drivers/iommu/dmar.c
drivers/iommu/exynos-iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel-svm.c
drivers/iommu/intel-trace.c [new file with mode: 0644]
drivers/iommu/intel_irq_remapping.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/iommu.c
drivers/iommu/iova.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/msm_iommu.c
drivers/iommu/mtk_iommu.c
drivers/iommu/mtk_iommu.h
drivers/iommu/mtk_iommu_v1.c
drivers/iommu/omap-iommu.c
drivers/iommu/omap-iommu.h
drivers/iommu/qcom_iommu.c
drivers/iommu/rockchip-iommu.c
drivers/iommu/s390-iommu.c
drivers/iommu/tegra-gart.c
drivers/iommu/tegra-smmu.c
drivers/iommu/virtio-iommu.c
drivers/isdn/capi/capi.c
drivers/memory/mtk-smi.c
drivers/mfd/rk808.c
drivers/misc/lkdtm/bugs.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/pci-me.c
drivers/misc/vmw_balloon.c
drivers/misc/vmw_vmci/vmci_doorbell.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/core/sd.c
drivers/mmc/host/bcm2835.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-cadence.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mmc/host/sdhci-pci-o2micro.c
drivers/mmc/host/sdhci-sprd.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/tmio_mmc.c
drivers/mmc/host/tmio_mmc.h
drivers/mmc/host/tmio_mmc_core.c
drivers/mmc/host/uniphier-sd.c
drivers/mtd/hyperbus/Kconfig
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/microchip/ksz9477_spi.c
drivers/net/dsa/microchip/ksz_common.h
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/aquantia/atlantic/aq_filters.c
drivers/net/ethernet/aquantia/atlantic/aq_main.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/freescale/enetc/enetc_ptp.c
drivers/net/ethernet/google/gve/gve_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mscc/ocelot_ace.c
drivers/net/ethernet/natsemi/sonic.c
drivers/net/ethernet/netronome/nfp/bpf/jit.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/seeq/sgiseeq.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/hamradio/6pack.c
drivers/net/ieee802154/mac802154_hwsim.c
drivers/net/phy/phy-c45.c
drivers/net/phy/phy.c
drivers/net/phy/phylink.c
drivers/net/tun.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/net/wan/lmc/lmc_main.c
drivers/net/wimax/i2400m/op-rfkill.c
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/marvell/mwifiex/ie.c
drivers/net/wireless/marvell/mwifiex/uap_cmd.c
drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
drivers/net/wireless/rsi/rsi_91x_usb.c
drivers/nfc/st95hf/core.c
drivers/nvdimm/pfn_devs.c
drivers/of/fdt.c
drivers/perf/arm_smmuv3_pmu.c
drivers/perf/fsl_imx8_ddr_perf.c
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
drivers/perf/qcom_l2_pmu.c
drivers/perf/xgene_pmu.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
drivers/pinctrl/aspeed/pinmux-aspeed.c
drivers/pinctrl/aspeed/pinmux-aspeed.h
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/pinctrl-stmfx.c
drivers/pinctrl/qcom/Kconfig
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
drivers/platform/chrome/cros_ec_spi.c
drivers/platform/x86/intel_int0002_vgpio.c
drivers/ras/Makefile
drivers/ras/cec.c
drivers/ras/debugfs.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/act8865-regulator.c
drivers/regulator/act8945a-regulator.c
drivers/regulator/core.c
drivers/regulator/da9062-regulator.c
drivers/regulator/da9063-regulator.c
drivers/regulator/da9211-regulator.c
drivers/regulator/fixed.c
drivers/regulator/helpers.c
drivers/regulator/lm363x-regulator.c
drivers/regulator/lp87565-regulator.c
drivers/regulator/lp8788-ldo.c
drivers/regulator/max77686-regulator.c
drivers/regulator/max8660.c
drivers/regulator/mt6358-regulator.c [new file with mode: 0644]
drivers/regulator/qcom-rpmh-regulator.c
drivers/regulator/rk808-regulator.c
drivers/regulator/s2mps11.c
drivers/regulator/slg51000-regulator.c
drivers/regulator/stm32-booster.c
drivers/regulator/sy8824x.c [new file with mode: 0644]
drivers/regulator/tps65132-regulator.c
drivers/regulator/twl-regulator.c
drivers/regulator/twl6030-regulator.c
drivers/regulator/uniphier-regulator.c
drivers/s390/net/qeth_core_main.c
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_sli4.h
drivers/soc/ixp4xx/Kconfig
drivers/soc/qcom/qcom-geni-se.c
drivers/soc/ti/pm33xx.c
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/atmel-quadspi.c
drivers/spi/spi-altera.c
drivers/spi/spi-armada-3700.c
drivers/spi/spi-ath79.c
drivers/spi/spi-atmel.c
drivers/spi/spi-axi-spi-engine.c
drivers/spi/spi-bcm-qspi.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-bcm2835aux.c
drivers/spi/spi-bcm63xx-hsspi.c
drivers/spi/spi-bcm63xx.c
drivers/spi/spi-cadence.c
drivers/spi/spi-cavium-octeon.c
drivers/spi/spi-clps711x.c
drivers/spi/spi-coldfire-qspi.c
drivers/spi/spi-dw-mmio.c
drivers/spi/spi-dw-pci.c
drivers/spi/spi-efm32.c
drivers/spi/spi-ep93xx.c
drivers/spi/spi-fsl-cpm.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-fsl-lib.h
drivers/spi/spi-fsl-qspi.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-geni-qcom.c
drivers/spi/spi-gpio.c
drivers/spi/spi-lantiq-ssc.c
drivers/spi/spi-lp8841-rtc.c
drivers/spi/spi-meson-spicc.c
drivers/spi/spi-meson-spifc.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-mt7621.c
drivers/spi/spi-mxs.c
drivers/spi/spi-npcm-fiu.c [new file with mode: 0644]
drivers/spi/spi-npcm-pspi.c
drivers/spi/spi-nuc900.c
drivers/spi/spi-nxp-fspi.c
drivers/spi/spi-oc-tiny.c
drivers/spi/spi-pic32-sqi.c
drivers/spi/spi-pic32.c
drivers/spi/spi-qcom-qspi.c
drivers/spi/spi-rb4xx.c
drivers/spi/spi-s3c24xx.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi-sh.c
drivers/spi/spi-sifive.c
drivers/spi/spi-sirf.c
drivers/spi/spi-slave-mt27xx.c
drivers/spi/spi-sprd-adi.c
drivers/spi/spi-sprd.c
drivers/spi/spi-st-ssc4.c
drivers/spi/spi-stm32-qspi.c
drivers/spi/spi-sun4i.c
drivers/spi/spi-sun6i.c
drivers/spi/spi-synquacer.c
drivers/spi/spi-tegra20-sflash.c
drivers/spi/spi-ti-qspi.c
drivers/spi/spi-uniphier.c
drivers/spi/spi-xlp.c
drivers/spi/spi-zynq-qspi.c
drivers/spi/spi-zynqmp-gqspi.c
drivers/spi/spi.c
drivers/usb/chipidea/udc.c
drivers/usb/class/cdc-wdm.c
drivers/usb/class/usbtmc.c
drivers/usb/core/hcd-pci.c
drivers/usb/gadget/udc/lpc32xx_udc.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/xhci-rcar.c
drivers/usb/host/xhci-tegra.c
drivers/usb/storage/realtek_cr.c
drivers/usb/storage/unusual_devs.h
drivers/usb/typec/tcpm/tcpm.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/test.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/video/fbdev/atafb.c
drivers/virtio/virtio_ring.c
drivers/xen/swiotlb-xen.c
fs/btrfs/extent_io.c
fs/btrfs/tree-log.c
fs/cifs/cifsfs.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/misc.c
fs/cifs/sess.c
fs/configfs/configfs_internal.h
fs/configfs/dir.c
fs/configfs/file.c
fs/ext4/inode.c
fs/nfs/dir.c
fs/nfs/direct.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4file.c
fs/nfs/pagelist.c
fs/nfs/pnfs_nfs.c
fs/nfs/proc.c
fs/nfs/read.c
fs/nfs/write.c
include/asm-generic/error-injection.h
include/dt-bindings/memory/mt8183-larb-port.h [new file with mode: 0644]
include/dt-bindings/regulator/active-semi,8865-regulator.h [new file with mode: 0644]
include/linux/acpi.h
include/linux/amd-iommu.h
include/linux/blk_types.h
include/linux/compiler.h
include/linux/cpuidle.h
include/linux/edac.h
include/linux/error-injection.h
include/linux/gpio.h
include/linux/gpio/consumer.h
include/linux/gpio/driver.h
include/linux/i3c/device.h
include/linux/i3c/master.h
include/linux/input/elan-i2c-ids.h
include/linux/intel-iommu.h
include/linux/io-pgtable.h
include/linux/iommu.h
include/linux/logic_pio.h
include/linux/mmzone.h
include/linux/netfilter/nf_conntrack_h323_types.h
include/linux/of_gpio.h
include/linux/omap-iommu.h
include/linux/pci_ids.h
include/linux/phy.h
include/linux/phy_fixed.h
include/linux/pid.h
include/linux/platform_data/gpio-htc-egpio.h
include/linux/platform_data/iommu-omap.h
include/linux/psci.h
include/linux/random.h
include/linux/regulator/consumer.h
include/linux/regulator/mt6358-regulator.h [new file with mode: 0644]
include/linux/sunrpc/sched.h
include/linux/swiotlb.h
include/linux/syscalls.h
include/linux/trace_events.h
include/math-emu/op-common.h
include/net/act_api.h
include/net/addrconf.h
include/net/ip_fib.h
include/net/net_namespace.h
include/net/nexthop.h
include/net/psample.h
include/net/route.h
include/net/xfrm.h
include/soc/arc/mcip.h
include/soc/mediatek/smi.h
include/trace/events/intel_iommu.h [new file with mode: 0644]
include/trace/events/rxrpc.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/isdn/capicmd.h
include/uapi/linux/netfilter/xt_nfacct.h
include/uapi/linux/prctl.h
include/uapi/linux/rds.h
include/uapi/linux/wait.h
init/Kconfig
ipc/util.h
kernel/bpf/core.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/dma/direct.c
kernel/dma/swiotlb.c
kernel/events/hw_breakpoint.c
kernel/exit.c
kernel/fork.c
kernel/irq/resend.c
kernel/jump_label.c
kernel/kallsyms.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/signal.c
kernel/sys.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_events.c
kernel/trace/trace_probe.c
lib/Kconfig
lib/kfifo.c
lib/logic_pio.c
mm/balloon_compaction.c
mm/memcontrol.c
mm/vmscan.c
mm/z3fold.c
mm/zsmalloc.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v_ogm.c
net/batman-adv/netlink.c
net/bluetooth/hci_event.c
net/bluetooth/l2cap_core.c
net/bridge/br_mdb.c
net/bridge/br_netfilter_hooks.c
net/bridge/netfilter/ebtables.c
net/bridge/netfilter/nft_meta_bridge.c
net/ceph/crypto.c
net/core/dev.c
net/core/filter.c
net/core/flow_dissector.c
net/core/netpoll.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_map.c
net/core/stream.c
net/dsa/tag_8021q.c
net/ieee802154/socket.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/mcast.c
net/ipv6/ping.c
net/ipv6/route.c
net/mac80211/cfg.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mpls/mpls_iptunnel.c
net/ncsi/ncsi-cmd.c
net/ncsi/ncsi-rsp.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_ip.c
net/netfilter/nft_fib_netdev.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_socket.c
net/netfilter/xt_nfacct.c
net/netfilter/xt_physdev.c
net/openvswitch/conntrack.c
net/openvswitch/flow.c
net/openvswitch/flow.h
net/psample/psample.c
net/qrtr/tun.c
net/rds/bind.c
net/rds/ib.c
net/rds/ib.h
net/rds/ib_cm.c
net/rds/rdma_transport.c
net/rds/recv.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_event.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_event.c
net/rxrpc/conn_object.c
net/rxrpc/input.c
net/rxrpc/local_event.c
net/rxrpc/local_object.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/rxrpc/protocol.h
net/rxrpc/recvmsg.c
net/rxrpc/rxkad.c
net/rxrpc/sendmsg.c
net/rxrpc/skbuff.c
net/sched/act_bpf.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_ct.c
net/sched/act_ctinfo.c
net/sched/act_gact.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_mpls.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_sample.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_skbmod.c
net/sched/act_tunnel_key.c
net/sched/act_vlan.c
net/sched/sch_api.c
net/sched/sch_cbs.c
net/sched/sch_generic.c
net/sched/sch_hhf.c
net/sched/sch_taprio.c
net/sctp/protocol.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/smc/smc_tx.c
net/sunrpc/clnt.c
net/sunrpc/xprt.c
net/tipc/name_distr.c
net/wireless/reg.c
net/wireless/util.c
net/xdp/xdp_umem.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_policy.c
scripts/Makefile.kasan
scripts/tools-support-relr.sh [new file with mode: 0755]
security/keys/request_key.c
security/keys/request_key_auth.c
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_fifo.c
sound/core/seq/seq_fifo.h
sound/firewire/oxfw/oxfw-pcm.c
sound/pci/hda/hda_auto_parser.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_generic.h
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/soc/intel/boards/bytcht_cx2072x.c
sound/soc/intel/boards/cht_bsw_max98090_ti.c
sound/soc/intel/boards/cht_bsw_rt5672.c
sound/usb/line6/pcm.c
sound/usb/mixer.c
sound/usb/mixer_quirks.c
sound/usb/pcm.c
tools/bpf/bpftool/prog.c
tools/power/x86/turbostat/Makefile
tools/power/x86/turbostat/turbostat.c
tools/power/x86/x86_energy_perf_policy/Makefile
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
tools/testing/selftests/arm64/.gitignore [new file with mode: 0644]
tools/testing/selftests/arm64/Makefile [new file with mode: 0644]
tools/testing/selftests/arm64/run_tags_test.sh [new file with mode: 0755]
tools/testing/selftests/arm64/tags_test.c [new file with mode: 0644]
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/config
tools/testing/selftests/bpf/test_btf_dump.c
tools/testing/selftests/bpf/test_cgroup_storage.c
tools/testing/selftests/bpf/test_sock.c
tools/testing/selftests/cgroup/test_freezer.c
tools/testing/selftests/net/fib_nexthops.sh
tools/testing/selftests/net/xfrm_policy.sh
tools/testing/selftests/pidfd/.gitignore
tools/testing/selftests/pidfd/Makefile
tools/testing/selftests/pidfd/pidfd.h
tools/testing/selftests/pidfd/pidfd_open_test.c
tools/testing/selftests/pidfd/pidfd_poll_test.c [new file with mode: 0644]
tools/testing/selftests/pidfd/pidfd_test.c
tools/testing/selftests/pidfd/pidfd_wait.c [new file with mode: 0644]
tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
virt/kvm/arm/vgic/vgic-mmio.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic.c

diff --git a/.clang-format b/.clang-format
index 2ffd69afc1a8280654a8763e044e27e08be7a69f..196ca317bd1f24ad57ad9ded549bc0b7994d8111 100644 (file)
@@ -107,10 +107,13 @@ ForEachMacros:
   - 'css_for_each_descendant_post'
   - 'css_for_each_descendant_pre'
   - 'device_for_each_child_node'
+  - 'dma_fence_chain_for_each'
   - 'drm_atomic_crtc_for_each_plane'
   - 'drm_atomic_crtc_state_for_each_plane'
   - 'drm_atomic_crtc_state_for_each_plane_state'
   - 'drm_atomic_for_each_plane_damage'
+  - 'drm_client_for_each_connector_iter'
+  - 'drm_client_for_each_modeset'
   - 'drm_connector_for_each_possible_encoder'
   - 'drm_for_each_connector_iter'
   - 'drm_for_each_crtc'
@@ -126,6 +129,7 @@ ForEachMacros:
   - 'drm_mm_for_each_node_in_range'
   - 'drm_mm_for_each_node_safe'
   - 'flow_action_for_each'
+  - 'for_each_active_dev_scope'
   - 'for_each_active_drhd_unit'
   - 'for_each_active_iommu'
   - 'for_each_available_child_of_node'
@@ -153,6 +157,8 @@ ForEachMacros:
   - 'for_each_cpu_not'
   - 'for_each_cpu_wrap'
   - 'for_each_dev_addr'
+  - 'for_each_dev_scope'
+  - 'for_each_displayid_db'
   - 'for_each_dma_cap_mask'
   - 'for_each_dpcm_be'
   - 'for_each_dpcm_be_rollback'
@@ -169,6 +175,8 @@ ForEachMacros:
   - 'for_each_evictable_lru'
   - 'for_each_fib6_node_rt_rcu'
   - 'for_each_fib6_walker_rt'
+  - 'for_each_free_mem_pfn_range_in_zone'
+  - 'for_each_free_mem_pfn_range_in_zone_from'
   - 'for_each_free_mem_range'
   - 'for_each_free_mem_range_reverse'
   - 'for_each_func_rsrc'
@@ -178,6 +186,7 @@ ForEachMacros:
   - 'for_each_ip_tunnel_rcu'
   - 'for_each_irq_nr'
   - 'for_each_link_codecs'
+  - 'for_each_link_platforms'
   - 'for_each_lru'
   - 'for_each_matching_node'
   - 'for_each_matching_node_and_match'
@@ -302,7 +311,10 @@ ForEachMacros:
   - 'ide_port_for_each_present_dev'
   - 'idr_for_each_entry'
   - 'idr_for_each_entry_continue'
+  - 'idr_for_each_entry_continue_ul'
   - 'idr_for_each_entry_ul'
+  - 'in_dev_for_each_ifa_rcu'
+  - 'in_dev_for_each_ifa_rtnl'
   - 'inet_bind_bucket_for_each'
   - 'inet_lhash2_for_each_icsk_rcu'
   - 'key_for_each'
@@ -343,8 +355,6 @@ ForEachMacros:
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
   - 'media_device_for_each_pad'
-  - 'mp_bvec_for_each_page'
-  - 'mp_bvec_for_each_segment'
   - 'nanddev_io_for_each_page'
   - 'netdev_for_each_lower_dev'
   - 'netdev_for_each_lower_private'
@@ -381,18 +391,19 @@ ForEachMacros:
   - 'radix_tree_for_each_slot'
   - 'radix_tree_for_each_tagged'
   - 'rbtree_postorder_for_each_entry_safe'
+  - 'rdma_for_each_block'
   - 'rdma_for_each_port'
   - 'resource_list_for_each_entry'
   - 'resource_list_for_each_entry_safe'
   - 'rhl_for_each_entry_rcu'
   - 'rhl_for_each_rcu'
   - 'rht_for_each'
-  - 'rht_for_each_from'
   - 'rht_for_each_entry'
   - 'rht_for_each_entry_from'
   - 'rht_for_each_entry_rcu'
   - 'rht_for_each_entry_rcu_from'
   - 'rht_for_each_entry_safe'
+  - 'rht_for_each_from'
   - 'rht_for_each_rcu'
   - 'rht_for_each_rcu_from'
   - '__rq_for_each_bio'
index acba1a6163f1b1e86ab78066b52d2e86887d9314..afaad605284a8be6b9aa96f0157716fdf869453b 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -64,6 +64,9 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
 Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
 Domen Puncer <domen@coderock.org>
 Douglas Gilbert <dougg@torque.net>
 Ed L. Cashin <ecashin@coraid.com>
@@ -160,6 +163,8 @@ Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.co
 Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
 Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
 Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
+Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com>
+Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 4c1971960afa30484cfe0d2533e9cf28d003662c..d31ffa110461156a8abdf2703514e57e3f42beb9 100644 (file)
                        Note that using this option lowers the security
                        provided by tboot because it makes the system
                        vulnerable to DMA attacks.
+               nobounce [Default off]
+                       Disable bounce buffer for untrusted devices such as
+                       the Thunderbolt devices. This will treat the untrusted
+                       devices as the trusted ones, hence might expose security
+                       risks of DMA attacks.
 
        intel_idle.max_cstate=  [KNL,HW,ACPI,X86]
                        0       disables intel_idle and fall back on acpi_idle.
                          synchronously.
 
        iommu.passthrough=
-                       [ARM64] Configure DMA to bypass the IOMMU by default.
+                       [ARM64, X86] Configure DMA to bypass the IOMMU by default.
                        Format: { "0" | "1" }
                        0 - Use IOMMU translation for DMA.
                        1 - Bypass the IOMMU for DMA.
diff --git a/Documentation/admin-guide/perf/imx-ddr.rst b/Documentation/admin-guide/perf/imx-ddr.rst
new file mode 100644 (file)
index 0000000..517a205
--- /dev/null
@@ -0,0 +1,52 @@
+=====================================================
+Freescale i.MX8 DDR Performance Monitoring Unit (PMU)
+=====================================================
+
+There are no performance counters inside the DRAM controller, so performance
+signals are brought out to the edge of the controller where a set of 4 x 32 bit
+counters is implemented. This is controlled by the CSV modes programmed in counter
+control register which causes a large number of PERF signals to be generated.
+
+Selection of the value for each counter is done via the config registers. There
+is one register for each counter. Counter 0 is special in that it always counts
+“time” and when expired causes a lock on itself and the other counters and an
+interrupt is raised. If any other counter overflows, it continues counting, and
+no interrupt is raised.
+
+The "format" directory describes format of the config (event ID) and config1
+(AXI filtering) fields of the perf_event_attr structure, see /sys/bus/event_source/
+devices/imx8_ddr0/format/. The "events" directory describes the events types
+hardware supported that can be used with perf tool, see /sys/bus/event_source/
+devices/imx8_ddr0/events/.
+  e.g.::
+        perf stat -a -e imx8_ddr0/cycles/ cmd
+        perf stat -a -e imx8_ddr0/read/,imx8_ddr0/write/ cmd
+
+AXI filtering is only used by CSV modes 0x41 (axid-read) and 0x42 (axid-write)
+to count reading or writing matches filter setting. Filter setting is various
+from different DRAM controller implementations, which is distinguished by quirks
+in the driver.
+
+* With DDR_CAP_AXI_ID_FILTER quirk.
+  Filter is defined with two configuration parts:
+  --AXI_ID defines AxID matching value.
+  --AXI_MASKING defines which bits of AxID are meaningful for the matching.
+        0:corresponding bit is masked.
+        1: corresponding bit is not masked, i.e. used to do the matching.
+
+  AXI_ID and AXI_MASKING are mapped on DPCR1 register in performance counter.
+  When non-masked bits are matching corresponding AXI_ID bits then counter is
+  incremented. Perf counter is incremented if
+          AxID && AXI_MASKING == AXI_ID && AXI_MASKING
+
+  This filter doesn't support filter different AXI ID for axid-read and axid-write
+  event at the same time as this filter is shared between counters.
+  e.g.::
+        perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd
+        perf stat -a -e imx8_ddr0/axid-write,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd
+
+  NOTE: axi_mask is inverted in userspace(i.e. set bits are bits to mask), and
+  it will be reverted in driver automatically. so that the user can just specify
+  axi_id to monitor a specific id, rather than having to specify axi_mask.
+  e.g.::
+        perf stat -a -e imx8_ddr0/axid-read,axi_id=0x12/ cmd, which will monitor ARID=0x12
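
The perf(1) invocations above are the interface the patch documents. As an illustrative aside that is not part of the commit, the same counters can also be driven through the raw perf_event_open() syscall. The sketch below assumes the sysfs layout described above (/sys/bus/event_source/devices/imx8_ddr0/...), reads the dynamically assigned PMU type and the "cycles" event encoding from sysfs rather than hard-coding them, and opens the event system-wide (pid = -1 plus an explicit CPU, as uncore-style PMUs require). Running it typically needs root or CAP_PERFMON.

/* Illustrative sketch (not part of this commit): count imx8_ddr0 cycles
 * via perf_event_open(), assuming the sysfs paths documented above. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        unsigned long long config, count;
        unsigned int type;
        FILE *f;
        int fd;

        /* The PMU type id is assigned at probe time; read it from sysfs. */
        f = fopen("/sys/bus/event_source/devices/imx8_ddr0/type", "r");
        if (!f || fscanf(f, "%u", &type) != 1)
                return 1;
        fclose(f);

        /* The "cycles" encoding comes from the events directory ("event=0x.."). */
        f = fopen("/sys/bus/event_source/devices/imx8_ddr0/events/cycles", "r");
        if (!f || fscanf(f, "event=%llx", &config) != 1)
                return 1;
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;
        attr.config = config;

        /* Uncore-style PMU: open system-wide, pid == -1 and a specific CPU. */
        fd = perf_event_open(&attr, -1, 0, -1, 0);
        if (fd < 0)
                return 1;

        sleep(1);

        if (read(fd, &count, sizeof(count)) != sizeof(count))
                return 1;
        printf("imx8_ddr0 cycles over ~1s: %llu\n", count);
        close(fd);
        return 0;
}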
diff --git a/Documentation/arm64/index.rst b/Documentation/arm64/index.rst
index 96b696ba4e6cb70a2411a3876f4b2f828743975c..5c0c69dc58aa3887e52453f3630afdb9ed8e2bd1 100644 (file)
@@ -16,6 +16,7 @@ ARM64 Architecture
     pointer-authentication
     silicon-errata
     sve
+    tagged-address-abi
     tagged-pointers
 
 .. only::  subproject and html
diff --git a/Documentation/arm64/kasan-offsets.sh b/Documentation/arm64/kasan-offsets.sh
new file mode 100644 (file)
index 0000000..2b7a021
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+# Print out the KASAN_SHADOW_OFFSETS required to place the KASAN SHADOW
+# start address at the mid-point of the kernel VA space
+
+print_kasan_offset () {
+       printf "%02d\t" $1
+       printf "0x%08x00000000\n" $(( (0xffffffff & (-1 << ($1 - 1 - 32))) \
+                       + (1 << ($1 - 32 - $2)) \
+                       - (1 << (64 - 32 - $2)) ))
+}
+
+echo KASAN_SHADOW_SCALE_SHIFT = 3
+printf "VABITS\tKASAN_SHADOW_OFFSET\n"
+print_kasan_offset 48 3
+print_kasan_offset 47 3
+print_kasan_offset 42 3
+print_kasan_offset 39 3
+print_kasan_offset 36 3
+echo
+echo KASAN_SHADOW_SCALE_SHIFT = 4
+printf "VABITS\tKASAN_SHADOW_OFFSET\n"
+print_kasan_offset 48 4
+print_kasan_offset 47 4
+print_kasan_offset 42 4
+print_kasan_offset 39 4
+print_kasan_offset 36 4
diff --git a/Documentation/arm64/memory.rst b/Documentation/arm64/memory.rst
index 464b880fc4b7c2966a6c923241b3acb709caccff..b040909e45f81d7ab9108cfd1286338fdd0d1471 100644 (file)
@@ -14,6 +14,10 @@ with the 4KB page configuration, allowing 39-bit (512GB) or 48-bit
 64KB pages, only 2 levels of translation tables, allowing 42-bit (4TB)
 virtual address, are used but the memory layout is the same.
 
+ARMv8.2 adds optional support for Large Virtual Address space. This is
+only available when running with a 64KB page size and expands the
+number of descriptors in the first level of translation.
+
 User addresses have bits 63:48 set to 0 while the kernel addresses have
 the same bits set to 1. TTBRx selection is given by bit 63 of the
 virtual address. The swapper_pg_dir contains only kernel (global)
@@ -22,40 +26,43 @@ The swapper_pg_dir address is written to TTBR1 and never written to
 TTBR0.
 
 
-AArch64 Linux memory layout with 4KB pages + 3 levels::
-
-  Start                        End                     Size            Use
-  -----------------------------------------------------------------------
-  0000000000000000     0000007fffffffff         512GB          user
-  ffffff8000000000     ffffffffffffffff         512GB          kernel
-
-
-AArch64 Linux memory layout with 4KB pages + 4 levels::
+AArch64 Linux memory layout with 4KB pages + 4 levels (48-bit)::
 
   Start                        End                     Size            Use
   -----------------------------------------------------------------------
   0000000000000000     0000ffffffffffff         256TB          user
-  ffff000000000000     ffffffffffffffff         256TB          kernel
-
-
-AArch64 Linux memory layout with 64KB pages + 2 levels::
+  ffff000000000000     ffff7fffffffffff         128TB          kernel logical memory map
+  ffff800000000000     ffff9fffffffffff          32TB          kasan shadow region
+  ffffa00000000000     ffffa00007ffffff         128MB          bpf jit region
+  ffffa00008000000     ffffa0000fffffff         128MB          modules
+  ffffa00010000000     fffffdffbffeffff         ~93TB          vmalloc
+  fffffdffbfff0000     fffffdfffe5f8fff        ~998MB          [guard region]
+  fffffdfffe5f9000     fffffdfffe9fffff        4124KB          fixed mappings
+  fffffdfffea00000     fffffdfffebfffff           2MB          [guard region]
+  fffffdfffec00000     fffffdffffbfffff          16MB          PCI I/O space
+  fffffdffffc00000     fffffdffffdfffff           2MB          [guard region]
+  fffffdffffe00000     ffffffffffdfffff           2TB          vmemmap
+  ffffffffffe00000     ffffffffffffffff           2MB          [guard region]
+
+
+AArch64 Linux memory layout with 64KB pages + 3 levels (52-bit with HW support)::
 
   Start                        End                     Size            Use
   -----------------------------------------------------------------------
-  0000000000000000     000003ffffffffff           4TB          user
-  fffffc0000000000     ffffffffffffffff           4TB          kernel
-
-
-AArch64 Linux memory layout with 64KB pages + 3 levels::
-
-  Start                        End                     Size            Use
-  -----------------------------------------------------------------------
-  0000000000000000     0000ffffffffffff         256TB          user
-  ffff000000000000     ffffffffffffffff         256TB          kernel
-
-
-For details of the virtual kernel memory layout please see the kernel
-booting log.
+  0000000000000000     000fffffffffffff           4PB          user
+  fff0000000000000     fff7ffffffffffff           2PB          kernel logical memory map
+  fff8000000000000     fffd9fffffffffff        1440TB          [gap]
+  fffda00000000000     ffff9fffffffffff         512TB          kasan shadow region
+  ffffa00000000000     ffffa00007ffffff         128MB          bpf jit region
+  ffffa00008000000     ffffa0000fffffff         128MB          modules
+  ffffa00010000000     fffff81ffffeffff         ~88TB          vmalloc
+  fffff81fffff0000     fffffc1ffe58ffff          ~3TB          [guard region]
+  fffffc1ffe590000     fffffc1ffe9fffff        4544KB          fixed mappings
+  fffffc1ffea00000     fffffc1ffebfffff           2MB          [guard region]
+  fffffc1ffec00000     fffffc1fffbfffff          16MB          PCI I/O space
+  fffffc1fffc00000     fffffc1fffdfffff           2MB          [guard region]
+  fffffc1fffe00000     ffffffffffdfffff        3968GB          vmemmap
+  ffffffffffe00000     ffffffffffffffff           2MB          [guard region]
 
 
 Translation table lookup with 4KB pages::
@@ -83,7 +90,8 @@ Translation table lookup with 64KB pages::
    |                 |    |               |            [15:0]  in-page offset
    |                 |    |               +----------> [28:16] L3 index
    |                 |    +--------------------------> [41:29] L2 index
-   |                 +-------------------------------> [47:42] L1 index
+   |                 +-------------------------------> [47:42] L1 index (48-bit)
+   |                                                   [51:42] L1 index (52-bit)
    +-------------------------------------------------> [63] TTBR0/1
 
 
@@ -96,3 +104,62 @@ ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
 
 When using KVM with the Virtualization Host Extensions, no additional
 mappings are created, since the host kernel runs directly in EL2.
+
+52-bit VA support in the kernel
+-------------------------------
+If the ARMv8.2-LVA optional feature is present, and we are running
+with a 64KB page size; then it is possible to use 52-bits of address
+space for both userspace and kernel addresses. However, any kernel
+binary that supports 52-bit must also be able to fall back to 48-bit
+at early boot time if the hardware feature is not present.
+
+This fallback mechanism necessitates the kernel .text to be in the
+higher addresses such that they are invariant to 48/52-bit VAs. Due
+to the kasan shadow being a fraction of the entire kernel VA space,
+the end of the kasan shadow must also be in the higher half of the
+kernel VA space for both 48/52-bit. (Switching from 48-bit to 52-bit,
+the end of the kasan shadow is invariant and dependent on ~0UL,
+whilst the start address will "grow" towards the lower addresses).
+
+In order to optimise phys_to_virt and virt_to_phys, the PAGE_OFFSET
+is kept constant at 0xFFF0000000000000 (corresponding to 52-bit),
+this obviates the need for an extra variable read. The physvirt
+offset and vmemmap offsets are computed at early boot to enable
+this logic.
+
+As a single binary will need to support both 48-bit and 52-bit VA
+spaces, the VMEMMAP must be sized large enough for 52-bit VAs and
+also must be sized large enough to accommodate a fixed PAGE_OFFSET.
+
+Most code in the kernel should not need to consider the VA_BITS, for
+code that does need to know the VA size the variables are
+defined as follows:
+
+VA_BITS                constant        the *maximum* VA space size
+
+VA_BITS_MIN    constant        the *minimum* VA space size
+
+vabits_actual  variable        the *actual* VA space size
+
+
+Maximum and minimum sizes can be useful to ensure that buffers are
+sized large enough or that addresses are positioned close enough for
+the "worst" case.
+
+52-bit userspace VAs
+--------------------
+To maintain compatibility with software that relies on the ARMv8.0
+VA space maximum size of 48-bits, the kernel will, by default,
+return virtual addresses to userspace from a 48-bit range.
+
+Software can "opt-in" to receiving VAs from a 52-bit space by
+specifying an mmap hint parameter that is larger than 48-bit.
+For example:
+    maybe_high_address = mmap(~0UL, size, prot, flags,...);
+
+It is also possible to build a debug kernel that returns addresses
+from a 52-bit space by enabling the following kernel config options:
+   CONFIG_EXPERT=y && CONFIG_ARM64_FORCE_52BIT=y
+
+Note that this option is only intended for debugging applications
+and should not be used in production.
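
As a brief editorial illustration of the 52-bit userspace opt-in described at the end of the document above (not part of the commit), the sketch below passes an out-of-range hint to mmap() exactly as the text suggests and then checks whether the returned address lies above the 48-bit boundary. Using the comparison against 1UL << 48 as a runtime probe is an assumption that follows from the default 48-bit behaviour described above; the program degrades gracefully on kernels or CPUs without ARMv8.2-LVA.

/* Illustrative sketch (not part of this commit): opt in to 52-bit user VAs
 * on arm64 by passing an mmap() hint above the default 48-bit range. */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t size = sysconf(_SC_PAGE_SIZE);
        void *hint = (void *)(~0UL);    /* any hint above 2^48 opts in */
        void *p;

        p = mmap(hint, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        if ((unsigned long)p >= (1UL << 48))
                printf("52-bit VA space available, mapping at %p\n", p);
        else
                printf("48-bit VA space only, mapping at %p\n", p);

        munmap(p, size);
        return 0;
}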
diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst
new file mode 100644 (file)
index 0000000..d4a85d5
--- /dev/null
@@ -0,0 +1,156 @@
+==========================
+AArch64 TAGGED ADDRESS ABI
+==========================
+
+Authors: Vincenzo Frascino <vincenzo.frascino@arm.com>
+         Catalin Marinas <catalin.marinas@arm.com>
+
+Date: 21 August 2019
+
+This document describes the usage and semantics of the Tagged Address
+ABI on AArch64 Linux.
+
+1. Introduction
+---------------
+
+On AArch64 the ``TCR_EL1.TBI0`` bit is set by default, allowing
+userspace (EL0) to perform memory accesses through 64-bit pointers with
+a non-zero top byte. This document describes the relaxation of the
+syscall ABI that allows userspace to pass certain tagged pointers to
+kernel syscalls.
+
+2. AArch64 Tagged Address ABI
+-----------------------------
+
+From the kernel syscall interface perspective and for the purposes of
+this document, a "valid tagged pointer" is a pointer with a potentially
+non-zero top-byte that references an address in the user process address
+space obtained in one of the following ways:
+
+- ``mmap()`` syscall where either:
+
+  - flags have the ``MAP_ANONYMOUS`` bit set or
+  - the file descriptor refers to a regular file (including those
+    returned by ``memfd_create()``) or ``/dev/zero``
+
+- ``brk()`` syscall (i.e. the heap area between the initial location of
+  the program break at process creation and its current location).
+
+- any memory mapped by the kernel in the address space of the process
+  during creation and with the same restrictions as for ``mmap()`` above
+  (e.g. data, bss, stack).
+
+The AArch64 Tagged Address ABI has two stages of relaxation depending
+how the user addresses are used by the kernel:
+
+1. User addresses not accessed by the kernel but used for address space
+   management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use
+   of valid tagged pointers in this context is always allowed.
+
+2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
+   relaxation is disabled by default and the application thread needs to
+   explicitly enable it via ``prctl()`` as follows:
+
+   - ``PR_SET_TAGGED_ADDR_CTRL``: enable or disable the AArch64 Tagged
+     Address ABI for the calling thread.
+
+     The ``(unsigned int) arg2`` argument is a bit mask describing the
+     control mode used:
+
+     - ``PR_TAGGED_ADDR_ENABLE``: enable AArch64 Tagged Address ABI.
+       Default status is disabled.
+
+     Arguments ``arg3``, ``arg4``, and ``arg5`` must be 0.
+
+   - ``PR_GET_TAGGED_ADDR_CTRL``: get the status of the AArch64 Tagged
+     Address ABI for the calling thread.
+
+     Arguments ``arg2``, ``arg3``, ``arg4``, and ``arg5`` must be 0.
+
+   The ABI properties described above are thread-scoped, inherited on
+   clone() and fork() and cleared on exec().
+
+   Calling ``prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)``
+   returns ``-EINVAL`` if the AArch64 Tagged Address ABI is globally
+   disabled by ``sysctl abi.tagged_addr_disabled=1``. The default
+   ``sysctl abi.tagged_addr_disabled`` configuration is 0.
+
+When the AArch64 Tagged Address ABI is enabled for a thread, the
+following behaviours are guaranteed:
+
+- All syscalls except the cases mentioned in section 3 can accept any
+  valid tagged pointer.
+
+- The syscall behaviour is undefined for invalid tagged pointers: it may
+  result in an error code being returned, a (fatal) signal being raised,
+  or other modes of failure.
+
+- The syscall behaviour for a valid tagged pointer is the same as for
+  the corresponding untagged pointer.
+
+
+A definition of the meaning of tagged pointers on AArch64 can be found
+in Documentation/arm64/tagged-pointers.rst.
+
+3. AArch64 Tagged Address ABI Exceptions
+-----------------------------------------
+
+The following system call parameters must be untagged regardless of the
+ABI relaxation:
+
+- ``prctl()`` other than pointers to user data either passed directly or
+  indirectly as arguments to be accessed by the kernel.
+
+- ``ioctl()`` other than pointers to user data either passed directly or
+  indirectly as arguments to be accessed by the kernel.
+
+- ``shmat()`` and ``shmdt()``.
+
+Any attempt to use non-zero tagged pointers may result in an error code
+being returned, a (fatal) signal being raised, or other modes of
+failure.
+
+4. Example of correct usage
+---------------------------
+.. code-block:: c
+
+   #include <stdlib.h>
+   #include <string.h>
+   #include <unistd.h>
+   #include <sys/mman.h>
+   #include <sys/prctl.h>
+   
+   #define PR_SET_TAGGED_ADDR_CTRL     55
+   #define PR_TAGGED_ADDR_ENABLE       (1UL << 0)
+   
+   #define TAG_SHIFT           56
+   
+   int main(void)
+   {
+       int tbi_enabled = 0;
+       unsigned long tag = 0;
+       char *ptr;
+   
+       /* check/enable the tagged address ABI */
+       if (!prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
+               tbi_enabled = 1;
+   
+       /* memory allocation */
+       ptr = mmap(NULL, sysconf(_SC_PAGE_SIZE), PROT_READ | PROT_WRITE,
+                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+       if (ptr == MAP_FAILED)
+               return 1;
+   
+       /* set a non-zero tag if the ABI is available */
+       if (tbi_enabled)
+               tag = rand() & 0xff;
+       ptr = (char *)((unsigned long)ptr | (tag << TAG_SHIFT));
+   
+       /* memory access to a tagged address */
+       strcpy(ptr, "tagged pointer\n");
+   
+       /* syscall with a tagged pointer */
+       write(1, ptr, strlen(ptr));
+   
+       return 0;
+   }
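
The example in section 4 above only exercises PR_SET_TAGGED_ADDR_CTRL. As a small editorial complement, not part of the commit, the sketch below queries the current state with PR_GET_TAGGED_ADDR_CTRL, which the text says takes zero for arg2..arg5 and is thread-scoped. The numeric value 56 for PR_GET_TAGGED_ADDR_CTRL is not quoted in the patch and is an assumption taken from the kernel's prctl numbering (PR_SET_TAGGED_ADDR_CTRL is 55); prefer the definitions from <linux/prctl.h> where available.

/* Illustrative sketch (not part of this commit): query whether the
 * AArch64 Tagged Address ABI is enabled for the calling thread. */
#include <stdio.h>
#include <sys/prctl.h>

/* Fallbacks for older headers; values 55/56 are assumed from kernel uapi. */
#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL 55
#endif
#ifndef PR_GET_TAGGED_ADDR_CTRL
#define PR_GET_TAGGED_ADDR_CTRL 56
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
#define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
#endif

int main(void)
{
        int ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);

        if (ctrl < 0) {
                /* kernel too old, not arm64, or ABI globally disabled */
                perror("PR_GET_TAGGED_ADDR_CTRL");
                return 1;
        }

        printf("tagged address ABI is %s for this thread\n",
               (ctrl & PR_TAGGED_ADDR_ENABLE) ? "enabled" : "disabled");
        return 0;
}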
diff --git a/Documentation/arm64/tagged-pointers.rst b/Documentation/arm64/tagged-pointers.rst
index 2acdec3ebbeb869c9f93536152364e75232eb45d..eab4323609b97fd8dafa962404713aab8c8c04e9 100644 (file)
@@ -20,7 +20,9 @@ Passing tagged addresses to the kernel
 --------------------------------------
 
 All interpretation of userspace memory addresses by the kernel assumes
-an address tag of 0x00.
+an address tag of 0x00, unless the application enables the AArch64
+Tagged Address ABI explicitly
+(Documentation/arm64/tagged-address-abi.rst).
 
 This includes, but is not limited to, addresses found in:
 
@@ -33,13 +35,15 @@ This includes, but is not limited to, addresses found in:
  - the frame pointer (x29) and frame records, e.g. when interpreting
    them to generate a backtrace or call graph.
 
-Using non-zero address tags in any of these locations may result in an
-error code being returned, a (fatal) signal being raised, or other modes
-of failure.
+Using non-zero address tags in any of these locations when the
+userspace application did not enable the AArch64 Tagged Address ABI may
+result in an error code being returned, a (fatal) signal being raised,
+or other modes of failure.
 
-For these reasons, passing non-zero address tags to the kernel via
-system calls is forbidden, and using a non-zero address tag for sp is
-strongly discouraged.
+For these reasons, when the AArch64 Tagged Address ABI is disabled,
+passing non-zero address tags to the kernel via system calls is
+forbidden, and using a non-zero address tag for sp is strongly
+discouraged.
 
 Programs maintaining a frame pointer and frame records that use non-zero
 address tags may suffer impaired or inaccurate debug and profiling
@@ -59,6 +63,9 @@ be preserved.
 The architecture prevents the use of a tagged PC, so the upper byte will
 be set to a sign-extension of bit 55 on exception return.
 
+This behaviour is maintained when the AArch64 Tagged Address ABI is
+enabled.
+
 
 Other considerations
 --------------------
diff --git a/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt b/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
index 7e9b586770b0599e7c4e88b45d03e692b46e9a78..b2033fc3a71a6e0988cdd7dab1b5b42ff49649c2 100644 (file)
@@ -2,7 +2,8 @@ Aspeed GPIO controller Device Tree Bindings
 -------------------------------------------
 
 Required properties:
-- compatible           : Either "aspeed,ast2400-gpio" or "aspeed,ast2500-gpio"
+- compatible           : Either "aspeed,ast2400-gpio", "aspeed,ast2500-gpio",
+                                       or "aspeed,ast2600-gpio".
 
 - #gpio-cells          : Should be two
                          - First cell is the GPIO line number
@@ -17,7 +18,9 @@ Required properties:
 
 Optional properties:
 
-- clocks                : A phandle to the clock to use for debounce timings
+- clocks               : A phandle to the clock to use for debounce timings
+- ngpios               : Number of GPIOs controlled by this controller. Should be set
+                                 when there are multiple GPIO controllers on a SoC (ast2600).
 
 The gpio and interrupt properties are further described in their respective
 bindings documentation:
index bc6b4b62df839ec8fff9b5fd44c3e0bc7094379e..cd91d61eac31492bcb8ce34d92a9a3912105bb1b 100644 (file)
@@ -6,6 +6,7 @@ Required Properties:
                                                66AK2E SoCs
                        "ti,k2g-gpio", "ti,keystone-gpio": for 66AK2G
                        "ti,am654-gpio", "ti,keystone-gpio": for TI K3 AM654
+                       "ti,j721e-gpio", "ti,keystone-gpio": for J721E SoCs
 
 - reg: Physical base address of the controller and the size of memory mapped
        registers.
index 69d46162d0f5ba385da868a0ce9240e2b34b06ee..cd28e932bf50e45b0c1ee554f848d6b597a1a499 100644 (file)
@@ -4,7 +4,7 @@ Required properties:
 - compatible : Should be "fsl,<soc>-gpio"
   The following <soc>s are known to be supported:
        mpc5121, mpc5125, mpc8349, mpc8572, mpc8610, pq3, qoriq,
-       ls1021a, ls1043a, ls2080a.
+       ls1021a, ls1043a, ls2080a, ls1028a, ls1088a.
 - reg : Address and length of the register set for the device
 - interrupts : Should be the port interrupt shared by all 32 pins.
 - #gpio-cells : Should be two.  The first cell is the pin number and
@@ -37,3 +37,17 @@ gpio0: gpio@2300000 {
        interrupt-controller;
        #interrupt-cells = <2>;
 };
+
+
+Example of gpio-controller node for a ls1028a/ls1088a SoC:
+
+gpio1: gpio@2300000 {
+       compatible = "fsl,ls1028a-gpio", "fsl,ls1088a-gpio", "fsl,qoriq-gpio";
+       reg = <0x0 0x2300000 0x0 0x10000>;
+       interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+       gpio-controller;
+       #gpio-cells = <2>;
+       interrupt-controller;
+       #interrupt-cells = <2>;
+       little-endian;
+};
diff --git a/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt b/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt
new file mode 100644 (file)
index 0000000..d4d8391
--- /dev/null
@@ -0,0 +1,45 @@
+Aspeed SGPIO controller Device Tree Bindings
+--------------------------------------------
+
+This SGPIO controller is for the ASPEED AST2500 SoC; it supports up to 80
+full-featured Serial GPIOs. Each of the Serial GPIO pins can be programmed to
+support the following options:
+- Support interrupt option for each input port and various interrupt
+  sensitivity option (level-high, level-low, edge-high, edge-low)
+- Support reset tolerance option for each output port
+- Directly connected to APB bus and its shift clock is from APB bus clock
+  divided by a programmable value.
+- Co-work with external signal-chained TTL components (74LV165/74LV595)
+
+Required properties:
+
+- compatible : Should be one of
+  "aspeed,ast2400-sgpio", "aspeed,ast2500-sgpio"
+- #gpio-cells : Should be 2, see gpio.txt
+- reg : Address and length of the register set for the device
+- gpio-controller : Marks the device node as a GPIO controller
+- interrupts : Interrupt specifier, see interrupt-controller/interrupts.txt
+- interrupt-controller : Mark the GPIO controller as an interrupt-controller
+- ngpios : number of GPIO lines, see gpio.txt
+  (should be multiple of 8, up to 80 pins)
+- clocks : A phandle to the APB clock for SGPM clock division
+- bus-frequency : SGPM CLK frequency
+
+The sgpio and interrupt properties are further described in their respective
+bindings documentation:
+
+- Documentation/devicetree/bindings/gpio/gpio.txt
+- Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+
+  Example:
+       sgpio: sgpio@1e780200 {
+               #gpio-cells = <2>;
+               compatible = "aspeed,ast2500-sgpio";
+               gpio-controller;
+               interrupts = <40>;
+               reg = <0x1e780200 0x0100>;
+               clocks = <&syscon ASPEED_CLK_APB>;
+               interrupt-controller;
+               ngpios = <8>;
+               bus-frequency = <12000000>;
+       };
diff --git a/Documentation/devicetree/bindings/hwmon/ads1015.txt b/Documentation/devicetree/bindings/hwmon/ads1015.txt
deleted file mode 100644 (file)
index 918a507..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-ADS1015 (I2C)
-
-This device is a 12-bit A-D converter with 4 inputs.
-
-The inputs can be used single ended or in certain differential combinations.
-
-For configuration all possible combinations are mapped to 8 channels:
-  0: Voltage over AIN0 and AIN1.
-  1: Voltage over AIN0 and AIN3.
-  2: Voltage over AIN1 and AIN3.
-  3: Voltage over AIN2 and AIN3.
-  4: Voltage over AIN0 and GND.
-  5: Voltage over AIN1 and GND.
-  6: Voltage over AIN2 and GND.
-  7: Voltage over AIN3 and GND.
-
-Each channel can be configured individually:
- - pga is the programmable gain amplifier (values are full scale)
-    0: +/- 6.144 V
-    1: +/- 4.096 V
-    2: +/- 2.048 V (default)
-    3: +/- 1.024 V
-    4: +/- 0.512 V
-    5: +/- 0.256 V
- - data_rate in samples per second
-    0: 128
-    1: 250
-    2: 490
-    3: 920
-    4: 1600 (default)
-    5: 2400
-    6: 3300
-
-1) The /ads1015 node
-
-  Required properties:
-
-   - compatible : must be "ti,ads1015"
-   - reg : I2C bus address of the device
-   - #address-cells : must be <1>
-   - #size-cells : must be <0>
-
-  The node contains child nodes for each channel that the platform uses.
-
-  Example ADS1015 node:
-
-    ads1015@49 {
-           compatible = "ti,ads1015";
-           reg = <0x49>;
-           #address-cells = <1>;
-           #size-cells = <0>;
-
-           [ child node definitions... ]
-    }
-
-2) channel nodes
-
-  Required properties:
-
-   - reg : the channel number
-
-  Optional properties:
-
-   - ti,gain : the programmable gain amplifier setting
-   - ti,datarate : the converter data rate
-
-  Example ADS1015 channel node:
-
-    channel@4 {
-           reg = <4>;
-           ti,gain = <3>;
-           ti,datarate = <5>;
-    };
diff --git a/Documentation/devicetree/bindings/hwmon/as370.txt b/Documentation/devicetree/bindings/hwmon/as370.txt
new file mode 100644 (file)
index 0000000..d102fe7
--- /dev/null
@@ -0,0 +1,11 @@
+Bindings for Synaptics AS370 PVT sensors
+
+Required properties:
+- compatible : "syna,as370-hwmon"
+- reg        : address and length of the register set.
+
+Example:
+       hwmon@ea0810 {
+               compatible = "syna,as370-hwmon";
+               reg = <0xea0810 0xc>;
+       };
index f68a0a68fc524cdc1ee547313892a34e78248800..1036f65fb778eeb54d6e600d0722c5c6a1746680 100644 (file)
@@ -1,8 +1,10 @@
-Device-tree bindings for IBM Common Form Factor Power Supply Version 1
-----------------------------------------------------------------------
+Device-tree bindings for IBM Common Form Factor Power Supply Versions 1 and 2
+-----------------------------------------------------------------------------
 
 Required properties:
- - compatible = "ibm,cffps1";
+ - compatible                          : Must be one of the following:
+                                               "ibm,cffps1"
+                                               "ibm,cffps2"
  - reg = < I2C bus address >;          : Address of the power supply on the
                                          I2C bus.
 
index 586b5ed70be7a0f2ff21e49c60ba2ae072645083..273616702c51b5f089c521dd49d734b42879aa29 100644 (file)
@@ -15,6 +15,7 @@ Required properties:
                "maxim,max31725",
                "maxim,max31726",
                "maxim,mcp980x",
+               "nxp,pct2075",
                "st,stds75",
                "st,stlm75",
                "microchip,tcn75",
diff --git a/Documentation/devicetree/bindings/iio/adc/ads1015.txt b/Documentation/devicetree/bindings/iio/adc/ads1015.txt
new file mode 100644 (file)
index 0000000..918a507
--- /dev/null
@@ -0,0 +1,73 @@
+ADS1015 (I2C)
+
+This device is a 12-bit A-D converter with 4 inputs.
+
+The inputs can be used single ended or in certain differential combinations.
+
+For configuration all possible combinations are mapped to 8 channels:
+  0: Voltage over AIN0 and AIN1.
+  1: Voltage over AIN0 and AIN3.
+  2: Voltage over AIN1 and AIN3.
+  3: Voltage over AIN2 and AIN3.
+  4: Voltage over AIN0 and GND.
+  5: Voltage over AIN1 and GND.
+  6: Voltage over AIN2 and GND.
+  7: Voltage over AIN3 and GND.
+
+Each channel can be configured individually:
+ - pga is the programmable gain amplifier (values are full scale)
+    0: +/- 6.144 V
+    1: +/- 4.096 V
+    2: +/- 2.048 V (default)
+    3: +/- 1.024 V
+    4: +/- 0.512 V
+    5: +/- 0.256 V
+ - data_rate in samples per second
+    0: 128
+    1: 250
+    2: 490
+    3: 920
+    4: 1600 (default)
+    5: 2400
+    6: 3300
+
+1) The /ads1015 node
+
+  Required properties:
+
+   - compatible : must be "ti,ads1015"
+   - reg : I2C bus address of the device
+   - #address-cells : must be <1>
+   - #size-cells : must be <0>
+
+  The node contains child nodes for each channel that the platform uses.
+
+  Example ADS1015 node:
+
+    ads1015@49 {
+           compatible = "ti,ads1015";
+           reg = <0x49>;
+           #address-cells = <1>;
+           #size-cells = <0>;
+
+           [ child node definitions... ]
+    }
+
+2) channel nodes
+
+  Required properties:
+
+   - reg : the channel number
+
+  Optional properties:
+
+   - ti,gain : the programmable gain amplifier setting
+   - ti,datarate : the converter data rate
+
+  Example ADS1015 channel node:
+
+    channel@4 {
+           reg = <4>;
+           ti,gain = <3>;
+           ti,datarate = <5>;
+    };
index 09fc02b9984577860563661d881d88f3308a489a..a5c1db95b3ecb105c31eb7890bc731340ba92a3a 100644 (file)
@@ -1,20 +1,30 @@
 * ARC-HS Interrupt Distribution Unit
 
-  This optional 2nd level interrupt controller can be used in SMP configurations for
-  dynamic IRQ routing, load balancing of common/external IRQs towards core intc.
+  This optional 2nd level interrupt controller can be used in SMP configurations
+  for dynamic IRQ routing, load balancing of common/external IRQs towards core
+  intc.
 
 Properties:
 
 - compatible: "snps,archs-idu-intc"
 - interrupt-controller: This is an interrupt controller.
-- #interrupt-cells: Must be <1>.
-
-  Value of the cell specifies the "common" IRQ from peripheral to IDU. Number N
-  of the particular interrupt line of IDU corresponds to the line N+24 of the
-  core interrupt controller.
-
-  intc accessed via the special ARC AUX register interface, hence "reg" property
-  is not specified.
+- #interrupt-cells: Must be <1> or <2>.
+
+  Value of the first cell specifies the "common" IRQ from peripheral to IDU.
+  Number N of the particular interrupt line of IDU corresponds to the line N+24
+  of the core interrupt controller.
+
+  The (optional) second cell specifies any of the following flags:
+    - bits[3:0] trigger type and level flags
+        1 = low-to-high edge triggered
+        2 = NOT SUPPORTED (high-to-low edge triggered)
+        4 = active high level-sensitive <<< DEFAULT
+        8 = NOT SUPPORTED (active low level-sensitive)
+  When no second cell is specified, the interrupt is assumed to be level
+  sensitive.
+
+  The interrupt controller is accessed via the special ARC AUX register
+  interface, hence "reg" property is not specified.
 
 Example:
        core_intc: core-interrupt-controller {
index 6922db598deface3f76f32e605499cf096060968..ce59a505f5a4c21ea288688b4d967e061274bc83 100644 (file)
@@ -11,10 +11,23 @@ ARM Short-Descriptor translation table format for address translation.
                |
               m4u (Multimedia Memory Management Unit)
                |
+          +--------+
+          |        |
+      gals0-rx   gals1-rx    (Global Async Local Sync rx)
+          |        |
+          |        |
+      gals0-tx   gals1-tx    (Global Async Local Sync tx)
+          |        |          Some SoCs may have GALS.
+          +--------+
+               |
            SMI Common(Smart Multimedia Interface Common)
                |
        +----------------+-------
        |                |
+       |             gals-rx        There may be GALS in some larbs.
+       |                |
+       |                |
+       |             gals-tx
        |                |
    SMI larb0        SMI larb1   ... SoCs have several SMI local arbiter(larb).
    (display)         (vdec)
@@ -36,6 +49,10 @@ each local arbiter.
 like display, video decode, and camera. And there are different ports
 in each larb. Take a example, There are many ports like MC, PP, VLD in the
 video decode local arbiter, all these ports are according to the video HW.
+  In some SoCs, there may be a GALS (Global Async Local Sync) module between
+smi-common and m4u, and an additional GALS module between smi-larb and
+smi-common. GALS can be seen as an "asynchronous FIFO" which helps
+synchronize modules running at different clock frequencies.
 
 Required properties:
 - compatible : must be one of the following string:
@@ -44,18 +61,25 @@ Required properties:
        "mediatek,mt7623-m4u", "mediatek,mt2701-m4u" for mt7623 which uses
                                                     generation one m4u HW.
        "mediatek,mt8173-m4u" for mt8173 which uses generation two m4u HW.
+       "mediatek,mt8183-m4u" for mt8183 which uses generation two m4u HW.
 - reg : m4u register base and size.
 - interrupts : the interrupt of m4u.
 - clocks : must contain one entry for each clock-names.
-- clock-names : must be "bclk", It is the block clock of m4u.
+- clock-names : Only 1 optional clock:
+  - "bclk": the block clock of m4u.
+  Here is the list of SoCs which require this "bclk":
+  - mt2701, mt2712, mt7623 and mt8173.
+  Note that if there is no "bclk", the m4u uses the EMI clock, which is always
+  enabled before the kernel starts.
 - mediatek,larbs : List of phandle to the local arbiters in the current Socs.
        Refer to bindings/memory-controllers/mediatek,smi-larb.txt. It must sort
        according to the local arbiter index, like larb0, larb1, larb2...
 - iommu-cells : must be 1. This is the mtk_m4u_id according to the HW.
        Specifies the mtk_m4u_id as defined in
        dt-binding/memory/mt2701-larb-port.h for mt2701, mt7623
-       dt-binding/memory/mt2712-larb-port.h for mt2712, and
-       dt-binding/memory/mt8173-larb-port.h for mt8173.
+       dt-binding/memory/mt2712-larb-port.h for mt2712,
+       dt-binding/memory/mt8173-larb-port.h for mt8173, and
+       dt-binding/memory/mt8183-larb-port.h for mt8183.
 
 Example:
        iommu: iommu@10205000 {
index e937ddd871a6bffe58f4c536eea311f4be445349..b478ade4da654e3c1206a209db6f58dff6b7ee07 100644 (file)
@@ -2,9 +2,10 @@ SMI (Smart Multimedia Interface) Common
 
 The hardware block diagram please check bindings/iommu/mediatek,iommu.txt
 
-Mediatek SMI have two generations of HW architecture, mt2712 and mt8173 use
-the second generation of SMI HW while mt2701 uses the first generation HW of
-SMI.
+Mediatek SMI has two generations of HW architecture; here is the list of
+which generation each SoC uses:
+generation 1: mt2701 and mt7623.
+generation 2: mt2712, mt8173 and mt8183.
 
 There's slight differences between the two SMI, for generation 2, the
 register which control the iommu port is at each larb's register base. But
@@ -19,6 +20,7 @@ Required properties:
        "mediatek,mt2712-smi-common"
        "mediatek,mt7623-smi-common", "mediatek,mt2701-smi-common"
        "mediatek,mt8173-smi-common"
+       "mediatek,mt8183-smi-common"
 - reg : the register and size of the SMI block.
 - power-domains : a phandle to the power domain of this local arbiter.
 - clocks : Must contain an entry for each entry in clock-names.
@@ -30,6 +32,10 @@ Required properties:
            They may be the same if both source clocks are the same.
   - "async" : asynchronous clock, it help transform the smi clock into the emi
              clock domain, this clock is only needed by generation 1 smi HW.
+  and these 2 optional clocks for generation 2 smi HW:
+  - "gals0": the path0 clock of GALS (Global Async Local Sync).
+  - "gals1": the path1 clock of GALS (Global Async Local Sync).
+  Here is the list of SoCs which have this GALS: mt8183.
 
 Example:
        smi_common: smi@14022000 {
index 94eddcae77ab5f08af6b13885c3a3122cd53ce41..4b369b3e1a69b1d421fbca8492b2bcd2bc40e111 100644 (file)
@@ -8,6 +8,7 @@ Required properties:
                "mediatek,mt2712-smi-larb"
                "mediatek,mt7623-smi-larb", "mediatek,mt2701-smi-larb"
                "mediatek,mt8173-smi-larb"
+               "mediatek,mt8183-smi-larb"
 - reg : the register and size of this local arbiter.
 - mediatek,smi : a phandle to the smi_common node.
 - power-domains : a phandle to the power domain of this local arbiter.
@@ -16,6 +17,9 @@ Required properties:
   - "apb" : Advanced Peripheral Bus clock, It's the clock for setting
            the register.
   - "smi" : It's the clock for transfer data and command.
+  and this optional clock name:
+  - "gals": the clock for GALS (Global Async Local Sync).
+  Here is the list of SoCs which have this GALS: mt8183.
 
 Required property for mt2701, mt2712 and mt7623:
 - mediatek,larb-id :the hardware id of this larb.
index 4ac21cef370ece9fb5b769b3c217d47d5a08d2d9..113e7ac79aad59259e035ea2179a51e4c62fbba4 100644 (file)
@@ -12,6 +12,7 @@ Required properties:
   - "microchip,ksz8565"
   - "microchip,ksz9893"
   - "microchip,ksz9563"
+  - "microchip,ksz8563"
 
 Optional properties:
 
index 63c73fafe26dcc73be043d84d036cb776f1c97b8..0b61a90f1592bfd6a31534fc41bfb955b477e5f7 100644 (file)
@@ -15,10 +15,10 @@ Required properties:
   Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
   Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
   Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
-  Use "sifive,fu540-macb" for SiFive FU540-C000 SoC.
+  Use "sifive,fu540-c000-gem" for SiFive FU540-C000 SoC.
   Or the generic form: "cdns,emac".
 - reg: Address and length of the register set for the device
-       For "sifive,fu540-macb", second range is required to specify the
+       For "sifive,fu540-c000-gem", second range is required to specify the
        address and length of the registers for GEMGXL Management block.
 - interrupts: Should contain macb interrupt
 - phy-mode: See ethernet.txt file in the same directory.
index 3ae9f1088845ba4fecf43b90102256317f4b71e8..b9f58e480349896655d2d6d97881c641a2666fcb 100644 (file)
@@ -34,6 +34,9 @@ Optional input supply properties:
   - inl67-supply: The input supply for LDO_REG3 and LDO_REG4
 
 Any standard regulator properties can be used to configure the single regulator.
+regulator-initial-mode, regulator-allowed-modes and regulator-mode can be
+specified for act8865 using the mode values from the
+dt-bindings/regulator/active-semi,8865-regulator.h file.
 
 The valid names for regulators are:
        - for act8846:
@@ -47,6 +50,8 @@ The valid names for regulators are:
 Example:
 --------
 
+#include <dt-bindings/regulator/active-semi,8865-regulator.h>
+
                i2c1: i2c@f0018000 {
                        pmic: act8865@5b {
                                compatible = "active-semi,act8865";
@@ -65,9 +70,19 @@ Example:
                                                regulator-name = "VCC_1V2";
                                                regulator-min-microvolt = <1100000>;
                                                regulator-max-microvolt = <1300000>;
-                                               regulator-suspend-mem-microvolt = <1150000>;
-                                               regulator-suspend-standby-microvolt = <1150000>;
                                                regulator-always-on;
+
+                                               regulator-allowed-modes = <ACT8865_REGULATOR_MODE_FIXED>,
+                                                                         <ACT8865_REGULATOR_MODE_LOWPOWER>;
+                                               regulator-initial-mode = <ACT8865_REGULATOR_MODE_FIXED>;
+
+                                               regulator-state-mem {
+                                                       regulator-on-in-suspend;
+                                                       regulator-suspend-min-microvolt = <1150000>;
+                                                       regulator-suspend-max-microvolt = <1150000>;
+                                                       regulator-changeable-in-suspend;
+                                                       regulator-mode = <ACT8865_REGULATOR_MODE_LOWPOWER>;
+                                               };
                                        };
 
                                        vcc_3v3_reg: DCDC_REG3 {
@@ -82,6 +97,14 @@ Example:
                                                regulator-min-microvolt = <3300000>;
                                                regulator-max-microvolt = <3300000>;
                                                regulator-always-on;
+
+                                               regulator-allowed-modes = <ACT8865_REGULATOR_MODE_NORMAL>,
+                                                                         <ACT8865_REGULATOR_MODE_LOWPOWER>;
+                                               regulator-initial-mode = <ACT8865_REGULATOR_MODE_NORMAL>;
+
+                                               regulator-state-mem {
+                                                       regulator-off-in-suspend;
+                                               };
                                        };
 
                                        vddfuse_reg: LDO_REG2 {
index a650b457085de27f96a56dffd0026e8016cafb66..a78150c47aa2a67b8759c7a7c450e9edd20a375b 100644 (file)
@@ -19,9 +19,19 @@ description:
 allOf:
   - $ref: "regulator.yaml#"
 
+if:
+  properties:
+    compatible:
+      contains:
+        const: regulator-fixed-clock
+  required:
+    - clocks
+
 properties:
   compatible:
-    const: regulator-fixed
+    enum:
+      - regulator-fixed
+      - regulator-fixed-clock
 
   regulator-name: true
 
@@ -29,6 +39,13 @@ properties:
     description: gpio to use for enable control
     maxItems: 1
 
+  clocks:
+    description:
+      clock to use for enable control. This property is only available when
+      the compatible is set to regulator-fixed-clock, and it is mandatory in
+      that case.
+    maxItems: 1
+
   startup-delay-us:
     description: startup time in microseconds
     $ref: /schemas/types.yaml#/definitions/uint32
diff --git a/Documentation/devicetree/bindings/regulator/mt6358-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6358-regulator.txt
new file mode 100644 (file)
index 0000000..9a90a92
--- /dev/null
@@ -0,0 +1,358 @@
+MediaTek MT6358 Regulator
+
+All voltage regulators provided by the MT6358 PMIC are described as the
+subnodes of the MT6358 regulators node. Each regulator is named according
+to its regulator type, buck_<name> and ldo_<name>. The definition for each
+of these nodes is defined using the standard binding for regulators at
+Documentation/devicetree/bindings/regulator/regulator.txt.
+
+The valid names for regulators are::
+BUCK:
+  buck_vdram1, buck_vcore, buck_vpa, buck_vproc11, buck_vproc12, buck_vgpu,
+  buck_vs2, buck_vmodem, buck_vs1
+LDO:
+  ldo_vdram2, ldo_vsim1, ldo_vibr, ldo_vrf12, ldo_vio18, ldo_vusb, ldo_vcamio,
+  ldo_vcamd, ldo_vcn18, ldo_vfe28, ldo_vsram_proc11, ldo_vcn28, ldo_vsram_others,
+  ldo_vsram_gpu, ldo_vxo22, ldo_vefuse, ldo_vaux18, ldo_vmch, ldo_vbif28,
+  ldo_vsram_proc12, ldo_vcama1, ldo_vemc, ldo_vio28, ldo_va12, ldo_vrf18,
+  ldo_vcn33_bt, ldo_vcn33_wifi, ldo_vcama2, ldo_vmc, ldo_vldo28, ldo_vaud28,
+  ldo_vsim2
+
+Example:
+
+       pmic {
+               compatible = "mediatek,mt6358";
+
+               mt6358regulator: mt6358regulator {
+                       compatible = "mediatek,mt6358-regulator";
+
+                       mt6358_vdram1_reg: buck_vdram1 {
+                               regulator-compatible = "buck_vdram1";
+                               regulator-name = "vdram1";
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <2087500>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <0>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vcore_reg: buck_vcore {
+                               regulator-name = "vcore";
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <1293750>;
+                               regulator-ramp-delay = <6250>;
+                               regulator-enable-ramp-delay = <200>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vpa_reg: buck_vpa {
+                               regulator-name = "vpa";
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <3650000>;
+                               regulator-ramp-delay = <50000>;
+                               regulator-enable-ramp-delay = <250>;
+                       };
+
+                       mt6358_vproc11_reg: buck_vproc11 {
+                               regulator-name = "vproc11";
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <1293750>;
+                               regulator-ramp-delay = <6250>;
+                               regulator-enable-ramp-delay = <200>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vproc12_reg: buck_vproc12 {
+                               regulator-name = "vproc12";
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <1293750>;
+                               regulator-ramp-delay = <6250>;
+                               regulator-enable-ramp-delay = <200>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vgpu_reg: buck_vgpu {
+                               regulator-name = "vgpu";
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <1293750>;
+                               regulator-ramp-delay = <6250>;
+                               regulator-enable-ramp-delay = <200>;
+                       };
+
+                       mt6358_vs2_reg: buck_vs2 {
+                               regulator-name = "vs2";
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <2087500>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <0>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vmodem_reg: buck_vmodem {
+                               regulator-name = "vmodem";
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <1293750>;
+                               regulator-ramp-delay = <6250>;
+                               regulator-enable-ramp-delay = <900>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vs1_reg: buck_vs1 {
+                               regulator-name = "vs1";
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <2587500>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <0>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vdram2_reg: ldo_vdram2 {
+                               regulator-name = "vdram2";
+                               regulator-min-microvolt = <600000>;
+                               regulator-max-microvolt = <1800000>;
+                               regulator-enable-ramp-delay = <3300>;
+                       };
+
+                       mt6358_vsim1_reg: ldo_vsim1 {
+                               regulator-name = "vsim1";
+                               regulator-min-microvolt = <1700000>;
+                               regulator-max-microvolt = <3100000>;
+                               regulator-enable-ramp-delay = <540>;
+                       };
+
+                       mt6358_vibr_reg: ldo_vibr {
+                               regulator-name = "vibr";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <60>;
+                       };
+
+                       mt6358_vrf12_reg: ldo_vrf12 {
+                               compatible = "regulator-fixed";
+                               regulator-name = "vrf12";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <1200000>;
+                               regulator-enable-ramp-delay = <120>;
+                       };
+
+                       mt6358_vio18_reg: ldo_vio18 {
+                               compatible = "regulator-fixed";
+                               regulator-name = "vio18";
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <1800000>;
+                               regulator-enable-ramp-delay = <2700>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vusb_reg: ldo_vusb {
+                               regulator-name = "vusb";
+                               regulator-min-microvolt = <3000000>;
+                               regulator-max-microvolt = <3100000>;
+                               regulator-enable-ramp-delay = <270>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vcamio_reg: ldo_vcamio {
+                               compatible = "regulator-fixed";
+                               regulator-name = "vcamio";
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <1800000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vcamd_reg: ldo_vcamd {
+                               regulator-name = "vcamd";
+                               regulator-min-microvolt = <900000>;
+                               regulator-max-microvolt = <1800000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vcn18_reg: ldo_vcn18 {
+                               compatible = "regulator-fixed";
+                               regulator-name = "vcn18";
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <1800000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vfe28_reg: ldo_vfe28 {
+                               compatible = "regulator-fixed";
+                               regulator-name = "vfe28";
+                               regulator-min-microvolt = <2800000>;
+                               regulator-max-microvolt = <2800000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vsram_proc11_reg: ldo_vsram_proc11 {
+                               regulator-name = "vsram_proc11";
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <1293750>;
+                               regulator-ramp-delay = <6250>;
+                               regulator-enable-ramp-delay = <240>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vcn28_reg: ldo_vcn28 {
+                               compatible = "regulator-fixed";
+                               regulator-name = "vcn28";
+                               regulator-min-microvolt = <2800000>;
+                               regulator-max-microvolt = <2800000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vsram_others_reg: ldo_vsram_others {
+                               regulator-name = "vsram_others";
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <1293750>;
+                               regulator-ramp-delay = <6250>;
+                               regulator-enable-ramp-delay = <240>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vsram_gpu_reg: ldo_vsram_gpu {
+                               regulator-name = "vsram_gpu";
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <1293750>;
+                               regulator-ramp-delay = <6250>;
+                               regulator-enable-ramp-delay = <240>;
+                       };
+
+                       mt6358_vxo22_reg: ldo_vxo22 {
+                               compatible = "regulator-fixed";
+                               regulator-name = "vxo22";
+                               regulator-min-microvolt = <2200000>;
+                               regulator-max-microvolt = <2200000>;
+                               regulator-enable-ramp-delay = <120>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vefuse_reg: ldo_vefuse {
+                               regulator-name = "vefuse";
+                               regulator-min-microvolt = <1700000>;
+                               regulator-max-microvolt = <1900000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vaux18_reg: ldo_vaux18 {
+                               compatible = "regulator-fixed";
+                               regulator-name = "vaux18";
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <1800000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vmch_reg: ldo_vmch {
+                               regulator-name = "vmch";
+                               regulator-min-microvolt = <2900000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <60>;
+                       };
+
+                       mt6358_vbif28_reg: ldo_vbif28 {
+                               compatible = "regulator-fixed";
+                               regulator-name = "vbif28";
+                               regulator-min-microvolt = <2800000>;
+                               regulator-max-microvolt = <2800000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vsram_proc12_reg: ldo_vsram_proc12 {
+                               regulator-name = "vsram_proc12";
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <1293750>;
+                               regulator-ramp-delay = <6250>;
+                               regulator-enable-ramp-delay = <240>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vcama1_reg: ldo_vcama1 {
+                               regulator-name = "vcama1";
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3000000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vemc_reg: ldo_vemc {
+                               regulator-name = "vemc";
+                               regulator-min-microvolt = <2900000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <60>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vio28_reg: ldo_vio28 {
+                               compatible = "regulator-fixed";
+                               regulator-name = "vio28";
+                               regulator-min-microvolt = <2800000>;
+                               regulator-max-microvolt = <2800000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_va12_reg: ldo_va12 {
+                               compatible = "regulator-fixed";
+                               regulator-name = "va12";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <1200000>;
+                               regulator-enable-ramp-delay = <270>;
+                               regulator-always-on;
+                       };
+
+                       mt6358_vrf18_reg: ldo_vrf18 {
+                               compatible = "regulator-fixed";
+                               regulator-name = "vrf18";
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <1800000>;
+                               regulator-enable-ramp-delay = <120>;
+                       };
+
+                       mt6358_vcn33_bt_reg: ldo_vcn33_bt {
+                               regulator-name = "vcn33_bt";
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3500000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vcn33_wifi_reg: ldo_vcn33_wifi {
+                               regulator-name = "vcn33_wifi";
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3500000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vcama2_reg: ldo_vcama2 {
+                               regulator-name = "vcama2";
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3000000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vmc_reg: ldo_vmc {
+                               regulator-name = "vmc";
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <60>;
+                       };
+
+                       mt6358_vldo28_reg: ldo_vldo28 {
+                               regulator-name = "vldo28";
+                               regulator-min-microvolt = <2800000>;
+                               regulator-max-microvolt = <3000000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vaud28_reg: ldo_vaud28 {
+                               compatible = "regulator-fixed";
+                               regulator-name = "vaud28";
+                               regulator-min-microvolt = <2800000>;
+                               regulator-max-microvolt = <2800000>;
+                               regulator-enable-ramp-delay = <270>;
+                       };
+
+                       mt6358_vsim2_reg: ldo_vsim2 {
+                               regulator-name = "vsim2";
+                               regulator-min-microvolt = <1700000>;
+                               regulator-max-microvolt = <3100000>;
+                               regulator-enable-ramp-delay = <540>;
+                       };
+               };
+       };
index 14d2eee96b3d5e075ed90af0ca9e042eae0ee065..bab9f71140b835161fba4895c7fddca8df786598 100644 (file)
@@ -22,9 +22,12 @@ RPMh resource.
 
 The names used for regulator nodes must match those supported by a given PMIC.
 Supported regulator node names:
+       PM8005:         smps1 - smps4
+       PM8009:         smps1 - smps2, ldo1 - ldo7
+       PM8150:         smps1 - smps10, ldo1 - ldo18
+       PM8150L:        smps1 - smps8, ldo1 - ldo11, bob, flash, rgb
        PM8998:         smps1 - smps13, ldo1 - ldo28, lvs1 - lvs2
        PMI8998:        bob
-       PM8005:         smps1 - smps4
 
 ========================
 First Level Nodes - PMIC
@@ -33,9 +36,13 @@ First Level Nodes - PMIC
 - compatible
        Usage:      required
        Value type: <string>
-       Definition: Must be one of: "qcom,pm8998-rpmh-regulators",
-                   "qcom,pmi8998-rpmh-regulators" or
-                   "qcom,pm8005-rpmh-regulators".
+       Definition: Must be one of below:
+                   "qcom,pm8005-rpmh-regulators"
+                   "qcom,pm8009-rpmh-regulators"
+                   "qcom,pm8150-rpmh-regulators"
+                   "qcom,pm8150l-rpmh-regulators"
+                   "qcom,pm8998-rpmh-regulators"
+                   "qcom,pmi8998-rpmh-regulators"
 
 - qcom,pmic-id
        Usage:      required
diff --git a/Documentation/devicetree/bindings/regulator/sy8824x.txt b/Documentation/devicetree/bindings/regulator/sy8824x.txt
new file mode 100644 (file)
index 0000000..c5e9585
--- /dev/null
@@ -0,0 +1,24 @@
+SY8824C/SY8824E/SY20276/SY20278 Voltage regulator
+
+Required properties:
+- compatible: Must be one of the following.
+       "silergy,sy8824c"
+       "silergy,sy8824e"
+       "silergy,sy20276"
+       "silergy,sy20278"
+- reg: I2C slave address
+
+Any property defined as part of the core regulator binding, defined in
+./regulator.txt, can also be used.
+
+Example:
+
+       vcore: regulator@00 {
+               compatible = "silergy,sy8824c";
+               reg = <0x66>;
+               regulator-name = "vcore";
+               regulator-min-microvolt = <800000>;
+               regulator-max-microvolt = <1150000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
index 74a91c4f8530a4cd17cb5d184554c6cd283163e8..549f80436debca8eed1558f547264967cba89a42 100644 (file)
@@ -71,3 +71,10 @@ Example:
                regulator-min-microvolt  = <1000000>;
                regulator-max-microvolt  = <3000000>;
        };
+
+For twl6030 regulators/LDOs:
+
+ - ti,retain-on-reset: Does not turn off the supplies during warm
+                       reset. Could be needed for VMMC, as TWL6030
+                       reset sequence for this signal does not comply
+                       with the SD specification.
index c9919f4b92d2121ef25471c1f22df69f621a911c..94fd38b0d163fdcedc20085c442e3c510ed4f292 100644 (file)
@@ -13,6 +13,7 @@ this layer. These clocks and resets should be described in each property.
 Required properties:
 - compatible: Should be
     "socionext,uniphier-pro4-usb3-regulator" - for Pro4 SoC
+    "socionext,uniphier-pro5-usb3-regulator" - for Pro5 SoC
     "socionext,uniphier-pxs2-usb3-regulator" - for PXs2 SoC
     "socionext,uniphier-ld20-usb3-regulator" - for LD20 SoC
     "socionext,uniphier-pxs3-usb3-regulator" - for PXs3 SoC
@@ -20,12 +21,12 @@ Required properties:
 - clocks: A list of phandles to the clock gate for USB3 glue layer.
        According to the clock-names, appropriate clocks are required.
 - clock-names: Should contain
-    "gio", "link" - for Pro4 SoC
+    "gio", "link" - for Pro4 and Pro5 SoCs
     "link"        - for others
 - resets: A list of phandles to the reset control for USB3 glue layer.
        According to the reset-names, appropriate resets are required.
 - reset-names: Should contain
-    "gio", "link" - for Pro4 SoC
+    "gio", "link" - for Pro4 and Pro5 SoCs
     "link"        - for others
 
 See Documentation/devicetree/bindings/regulator/regulator.txt
diff --git a/Documentation/devicetree/bindings/spi/nuvoton,npcm-fiu.txt b/Documentation/devicetree/bindings/spi/nuvoton,npcm-fiu.txt
new file mode 100644 (file)
index 0000000..a388005
--- /dev/null
@@ -0,0 +1,47 @@
+* Nuvoton FLASH Interface Unit (FIU) SPI Controller
+
+The NPCM FIU supports single, dual and quad communication interfaces.
+
+The NPCM7XX supports three FIU modules:
+FIU0 and FIUx support two chip selects,
+and FIU3 supports four chip selects.
+
+Required properties:
+  - compatible : "nuvoton,npcm750-fiu" for the NPCM7XX BMC
+  - #address-cells : should be 1.
+  - #size-cells : should be 0.
+  - reg : the first contains the register location and length,
+          the second contains the memory mapping address and length
+  - reg-names: Should contain the reg names "control" and "memory"
+  - clocks : phandle of FIU reference clock.
+
+Required properties in case the pins can be muxed:
+  - pinctrl-names : a pinctrl state named "default" must be defined.
+  - pinctrl-0 : phandle referencing pin configuration of the device.
+
+Optional property:
+  - nuvoton,spix-mode: enable spix-mode for an expansion bus to an ASIC or CPLD.
+
+Aliases:
+- All the FIU controller nodes should be represented in the aliases node using
+  the following format 'fiu{n}' where n is a unique number for the alias.
+  In the NPCM7XX BMC:
+               fiu0 represents the fiu 0 controller
+               fiu1 represents the fiu 3 controller
+               fiu2 represents the fiu x controller
+
+Example:
+fiu3: spi@fb000000 {
+       compatible = "nuvoton,npcm750-fiu";
+       #address-cells = <1>;
+       #size-cells = <0>;
+       reg = <0xfb000000 0x1000>, <0x80000000 0x10000000>;
+       reg-names = "control", "memory";
+       clocks = <&clk NPCM7XX_CLK_AHB>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&spi3_pins>;
+       spi-nor@0 {
+                       ...
+       };
+};
+
index a02e2fe2bfb22350773759da4b27fc13800f1c49..732339275848c12a52c438656f97e3e74a44b9ff 100644 (file)
@@ -31,7 +31,7 @@ properties:
       If that property is used, the number of chip selects will be
       increased automatically with max(cs-gpios, hardware chip selects).
 
-      So if, for example, the controller has 2 CS lines, and the
+      So if, for example, the controller has 4 CS lines, and the
       cs-gpios looks like this
         cs-gpios = <&gpio1 0 0>, <0>, <&gpio1 1 0>, <&gpio1 2 0>;
 
index e8f1d627d2885cd2ae8729705427fcb7c085beef..69dc5d57b1efbe73324b6e17df474317e1ef684a 100644 (file)
@@ -3,9 +3,8 @@
 Required properties:
   - compatible : Should be "fsl,vf610-qspi", "fsl,imx6sx-qspi",
                 "fsl,imx7d-qspi", "fsl,imx6ul-qspi",
-                "fsl,ls1021a-qspi"
+                "fsl,ls1021a-qspi", "fsl,ls2080a-qspi"
                 or
-                "fsl,ls2080a-qspi" followed by "fsl,ls1021a-qspi",
                 "fsl,ls1043a-qspi" followed by "fsl,ls1021a-qspi"
   - reg : the first contains the register location and length,
           the second contains the memory mapping address and length
@@ -34,7 +33,11 @@ qspi0: quadspi@40044000 {
        clock-names = "qspi_en", "qspi";
 
        flash0: s25fl128s@0 {
-               ....
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "spansion,s25fl128s", "jedec,spi-nor";
+               spi-max-frequency = <50000000>;
+               reg = <0>;
        };
 };
 
index c0f6c8ecfa2e2ff963fbb440ae04524a8f85c669..3a8079eb18c8d7971a2cc5553d4a6f59ccf7210e 100644 (file)
@@ -5,6 +5,7 @@ Required properties:
     - mediatek,mt2701-spi: for mt2701 platforms
     - mediatek,mt2712-spi: for mt2712 platforms
     - mediatek,mt6589-spi: for mt6589 platforms
+    - mediatek,mt6765-spi: for mt6765 platforms
     - mediatek,mt7622-spi: for mt7622 platforms
     - "mediatek,mt7629-spi", "mediatek,mt7622-spi": for mt7629 platforms
     - mediatek,mt8135-spi: for mt8135 platforms
index 8de589b376ce1b72a83ab2c088c811ed0af9f360..2567c829e2dcf10e97ddfa0675acca3b94b122e5 100644 (file)
@@ -25,18 +25,23 @@ data by ADI software channels at the same time, or two parallel routine of setti
 ADI registers will make ADI controller registers chaos to lead incorrect results.
 Then we need one hardware spinlock to synchronize between the multiple subsystems.
 
+The new version of the ADI controller supplies multiple master channels for
+different subsystems to access, which means no hardware spinlock is needed for
+synchronization; the hardware spinlock support is therefore made optional to
+keep backward compatibility.
+
 Required properties:
 - compatible: Should be "sprd,sc9860-adi".
 - reg: Offset and length of ADI-SPI controller register space.
-- hwlocks: Reference to a phandle of a hwlock provider node.
-- hwlock-names: Reference to hwlock name strings defined in the same order
-       as the hwlocks, should be "adi".
 - #address-cells: Number of cells required to define a chip select address
        on the ADI-SPI bus. Should be set to 1.
 - #size-cells: Size of cells required to define a chip select address size
        on the ADI-SPI bus. Should be set to 0.
 
 Optional properties:
+- hwlocks: Reference to a phandle of a hwlock provider node.
+- hwlock-names: Reference to hwlock name strings defined in the same order
+       as the hwlocks, should be "adi".
 - sprd,hw-channels: This is an array of channel values up to 49 channels.
        The first value specifies the hardware channel id which is used to
        transfer data triggered by hardware automatically, and the second
index 2e742d399e87b039cc77f04c53ff33634e94c177..870ac52d2225923961712ced3a51c030cba0a386 100644 (file)
@@ -104,6 +104,8 @@ properties:
           - infineon,slb9645tt
             # Infineon TLV493D-A1B6 I2C 3D Magnetic Sensor
           - infineon,tlv493d-a1b6
+            # Inspur Power System power supply unit version 1
+          - inspur,ipsps1
             # Intersil ISL29028 Ambient Light and Proximity Sensor
           - isil,isl29028
             # Intersil ISL29030 Ambient Light and Proximity Sensor
index 6992bbbbffab6a57b185a244048ea9d03d60ea15..d61a203138cbe91686142b9d070a28514cd799d0 100644 (file)
@@ -575,6 +575,8 @@ patternProperties:
     description: Micro Crystal AG
   "^micron,.*":
     description: Micron Technology Inc.
+  "^microsoft,.*":
+    description: Microsoft Corporation
   "^mikroe,.*":
     description: MikroElektronika d.o.o.
   "^miniand,.*":
index 921c71a3d6839234b04f84d072c3aa1129dd4cdc..3fdb32422f8ac11d9459f7237f9616e22b4468ea 100644 (file)
@@ -69,9 +69,9 @@ driver code:
 
 The code implementing a gpio_chip should support multiple instances of the
 controller, preferably using the driver model. That code will configure each
-gpio_chip and issue ``gpiochip_add[_data]()`` or ``devm_gpiochip_add_data()``.
-Removing a GPIO controller should be rare; use ``[devm_]gpiochip_remove()``
-when it is unavoidable.
+gpio_chip and issue gpiochip_add(), gpiochip_add_data(), or
+devm_gpiochip_add_data().  Removing a GPIO controller should be rare; use
+gpiochip_remove() when it is unavoidable.
 
 Often a gpio_chip is part of an instance-specific structure with states not
 exposed by the GPIO interfaces, such as addressing, power management, and more.
@@ -259,7 +259,7 @@ most often cascaded off a parent interrupt controller, and in some special
 cases the GPIO logic is melded with a SoC's primary interrupt controller.
 
 The IRQ portions of the GPIO block are implemented using an irq_chip, using
-the header <linux/irq.h>. So basically such a driver is utilizing two sub-
+the header <linux/irq.h>. So this combined driver is utilizing two sub-
 systems simultaneously: gpio and irq.
 
 It is legal for any IRQ consumer to request an IRQ from any irqchip even if it
@@ -391,25 +391,119 @@ Infrastructure helpers for GPIO irqchips
 ----------------------------------------
 
 To help out in handling the set-up and management of GPIO irqchips and the
-associated irqdomain and resource allocation callbacks, the gpiolib has
-some helpers that can be enabled by selecting the GPIOLIB_IRQCHIP Kconfig
-symbol:
-
-- gpiochip_irqchip_add(): adds a chained cascaded irqchip to a gpiochip. It
-  will pass the struct gpio_chip* for the chip to all IRQ callbacks, so the
-  callbacks need to embed the gpio_chip in its state container and obtain a
-  pointer to the container using container_of().
-  (See Documentation/driver-api/driver-model/design-patterns.rst)
+associated irqdomain and resource allocation callbacks, gpiolib provides some
+helpers. These are activated by selecting the Kconfig symbol GPIOLIB_IRQCHIP.
+If the symbol IRQ_DOMAIN_HIERARCHY is also selected, hierarchical helpers will
+also be provided. A big portion of overhead code will be managed by gpiolib,
+under the assumption that your interrupts are 1-to-1-mapped to the
+GPIO line index:
+
+  GPIO line offset   Hardware IRQ
+  0                  0
+  1                  1
+  2                  2
+  ...                ...
+  ngpio-1            ngpio-1
+
+If some GPIO lines do not have corresponding IRQs, the bitmask valid_mask
+and the flag need_valid_mask in gpio_irq_chip can be used to mask off some
+lines as invalid for associating with IRQs.
+
+The preferred way to set up the helpers is to fill in the
+struct gpio_irq_chip inside struct gpio_chip before adding the gpio_chip.
+If you do this, the additional irq_chip will be set up by gpiolib at the
+same time as setting up the rest of the GPIO functionality. The following
+is a typical example of a cascaded interrupt handler using gpio_irq_chip:
+
+  /* Typical state container with dynamic irqchip */
+  struct my_gpio {
+      struct gpio_chip gc;
+      struct irq_chip irq;
+  };
+
+  int irq; /* from platform etc */
+  struct my_gpio *g;
+  struct gpio_irq_chip *girq;
+
+  /* Set up the irqchip dynamically */
+  g->irq.name = "my_gpio_irq";
+  g->irq.irq_ack = my_gpio_ack_irq;
+  g->irq.irq_mask = my_gpio_mask_irq;
+  g->irq.irq_unmask = my_gpio_unmask_irq;
+  g->irq.irq_set_type = my_gpio_set_irq_type;
+
+  /* Get a pointer to the gpio_irq_chip */
+  girq = &g->gc.irq;
+  girq->chip = &g->irq;
+  girq->parent_handler = ftgpio_gpio_irq_handler;
+  girq->num_parents = 1;
+  girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+                               GFP_KERNEL);
+  if (!girq->parents)
+      return -ENOMEM;
+  girq->default_type = IRQ_TYPE_NONE;
+  girq->handler = handle_bad_irq;
+  girq->parents[0] = irq;
+
+  return devm_gpiochip_add_data(dev, &g->gc, g);
+
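The parent_handler referenced above (ftgpio_gpio_irq_handler in this example) is an ordinary
chained flow handler supplied by the driver. The following is a minimal sketch of such a
handler; the my_gpio base pointer and the MY_IRQ_STATUS register offset are hypothetical, so
only the overall shape (chained_irq_enter/exit, demultiplexing through the gpiochip's
irqdomain) should be taken as given:

  /*
   * Sketch only: a chained parent handler for the cascaded case.
   * Assumes <linux/irqchip/chained_irq.h> and a hypothetical register
   * layout (g->base, MY_IRQ_STATUS) not present in the struct above.
   */
  static void my_gpio_irq_handler(struct irq_desc *desc)
  {
      struct gpio_chip *gc = irq_desc_get_handler_data(desc);
      struct my_gpio *g = gpiochip_get_data(gc);
      struct irq_chip *ic = irq_desc_get_chip(desc);
      unsigned long pending;
      int offset;

      chained_irq_enter(ic, desc);

      /* read the (hypothetical) per-line interrupt status register */
      pending = readl(g->base + MY_IRQ_STATUS);
      for_each_set_bit(offset, &pending, gc->ngpio)
          generic_handle_irq(irq_find_mapping(gc->irq.domain, offset));

      chained_irq_exit(ic, desc);
  }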
+The helpers support using hierarchical interrupt controllers as well.
+In this case the typical set-up will look like this:
+
+  /* Typical state container with dynamic irqchip */
+  struct my_gpio {
+      struct gpio_chip gc;
+      struct irq_chip irq;
+      struct fwnode_handle *fwnode;
+  };
+
+  int irq; /* from platform etc */
+  struct my_gpio *g;
+  struct gpio_irq_chip *girq;
+
+  /* Set up the irqchip dynamically */
+  g->irq.name = "my_gpio_irq";
+  g->irq.irq_ack = my_gpio_ack_irq;
+  g->irq.irq_mask = my_gpio_mask_irq;
+  g->irq.irq_unmask = my_gpio_unmask_irq;
+  g->irq.irq_set_type = my_gpio_set_irq_type;
+
+  /* Get a pointer to the gpio_irq_chip */
+  girq = &g->gc.irq;
+  girq->chip = &g->irq;
+  girq->default_type = IRQ_TYPE_NONE;
+  girq->handler = handle_bad_irq;
+  girq->fwnode = g->fwnode;
+  girq->parent_domain = parent;
+  girq->child_to_parent_hwirq = my_gpio_child_to_parent_hwirq;
+
+  return devm_gpiochip_add_data(dev, &g->gc, g);
+
+As you can see, this is pretty similar, but instead of a parent handler for
+the IRQ you supply a parent irqdomain, an fwnode for the hardware and
+a function .child_to_parent_hwirq() whose purpose is to look up
+the parent hardware irq from a child (i.e. this gpio chip) hardware irq.
+As always it is good to look at examples in the kernel tree for advice
+on how to find the required pieces.
+
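+What such a .child_to_parent_hwirq() callback could look like is sketched
+below; the mapping (GPIO hwirq n becomes parent hwirq n + 32) is purely
+illustrative and depends entirely on the parent interrupt controller:
+
+  static int my_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+                                           unsigned int child,
+                                           unsigned int child_type,
+                                           unsigned int *parent,
+                                           unsigned int *parent_type)
+  {
+      /* Hypothetical mapping: GPIO hwirq n is parent hwirq n + 32 */
+      *parent = child + 32;
+      *parent_type = child_type;
+      return 0;
+  }
+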
+The old way of adding irqchips to gpiochips after registration is still
+available, but we are trying to move away from it:
+
+- DEPRECATED: gpiochip_irqchip_add(): adds a chained cascaded irqchip to a
+  gpiochip. It will pass the struct gpio_chip* for the chip to all IRQ
+  callbacks, so the callbacks need to embed the gpio_chip in their state
+  container and obtain a pointer to the container using container_of().
+  (See Documentation/driver-api/driver-model/design-patterns.rst)
 
 - gpiochip_irqchip_add_nested(): adds a nested cascaded irqchip to a gpiochip,
   as discussed above regarding different types of cascaded irqchips. The
   cascaded irq has to be handled by a threaded interrupt handler.
   Apart from that it works exactly like the chained irqchip.
 
-- gpiochip_set_chained_irqchip(): sets up a chained cascaded irq handler for a
-  gpio_chip from a parent IRQ and passes the struct gpio_chip* as handler
-  data. Notice that we pass is as the handler data, since the irqchip data is
-  likely used by the parent irqchip.
+- DEPRECATED: gpiochip_set_chained_irqchip(): sets up a chained cascaded irq
+  handler for a gpio_chip from a parent IRQ and passes the struct gpio_chip*
+  as handler data. Notice that we pass it as the handler data, since the
+  irqchip data is likely used by the parent irqchip.
 
 - gpiochip_set_nested_irqchip(): sets up a nested cascaded irq handler for a
   gpio_chip from a parent IRQ. As the parent IRQ has usually been
@@ -418,11 +512,11 @@ symbol:
 
 If there is a need to exclude certain GPIO lines from the IRQ domain handled by
 these helpers, we can set .irq.need_valid_mask of the gpiochip before
-``[devm_]gpiochip_add_data()`` is called. This allocates an .irq.valid_mask with as
-many bits set as there are GPIO lines in the chip, each bit representing line
-0..n-1. Drivers can exclude GPIO lines by clearing bits from this mask. The mask
-must be filled in before gpiochip_irqchip_add() or gpiochip_irqchip_add_nested()
-is called.
+devm_gpiochip_add_data() or gpiochip_add_data() is called. This allocates an
+.irq.valid_mask with as many bits set as there are GPIO lines in the chip, each
+bit representing line 0..n-1. Drivers can exclude GPIO lines by clearing bits
+from this mask. The mask must be filled in before gpiochip_irqchip_add() or
+gpiochip_irqchip_add_nested() is called.
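+
+A minimal sketch of that sequence, using the older post-registration helpers
+described above together with a hypothetical my_irqchip, could look like:
+
+  struct my_gpio *g;   /* driver state, as in the examples above */
+  int ret;
+
+  /* Lines 4 and 5 of this bank have no IRQ wired up */
+  g->gc.irq.need_valid_mask = true;
+
+  ret = gpiochip_add_data(&g->gc, g);
+  if (ret)
+      return ret;
+
+  clear_bit(4, g->gc.irq.valid_mask);
+  clear_bit(5, g->gc.irq.valid_mask);
+
+  ret = gpiochip_irqchip_add(&g->gc, &my_irqchip, 0,
+                             handle_bad_irq, IRQ_TYPE_NONE);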
 
 To use the helpers please keep the following in mind:
 
diff --git a/Documentation/hwmon/ads1015.rst b/Documentation/hwmon/ads1015.rst
deleted file mode 100644 (file)
index e0951c4..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-Kernel driver ads1015
-=====================
-
-Supported chips:
-
-  * Texas Instruments ADS1015
-
-    Prefix: 'ads1015'
-
-    Datasheet: Publicly available at the Texas Instruments website:
-
-              http://focus.ti.com/lit/ds/symlink/ads1015.pdf
-
-  * Texas Instruments ADS1115
-
-    Prefix: 'ads1115'
-
-    Datasheet: Publicly available at the Texas Instruments website:
-
-              http://focus.ti.com/lit/ds/symlink/ads1115.pdf
-
-Authors:
-       Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
-
-Description
------------
-
-This driver implements support for the Texas Instruments ADS1015/ADS1115.
-
-This device is a 12/16-bit A-D converter with 4 inputs.
-
-The inputs can be used single ended or in certain differential combinations.
-
-The inputs can be made available by 8 sysfs input files in0_input - in7_input:
-
-  - in0: Voltage over AIN0 and AIN1.
-  - in1: Voltage over AIN0 and AIN3.
-  - in2: Voltage over AIN1 and AIN3.
-  - in3: Voltage over AIN2 and AIN3.
-  - in4: Voltage over AIN0 and GND.
-  - in5: Voltage over AIN1 and GND.
-  - in6: Voltage over AIN2 and GND.
-  - in7: Voltage over AIN3 and GND.
-
-Which inputs are available can be configured using platform data or devicetree.
-
-By default all inputs are exported.
-
-Platform Data
--------------
-
-In linux/platform_data/ads1015.h platform data is defined, channel_data contains
-configuration data for the used input combinations:
-
-- pga is the programmable gain amplifier (values are full scale)
-
-    - 0: +/- 6.144 V
-    - 1: +/- 4.096 V
-    - 2: +/- 2.048 V
-    - 3: +/- 1.024 V
-    - 4: +/- 0.512 V
-    - 5: +/- 0.256 V
-
-- data_rate in samples per second
-
-    - 0: 128
-    - 1: 250
-    - 2: 490
-    - 3: 920
-    - 4: 1600
-    - 5: 2400
-    - 6: 3300
-
-Example::
-
-  struct ads1015_platform_data data = {
-       .channel_data = {
-               [2] = { .enabled = true, .pga = 1, .data_rate = 0 },
-               [4] = { .enabled = true, .pga = 4, .data_rate = 5 },
-       }
-  };
-
-In this case only in2_input (FS +/- 4.096 V, 128 SPS) and in4_input
-(FS +/- 0.512 V, 2400 SPS) would be created.
-
-Devicetree
-----------
-
-Configuration is also possible via devicetree:
-Documentation/devicetree/bindings/hwmon/ads1015.txt
index ee090e51653a65d6302cf979baaf51ff660e62f0..8147c3f218bfb11167ca160ebc69c3d33b5b07f4 100644 (file)
@@ -30,7 +30,6 @@ Hardware Monitoring Kernel Drivers
    adm1031
    adm1275
    adm9240
-   ads1015
    ads7828
    adt7410
    adt7411
@@ -130,6 +129,7 @@ Hardware Monitoring Kernel Drivers
    pcf8591
    pmbus
    powr1220
+   pxe1610
    pwm-fan
    raspberrypi-hwmon
    sch5627
diff --git a/Documentation/hwmon/inspur-ipsps1.rst b/Documentation/hwmon/inspur-ipsps1.rst
new file mode 100644 (file)
index 0000000..2b871ae
--- /dev/null
@@ -0,0 +1,79 @@
+Kernel driver inspur-ipsps1
+===========================
+
+Supported chips:
+
+  * Inspur Power System power supply unit
+
+Author: John Wang <wangzqbj@inspur.com>
+
+Description
+-----------
+
+This driver supports Inspur Power System power supplies. This driver
+is a client to the core PMBus driver.
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+Sysfs entries
+-------------
+
+The following attributes are supported:
+
+======================= ======================================================
+curr1_input            Measured input current
+curr1_label            "iin"
+curr1_max              Maximum current
+curr1_max_alarm                Current high alarm
+curr2_input            Measured output current in mA.
+curr2_label            "iout1"
+curr2_crit             Critical maximum current
+curr2_crit_alarm       Current critical high alarm
+curr2_max              Maximum current
+curr2_max_alarm                Current high alarm
+
+fan1_alarm             Fan 1 warning.
+fan1_fault             Fan 1 fault.
+fan1_input             Fan 1 speed in RPM.
+
+in1_alarm              Input voltage under-voltage alarm.
+in1_input              Measured input voltage in mV.
+in1_label              "vin"
+in2_input              Measured output voltage in mV.
+in2_label              "vout1"
+in2_lcrit              Critical minimum output voltage
+in2_lcrit_alarm                Output voltage critical low alarm
+in2_max                        Maximum output voltage
+in2_max_alarm          Output voltage high alarm
+in2_min                        Minimum output voltage
+in2_min_alarm          Output voltage low alarm
+
+power1_alarm           Input fault or alarm.
+power1_input           Measured input power in uW.
+power1_label           "pin"
+power1_max             Input power limit
+power2_max_alarm       Output power high alarm
+power2_max             Output power limit
+power2_input           Measured output power in uW.
+power2_label           "pout"
+
+temp[1-3]_input                Measured temperature
+temp[1-2]_max          Maximum temperature
+temp[1-3]_max_alarm    Temperature high alarm
+
+vendor                 Manufacturer name
+model                  Product model
+part_number            Product part number
+serial_number          Product serial number
+fw_version             Firmware version
+hw_version             Hardware version
+mode                   Work mode. Can be set to active or
+                       standby; when set to standby, the PSU will
+                       automatically switch between standby
+                       and redundancy mode.
+======================= ======================================================
index ba8acbd2a6cb3f0c1945e3f9fa4d5b2699fcc27e..e749f827c00236ba7d5ada47d4ec34b1d1c86db6 100644 (file)
@@ -119,9 +119,9 @@ Supported chips:
 
               http://www.ti.com/product/tmp275
 
-  * NXP LM75B
+  * NXP LM75B, PCT2075
 
-    Prefix: 'lm75b'
+    Prefix: 'lm75b', 'pct2075'
 
     Addresses scanned: none
 
@@ -129,6 +129,8 @@ Supported chips:
 
               http://www.nxp.com/documents/data_sheet/LM75B.pdf
 
+               http://www.nxp.com/docs/en/data-sheet/PCT2075.pdf
+
 Author: Frodo Looijaard <frodol@dds.nl>
 
 Description
diff --git a/Documentation/hwmon/pxe1610 b/Documentation/hwmon/pxe1610
deleted file mode 100644 (file)
index 211cede..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-Kernel driver pxe1610
-=====================
-
-Supported chips:
-  * Infineon PXE1610
-    Prefix: 'pxe1610'
-    Addresses scanned: -
-    Datasheet: Datasheet is not publicly available.
-
-  * Infineon PXE1110
-    Prefix: 'pxe1110'
-    Addresses scanned: -
-    Datasheet: Datasheet is not publicly available.
-
-  * Infineon PXM1310
-    Prefix: 'pxm1310'
-    Addresses scanned: -
-    Datasheet: Datasheet is not publicly available.
-
-Author: Vijay Khemka <vijaykhemka@fb.com>
-
-
-Description
------------
-
-PXE1610/PXE1110 are Multi-rail/Multiphase Digital Controllers
-and compliant to
-       -- Intel VR13 DC-DC converter specifications.
-       -- Intel SVID protocol.
-Used for Vcore power regulation for Intel VR13 based microprocessors
-       -- Servers, Workstations, and High-end desktops
-
-PXM1310 is a Multi-rail Controller and it is compliant to
-       -- Intel VR13 DC-DC converter specifications.
-       -- Intel SVID protocol.
-Used for DDR3/DDR4 Memory power regulation for Intel VR13 and
-IMVP8 based systems
-
-
-Usage Notes
------------
-
-This driver does not probe for PMBus devices. You will have
-to instantiate devices explicitly.
-
-Example: the following commands will load the driver for an PXE1610
-at address 0x70 on I2C bus #4:
-
-# modprobe pxe1610
-# echo pxe1610 0x70 > /sys/bus/i2c/devices/i2c-4/new_device
-
-It can also be instantiated by declaring in device tree
-
-
-Sysfs attributes
-----------------
-
-curr1_label            "iin"
-curr1_input            Measured input current
-curr1_alarm            Current high alarm
-
-curr[2-4]_label                "iout[1-3]"
-curr[2-4]_input                Measured output current
-curr[2-4]_crit         Critical maximum current
-curr[2-4]_crit_alarm   Current critical high alarm
-
-in1_label              "vin"
-in1_input              Measured input voltage
-in1_crit               Critical maximum input voltage
-in1_crit_alarm         Input voltage critical high alarm
-
-in[2-4]_label          "vout[1-3]"
-in[2-4]_input          Measured output voltage
-in[2-4]_lcrit          Critical minimum output voltage
-in[2-4]_lcrit_alarm    Output voltage critical low alarm
-in[2-4]_crit           Critical maximum output voltage
-in[2-4]_crit_alarm     Output voltage critical high alarm
-
-power1_label           "pin"
-power1_input           Measured input power
-power1_alarm           Input power high alarm
-
-power[2-4]_label       "pout[1-3]"
-power[2-4]_input       Measured output power
-
-temp[1-3]_input                Measured temperature
-temp[1-3]_crit         Critical high temperature
-temp[1-3]_crit_alarm   Chip temperature critical high alarm
-temp[1-3]_max          Maximum temperature
-temp[1-3]_max_alarm    Chip temperature high alarm
diff --git a/Documentation/hwmon/pxe1610.rst b/Documentation/hwmon/pxe1610.rst
new file mode 100644 (file)
index 0000000..4f23888
--- /dev/null
@@ -0,0 +1,107 @@
+Kernel driver pxe1610
+=====================
+
+Supported chips:
+
+  * Infineon PXE1610
+
+    Prefix: 'pxe1610'
+
+    Addresses scanned: -
+
+    Datasheet: Datasheet is not publicly available.
+
+  * Infineon PXE1110
+
+    Prefix: 'pxe1110'
+
+    Addresses scanned: -
+
+    Datasheet: Datasheet is not publicly available.
+
+  * Infineon PXM1310
+
+    Prefix: 'pxm1310'
+
+    Addresses scanned: -
+
+    Datasheet: Datasheet is not publicly available.
+
+Author: Vijay Khemka <vijaykhemka@fb.com>
+
+
+Description
+-----------
+
+PXE1610/PXE1110 are Multi-rail/Multiphase Digital Controllers
+compliant with:
+
+       - Intel VR13 DC-DC converter specifications.
+       - Intel SVID protocol.
+
+Used for Vcore power regulation for Intel VR13 based microprocessors in:
+
+       - Servers, Workstations, and High-end desktops
+
+PXM1310 is a Multi-rail Controller compliant with:
+
+       - Intel VR13 DC-DC converter specifications.
+       - Intel SVID protocol.
+
+It is used for DDR3/DDR4 memory power regulation for Intel VR13 and
+IMVP8 based systems.
+
+
+Usage Notes
+-----------
+
+This driver does not probe for PMBus devices. You will have
+to instantiate devices explicitly.
+
+Example: the following commands will load the driver for a PXE1610
+at address 0x70 on I2C bus #4::
+
+    # modprobe pxe1610
+    # echo pxe1610 0x70 > /sys/bus/i2c/devices/i2c-4/new_device
+
+It can also be instantiated by declaring it in the device tree.
+
+
+Sysfs attributes
+----------------
+
+======================  ====================================
+curr1_label            "iin"
+curr1_input            Measured input current
+curr1_alarm            Current high alarm
+
+curr[2-4]_label                "iout[1-3]"
+curr[2-4]_input                Measured output current
+curr[2-4]_crit         Critical maximum current
+curr[2-4]_crit_alarm   Current critical high alarm
+
+in1_label              "vin"
+in1_input              Measured input voltage
+in1_crit               Critical maximum input voltage
+in1_crit_alarm         Input voltage critical high alarm
+
+in[2-4]_label          "vout[1-3]"
+in[2-4]_input          Measured output voltage
+in[2-4]_lcrit          Critical minimum output voltage
+in[2-4]_lcrit_alarm    Output voltage critical low alarm
+in[2-4]_crit           Critical maximum output voltage
+in[2-4]_crit_alarm     Output voltage critical high alarm
+
+power1_label           "pin"
+power1_input           Measured input power
+power1_alarm           Input power high alarm
+
+power[2-4]_label       "pout[1-3]"
+power[2-4]_input       Measured output power
+
+temp[1-3]_input                Measured temperature
+temp[1-3]_crit         Critical high temperature
+temp[1-3]_crit_alarm   Chip temperature critical high alarm
+temp[1-3]_max          Maximum temperature
+temp[1-3]_max_alarm    Chip temperature high alarm
+======================  ====================================
index aa116332ba26087a35f72b1541ee388ac21b7d1f..9b0f1eee5bf2adad564efba59a791da5039400d4 100644 (file)
@@ -19,7 +19,17 @@ Supported chips:
 
     Addresses scanned: none
 
-    Datasheet: Not publicly available
+    Datasheet: http://www.sensirion.com/file/datasheet_shtw1
+
+
+
+  * Sensirion SHTC3
+
+    Prefix: 'shtc3'
+
+    Addresses scanned: none
+
+    Datasheet: http://www.sensirion.com/file/datasheet_shtc3
 
 
 
@@ -30,10 +40,9 @@ Author:
 Description
 -----------
 
-This driver implements support for the Sensirion SHTC1 chip, a humidity and
-temperature sensor. Temperature is measured in degrees celsius, relative
-humidity is expressed as a percentage. Driver can be used as well for SHTW1
-chip, which has the same electrical interface.
+This driver implements support for the Sensirion SHTC1, SHTW1, and SHTC3
+chips, which are humidity and temperature sensors. Temperature is measured
+in degrees Celsius; relative humidity is expressed as a percentage.
 
 The device communicates with the I2C protocol. All sensors are set to I2C
 address 0x70. See Documentation/i2c/instantiating-devices for methods to
index 452fc28d8e0bb3f7efefc6f20c5a0786ae72b22e..9a218ea996d8633e8705d4f9e8f41f0e90f3b2ad 100644 (file)
@@ -20,6 +20,10 @@ increase the chances of your change being accepted.
   errors, no warnings, and few if any check messages. If there are any
   messages, please be prepared to explain.
 
+* Please use the standard multi-line comment style. Do not mix C and C++
+  style comments in a single driver (with the exception of the SPDX license
+  identifier).
+
 * If your patch generates checkpatch errors, warnings, or check messages,
   please refrain from explanations such as "I prefer that coding style".
   Keep in mind that each unnecessary message helps hiding a real problem,
@@ -120,8 +124,8 @@ increase the chances of your change being accepted.
   completely initialize your chip and your driver first, then register with
   the hwmon subsystem.
 
-* Use devm_hwmon_device_register_with_groups() or, if your driver needs a remove
-  function, hwmon_device_register_with_groups() to register your driver with the
+* Use devm_hwmon_device_register_with_info() or, if your driver needs a remove
+  function, hwmon_device_register_with_info() to register your driver with the
   hwmon subsystem. Try using devm_add_action() instead of a remove function if
   possible. Do not use hwmon_device_register().
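+
+  For reference, a minimal sketch of such a registration for a hypothetical
+  chip with a single temperature channel (all names here are made up):
+
+      static umode_t my_is_visible(const void *data,
+                                   enum hwmon_sensor_types type,
+                                   u32 attr, int channel)
+      {
+              return 0444;
+      }
+
+      static int my_read(struct device *dev, enum hwmon_sensor_types type,
+                         u32 attr, int channel, long *val)
+      {
+              *val = 42000;   /* temperature in millidegrees Celsius */
+              return 0;
+      }
+
+      static const struct hwmon_ops my_hwmon_ops = {
+              .is_visible = my_is_visible,
+              .read = my_read,
+      };
+
+      static const struct hwmon_channel_info *my_info[] = {
+              HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+              NULL
+      };
+
+      static const struct hwmon_chip_info my_chip_info = {
+              .ops = &my_hwmon_ops,
+              .info = my_info,
+      };
+
+      /* in probe() */
+      hwmon_dev = devm_hwmon_device_register_with_info(dev, "mychip",
+                                                        drvdata,
+                                                        &my_chip_info, NULL);
+      return PTR_ERR_OR_ZERO(hwmon_dev);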
 
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
new file mode 100644 (file)
index 0000000..4026363
--- /dev/null
@@ -0,0 +1,279 @@
+Embargoed hardware issues
+=========================
+
+Scope
+-----
+
+Hardware issues which result in security problems are a different category
+of security bugs than pure software bugs which only affect the Linux
+kernel.
+
+Hardware issues like Meltdown, Spectre, L1TF etc. must be treated
+differently because they usually affect all Operating Systems ("OS") and
+therefore need coordination across different OS vendors, distributions,
+hardware vendors and other parties. For some of the issues, software
+mitigations can depend on microcode or firmware updates, which need further
+coordination.
+
+.. _Contact:
+
+Contact
+-------
+
+The Linux kernel hardware security team is separate from the regular Linux
+kernel security team.
+
+The team only handles the coordination of embargoed hardware security
+issues.  Reports of pure software security bugs in the Linux kernel are not
+handled by this team and the reporter will be guided to contact the regular
+Linux kernel security team (:ref:`Documentation/admin-guide/
+<securitybugs>`) instead.
+
+The team can be contacted by email at <hardware-security@kernel.org>. This
+is a private list of security officers who will help you to coordinate an
+issue according to our documented process.
+
+The list is encrypted and email to the list can be sent by either PGP or
+S/MIME encrypted and must be signed with the reporter's PGP key or S/MIME
+certificate. The list's PGP key and S/MIME certificate are available from
+https://www.kernel.org/....
+
+While hardware security issues are often handled by the affected hardware
+vendor, we welcome contact from researchers or individuals who have
+identified a potential hardware flaw.
+
+Hardware security officers
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The current team of hardware security officers:
+
+  - Linus Torvalds (Linux Foundation Fellow)
+  - Greg Kroah-Hartman (Linux Foundation Fellow)
+  - Thomas Gleixner (Linux Foundation Fellow)
+
+Operation of mailing-lists
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The encrypted mailing-lists which are used in our process are hosted on
+Linux Foundation's IT infrastructure. By providing this service Linux
+Foundation's director of IT Infrastructure security technically has the
+ability to access the embargoed information, but is obliged to
+confidentiality by his employment contract. Linux Foundation's director of
+IT Infrastructure security is also responsible for the kernel.org
+infrastructure.
+
+The Linux Foundation's current director of IT Infrastructure security is
+Konstantin Ryabitsev.
+
+
+Non-disclosure agreements
+-------------------------
+
+The Linux kernel hardware security team is not a formal body and therefore
+unable to enter into any non-disclosure agreements.  The kernel community
+is aware of the sensitive nature of such issues and offers a Memorandum of
+Understanding instead.
+
+
+Memorandum of Understanding
+---------------------------
+
+The Linux kernel community has a deep understanding of the requirement to
+keep hardware security issues under embargo for coordination between
+different OS vendors, distributors, hardware vendors and other parties.
+
+The Linux kernel community has successfully handled hardware security
+issues in the past and has the necessary mechanisms in place to allow
+community compliant development under embargo restrictions.
+
+The Linux kernel community has a dedicated hardware security team for
+initial contact, which oversees the process of handling such issues under
+embargo rules.
+
+The hardware security team identifies the developers (domain experts) who
+will form the initial response team for a particular issue. The initial
+response team can bring in further developers (domain experts) to address
+the issue in the best technical way.
+
+All involved developers pledge to adhere to the embargo rules and to keep
+the received information confidential. Violation of the pledge will lead to
+immediate exclusion from the current issue and removal from all related
+mailing-lists. In addition, the hardware security team will also exclude
+the offender from future issues. The impact of this consequence is a highly
+effective deterrent in our community. In case a violation happens the
+hardware security team will inform the involved parties immediately. If you
+or anyone becomes aware of a potential violation, please report it
+immediately to the Hardware security officers.
+
+
+Process
+^^^^^^^
+
+Due to the globally distributed nature of Linux kernel development, it is
+almost impossible to address hardware security issues in face-to-face
+meetings.  Phone conferences are hard to coordinate due to time zones and
+other factors and should only be used when absolutely necessary. Encrypted
+email has been proven to be the most effective and secure communication
+method for these types of issues.
+
+Start of Disclosure
+"""""""""""""""""""
+
+Disclosure starts by contacting the Linux kernel hardware security team by
+email. This initial contact should contain a description of the problem and
+a list of any known affected hardware. If your organization builds or
+distributes the affected hardware, we encourage you to also consider what
+other hardware could be affected.
+
+The hardware security team will provide an incident-specific encrypted
+mailing-list which will be used for initial discussion with the reporter,
+further disclosure and coordination.
+
+The hardware security team will provide the disclosing party a list of
+developers (domain experts) who should be informed initially about the
+issue after confirming with the developers that they will adhere to this
+Memorandum of Understanding and the documented process. These developers
+form the initial response team and will be responsible for handling the
+issue after initial contact. The hardware security team is supporting the
+response team, but is not necessarily involved in the mitigation
+development process.
+
+While individual developers might be covered by a non-disclosure agreement
+via their employer, they cannot enter individual non-disclosure agreements
+in their role as Linux kernel developers. They will, however, agree to
+adhere to this documented process and the Memorandum of Understanding.
+
+
+Disclosure
+""""""""""
+
+The disclosing party provides detailed information to the initial response
+team via the specific encrypted mailing-list.
+
+From our experience the technical documentation of these issues is usually
+a sufficient starting point and further technical clarification is best
+done via email.
+
+Mitigation development
+""""""""""""""""""""""
+
+The initial response team sets up an encrypted mailing-list or repurposes
+an existing one if appropriate. The disclosing party should provide a list
+of contacts for all other parties who have already been, or should be,
+informed about the issue. The response team contacts these parties so they
+can name experts who should be subscribed to the mailing-list.
+
+Using a mailing-list is close to the normal Linux development process and
+has been successfully used in developing mitigations for various hardware
+security issues in the past.
+
+The mailing-list operates in the same way as normal Linux development.
+Patches are posted, discussed and reviewed and if agreed on applied to a
+non-public git repository which is only accessible to the participating
+developers via a secure connection. The repository contains the main
+development branch against the mainline kernel and backport branches for
+stable kernel versions as necessary.
+
+The initial response team will identify further experts from the Linux
+kernel developer community as needed and inform the disclosing party about
+their participation. Bringing in experts can happen at any time of the
+development process and often needs to be handled in a timely manner.
+
+Coordinated release
+"""""""""""""""""""
+
+The involved parties will negotiate the date and time when the embargo
+ends. At that point the prepared mitigations are integrated into the
+relevant kernel trees and published.
+
+While we understand that hardware security issues need coordinated embargo
+time, the embargo time should be constrained to the minimum time which is
+required for all involved parties to develop, test and prepare the
+mitigations. Extending embargo time artificially to meet conference talk
+dates or other non-technical reasons creates more work and burden for the
+involved developers and response teams, as the patches need to be kept
+up to date in order to follow the ongoing upstream kernel development,
+which might create conflicting changes.
+
+CVE assignment
+""""""""""""""
+
+Neither the hardware security team nor the initial response team assign
+CVEs, nor are CVEs required for the development process. If CVEs are
+provided by the disclosing party they can be used for documentation
+purposes.
+
+Process ambassadors
+-------------------
+
+For assistance with this process we have established ambassadors in various
+organizations, who can answer questions about or provide guidance on the
+reporting process and further handling. Ambassadors are not involved in the
+disclosure of a particular issue, unless requested by a response team or by
+an involved disclosed party. The current ambassadors list:
+
+  ============= ========================================================
+  ARM
+  AMD
+  IBM
+  Intel
+  Qualcomm     Trilok Soni <tsoni@codeaurora.org>
+
+  Microsoft    Sasha Levin <sashal@kernel.org>
+  VMware
+  Xen          Andrew Cooper <andrew.cooper3@citrix.com>
+
+  Canonical    Tyler Hicks <tyhicks@canonical.com>
+  Debian       Ben Hutchings <ben@decadent.org.uk>
+  Oracle       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+  Red Hat      Josh Poimboeuf <jpoimboe@redhat.com>
+  SUSE         Jiri Kosina <jkosina@suse.cz>
+
+  Amazon
+  Google       Kees Cook <keescook@chromium.org>
+  ============= ========================================================
+
+If you want your organization to be added to the ambassadors list, please
+contact the hardware security team. The nominated ambassador has to
+understand and support our process fully and is ideally well connected in
+the Linux kernel community.
+
+Encrypted mailing-lists
+-----------------------
+
+We use encrypted mailing-lists for communication. The operating principle
+of these lists is that email sent to the list is encrypted either with the
+list's PGP key or with the list's S/MIME certificate. The mailing-list
+software decrypts the email and re-encrypts it individually for each
+subscriber with the subscriber's PGP key or S/MIME certificate. Details
+about the mailing-list software and the setup which is used to ensure the
+security of the lists and protection of the data can be found here:
+https://www.kernel.org/....
+
+List keys
+^^^^^^^^^
+
+For initial contact see :ref:`Contact`. For incident specific mailing-lists
+the key and S/MIME certificate are conveyed to the subscribers by email
+sent from the specific list.
+
+Subscription to incident specific lists
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Subscription is handled by the response teams. Disclosed parties who want
+to participate in the communication send a list of potential subscribers to
+the response team so the response team can validate subscription requests.
+
+Each subscriber needs to send a subscription request to the response team
+by email. The email must be signed with the subscriber's PGP key or S/MIME
+certificate. If a PGP key is used, it must be available from a public key
+server and is ideally connected to the Linux kernel's PGP web of trust. See
+also: https://www.kernel.org/signature.html.
+
+The response team verifies that the subscriber request is valid and adds
+the subscriber to the list. After subscription the subscriber will receive
+email from the mailing-list which is signed either with the list's PGP key
+or the list's S/MIME certificate. The subscriber's email client can extract
+the PGP key or the S/MIME certificate from the signature so the subscriber
+can send encrypted email to the list.
+
index 878ebfda7eeff378a2fee48e3b361aa6b3587896..e2c9ffc682c520c0a91036372bcdc8d707c2a78a 100644 (file)
@@ -45,6 +45,7 @@ Other guides to the community that are of interest to most developers are:
    submit-checklist
    kernel-docs
    deprecated
+   embargoed-hardware-issues
 
 These are some overall technical guides that have been put here for now for
 lack of a better place.
index 1b73fea23b39f11fee9be7207fb8b359c0e7e73f..14b1492f689bbe71c983a1baba1756d4e79e4204 100644 (file)
@@ -18,7 +18,7 @@ The following 64-byte header is present in decompressed Linux kernel image.
        u32 res1  = 0;            /* Reserved */
        u64 res2  = 0;            /* Reserved */
        u64 magic = 0x5643534952; /* Magic number, little endian, "RISCV" */
-       u32 res3;                 /* Reserved for additional RISC-V specific header */
+       u32 magic2 = 0x05435352;  /* Magic number 2, little endian, "RSC\x05" */
        u32 res4;                 /* Reserved for PE COFF offset */
 
 This header format is compliant with PE/COFF header and largely inspired from
@@ -37,13 +37,14 @@ Notes:
        Bits 16:31 - Major version
 
   This preserves compatibility across newer and older version of the header.
-  The current version is defined as 0.1.
+  The current version is defined as 0.2.
 
-- res3 is reserved for offset to any other additional fields. This makes the
-  header extendible in future. One example would be to accommodate ISA
-  extension for RISC-V in future. For current version, it is set to be zero.
+- The "magic" field is deprecated as of version 0.2.  In a future
+  release, it may be removed.  This originally should have matched up
+  with the ARM64 header "magic" field, but unfortunately does not.
+  The "magic2" field replaces it, matching up with the ARM64 header.
 
-- In current header, the flag field has only one field.
+- In the current header, the flags field has only one flag defined:
        Bit 0: Kernel endianness. 1 if BE, 0 if LE.
 
 - Image size is mandatory for boot loader to load kernel image. Booting will
index 3296533e54cfbcd08e2b27c4c79a3c2ce19cab96..487852fda33e959cf8d24a32d949e6fe0ad5bf92 100644 (file)
@@ -6,3 +6,4 @@ Trusted Platform Module documentation
 
    tpm_vtpm_proxy
    xen-tpmfront
+   tpm_ftpm_tee
diff --git a/Documentation/security/tpm/tpm_ftpm_tee.rst b/Documentation/security/tpm/tpm_ftpm_tee.rst
new file mode 100644 (file)
index 0000000..8c2bae1
--- /dev/null
@@ -0,0 +1,27 @@
+=============================================
+Firmware TPM Driver
+=============================================
+
+This document describes the firmware Trusted Platform Module (fTPM)
+device driver.
+
+Introduction
+============
+
+This driver is a shim for firmware implemented in ARM's TrustZone
+environment. The driver allows programs to interact with the TPM in the same
+way they would interact with a hardware TPM.
+
+Design
+======
+
+The driver acts as a thin layer that passes commands to and from a TPM
+implemented in firmware. The driver itself doesn't contain much logic and is
+used more like a dumb pipe between firmware and kernel/userspace.
+
+The firmware itself is based on the following paper:
+https://www.microsoft.com/en-us/research/wp-content/uploads/2017/06/ftpm1.pdf
+
+When the driver is loaded it will expose ``/dev/tpmX`` character devices to
+userspace which will enable userspace to communicate with the firmware TPM
+through this device.
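+
+As an illustration of that interface, a minimal userspace sketch (not part of
+this driver) which sends a plain TPM2_GetRandom command, assuming the device
+shows up as /dev/tpm0:
+
+  #include <fcntl.h>
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          /* TPM2_GetRandom, no sessions, asking for 8 random bytes */
+          uint8_t cmd[] = {
+                  0x80, 0x01,             /* TPM_ST_NO_SESSIONS */
+                  0x00, 0x00, 0x00, 0x0c, /* commandSize = 12 */
+                  0x00, 0x00, 0x01, 0x7b, /* TPM_CC_GetRandom */
+                  0x00, 0x08,             /* bytesRequested = 8 */
+          };
+          uint8_t resp[64];
+          ssize_t n;
+          int fd = open("/dev/tpm0", O_RDWR);
+
+          if (fd < 0)
+                  return 1;
+          if (write(fd, cmd, sizeof(cmd)) != sizeof(cmd))
+                  return 1;
+          n = read(fd, resp, sizeof(resp));
+          printf("received %zd response bytes\n", n);
+          close(fd);
+          return 0;
+  }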
index 73b057e566e6c499d013ff5b0034510c3a498f3a..1547e033d3547192bb5016fa4ca06f470821ab25 100644 (file)
@@ -517,14 +517,6 @@ W: http://ez.analog.com/community/linux-device-drivers
 S:     Supported
 F:     drivers/video/backlight/adp8860_bl.c
 
-ADS1015 HARDWARE MONITOR DRIVER
-M:     Dirk Eibach <eibach@gdsys.de>
-L:     linux-hwmon@vger.kernel.org
-S:     Maintained
-F:     Documentation/hwmon/ads1015.rst
-F:     drivers/hwmon/ads1015.c
-F:     include/linux/platform_data/ads1015.h
-
 ADT746X FAN DRIVER
 M:     Colin Leroy <colin@colino.net>
 S:     Maintained
@@ -683,7 +675,7 @@ S:  Maintained
 F:     drivers/crypto/sunxi-ss/
 
 ALLWINNER VPU DRIVER
-M:     Maxime Ripard <maxime.ripard@bootlin.com>
+M:     Maxime Ripard <mripard@kernel.org>
 M:     Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 L:     linux-media@vger.kernel.org
 S:     Maintained
@@ -1350,8 +1342,7 @@ M:        Will Deacon <will@kernel.org>
 R:     Robin Murphy <robin.murphy@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-F:     drivers/iommu/arm-smmu.c
-F:     drivers/iommu/arm-smmu-v3.c
+F:     drivers/iommu/arm-smmu*
 F:     drivers/iommu/io-pgtable-arm.c
 F:     drivers/iommu/io-pgtable-arm-v7s.c
 
@@ -1408,7 +1399,7 @@ S:        Maintained
 F:     drivers/clk/sunxi/
 
 ARM/Allwinner sunXi SoC support
-M:     Maxime Ripard <maxime.ripard@bootlin.com>
+M:     Maxime Ripard <mripard@kernel.org>
 M:     Chen-Yu Tsai <wens@csie.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -3577,7 +3568,7 @@ F:        Documentation/filesystems/caching/cachefiles.txt
 F:     fs/cachefiles/
 
 CADENCE MIPI-CSI2 BRIDGES
-M:     Maxime Ripard <maxime.ripard@bootlin.com>
+M:     Maxime Ripard <mripard@kernel.org>
 L:     linux-media@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/media/cdns,*.txt
@@ -4290,6 +4281,14 @@ S:       Supported
 F:     drivers/cpuidle/cpuidle-exynos.c
 F:     arch/arm/mach-exynos/pm.c
 
+CPUIDLE DRIVER - ARM PSCI
+M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M:     Sudeep Holla <sudeep.holla@arm.com>
+L:     linux-pm@vger.kernel.org
+L:     linux-arm-kernel@lists.infradead.org
+S:     Supported
+F:     drivers/cpuidle/cpuidle-psci.c
+
 CPU IDLE TIME MANAGEMENT FRAMEWORK
 M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 M:     Daniel Lezcano <daniel.lezcano@linaro.org>
@@ -5295,7 +5294,7 @@ F:        include/linux/vga*
 
 DRM DRIVERS AND MISC GPU PATCHES
 M:     Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
-M:     Maxime Ripard <maxime.ripard@bootlin.com>
+M:     Maxime Ripard <mripard@kernel.org>
 M:     Sean Paul <sean@poorly.run>
 W:     https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
 S:     Maintained
@@ -5308,7 +5307,7 @@ F:        include/uapi/drm/drm*
 F:     include/linux/vga*
 
 DRM DRIVERS FOR ALLWINNER A10
-M:     Maxime Ripard  <maxime.ripard@bootlin.com>
+M:     Maxime Ripard <mripard@kernel.org>
 L:     dri-devel@lists.freedesktop.org
 S:     Supported
 F:     drivers/gpu/drm/sun4i/
@@ -5761,6 +5760,11 @@ S:       Supported
 F:     drivers/edac/aspeed_edac.c
 F:     Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt
 
+EDAC-BLUEFIELD
+M:     Shravan Kumar Ramani <sramani@mellanox.com>
+S:     Supported
+F:     drivers/edac/bluefield_edac.c
+
 EDAC-CALXEDA
 M:     Robert Richter <rric@kernel.org>
 L:     linux-edac@vger.kernel.org
@@ -5785,10 +5789,11 @@ F:      drivers/edac/thunderx_edac*
 EDAC-CORE
 M:     Borislav Petkov <bp@alien8.de>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
+M:     Tony Luck <tony.luck@intel.com>
 R:     James Morse <james.morse@arm.com>
+R:     Robert Richter <rrichter@marvell.com>
 L:     linux-edac@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras.git edac-for-next
 S:     Supported
 F:     Documentation/admin-guide/ras.rst
 F:     Documentation/driver-api/edac.rst
@@ -6331,15 +6336,6 @@ S:       Odd Fixes
 L:     linux-block@vger.kernel.org
 F:     drivers/block/floppy.c
 
-FMC SUBSYSTEM
-M:     Alessandro Rubini <rubini@gnudd.com>
-W:     http://www.ohwr.org/projects/fmc-bus
-S:     Supported
-F:     drivers/fmc/
-F:     include/linux/fmc*.h
-F:     include/linux/ipmi-fru.h
-K:     fmc_d.*register
-
 FPGA MANAGER FRAMEWORK
 M:     Moritz Fischer <mdf@kernel.org>
 L:     linux-fpga@vger.kernel.org
@@ -6439,6 +6435,7 @@ M:        Frank Li <Frank.li@nxp.com>
 L:     linux-arm-kernel@lists.infradead.org
 S:     Maintained
 F:     drivers/perf/fsl_imx8_ddr_perf.c
+F:     Documentation/admin-guide/perf/imx-ddr.rst
 F:     Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
 
 FREESCALE IMX I2C DRIVER
@@ -7520,7 +7517,7 @@ I2C MV64XXX MARVELL AND ALLWINNER DRIVER
 M:     Gregory CLEMENT <gregory.clement@bootlin.com>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
-F:     Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+F:     Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
 F:     drivers/i2c/busses/i2c-mv64xxx.c
 
 I2C OVER PARALLEL PORT
@@ -8392,12 +8389,6 @@ F:       Documentation/x86/intel_txt.rst
 F:     include/linux/tboot.h
 F:     arch/x86/kernel/tboot.c
 
-INTEL-MID GPIO DRIVER
-M:     David Cohen <david.a.cohen@linux.intel.com>
-L:     linux-gpio@vger.kernel.org
-S:     Maintained
-F:     drivers/gpio/gpio-intel-mid.c
-
 INTERCONNECT API
 M:     Georgi Djakov <georgi.djakov@linaro.org>
 L:     linux-pm@vger.kernel.org
@@ -8461,11 +8452,6 @@ S:       Maintained
 F:     fs/io_uring.c
 F:     include/uapi/linux/io_uring.h
 
-IP MASQUERADING
-M:     Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
-S:     Maintained
-F:     net/ipv4/netfilter/ipt_MASQUERADE.c
-
 IPMI SUBSYSTEM
 M:     Corey Minyard <minyard@acm.org>
 L:     openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -8945,7 +8931,7 @@ F:        security/keys/encrypted-keys/
 
 KEYS-TRUSTED
 M:     James Bottomley <jejb@linux.ibm.com>
-M:      Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+M:     Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
 M:     Mimi Zohar <zohar@linux.ibm.com>
 L:     linux-integrity@vger.kernel.org
 L:     keyrings@vger.kernel.org
@@ -9241,6 +9227,18 @@ F:       include/linux/nd.h
 F:     include/linux/libnvdimm.h
 F:     include/uapi/linux/ndctl.h
 
+LICENSES and SPDX stuff
+M:     Thomas Gleixner <tglx@linutronix.de>
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+L:     linux-spdx@vger.kernel.org
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/spdx.git
+F:     COPYING
+F:     Documentation/process/license-rules.rst
+F:     LICENSES/
+F:     scripts/spdxcheck-test.sh
+F:     scripts/spdxcheck.py
+
 LIGHTNVM PLATFORM SUPPORT
 M:     Matias Bjorling <mb@lightnvm.io>
 W:     http://github/OpenChannelSSD
@@ -11093,7 +11091,7 @@ NET_FAILOVER MODULE
 M:     Sridhar Samudrala <sridhar.samudrala@intel.com>
 L:     netdev@vger.kernel.org
 S:     Supported
-F:     driver/net/net_failover.c
+F:     drivers/net/net_failover.c
 F:     include/net/net_failover.h
 F:     Documentation/networking/net_failover.rst
 
@@ -14485,6 +14483,7 @@ F:      drivers/net/phy/phylink.c
 F:     drivers/net/phy/sfp*
 F:     include/linux/phylink.h
 F:     include/linux/sfp.h
+K:     phylink
 
 SGI GRU DRIVER
 M:     Dimitri Sivanich <sivanich@sgi.com>
@@ -17262,6 +17261,7 @@ F:      Documentation/power/regulator/
 F:     drivers/regulator/
 F:     include/dt-bindings/regulator/
 F:     include/linux/regulator/
+K:     regulator_get_optional
 
 VRF
 M:     David Ahern <dsa@cumulusnetworks.com>
@@ -17698,8 +17698,7 @@ F:      include/uapi/linux/dqblk_xfs.h
 F:     include/uapi/linux/fsmap.h
 
 XILINX AXI ETHERNET DRIVER
-M:     Anirudha Sarangi <anirudh@xilinx.com>
-M:     John Linn <John.Linn@xilinx.com>
+M:     Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
 S:     Maintained
 F:     drivers/net/ethernet/xilinx/xilinx_axienet*
 
index f125625efd6013efa2873b7f72d2bc073bba37a1..4262ef93a294f4a8dd18c7a8d810e00444ee8c09 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Bobtail Squid
 
 # *DOCUMENTATION*
@@ -913,6 +913,10 @@ ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
 LDFLAGS_vmlinux        += $(call ld-option, -X,)
 endif
 
+ifeq ($(CONFIG_RELR),y)
+LDFLAGS_vmlinux        += --pack-dyn-relocs=relr
+endif
+
 # insure the checker run with the right endianness
 CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)
 
index a7b57dd42c26698b319430ac65cd81610ba08b71..aa6bdb3df5c172bd1a63de57420e8a04241d7d55 100644 (file)
@@ -925,6 +925,20 @@ config LOCK_EVENT_COUNTS
          the chance of application behavior change because of timing
          differences. The counts are reported via debugfs.
 
+# Select if the architecture has support for applying RELR relocations.
+config ARCH_HAS_RELR
+       bool
+
+config RELR
+       bool "Use RELR relocation packing"
+       depends on ARCH_HAS_RELR && TOOLS_SUPPORT_RELR
+       default y
+       help
+         Store the kernel's dynamic relocations in the RELR relocation packing
+         format. Requires a compatible linker (LLD supports this feature), as
+         well as compatible NM and OBJCOPY utilities (llvm-nm and llvm-objcopy
+         are compatible).
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
index a83c4f5e928b3957f2e1f9113cfa856acc19a065..8483a86c743d409872c94c3af8e34ce39ec8a772 100644 (file)
@@ -12,3 +12,6 @@ dtb-y := $(builtindtb-y).dtb
 # for CONFIG_OF_ALL_DTBS test
 dtstree        := $(srctree)/$(src)
 dtb-   := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+
+# board-specific dtc flags
+DTC_FLAGS_hsdk += --pad 20
index f5ae394ebe067028bd9076dbd5075068a589390c..41b16f21beecac28099ed7ee154db6947b6b0d14 100644 (file)
 
 .macro FAKE_RET_FROM_EXCPN
        lr      r9, [status32]
-       bic     r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK)
+       bic     r9, r9, STATUS_AE_MASK
        or      r9, r9, STATUS_IE_MASK
        kflag   r9
 .endm
index a0eeb9f8f0a90fc5a9834b2faf4d4445a3dbcde3..d9ee43c6b7dbc2c698960365e8bbe78d0f087ddb 100644 (file)
 #else  /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_ARC_HAS_ICCM
-#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#define __arcfp_code __section(.text.arcfp)
 #else
-#define __arcfp_code __attribute__((__section__(".text")))
+#define __arcfp_code __section(.text)
 #endif
 
 #ifdef CONFIG_ARC_HAS_DCCM
-#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#define __arcfp_data __section(.data.arcfp)
 #else
-#define __arcfp_data __attribute__((__section__(".data")))
+#define __arcfp_data __section(.data)
 #endif
 
 #endif /* __ASSEMBLY__ */
index 8ac0e2ac3e70614c6f90d06989980fec8b333841..73746ed5b834dd9d8a4d91fb6cb337c51b3306c8 100644 (file)
@@ -53,8 +53,7 @@ extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
  */
 #define MACHINE_START(_type, _name)                    \
 static const struct machine_desc __mach_desc_##_type   \
-__used                                                 \
-__attribute__((__section__(".arch.info.init"))) = {    \
+__used __section(.arch.info.init) = {                  \
        .name           = _name,
 
 #define MACHINE_END                            \
index 18b493dfb3a83941e8049b537785e629f63337dc..abf9398cc333acb9a8dee9709e54f589c67aec8f 100644 (file)
@@ -202,8 +202,8 @@ static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
        __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
 }
 
-static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
-                          unsigned int distr)
+static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
+                        bool set_distr, unsigned int distr)
 {
        union {
                unsigned int word;
@@ -212,8 +212,11 @@ static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
                };
        } data;
 
-       data.distr = distr;
-       data.lvl = lvl;
+       data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
+       if (set_distr)
+               data.distr = distr;
+       if (set_lvl)
+               data.lvl = lvl;
        __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
 }
 
@@ -240,6 +243,25 @@ static void idu_irq_unmask(struct irq_data *data)
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
+static void idu_irq_ack(struct irq_data *data)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&mcip_lock, flags);
+       __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
+       raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void idu_irq_mask_ack(struct irq_data *data)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&mcip_lock, flags);
+       __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
+       __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
+       raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
 static int
 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
                     bool force)
@@ -263,13 +285,36 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
        else
                distribution_mode = IDU_M_DISTRI_RR;
 
-       idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
+       idu_set_mode(data->hwirq, false, 0, true, distribution_mode);
 
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
 
        return IRQ_SET_MASK_OK;
 }
 
+static int idu_irq_set_type(struct irq_data *data, u32 type)
+{
+       unsigned long flags;
+
+       /*
+        * ARCv2 IDU HW does not support inverse polarity, so these are the
+        * only interrupt types supported.
+        */
+       if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
+               return -EINVAL;
+
+       raw_spin_lock_irqsave(&mcip_lock, flags);
+
+       idu_set_mode(data->hwirq, true,
+                    type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
+                                                  IDU_M_TRIG_LEVEL,
+                    false, 0);
+
+       raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
+       return 0;
+}
+
 static void idu_irq_enable(struct irq_data *data)
 {
        /*
@@ -289,7 +334,10 @@ static struct irq_chip idu_irq_chip = {
        .name                   = "MCIP IDU Intc",
        .irq_mask               = idu_irq_mask,
        .irq_unmask             = idu_irq_unmask,
+       .irq_ack                = idu_irq_ack,
+       .irq_mask_ack           = idu_irq_mask_ack,
        .irq_enable             = idu_irq_enable,
+       .irq_set_type           = idu_irq_set_type,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = idu_irq_set_affinity,
 #endif
@@ -317,7 +365,7 @@ static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t
 }
 
 static const struct irq_domain_ops idu_irq_ops = {
-       .xlate  = irq_domain_xlate_onecell,
+       .xlate  = irq_domain_xlate_onetwocell,
        .map    = idu_irq_map,
 };
 
index c2663fce7f6c8c43c5085dd3c139f3001879590a..dc05a63516f5b5a3c695578cd864b2f3886910ba 100644 (file)
@@ -572,6 +572,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
 #else
                BUILD_BUG_ON(sizeof(u32) != sizeof(value));
 #endif
+               /* Fall through */
        case DW_EH_PE_native:
                if (end < (const void *)(ptr.pul + 1))
                        return 0;
@@ -826,7 +827,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
                        case DW_CFA_def_cfa:
                                state->cfa.reg = get_uleb128(&ptr.p8, end);
                                unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
-                               /*nobreak*/
+                               /* fall through */
                        case DW_CFA_def_cfa_offset:
                                state->cfa.offs = get_uleb128(&ptr.p8, end);
                                unw_debug("cfa_def_cfa_offset: 0x%lx ",
@@ -834,7 +835,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
                                break;
                        case DW_CFA_def_cfa_sf:
                                state->cfa.reg = get_uleb128(&ptr.p8, end);
-                               /*nobreak */
+                               /* fall through */
                        case DW_CFA_def_cfa_offset_sf:
                                state->cfa.offs = get_sleb128(&ptr.p8, end)
                                    * state->dataAlign;
index 62c210e7ee4cdc5046b422d819a997e03134652f..70a3fbe79fbaa2affaefdec0c10afcbcc05f96d7 100644 (file)
@@ -101,7 +101,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
        if (is_isa_arcv2() && ioc_enable && coherent)
                dev->dma_coherent = true;
 
-       dev_info(dev, "use %sncoherent DMA ops\n",
+       dev_info(dev, "use %scoherent DMA ops\n",
                 dev->dma_coherent ? "" : "non");
 }
 
index 7dd2dd335cf66ef321bc5d6389a732cf9e68788d..0b961a2a10b8efb525ba0ffa980e6eab74e057c5 100644 (file)
@@ -6,11 +6,15 @@
  */
 
 #include <linux/init.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
 #include <linux/smp.h>
 #include <asm/arcregs.h>
 #include <asm/io.h>
 #include <asm/mach_desc.h>
 
+int arc_hsdk_axi_dmac_coherent __section(.data) = 0;
+
 #define ARC_CCM_UNUSED_ADDR    0x60000000
 
 static void __init hsdk_init_per_cpu(unsigned int cpu)
@@ -97,6 +101,42 @@ static void __init hsdk_enable_gpio_intc_wire(void)
        iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
 }
 
+static int __init hsdk_tweak_node_coherency(const char *path, bool coherent)
+{
+       void *fdt = initial_boot_params;
+       const void *prop;
+       int node, ret;
+       bool dt_coh_set;
+
+       node = fdt_path_offset(fdt, path);
+       if (node < 0)
+               goto tweak_fail;
+
+       prop = fdt_getprop(fdt, node, "dma-coherent", &ret);
+       if (!prop && ret != -FDT_ERR_NOTFOUND)
+               goto tweak_fail;
+
+       dt_coh_set = ret != -FDT_ERR_NOTFOUND;
+       ret = 0;
+
+       /* need to remove "dma-coherent" property */
+       if (dt_coh_set && !coherent)
+               ret = fdt_delprop(fdt, node, "dma-coherent");
+
+       /* need to set "dma-coherent" property */
+       if (!dt_coh_set && coherent)
+               ret = fdt_setprop(fdt, node, "dma-coherent", NULL, 0);
+
+       if (ret < 0)
+               goto tweak_fail;
+
+       return 0;
+
+tweak_fail:
+       pr_err("failed to tweak %s to %scoherent\n", path, coherent ? "" : "non");
+       return -EFAULT;
+}
+
 enum hsdk_axi_masters {
        M_HS_CORE = 0,
        M_HS_RTT,
@@ -162,6 +202,39 @@ enum hsdk_axi_masters {
 #define CREG_PAE               ((void __iomem *)(CREG_BASE + 0x180))
 #define CREG_PAE_UPDT          ((void __iomem *)(CREG_BASE + 0x194))
 
+static void __init hsdk_init_memory_bridge_axi_dmac(void)
+{
+       bool coherent = !!arc_hsdk_axi_dmac_coherent;
+       u32 axi_m_slv1, axi_m_oft1;
+
+       /*
+        * Don't tweak memory bridge configuration if we failed to tweak DTB
+        * as we will end up in an inconsistent state.
+        */
+       if (hsdk_tweak_node_coherency("/soc/dmac@80000", coherent))
+               return;
+
+       if (coherent) {
+               axi_m_slv1 = 0x77999999;
+               axi_m_oft1 = 0x76DCBA98;
+       } else {
+               axi_m_slv1 = 0x77777777;
+               axi_m_oft1 = 0x76543210;
+       }
+
+       writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
+       writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
+       writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_0));
+       writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_0));
+       writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
+
+       writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
+       writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
+       writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_1));
+       writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_1));
+       writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
+}
+
 static void __init hsdk_init_memory_bridge(void)
 {
        u32 reg;
@@ -227,24 +300,14 @@ static void __init hsdk_init_memory_bridge(void)
        writel(0x76543210, CREG_AXI_M_OFT1(M_GPU));
        writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU));
 
-       writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
-       writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_0));
-       writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
-       writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_0));
-       writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
-
-       writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
-       writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_1));
-       writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
-       writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_1));
-       writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
-
        writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS));
        writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS));
        writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS));
        writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS));
        writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS));
 
+       hsdk_init_memory_bridge_axi_dmac();
+
        /*
         * PAE remapping for DMA clients does not work due to an RTL bug, so
         * CREG_PAE register must be programmed to all zeroes, otherwise it
index ced1a19d5f8982f11c12986dce5ea4d81decec3e..46849d6ecb3e264fb2b6f0fbef0c0e8e1031306a 100644 (file)
                        uart0: serial@0 {
                                compatible = "ti,am3352-uart", "ti,omap3-uart";
                                clock-frequency = <48000000>;
-                               reg = <0x0 0x2000>;
+                               reg = <0x0 0x1000>;
                                interrupts = <72>;
                                status = "disabled";
                                dmas = <&edma 26 0>, <&edma 27 0>;
                        uart1: serial@0 {
                                compatible = "ti,am3352-uart", "ti,omap3-uart";
                                clock-frequency = <48000000>;
-                               reg = <0x0 0x2000>;
+                               reg = <0x0 0x1000>;
                                interrupts = <73>;
                                status = "disabled";
                                dmas = <&edma 28 0>, <&edma 29 0>;
                        uart2: serial@0 {
                                compatible = "ti,am3352-uart", "ti,omap3-uart";
                                clock-frequency = <48000000>;
-                               reg = <0x0 0x2000>;
+                               reg = <0x0 0x1000>;
                                interrupts = <74>;
                                status = "disabled";
                                dmas = <&edma 30 0>, <&edma 31 0>;
                        uart3: serial@0 {
                                compatible = "ti,am3352-uart", "ti,omap3-uart";
                                clock-frequency = <48000000>;
-                               reg = <0x0 0x2000>;
+                               reg = <0x0 0x1000>;
                                interrupts = <44>;
                                status = "disabled";
                        };
                        uart4: serial@0 {
                                compatible = "ti,am3352-uart", "ti,omap3-uart";
                                clock-frequency = <48000000>;
-                               reg = <0x0 0x2000>;
+                               reg = <0x0 0x1000>;
                                interrupts = <45>;
                                status = "disabled";
                        };
                        uart5: serial@0 {
                                compatible = "ti,am3352-uart", "ti,omap3-uart";
                                clock-frequency = <48000000>;
-                               reg = <0x0 0x2000>;
+                               reg = <0x0 0x1000>;
                                interrupts = <46>;
                                status = "disabled";
                        };
 
                target-module@cc000 {                   /* 0x481cc000, ap 60 46.0 */
                        compatible = "ti,sysc-omap4", "ti,sysc";
+                       reg = <0xcc020 0x4>;
+                       reg-names = "rev";
                        ti,hwmods = "d_can0";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
                        clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>,
 
                target-module@d0000 {                   /* 0x481d0000, ap 62 42.0 */
                        compatible = "ti,sysc-omap4", "ti,sysc";
+                       reg = <0xd0020 0x4>;
+                       reg-names = "rev";
                        ti,hwmods = "d_can1";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
                        clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>,
index e5c2f71a7c77da4cd1f4393652e442eac45a4220..fb6b8aa12cc56c68e15eaba389e3120e90a40336 100644 (file)
                        interrupt-names = "edma3_tcerrint";
                };
 
-               mmc3: mmc@47810000 {
-                       compatible = "ti,omap4-hsmmc";
+               target-module@47810000 {
+                       compatible = "ti,sysc-omap2", "ti,sysc";
                        ti,hwmods = "mmc3";
-                       ti,needs-special-reset;
-                       interrupts = <29>;
-                       reg = <0x47810000 0x1000>;
-                       status = "disabled";
+                       reg = <0x478102fc 0x4>,
+                             <0x47810110 0x4>,
+                             <0x47810114 0x4>;
+                       reg-names = "rev", "sysc", "syss";
+                       ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+                                        SYSC_OMAP2_ENAWAKEUP |
+                                        SYSC_OMAP2_SOFTRESET |
+                                        SYSC_OMAP2_AUTOIDLE)>;
+                       ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+                                       <SYSC_IDLE_NO>,
+                                       <SYSC_IDLE_SMART>;
+                       ti,syss-mask = <1>;
+                       clocks = <&l3s_clkctrl AM3_L3S_MMC3_CLKCTRL 0>;
+                       clock-names = "fck";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       ranges = <0x0 0x47810000 0x1000>;
+
+                       mmc3: mmc@0 {
+                               compatible = "ti,omap4-hsmmc";
+                               ti,needs-special-reset;
+                               interrupts = <29>;
+                               reg = <0x0 0x1000>;
+                       };
                };
 
                usb: usb@47400000 {
index 55aff4db9c7c29625e8ef26fc30642f6995fcfb9..848e2a8884e2cdd0a7430a65c87576eca048f5ac 100644 (file)
                        interrupt-names = "edma3_tcerrint";
                };
 
-               mmc3: mmc@47810000 {
-                       compatible = "ti,omap4-hsmmc";
-                       reg = <0x47810000 0x1000>;
+               target-module@47810000 {
+                       compatible = "ti,sysc-omap2", "ti,sysc";
                        ti,hwmods = "mmc3";
-                       ti,needs-special-reset;
-                       interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
-                       status = "disabled";
+                       reg = <0x478102fc 0x4>,
+                             <0x47810110 0x4>,
+                             <0x47810114 0x4>;
+                       reg-names = "rev", "sysc", "syss";
+                       ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+                                        SYSC_OMAP2_ENAWAKEUP |
+                                        SYSC_OMAP2_SOFTRESET |
+                                        SYSC_OMAP2_AUTOIDLE)>;
+                       ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+                                       <SYSC_IDLE_NO>,
+                                       <SYSC_IDLE_SMART>;
+                       ti,syss-mask = <1>;
+                       clocks = <&l3s_clkctrl AM4_L3S_MMC3_CLKCTRL 0>;
+                       clock-names = "fck";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       ranges = <0x0 0x47810000 0x1000>;
+
+                       mmc3: mmc@0 {
+                               compatible = "ti,omap4-hsmmc";
+                               ti,needs-special-reset;
+                               interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x0 0x1000>;
+                       };
                };
 
                sham: sham@53100000 {
index 989cb60b90295868f1b983ff4129c867c33fce6b..04bee4ff9dcb86e6ea435834d5826c9eca98ab00 100644 (file)
 
                target-module@cc000 {                   /* 0x481cc000, ap 50 46.0 */
                        compatible = "ti,sysc-omap4", "ti,sysc";
+                       reg = <0xcc020 0x4>;
+                       reg-names = "rev";
                        ti,hwmods = "d_can0";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
                        clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>;
 
                target-module@d0000 {                   /* 0x481d0000, ap 52 3a.0 */
                        compatible = "ti,sysc-omap4", "ti,sysc";
+                       reg = <0xd0020 0x4>;
+                       reg-names = "rev";
                        ti,hwmods = "d_can1";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
                        clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>;
index 1d5e99964bbf89411daa4a73bc22b9d091f2dcca..0aaacea1d887bf2c2b61f9887f970a5719bbf6fb 100644 (file)
 };
 
 &mmc1 {
-       pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+       pinctrl-names = "default", "hs";
        pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
        pinctrl-1 = <&mmc1_pins_hs>;
-       pinctrl-2 = <&mmc1_pins_sdr12>;
-       pinctrl-3 = <&mmc1_pins_sdr25>;
-       pinctrl-4 = <&mmc1_pins_sdr50>;
-       pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>;
-       pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
 };
 
 &mmc2 {
index c65d7f6d3b5a6384b34c9969b945a42b442ca51a..ea1c119feaa5746d4b72baeaf63b0cfd2a15dce2 100644 (file)
 };
 
 &mmc1 {
-       pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+       pinctrl-names = "default", "hs";
        pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
        pinctrl-1 = <&mmc1_pins_hs>;
-       pinctrl-2 = <&mmc1_pins_sdr12>;
-       pinctrl-3 = <&mmc1_pins_sdr25>;
-       pinctrl-4 = <&mmc1_pins_sdr50>;
-       pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
-       pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
 };
 
 &mmc2 {
index dc5141c35610e9596ee5e6fb3272bedc95e426c3..7935d70874ce2b9558a1c0410c40ff4d40633e9e 100644 (file)
 };
 
 &mmc1 {
-       pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+       pinctrl-names = "default", "hs";
        pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
        pinctrl-1 = <&mmc1_pins_hs>;
-       pinctrl-2 = <&mmc1_pins_default>;
-       pinctrl-3 = <&mmc1_pins_hs>;
-       pinctrl-4 = <&mmc1_pins_sdr50>;
-       pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_conf>;
-       pinctrl-6 = <&mmc1_pins_ddr50 &mmc1_iodelay_sdr104_conf>;
 };
 
 &mmc2 {
index d02f5fa61e5f5a5144c2a1f8032a96d156b23eb4..bc76f1705c0f667dab3ad9af455b5f7685823e08 100644 (file)
        };
 };
 
-&gpio7 {
+&gpio7_target {
        ti,no-reset-on-init;
        ti,no-idle-on-init;
 };
 
        bus-width = <4>;
        cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
+       no-1-8-v;
 };
 
 &mmc2 {
index a374b5cd6db0e2ec16ea7994e33b350f68aa8532..7b113b52c3fb6494411afcd7fc32e7f5bb150e15 100644 (file)
 };
 
 &mmc1 {
-       pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+       pinctrl-names = "default", "hs";
        pinctrl-0 = <&mmc1_pins_default>;
        pinctrl-1 = <&mmc1_pins_hs>;
-       pinctrl-2 = <&mmc1_pins_sdr12>;
-       pinctrl-3 = <&mmc1_pins_sdr25>;
-       pinctrl-4 = <&mmc1_pins_sdr50>;
-       pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>;
-       pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>;
        vmmc-supply = <&vdd_3v3>;
        vqmmc-supply = <&ldo1_reg>;
 };
index 4badd2144db9a3ba95fdcb3bb3fd007942672116..30c500b15b21972baf89a7ce540776ff987f519a 100644 (file)
 };
 
 &mmc1 {
-       pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+       pinctrl-names = "default", "hs";
        pinctrl-0 = <&mmc1_pins_default>;
        pinctrl-1 = <&mmc1_pins_hs>;
-       pinctrl-2 = <&mmc1_pins_sdr12>;
-       pinctrl-3 = <&mmc1_pins_sdr25>;
-       pinctrl-4 = <&mmc1_pins_sdr50>;
-       pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
-       pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
        vmmc-supply = <&vdd_3v3>;
        vqmmc-supply = <&ldo1_reg>;
 };
index 714e971b912a488e46d4ebd6d0f0edafb9a74016..de7f85efaa5120a765abf05eb34780b8000994d2 100644 (file)
        phy-supply = <&ldousb_reg>;
 };
 
-&gpio7 {
+&gpio7_target {
        ti,no-reset-on-init;
        ti,no-idle-on-init;
 };
index 23faedec08abd04f215ba55d21391281d93e374f..21e5914fdd6209157b7149710b4cec249056c9dc 100644 (file)
                        };
                };
 
-               target-module@51000 {                   /* 0x48051000, ap 45 2e.0 */
+               gpio7_target: target-module@51000 {             /* 0x48051000, ap 45 2e.0 */
                        compatible = "ti,sysc-omap2", "ti,sysc";
                        ti,hwmods = "gpio7";
                        reg = <0x51000 0x4>,
 
                target-module@80000 {                   /* 0x48480000, ap 31 16.0 */
                        compatible = "ti,sysc-omap4", "ti,sysc";
-                       reg = <0x80000 0x4>;
+                       reg = <0x80020 0x4>;
                        reg-names = "rev";
                        clocks = <&l4per2_clkctrl DRA7_L4PER2_DCAN2_CLKCTRL 0>;
                        clock-names = "fck";
 
                target-module@c000 {                    /* 0x4ae3c000, ap 30 04.0 */
                        compatible = "ti,sysc-omap4", "ti,sysc";
-                       reg = <0xc000 0x4>;
+                       reg = <0xc020 0x4>;
                        reg-names = "rev";
                        clocks = <&wkupaon_clkctrl DRA7_WKUPAON_DCAN1_CLKCTRL 0>;
                        clock-names = "fck";
index 28ebb4eb884a9450f51508c576e2b37d3d8a0808..214b9e6de2c356d1c6a3650ba9fc92bcc17b8240 100644 (file)
@@ -32,7 +32,7 @@
  *
  * Datamanual Revisions:
  *
- * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016
+ * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019
  * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016
  *
  */
 
        mmc3_pins_default: mmc3_pins_default {
                pinctrl-single,pins = <
-                       DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
-                       DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
-                       DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
-                       DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
-                       DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
-                       DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+                       DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+                       DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+                       DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+                       DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+                       DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+                       DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
                >;
        };
 
        mmc3_pins_hs: mmc3_pins_hs {
                pinctrl-single,pins = <
-                       DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
-                       DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
-                       DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
-                       DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
-                       DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
-                       DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+                       DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+                       DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+                       DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+                       DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+                       DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+                       DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
                >;
        };
 
        mmc3_pins_sdr12: mmc3_pins_sdr12 {
                pinctrl-single,pins = <
-                       DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
-                       DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
-                       DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
-                       DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
-                       DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
-                       DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+                       DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+                       DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+                       DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+                       DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+                       DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+                       DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
                >;
        };
 
        mmc3_pins_sdr25: mmc3_pins_sdr25 {
                pinctrl-single,pins = <
-                       DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
-                       DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
-                       DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
-                       DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
-                       DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
-                       DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+                       DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+                       DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+                       DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+                       DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+                       DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+                       DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
                >;
        };
 
index 3fa0cbe456db6f35f3d35c8bfce85a41edadd2af..0f3870d3b0990e7f07caf2be5a4a0a47ece86366 100644 (file)
                reg = <0>;
        };
 
-       n25q128a13_2: flash@1 {
+       n25q128a13_2: flash@2 {
                compatible = "n25q128a13", "jedec,spi-nor";
                #address-cells = <1>;
                #size-cells = <1>;
                spi-max-frequency = <66000000>;
                spi-rx-bus-width = <2>;
-               reg = <1>;
+               reg = <2>;
        };
 };
 
index 0cdc6c7974b340fe704e4d2edb38c1ffba7ee697..3772d5a8975ae621ad097e7ac286d62196d78829 100644 (file)
@@ -93,6 +93,7 @@ CONFIG_SERIAL_HS_LPC32XX_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_PNX=y
+CONFIG_GPIO_LPC32XX=y
 CONFIG_SPI=y
 CONFIG_SPI_PL022=y
 CONFIG_GPIO_SYSFS=y
index 1d5210eb4776edcacb44cb0b5a23ea91864b87c3..582925238d65ea261cc126078462ce2a66421549 100644 (file)
@@ -66,7 +66,7 @@ for_each_frame:       tst     frame, mask             @ Check for address exceptions
 
 1003:          ldr     r2, [sv_pc, #-4]        @ if stmfd sp!, {args} exists,
                ldr     r3, .Ldsi+4             @ adjust saved 'pc' back one
-               teq     r3, r2, lsr #10         @ instruction
+               teq     r3, r2, lsr #11         @ instruction
                subne   r0, sv_pc, #4           @ allow for mov
                subeq   r0, sv_pc, #8           @ allow for mov + stmia
 
index 1f0da76a39de7462d668c4ad2c3f50d0cfd6a727..7b7280c21ee09992127c3edbb48c542294bdcdf0 100644 (file)
@@ -103,7 +103,7 @@ static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
 };
 
 static struct gpiod_lookup_table edb93xx_spi_cs_gpio_table = {
-       .dev_id = "ep93xx-spi.0",
+       .dev_id = "spi0",
        .table = {
                GPIO_LOOKUP("A", 6, "cs", GPIO_ACTIVE_LOW),
                { },
index e2658e22bba1d738e4009421c357aeafdb987276..8a53b74dc4b215eb8e77783df379cfc073a2d2f5 100644 (file)
@@ -73,7 +73,7 @@ static struct spi_board_info simone_spi_devices[] __initdata = {
  * v1.3 parts will still work, since the signal on SFRMOUT is automatic.
  */
 static struct gpiod_lookup_table simone_spi_cs_gpio_table = {
-       .dev_id = "ep93xx-spi.0",
+       .dev_id = "spi0",
        .table = {
                GPIO_LOOKUP("A", 1, "cs", GPIO_ACTIVE_LOW),
                { },
index 582e06e104fd695b797e3aecf97c8cb104cdf452..e0e1b11032f1d3767172dae6929ce0e364373328 100644 (file)
@@ -267,7 +267,7 @@ static struct spi_board_info bk3_spi_board_info[] __initdata = {
  * goes through CPLD
  */
 static struct gpiod_lookup_table bk3_spi_cs_gpio_table = {
-       .dev_id = "ep93xx-spi.0",
+       .dev_id = "spi0",
        .table = {
                GPIO_LOOKUP("F", 3, "cs", GPIO_ACTIVE_LOW),
                { },
@@ -316,7 +316,7 @@ static struct spi_board_info ts72xx_spi_devices[] __initdata = {
 };
 
 static struct gpiod_lookup_table ts72xx_spi_cs_gpio_table = {
-       .dev_id = "ep93xx-spi.0",
+       .dev_id = "spi0",
        .table = {
                /* DIO_17 */
                GPIO_LOOKUP("F", 2, "cs", GPIO_ACTIVE_LOW),
index a88a1d807b325160e25dfdaa72c61fe77cd9f1db..cbcba3136d74a04b9cc45811d9fda0abf80a143e 100644 (file)
@@ -242,7 +242,7 @@ static struct spi_board_info vision_spi_board_info[] __initdata = {
 };
 
 static struct gpiod_lookup_table vision_spi_cs_gpio_table = {
-       .dev_id = "ep93xx-spi.0",
+       .dev_id = "spi0",
        .table = {
                GPIO_LOOKUP_IDX("A", 6, "cs", 0, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("A", 7, "cs", 1, GPIO_ACTIVE_LOW),
index 81159af44862eecf396f1289e74f6726514b80fd..14a6c3eb329850dab50f29908cade0e8ab68dca6 100644 (file)
@@ -126,6 +126,8 @@ restart:
        orr r11, r11, r13                       @ mask all requested interrupts
        str r11, [r12, #OMAP1510_GPIO_INT_MASK]
 
+       str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts
+
        ands r10, r13, #KEYBRD_CLK_MASK         @ extract keyboard status - set?
        beq hksw                                @ no - try next source
 
@@ -133,7 +135,6 @@ restart:
        @@@@@@@@@@@@@@@@@@@@@@
        @ Keyboard clock FIQ mode interrupt handler
        @ r10 now contains KEYBRD_CLK_MASK, use it
-       str r10, [r12, #OMAP1510_GPIO_INT_STATUS]       @ ack the interrupt
        bic r11, r11, r10                               @ unmask it
        str r11, [r12, #OMAP1510_GPIO_INT_MASK]
 
index 43899fa5667437a57312858d5375ab6ae3d5f70c..0254eb9cf8c64340e056e2810dd8774f282b887a 100644 (file)
@@ -70,9 +70,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
                         * interrupts default to since commit 80ac93c27441
                         * requires interrupt already acked and unmasked.
                         */
-                       if (irq_chip->irq_ack)
-                               irq_chip->irq_ack(d);
-                       if (irq_chip->irq_unmask)
+                       if (!WARN_ON_ONCE(!irq_chip->irq_unmask))
                                irq_chip->irq_unmask(d);
                }
                for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
index 60065055162199dc4188fff1e448acb37e6d4627..d4f11c5070aeeec0294e14d44e359799e0a65bc9 100644 (file)
@@ -229,3 +229,5 @@ include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORC
 $(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h
 
 targets += pm-asm-offsets.s
+
+obj-$(CONFIG_OMAP_IOMMU)               += omap-iommu.o
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
new file mode 100644 (file)
index 0000000..f1a6ece
--- /dev/null
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP IOMMU quirks for various TI SoCs
+ *
+ * Copyright (C) 2015-2019 Texas Instruments Incorporated - http://www.ti.com/
+ *      Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/err.h>
+
+#include "omap_hwmod.h"
+#include "omap_device.h"
+#include "powerdomain.h"
+
+int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request,
+                                   u8 *pwrst)
+{
+       struct powerdomain *pwrdm;
+       struct omap_device *od;
+       u8 next_pwrst;
+
+       od = to_omap_device(pdev);
+       if (!od)
+               return -ENODEV;
+
+       if (od->hwmods_cnt != 1)
+               return -EINVAL;
+
+       pwrdm = omap_hwmod_get_pwrdm(od->hwmods[0]);
+       if (!pwrdm)
+               return -EINVAL;
+
+       if (request)
+               *pwrst = pwrdm_read_next_pwrst(pwrdm);
+
+       if (*pwrst > PWRDM_POWER_RET)
+               return 0;
+
+       next_pwrst = request ? PWRDM_POWER_ON : *pwrst;
+
+       return pwrdm_set_next_pwrst(pwrdm, next_pwrst);
+}
index f9c02f9f1c92199027cecf15f6a4567027b82d02..5c3845730dbf547d8c34040d069bab4b51006be5 100644 (file)
@@ -127,6 +127,9 @@ static int __init omap4_sram_init(void)
        struct device_node *np;
        struct gen_pool *sram_pool;
 
+       if (!soc_is_omap44xx() && !soc_is_omap54xx())
+               return 0;
+
        np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
        if (!np)
                pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
index 4a5b4aee6615a32eada87b5934521aefac09e08f..1ec21e9ba1e99aa565f1a70c8978f7ef58457f12 100644 (file)
@@ -379,7 +379,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
 static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
        .rev_offs       = 0x0,
        .sysc_offs      = 0x4,
-       .sysc_flags     = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
+       .sysc_flags     = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+                         SYSC_HAS_RESET_STATUS,
        .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
        .sysc_fields    = &omap_hwmod_sysc_type2,
 };
index 16d373d587c476e3caf81e43f61b5fc325ffa9a4..b4be3baa83d4d284e96c8f29ff84266473b234bb 100644 (file)
@@ -175,6 +175,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
+       phys_addr_t addr = __pfn_to_phys(pfn);
+
+       if (__phys_to_pfn(addr) != pfn)
+               return 0;
+
        return memblock_is_map_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
@@ -628,7 +633,8 @@ static void update_sections_early(struct section_perm perms[], int n)
                if (t->flags & PF_KTHREAD)
                        continue;
                for_each_thread(t, s)
-                       set_section_perms(perms, n, true, s->mm);
+                       if (s->mm)
+                               set_section_perms(perms, n, true, s->mm);
        }
        set_section_perms(perms, n, true, current->active_mm);
        set_section_perms(perms, n, true, &init_mm);
diff --git a/arch/arm64/Kbuild b/arch/arm64/Kbuild
new file mode 100644 (file)
index 0000000..d646582
--- /dev/null
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y                  += kernel/ mm/
+obj-$(CONFIG_NET)      += net/
+obj-$(CONFIG_KVM)      += kvm/
+obj-$(CONFIG_XEN)      += xen/
+obj-$(CONFIG_CRYPTO)   += crypto/
index 3adcec05b1f672d7ff3356e0eaf13d2667a64b6a..6ae6ad8a4db0ab63647433c0ebe8473b1d07674c 100644 (file)
@@ -148,6 +148,7 @@ config ARM64
        select HAVE_FAST_GUP
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_ERROR_INJECTION
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_GCC_PLUGINS
        select HAVE_HW_BREAKPOINT if PERF_EVENTS
@@ -286,7 +287,7 @@ config PGTABLE_LEVELS
        int
        default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36
        default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
-       default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52)
+       default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52)
        default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
        default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
        default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
@@ -297,6 +298,21 @@ config ARCH_SUPPORTS_UPROBES
 config ARCH_PROC_KCORE_TEXT
        def_bool y
 
+config KASAN_SHADOW_OFFSET
+       hex
+       depends on KASAN
+       default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
+       default 0xdfffd00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
+       default 0xdffffe8000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
+       default 0xdfffffd000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
+       default 0xdffffffa00000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
+       default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
+       default 0xefffc80000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
+       default 0xeffffe4000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
+       default 0xefffffc800000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
+       default 0xeffffff900000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
+       default 0xffffffffffffffff
+
 source "arch/arm64/Kconfig.platforms"
 
 menu "Kernel Features"
@@ -744,13 +760,14 @@ config ARM64_VA_BITS_47
 config ARM64_VA_BITS_48
        bool "48-bit"
 
-config ARM64_USER_VA_BITS_52
-       bool "52-bit (user)"
+config ARM64_VA_BITS_52
+       bool "52-bit"
        depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
        help
          Enable 52-bit virtual addressing for userspace when explicitly
-         requested via a hint to mmap(). The kernel will continue to
-         use 48-bit virtual addresses for its own mappings.
+         requested via a hint to mmap(). The kernel will also use 52-bit
+         virtual addresses for its own mappings (provided HW support for
+         this feature is available, otherwise it reverts to 48-bit).
 
          NOTE: Enabling 52-bit virtual addressing in conjunction with
          ARMv8.3 Pointer Authentication will result in the PAC being
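
The updated help text in the hunk above keeps 52-bit user VAs opt-in: even with ARM64_VA_BITS_52 set, mmap() continues to return 48-bit addresses unless the caller passes an address hint above the 48-bit range. A minimal userspace sketch of that opt-in follows; it assumes a kernel built with this option on hardware with 52-bit VA support, and the hint value and mapping size are illustrative only.

/*
 * Illustrative sketch: ask for a mapping above the default 48-bit
 * window by passing a high hint address to mmap(). If the kernel or
 * hardware cannot provide 52-bit VAs, the hint is simply not honoured
 * and the mapping comes back inside the 48-bit range.
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *hint = (void *)(1UL << 50);	/* hypothetical hint above 2^48 */
	void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("mapped at %p (%s the 48-bit range)\n", p,
	       (unsigned long)p >= (1UL << 48) ? "above" : "within");
	munmap(p, 4096);
	return 0;
}
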
@@ -763,7 +780,7 @@ endchoice
 
 config ARM64_FORCE_52BIT
        bool "Force 52-bit virtual addresses for userspace"
-       depends on ARM64_USER_VA_BITS_52 && EXPERT
+       depends on ARM64_VA_BITS_52 && EXPERT
        help
          For systems with 52-bit userspace VAs enabled, the kernel will attempt
          to maintain compatibility with older software by providing 48-bit VAs
@@ -780,7 +797,8 @@ config ARM64_VA_BITS
        default 39 if ARM64_VA_BITS_39
        default 42 if ARM64_VA_BITS_42
        default 47 if ARM64_VA_BITS_47
-       default 48 if ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52
+       default 48 if ARM64_VA_BITS_48
+       default 52 if ARM64_VA_BITS_52
 
 choice
        prompt "Physical address space size"
@@ -1110,6 +1128,15 @@ config ARM64_SW_TTBR0_PAN
          zeroed area and reserved ASID. The user access routines
          restore the valid TTBR0_EL1 temporarily.
 
+config ARM64_TAGGED_ADDR_ABI
+       bool "Enable the tagged user addresses syscall ABI"
+       default y
+       help
+         When this option is enabled, user applications can opt in to a
+         relaxed ABI via prctl() allowing tagged addresses to be passed
+         to system calls as pointer arguments. For details, see
+         Documentation/arm64/tagged-address-abi.rst.
+
 menuconfig COMPAT
        bool "Kernel support for 32-bit EL0"
        depends on ARM64_4K_PAGES || EXPERT
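
The ARM64_TAGGED_ADDR_ABI entry added in the hunk above describes a prctl()-based opt-in rather than a default behaviour change. A minimal sketch of the userspace side is shown here; it assumes the PR_SET_TAGGED_ADDR_CTRL / PR_TAGGED_ADDR_ENABLE constants introduced alongside this series, with fallback definitions in case the installed uapi headers predate them.

/*
 * Illustrative sketch: opt in to the relaxed tagged-address ABI.
 * After the prctl() succeeds, pointers carrying a non-zero top-byte
 * tag may be passed to most syscalls as pointer arguments.
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_GET_TAGGED_ADDR_CTRL	56
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

int main(void)
{
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) {
		perror("PR_SET_TAGGED_ADDR_CTRL");
		return 1;
	}

	printf("tagged address ABI control: %d\n",
	       prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
	return 0;
}
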
@@ -1263,6 +1290,7 @@ config ARM64_PAN
 
 config ARM64_LSE_ATOMICS
        bool "Atomic instructions"
+       depends on JUMP_LABEL
        default y
        help
          As part of the Large System Extensions, ARMv8.1 introduces new
@@ -1467,6 +1495,7 @@ endif
 
 config RELOCATABLE
        bool
+       select ARCH_HAS_RELR
        help
          This builds the kernel as a Position Independent Executable (PIE),
          which retains all relocation metadata required to relocate the
index 61de992bbea3fad61895e4f1e796cea460fd9deb..f843d298792dcb606bb87066cb40edc5d2c9a050 100644 (file)
@@ -39,6 +39,12 @@ $(warning LSE atomics not supported by binutils)
   endif
 endif
 
+cc_has_k_constraint := $(call try-run,echo                             \
+       'int main(void) {                                               \
+               asm volatile("and w0, w0, %w0" :: "K" (4294967295));    \
+               return 0;                                               \
+       }' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1)
+
 ifeq ($(CONFIG_ARM64), y)
 brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1)
 
@@ -63,7 +69,8 @@ ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y)
   endif
 endif
 
-KBUILD_CFLAGS  += -mgeneral-regs-only $(lseinstr) $(brokengasinst) $(compat_vdso)
+KBUILD_CFLAGS  += -mgeneral-regs-only $(lseinstr) $(brokengasinst)     \
+                  $(compat_vdso) $(cc_has_k_constraint)
 KBUILD_CFLAGS  += -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS  += $(call cc-disable-warning, psabi)
 KBUILD_AFLAGS  += $(lseinstr) $(brokengasinst) $(compat_vdso)
@@ -126,21 +133,9 @@ KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
 KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
 KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
 
-# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
-#                               - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT))
-# in 32-bit arithmetic
-KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
-       (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
-       + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \
-       - (1 << (64 - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) )) )
-
 export TEXT_OFFSET GZFLAGS
 
-core-y         += arch/arm64/kernel/ arch/arm64/mm/
-core-$(CONFIG_NET) += arch/arm64/net/
-core-$(CONFIG_KVM) += arch/arm64/kvm/
-core-$(CONFIG_XEN) += arch/arm64/xen/
-core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
+core-y         += arch/arm64/
 libs-y         := arch/arm64/lib/ $(libs-y)
 core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
 
index c7a87368850b03ffbacb0a8f122fd4da6b800e20..12aa7eaeaf6800bb3374114be1027adb58036e74 100644 (file)
        pinctrl-names = "default";
 };
 
+&ir {
+       status = "okay";
+       pinctrl-0 = <&remote_input_ao_pins>;
+       pinctrl-names = "default";
+};
+
 &pwm_ef {
        status = "okay";
        pinctrl-0 = <&pwm_e_pins>;
index f8d43e3dcf20ce4d57d2b17abd6693c4b2dad77a..1785552d450c448066c76d7251745eb54035b9b1 100644 (file)
                                clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
                                clock-names = "ddr";
                                phys = <&usb2_phy1>;
+                               phy-names = "usb2-phy";
                                dr_mode = "peripheral";
                                g-rx-fifo-size = <192>;
                                g-np-tx-fifo-size = <128>;
index 81780ffcc7f0d0d3ff3fa766cdf49e6b4b7ff0c8..4e916e1f71f768623a336eebfecf21d75d825d3c 100644 (file)
@@ -53,6 +53,7 @@
 
                gpio = <&gpio_ao GPIOAO_8 GPIO_ACTIVE_HIGH>;
                enable-active-high;
+               regulator-always-on;
        };
 
        tf_io: gpio-regulator-tf_io {
index 3311a982fff89f66fa52c4a435ad2eeeebeec55c..23fd0224ca90e103c20af7dee574abf8af9b590d 100644 (file)
        mmc-hs200-1_8v;
        non-removable;
        fixed-emmc-driver-type = <1>;
+       status = "okay";
 };
 
 &usb_extal_clk {
index 0711170b26b1fe1c6ab4d230cc9acb2fb10a0398..3aa2564dfdc25fff2d09a121f7048390202b2a94 100644 (file)
@@ -97,7 +97,7 @@
                reg = <0x0 0x48000000 0x0 0x18000000>;
        };
 
-       reg_1p8v: regulator0 {
+       reg_1p8v: regulator-1p8v {
                compatible = "regulator-fixed";
                regulator-name = "fixed-1.8V";
                regulator-min-microvolt = <1800000>;
                regulator-always-on;
        };
 
-       reg_3p3v: regulator1 {
+       reg_3p3v: regulator-3p3v {
                compatible = "regulator-fixed";
                regulator-name = "fixed-3.3V";
                regulator-min-microvolt = <3300000>;
                regulator-always-on;
        };
 
-       reg_12p0v: regulator1 {
+       reg_12p0v: regulator-12p0v {
                compatible = "regulator-fixed";
                regulator-name = "D12.0V";
                regulator-min-microvolt = <12000000>;
index e3a15c751b1398e911c19c3a879e9d1e766410f4..b8cf7c85ffa2a2e7c5cacd15910185d44fe751a8 100644 (file)
@@ -123,17 +123,6 @@ alternative_else
 alternative_endif
        .endm
 
-/*
- * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
- * of bounds.
- */
-       .macro  mask_nospec64, idx, limit, tmp
-       sub     \tmp, \idx, \limit
-       bic     \tmp, \tmp, \idx
-       and     \idx, \idx, \tmp, asr #63
-       csdb
-       .endm
-
 /*
  * NOP sequence
  */
@@ -349,6 +338,13 @@ alternative_endif
        bfi     \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
        .endm
 
+/*
+ * tcr_set_t1sz - update TCR.T1SZ
+ */
+       .macro  tcr_set_t1sz, valreg, t1sz
+       bfi     \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
+       .endm
+
 /*
  * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
  * ID_AA64MMFR0_EL1.PARange value
@@ -538,9 +534,13 @@ USER(\label, ic    ivau, \tmp2)                    // invalidate I line PoU
  * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
  *     ttbr: Value of ttbr to set, modified.
  */
-       .macro  offset_ttbr1, ttbr
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+       .macro  offset_ttbr1, ttbr, tmp
+#ifdef CONFIG_ARM64_VA_BITS_52
+       mrs_s   \tmp, SYS_ID_AA64MMFR2_EL1
+       and     \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+       cbnz    \tmp, .Lskipoffs_\@
        orr     \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
+.Lskipoffs_\@ :
 #endif
        .endm
 
@@ -550,7 +550,7 @@ USER(\label, ic     ivau, \tmp2)                    // invalidate I line PoU
  * to be nop'ed out when dealing with 52-bit kernel VAs.
  */
        .macro  restore_ttbr1, ttbr
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_ARM64_VA_BITS_52
        bic     \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
 #endif
        .endm
index 657b0457d83cf1ebd5cdf92bc5d66adee25fe4a5..9543b5e0534d216f52d517de1c200d20762fc414 100644 (file)
 #include <linux/types.h>
 
 #include <asm/barrier.h>
+#include <asm/cmpxchg.h>
 #include <asm/lse.h>
 
-#ifdef __KERNEL__
-
-#define __ARM64_IN_ATOMIC_IMPL
-
-#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
-#include <asm/atomic_lse.h>
-#else
-#include <asm/atomic_ll_sc.h>
-#endif
-
-#undef __ARM64_IN_ATOMIC_IMPL
-
-#include <asm/cmpxchg.h>
+#define ATOMIC_OP(op)                                                  \
+static inline void arch_##op(int i, atomic_t *v)                       \
+{                                                                      \
+       __lse_ll_sc_body(op, i, v);                                     \
+}
+
+ATOMIC_OP(atomic_andnot)
+ATOMIC_OP(atomic_or)
+ATOMIC_OP(atomic_xor)
+ATOMIC_OP(atomic_add)
+ATOMIC_OP(atomic_and)
+ATOMIC_OP(atomic_sub)
+
+#undef ATOMIC_OP
+
+#define ATOMIC_FETCH_OP(name, op)                                      \
+static inline int arch_##op##name(int i, atomic_t *v)                  \
+{                                                                      \
+       return __lse_ll_sc_body(op##name, i, v);                        \
+}
+
+#define ATOMIC_FETCH_OPS(op)                                           \
+       ATOMIC_FETCH_OP(_relaxed, op)                                   \
+       ATOMIC_FETCH_OP(_acquire, op)                                   \
+       ATOMIC_FETCH_OP(_release, op)                                   \
+       ATOMIC_FETCH_OP(        , op)
+
+ATOMIC_FETCH_OPS(atomic_fetch_andnot)
+ATOMIC_FETCH_OPS(atomic_fetch_or)
+ATOMIC_FETCH_OPS(atomic_fetch_xor)
+ATOMIC_FETCH_OPS(atomic_fetch_add)
+ATOMIC_FETCH_OPS(atomic_fetch_and)
+ATOMIC_FETCH_OPS(atomic_fetch_sub)
+ATOMIC_FETCH_OPS(atomic_add_return)
+ATOMIC_FETCH_OPS(atomic_sub_return)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_FETCH_OPS
+
+#define ATOMIC64_OP(op)                                                        \
+static inline void arch_##op(long i, atomic64_t *v)                    \
+{                                                                      \
+       __lse_ll_sc_body(op, i, v);                                     \
+}
+
+ATOMIC64_OP(atomic64_andnot)
+ATOMIC64_OP(atomic64_or)
+ATOMIC64_OP(atomic64_xor)
+ATOMIC64_OP(atomic64_add)
+ATOMIC64_OP(atomic64_and)
+ATOMIC64_OP(atomic64_sub)
+
+#undef ATOMIC64_OP
+
+#define ATOMIC64_FETCH_OP(name, op)                                    \
+static inline long arch_##op##name(long i, atomic64_t *v)              \
+{                                                                      \
+       return __lse_ll_sc_body(op##name, i, v);                        \
+}
+
+#define ATOMIC64_FETCH_OPS(op)                                         \
+       ATOMIC64_FETCH_OP(_relaxed, op)                                 \
+       ATOMIC64_FETCH_OP(_acquire, op)                                 \
+       ATOMIC64_FETCH_OP(_release, op)                                 \
+       ATOMIC64_FETCH_OP(        , op)
+
+ATOMIC64_FETCH_OPS(atomic64_fetch_andnot)
+ATOMIC64_FETCH_OPS(atomic64_fetch_or)
+ATOMIC64_FETCH_OPS(atomic64_fetch_xor)
+ATOMIC64_FETCH_OPS(atomic64_fetch_add)
+ATOMIC64_FETCH_OPS(atomic64_fetch_and)
+ATOMIC64_FETCH_OPS(atomic64_fetch_sub)
+ATOMIC64_FETCH_OPS(atomic64_add_return)
+ATOMIC64_FETCH_OPS(atomic64_sub_return)
+
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_FETCH_OPS
+
+static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+       return __lse_ll_sc_body(atomic64_dec_if_positive, v);
+}
 
 #define ATOMIC_INIT(i) { (i) }
 
 
 #include <asm-generic/atomic-instrumented.h>
 
-#endif
-#endif
+#endif /* __ASM_ATOMIC_H */
index c8c850bc3dfb0cd8c6aad369e161cd18aa2f02c8..7b012148bfd6c1ccbd6f47a437ca1323d92428ed 100644 (file)
 #ifndef __ASM_ATOMIC_LL_SC_H
 #define __ASM_ATOMIC_LL_SC_H
 
-#ifndef __ARM64_IN_ATOMIC_IMPL
-#error "please don't include this file directly"
+#include <linux/stringify.h>
+
+#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE)
+#define __LL_SC_FALLBACK(asm_ops)                                      \
+"      b       3f\n"                                                   \
+"      .subsection     1\n"                                            \
+"3:\n"                                                                 \
+asm_ops "\n"                                                           \
+"      b       4f\n"                                                   \
+"      .previous\n"                                                    \
+"4:\n"
+#else
+#define __LL_SC_FALLBACK(asm_ops) asm_ops
+#endif
+
+#ifndef CONFIG_CC_HAS_K_CONSTRAINT
+#define K
 #endif
 
 /*
  * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
  * store exclusive to ensure that these are atomic.  We may loop
  * to ensure that the update happens.
- *
- * NOTE: these functions do *not* follow the PCS and must explicitly
- * save any clobbered registers other than x0 (regardless of return
- * value).  This is achieved through -fcall-saved-* compiler flags for
- * this file, which unfortunately don't work on a per-function basis
- * (the optimize attribute silently ignores these options).
  */
 
-#define ATOMIC_OP(op, asm_op)                                          \
-__LL_SC_INLINE void                                                    \
-__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v))                   \
+#define ATOMIC_OP(op, asm_op, constraint)                              \
+static inline void                                                     \
+__ll_sc_atomic_##op(int i, atomic_t *v)                                        \
 {                                                                      \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "\n"                              \
+       __LL_SC_FALLBACK(                                               \
 "      prfm    pstl1strm, %2\n"                                        \
 "1:    ldxr    %w0, %2\n"                                              \
 "      " #asm_op "     %w0, %w0, %w3\n"                                \
 "      stxr    %w1, %w0, %2\n"                                         \
-"      cbnz    %w1, 1b"                                                \
+"      cbnz    %w1, 1b\n")                                             \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
-       : "Ir" (i));                                                    \
-}                                                                      \
-__LL_SC_EXPORT(arch_atomic_##op);
+       : __stringify(constraint) "r" (i));                             \
+}
 
-#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)           \
-__LL_SC_INLINE int                                                     \
-__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v))    \
+#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
+static inline int                                                      \
+__ll_sc_atomic_##op##_return##name(int i, atomic_t *v)                 \
 {                                                                      \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "_return" #name "\n"              \
+       __LL_SC_FALLBACK(                                               \
 "      prfm    pstl1strm, %2\n"                                        \
 "1:    ld" #acq "xr    %w0, %2\n"                                      \
 "      " #asm_op "     %w0, %w0, %w3\n"                                \
 "      st" #rel "xr    %w1, %w0, %2\n"                                 \
 "      cbnz    %w1, 1b\n"                                              \
-"      " #mb                                                           \
+"      " #mb )                                                         \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
-       : "Ir" (i)                                                      \
+       : __stringify(constraint) "r" (i)                               \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
-}                                                                      \
-__LL_SC_EXPORT(arch_atomic_##op##_return##name);
+}
 
-#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)            \
-__LL_SC_INLINE int                                                     \
-__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v))       \
+#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
+static inline int                                                      \
+__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)                    \
 {                                                                      \
        unsigned long tmp;                                              \
        int val, result;                                                \
                                                                        \
        asm volatile("// atomic_fetch_" #op #name "\n"                  \
+       __LL_SC_FALLBACK(                                               \
 "      prfm    pstl1strm, %3\n"                                        \
 "1:    ld" #acq "xr    %w0, %3\n"                                      \
 "      " #asm_op "     %w1, %w0, %w4\n"                                \
 "      st" #rel "xr    %w2, %w1, %3\n"                                 \
 "      cbnz    %w2, 1b\n"                                              \
-"      " #mb                                                           \
+"      " #mb )                                                         \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
-       : "Ir" (i)                                                      \
+       : __stringify(constraint) "r" (i)                               \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
-}                                                                      \
-__LL_SC_EXPORT(arch_atomic_fetch_##op##name);
+}
 
 #define ATOMIC_OPS(...)                                                        \
        ATOMIC_OP(__VA_ARGS__)                                          \
@@ -99,8 +108,8 @@ __LL_SC_EXPORT(arch_atomic_fetch_##op##name);
        ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)
 
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
+ATOMIC_OPS(add, add, I)
+ATOMIC_OPS(sub, sub, J)
 
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(...)                                                        \
@@ -110,77 +119,82 @@ ATOMIC_OPS(sub, sub)
        ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)
 
-ATOMIC_OPS(and, and)
-ATOMIC_OPS(andnot, bic)
-ATOMIC_OPS(or, orr)
-ATOMIC_OPS(xor, eor)
+ATOMIC_OPS(and, and, K)
+ATOMIC_OPS(or, orr, K)
+ATOMIC_OPS(xor, eor, K)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
+ATOMIC_OPS(andnot, bic, )
 
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define ATOMIC64_OP(op, asm_op)                                                \
-__LL_SC_INLINE void                                                    \
-__LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v))               \
+#define ATOMIC64_OP(op, asm_op, constraint)                            \
+static inline void                                                     \
+__ll_sc_atomic64_##op(s64 i, atomic64_t *v)                            \
 {                                                                      \
        s64 result;                                                     \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "\n"                            \
+       __LL_SC_FALLBACK(                                               \
 "      prfm    pstl1strm, %2\n"                                        \
 "1:    ldxr    %0, %2\n"                                               \
 "      " #asm_op "     %0, %0, %3\n"                                   \
 "      stxr    %w1, %0, %2\n"                                          \
-"      cbnz    %w1, 1b"                                                \
+"      cbnz    %w1, 1b")                                               \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
-       : "Ir" (i));                                                    \
-}                                                                      \
-__LL_SC_EXPORT(arch_atomic64_##op);
+       : __stringify(constraint) "r" (i));                             \
+}
 
-#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)         \
-__LL_SC_INLINE s64                                                     \
-__LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\
+#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
+static inline long                                                     \
+__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)             \
 {                                                                      \
        s64 result;                                                     \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "_return" #name "\n"            \
+       __LL_SC_FALLBACK(                                               \
 "      prfm    pstl1strm, %2\n"                                        \
 "1:    ld" #acq "xr    %0, %2\n"                                       \
 "      " #asm_op "     %0, %0, %3\n"                                   \
 "      st" #rel "xr    %w1, %0, %2\n"                                  \
 "      cbnz    %w1, 1b\n"                                              \
-"      " #mb                                                           \
+"      " #mb )                                                         \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
-       : "Ir" (i)                                                      \
+       : __stringify(constraint) "r" (i)                               \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
-}                                                                      \
-__LL_SC_EXPORT(arch_atomic64_##op##_return##name);
+}
 
-#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)          \
-__LL_SC_INLINE s64                                                     \
-__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v))   \
+#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
+static inline long                                                     \
+__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)                \
 {                                                                      \
        s64 result, val;                                                \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_fetch_" #op #name "\n"                \
+       __LL_SC_FALLBACK(                                               \
 "      prfm    pstl1strm, %3\n"                                        \
 "1:    ld" #acq "xr    %0, %3\n"                                       \
 "      " #asm_op "     %1, %0, %4\n"                                   \
 "      st" #rel "xr    %w2, %1, %3\n"                                  \
 "      cbnz    %w2, 1b\n"                                              \
-"      " #mb                                                           \
+"      " #mb )                                                         \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
-       : "Ir" (i)                                                      \
+       : __stringify(constraint) "r" (i)                               \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
-}                                                                      \
-__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
+}
 
 #define ATOMIC64_OPS(...)                                              \
        ATOMIC64_OP(__VA_ARGS__)                                        \
@@ -193,8 +207,8 @@ __LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
        ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)
 
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
+ATOMIC64_OPS(add, add, I)
+ATOMIC64_OPS(sub, sub, J)
 
 #undef ATOMIC64_OPS
 #define ATOMIC64_OPS(...)                                              \
@@ -204,23 +218,29 @@ ATOMIC64_OPS(sub, sub)
        ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)
 
-ATOMIC64_OPS(and, and)
-ATOMIC64_OPS(andnot, bic)
-ATOMIC64_OPS(or, orr)
-ATOMIC64_OPS(xor, eor)
+ATOMIC64_OPS(and, and, L)
+ATOMIC64_OPS(or, orr, L)
+ATOMIC64_OPS(xor, eor, L)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
+ATOMIC64_OPS(andnot, bic, )
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-__LL_SC_INLINE s64
-__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
+static inline s64
+__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
 {
        s64 result;
        unsigned long tmp;
 
        asm volatile("// atomic64_dec_if_positive\n"
+       __LL_SC_FALLBACK(
 "      prfm    pstl1strm, %2\n"
 "1:    ldxr    %0, %2\n"
 "      subs    %0, %0, #1\n"
@@ -228,20 +248,19 @@ __LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
 "      stlxr   %w1, %0, %2\n"
 "      cbnz    %w1, 1b\n"
 "      dmb     ish\n"
-"2:"
+"2:")
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");
 
        return result;
 }
-__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
 
-#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl)             \
-__LL_SC_INLINE u##sz                                                   \
-__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr,           \
+#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
+static inline u##sz                                                    \
+__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr,                   \
                                         unsigned long old,             \
-                                        u##sz new))                    \
+                                        u##sz new)                     \
 {                                                                      \
        unsigned long tmp;                                              \
        u##sz oldval;                                                   \
@@ -255,6 +274,7 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr,                \
                old = (u##sz)old;                                       \
                                                                        \
        asm volatile(                                                   \
+       __LL_SC_FALLBACK(                                               \
        "       prfm    pstl1strm, %[v]\n"                              \
        "1:     ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n"          \
        "       eor     %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"  \
@@ -262,46 +282,51 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr,              \
        "       st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n"    \
        "       cbnz    %w[tmp], 1b\n"                                  \
        "       " #mb "\n"                                              \
-       "2:"                                                            \
+       "2:")                                                           \
        : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),                   \
          [v] "+Q" (*(u##sz *)ptr)                                      \
-       : [old] "Kr" (old), [new] "r" (new)                             \
+       : [old] __stringify(constraint) "r" (old), [new] "r" (new)      \
        : cl);                                                          \
                                                                        \
        return oldval;                                                  \
-}                                                                      \
-__LL_SC_EXPORT(__cmpxchg_case_##name##sz);
+}
 
-__CMPXCHG_CASE(w, b,     ,  8,        ,  ,  ,         )
-__CMPXCHG_CASE(w, h,     , 16,        ,  ,  ,         )
-__CMPXCHG_CASE(w,  ,     , 32,        ,  ,  ,         )
-__CMPXCHG_CASE( ,  ,     , 64,        ,  ,  ,         )
-__CMPXCHG_CASE(w, b, acq_,  8,        , a,  , "memory")
-__CMPXCHG_CASE(w, h, acq_, 16,        , a,  , "memory")
-__CMPXCHG_CASE(w,  , acq_, 32,        , a,  , "memory")
-__CMPXCHG_CASE( ,  , acq_, 64,        , a,  , "memory")
-__CMPXCHG_CASE(w, b, rel_,  8,        ,  , l, "memory")
-__CMPXCHG_CASE(w, h, rel_, 16,        ,  , l, "memory")
-__CMPXCHG_CASE(w,  , rel_, 32,        ,  , l, "memory")
-__CMPXCHG_CASE( ,  , rel_, 64,        ,  , l, "memory")
-__CMPXCHG_CASE(w, b,  mb_,  8, dmb ish,  , l, "memory")
-__CMPXCHG_CASE(w, h,  mb_, 16, dmb ish,  , l, "memory")
-__CMPXCHG_CASE(w,  ,  mb_, 32, dmb ish,  , l, "memory")
-__CMPXCHG_CASE( ,  ,  mb_, 64, dmb ish,  , l, "memory")
+/*
+ * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
+ * handle the 'K' constraint for the value 4294967295 - thus we use no
+ * constraint for 32 bit operations.
+ */
+__CMPXCHG_CASE(w, b,     ,  8,        ,  ,  ,         , K)
+__CMPXCHG_CASE(w, h,     , 16,        ,  ,  ,         , K)
+__CMPXCHG_CASE(w,  ,     , 32,        ,  ,  ,         , K)
+__CMPXCHG_CASE( ,  ,     , 64,        ,  ,  ,         , L)
+__CMPXCHG_CASE(w, b, acq_,  8,        , a,  , "memory", K)
+__CMPXCHG_CASE(w, h, acq_, 16,        , a,  , "memory", K)
+__CMPXCHG_CASE(w,  , acq_, 32,        , a,  , "memory", K)
+__CMPXCHG_CASE( ,  , acq_, 64,        , a,  , "memory", L)
+__CMPXCHG_CASE(w, b, rel_,  8,        ,  , l, "memory", K)
+__CMPXCHG_CASE(w, h, rel_, 16,        ,  , l, "memory", K)
+__CMPXCHG_CASE(w,  , rel_, 32,        ,  , l, "memory", K)
+__CMPXCHG_CASE( ,  , rel_, 64,        ,  , l, "memory", L)
+__CMPXCHG_CASE(w, b,  mb_,  8, dmb ish,  , l, "memory", K)
+__CMPXCHG_CASE(w, h,  mb_, 16, dmb ish,  , l, "memory", K)
+__CMPXCHG_CASE(w,  ,  mb_, 32, dmb ish,  , l, "memory", K)
+__CMPXCHG_CASE( ,  ,  mb_, 64, dmb ish,  , l, "memory", L)
 
 #undef __CMPXCHG_CASE
 
 #define __CMPXCHG_DBL(name, mb, rel, cl)                               \
-__LL_SC_INLINE long                                                    \
-__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,              \
+static inline long                                                     \
+__ll_sc__cmpxchg_double##name(unsigned long old1,                      \
                                      unsigned long old2,               \
                                      unsigned long new1,               \
                                      unsigned long new2,               \
-                                     volatile void *ptr))              \
+                                     volatile void *ptr)               \
 {                                                                      \
        unsigned long tmp, ret;                                         \
                                                                        \
        asm volatile("// __cmpxchg_double" #name "\n"                   \
+       __LL_SC_FALLBACK(                                               \
        "       prfm    pstl1strm, %2\n"                                \
        "1:     ldxp    %0, %1, %2\n"                                   \
        "       eor     %0, %0, %3\n"                                   \
@@ -311,18 +336,18 @@ __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,         \
        "       st" #rel "xp    %w0, %5, %6, %2\n"                      \
        "       cbnz    %w0, 1b\n"                                      \
        "       " #mb "\n"                                              \
-       "2:"                                                            \
+       "2:")                                                           \
        : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)        \
        : "r" (old1), "r" (old2), "r" (new1), "r" (new2)                \
        : cl);                                                          \
                                                                        \
        return ret;                                                     \
-}                                                                      \
-__LL_SC_EXPORT(__cmpxchg_double##name);
+}
 
 __CMPXCHG_DBL(   ,        ,  ,         )
 __CMPXCHG_DBL(_mb, dmb ish, l, "memory")
 
 #undef __CMPXCHG_DBL
+#undef K
 
 #endif /* __ASM_ATOMIC_LL_SC_H */
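
The <asm/atomic_ll_sc.h> rework above threads a new constraint argument through every macro and splices it into the asm operand list with __stringify(), so "I", "J", "K" or "L" picks the matching AArch64 immediate class while an empty argument quietly degrades to a plain register operand (as done for the bic/andnot case). A minimal user-space sketch of that stringify-and-concatenate idiom, with made-up macro and function names, assuming GCC/Clang handling of empty macro arguments:

    #include <stdio.h>

    /* Same two-level stringify trick as the kernel's __stringify(). */
    #define __stringify_1(x...)     #x
    #define __stringify(x...)       __stringify_1(x)

    /* Illustrative only: print the constraint string the asm would see. */
    #define SHOW_CONSTRAINT(constraint) \
            puts("asm operand constraint: \"" __stringify(constraint) "r\"")

    int main(void)
    {
            SHOW_CONSTRAINT(I);     /* "Ir": add/sub immediate or register */
            SHOW_CONSTRAINT();      /* "r" : register only, the andnot case */
            return 0;
    }
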
index 69acb1c19a15ab9b878ce252ff00a736294580fa..c6bd87d2915b4de23210ab3eba9eee4b249377db 100644 (file)
 #ifndef __ASM_ATOMIC_LSE_H
 #define __ASM_ATOMIC_LSE_H
 
-#ifndef __ARM64_IN_ATOMIC_IMPL
-#error "please don't include this file directly"
-#endif
-
-#define __LL_SC_ATOMIC(op)     __LL_SC_CALL(arch_atomic_##op)
 #define ATOMIC_OP(op, asm_op)                                          \
-static inline void arch_atomic_##op(int i, atomic_t *v)                        \
+static inline void __lse_atomic_##op(int i, atomic_t *v)                       \
 {                                                                      \
-       register int w0 asm ("w0") = i;                                 \
-       register atomic_t *x1 asm ("x1") = v;                           \
-                                                                       \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),          \
-"      " #asm_op "     %w[i], %[v]\n")                                 \
-       : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
-       : "r" (x1)                                                      \
-       : __LL_SC_CLOBBERS);                                            \
+       asm volatile(                                                   \
+"      " #asm_op "     %w[i], %[v]\n"                                  \
+       : [i] "+r" (i), [v] "+Q" (v->counter)                           \
+       : "r" (v));                                                     \
 }
 
 ATOMIC_OP(andnot, stclr)
@@ -36,21 +27,15 @@ ATOMIC_OP(add, stadd)
 #undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)                   \
-static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v)     \
+static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)    \
 {                                                                      \
-       register int w0 asm ("w0") = i;                                 \
-       register atomic_t *x1 asm ("x1") = v;                           \
-                                                                       \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
-       /* LL/SC */                                                     \
-       __LL_SC_ATOMIC(fetch_##op##name),                               \
-       /* LSE atomics */                                               \
-"      " #asm_op #mb " %w[i], %w[i], %[v]")                            \
-       : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
-       : "r" (x1)                                                      \
-       : __LL_SC_CLOBBERS, ##cl);                                      \
+       asm volatile(                                                   \
+"      " #asm_op #mb " %w[i], %w[i], %[v]"                             \
+       : [i] "+r" (i), [v] "+Q" (v->counter)                           \
+       : "r" (v)                                                       \
+       : cl);                                                          \
                                                                        \
-       return w0;                                                      \
+       return i;                                                       \
 }
 
 #define ATOMIC_FETCH_OPS(op, asm_op)                                   \
@@ -68,23 +53,18 @@ ATOMIC_FETCH_OPS(add, ldadd)
 #undef ATOMIC_FETCH_OPS
 
 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)                          \
-static inline int arch_atomic_add_return##name(int i, atomic_t *v)     \
+static inline int __lse_atomic_add_return##name(int i, atomic_t *v)    \
 {                                                                      \
-       register int w0 asm ("w0") = i;                                 \
-       register atomic_t *x1 asm ("x1") = v;                           \
+       u32 tmp;                                                        \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
-       /* LL/SC */                                                     \
-       __LL_SC_ATOMIC(add_return##name)                                \
-       __nops(1),                                                      \
-       /* LSE atomics */                                               \
-       "       ldadd" #mb "    %w[i], w30, %[v]\n"                     \
-       "       add     %w[i], %w[i], w30")                             \
-       : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
-       : "r" (x1)                                                      \
-       : __LL_SC_CLOBBERS, ##cl);                                      \
+       asm volatile(                                                   \
+       "       ldadd" #mb "    %w[i], %w[tmp], %[v]\n"                 \
+       "       add     %w[i], %w[i], %w[tmp]"                          \
+       : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)        \
+       : "r" (v)                                                       \
+       : cl);                                                          \
                                                                        \
-       return w0;                                                      \
+       return i;                                                       \
 }
 
 ATOMIC_OP_ADD_RETURN(_relaxed,   )
@@ -94,41 +74,26 @@ ATOMIC_OP_ADD_RETURN(        , al, "memory")
 
 #undef ATOMIC_OP_ADD_RETURN
 
-static inline void arch_atomic_and(int i, atomic_t *v)
+static inline void __lse_atomic_and(int i, atomic_t *v)
 {
-       register int w0 asm ("w0") = i;
-       register atomic_t *x1 asm ("x1") = v;
-
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       __LL_SC_ATOMIC(and)
-       __nops(1),
-       /* LSE atomics */
+       asm volatile(
        "       mvn     %w[i], %w[i]\n"
-       "       stclr   %w[i], %[v]")
-       : [i] "+&r" (w0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : __LL_SC_CLOBBERS);
+       "       stclr   %w[i], %[v]"
+       : [i] "+&r" (i), [v] "+Q" (v->counter)
+       : "r" (v));
 }
 
 #define ATOMIC_FETCH_OP_AND(name, mb, cl...)                           \
-static inline int arch_atomic_fetch_and##name(int i, atomic_t *v)      \
+static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)     \
 {                                                                      \
-       register int w0 asm ("w0") = i;                                 \
-       register atomic_t *x1 asm ("x1") = v;                           \
-                                                                       \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
-       /* LL/SC */                                                     \
-       __LL_SC_ATOMIC(fetch_and##name)                                 \
-       __nops(1),                                                      \
-       /* LSE atomics */                                               \
+       asm volatile(                                                   \
        "       mvn     %w[i], %w[i]\n"                                 \
-       "       ldclr" #mb "    %w[i], %w[i], %[v]")                    \
-       : [i] "+&r" (w0), [v] "+Q" (v->counter)                         \
-       : "r" (x1)                                                      \
-       : __LL_SC_CLOBBERS, ##cl);                                      \
+       "       ldclr" #mb "    %w[i], %w[i], %[v]"                     \
+       : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
+       : "r" (v)                                                       \
+       : cl);                                                          \
                                                                        \
-       return w0;                                                      \
+       return i;                                                       \
 }
 
 ATOMIC_FETCH_OP_AND(_relaxed,   )
@@ -138,42 +103,29 @@ ATOMIC_FETCH_OP_AND(        , al, "memory")
 
 #undef ATOMIC_FETCH_OP_AND
 
-static inline void arch_atomic_sub(int i, atomic_t *v)
+static inline void __lse_atomic_sub(int i, atomic_t *v)
 {
-       register int w0 asm ("w0") = i;
-       register atomic_t *x1 asm ("x1") = v;
-
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       __LL_SC_ATOMIC(sub)
-       __nops(1),
-       /* LSE atomics */
+       asm volatile(
        "       neg     %w[i], %w[i]\n"
-       "       stadd   %w[i], %[v]")
-       : [i] "+&r" (w0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : __LL_SC_CLOBBERS);
+       "       stadd   %w[i], %[v]"
+       : [i] "+&r" (i), [v] "+Q" (v->counter)
+       : "r" (v));
 }
 
 #define ATOMIC_OP_SUB_RETURN(name, mb, cl...)                          \
-static inline int arch_atomic_sub_return##name(int i, atomic_t *v)     \
+static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)    \
 {                                                                      \
-       register int w0 asm ("w0") = i;                                 \
-       register atomic_t *x1 asm ("x1") = v;                           \
+       u32 tmp;                                                        \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
-       /* LL/SC */                                                     \
-       __LL_SC_ATOMIC(sub_return##name)                                \
-       __nops(2),                                                      \
-       /* LSE atomics */                                               \
+       asm volatile(                                                   \
        "       neg     %w[i], %w[i]\n"                                 \
-       "       ldadd" #mb "    %w[i], w30, %[v]\n"                     \
-       "       add     %w[i], %w[i], w30")                             \
-       : [i] "+&r" (w0), [v] "+Q" (v->counter)                         \
-       : "r" (x1)                                                      \
-       : __LL_SC_CLOBBERS , ##cl);                                     \
+       "       ldadd" #mb "    %w[i], %w[tmp], %[v]\n"                 \
+       "       add     %w[i], %w[i], %w[tmp]"                          \
+       : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)       \
+       : "r" (v)                                                       \
+       : cl);                                                  \
                                                                        \
-       return w0;                                                      \
+       return i;                                                       \
 }
 
 ATOMIC_OP_SUB_RETURN(_relaxed,   )
@@ -184,23 +136,16 @@ ATOMIC_OP_SUB_RETURN(        , al, "memory")
 #undef ATOMIC_OP_SUB_RETURN
 
 #define ATOMIC_FETCH_OP_SUB(name, mb, cl...)                           \
-static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v)      \
+static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)     \
 {                                                                      \
-       register int w0 asm ("w0") = i;                                 \
-       register atomic_t *x1 asm ("x1") = v;                           \
-                                                                       \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
-       /* LL/SC */                                                     \
-       __LL_SC_ATOMIC(fetch_sub##name)                                 \
-       __nops(1),                                                      \
-       /* LSE atomics */                                               \
+       asm volatile(                                                   \
        "       neg     %w[i], %w[i]\n"                                 \
-       "       ldadd" #mb "    %w[i], %w[i], %[v]")                    \
-       : [i] "+&r" (w0), [v] "+Q" (v->counter)                         \
-       : "r" (x1)                                                      \
-       : __LL_SC_CLOBBERS, ##cl);                                      \
+       "       ldadd" #mb "    %w[i], %w[i], %[v]"                     \
+       : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
+       : "r" (v)                                                       \
+       : cl);                                                          \
                                                                        \
-       return w0;                                                      \
+       return i;                                                       \
 }
 
 ATOMIC_FETCH_OP_SUB(_relaxed,   )
@@ -209,20 +154,14 @@ ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
 ATOMIC_FETCH_OP_SUB(        , al, "memory")
 
 #undef ATOMIC_FETCH_OP_SUB
-#undef __LL_SC_ATOMIC
 
-#define __LL_SC_ATOMIC64(op)   __LL_SC_CALL(arch_atomic64_##op)
 #define ATOMIC64_OP(op, asm_op)                                                \
-static inline void arch_atomic64_##op(s64 i, atomic64_t *v)            \
+static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)           \
 {                                                                      \
-       register s64 x0 asm ("x0") = i;                                 \
-       register atomic64_t *x1 asm ("x1") = v;                         \
-                                                                       \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),        \
-"      " #asm_op "     %[i], %[v]\n")                                  \
-       : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
-       : "r" (x1)                                                      \
-       : __LL_SC_CLOBBERS);                                            \
+       asm volatile(                                                   \
+"      " #asm_op "     %[i], %[v]\n"                                   \
+       : [i] "+r" (i), [v] "+Q" (v->counter)                           \
+       : "r" (v));                                                     \
 }
 
 ATOMIC64_OP(andnot, stclr)
@@ -233,21 +172,15 @@ ATOMIC64_OP(add, stadd)
 #undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)                 \
-static inline s64 arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
+static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
 {                                                                      \
-       register s64 x0 asm ("x0") = i;                                 \
-       register atomic64_t *x1 asm ("x1") = v;                         \
-                                                                       \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
-       /* LL/SC */                                                     \
-       __LL_SC_ATOMIC64(fetch_##op##name),                             \
-       /* LSE atomics */                                               \
-"      " #asm_op #mb " %[i], %[i], %[v]")                              \
-       : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
-       : "r" (x1)                                                      \
-       : __LL_SC_CLOBBERS, ##cl);                                      \
+       asm volatile(                                                   \
+"      " #asm_op #mb " %[i], %[i], %[v]"                               \
+       : [i] "+r" (i), [v] "+Q" (v->counter)                           \
+       : "r" (v)                                                       \
+       : cl);                                                          \
                                                                        \
-       return x0;                                                      \
+       return i;                                                       \
 }
 
 #define ATOMIC64_FETCH_OPS(op, asm_op)                                 \
@@ -265,23 +198,18 @@ ATOMIC64_FETCH_OPS(add, ldadd)
 #undef ATOMIC64_FETCH_OPS
 
 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)                                \
-static inline s64 arch_atomic64_add_return##name(s64 i, atomic64_t *v) \
+static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
 {                                                                      \
-       register s64 x0 asm ("x0") = i;                                 \
-       register atomic64_t *x1 asm ("x1") = v;                         \
+       unsigned long tmp;                                              \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
-       /* LL/SC */                                                     \
-       __LL_SC_ATOMIC64(add_return##name)                              \
-       __nops(1),                                                      \
-       /* LSE atomics */                                               \
-       "       ldadd" #mb "    %[i], x30, %[v]\n"                      \
-       "       add     %[i], %[i], x30")                               \
-       : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
-       : "r" (x1)                                                      \
-       : __LL_SC_CLOBBERS, ##cl);                                      \
+       asm volatile(                                                   \
+       "       ldadd" #mb "    %[i], %x[tmp], %[v]\n"                  \
+       "       add     %[i], %[i], %x[tmp]"                            \
+       : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)        \
+       : "r" (v)                                                       \
+       : cl);                                                          \
                                                                        \
-       return x0;                                                      \
+       return i;                                                       \
 }
 
 ATOMIC64_OP_ADD_RETURN(_relaxed,   )
@@ -291,41 +219,26 @@ ATOMIC64_OP_ADD_RETURN(        , al, "memory")
 
 #undef ATOMIC64_OP_ADD_RETURN
 
-static inline void arch_atomic64_and(s64 i, atomic64_t *v)
+static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 {
-       register s64 x0 asm ("x0") = i;
-       register atomic64_t *x1 asm ("x1") = v;
-
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       __LL_SC_ATOMIC64(and)
-       __nops(1),
-       /* LSE atomics */
+       asm volatile(
        "       mvn     %[i], %[i]\n"
-       "       stclr   %[i], %[v]")
-       : [i] "+&r" (x0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : __LL_SC_CLOBBERS);
+       "       stclr   %[i], %[v]"
+       : [i] "+&r" (i), [v] "+Q" (v->counter)
+       : "r" (v));
 }
 
 #define ATOMIC64_FETCH_OP_AND(name, mb, cl...)                         \
-static inline s64 arch_atomic64_fetch_and##name(s64 i, atomic64_t *v)  \
+static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)        \
 {                                                                      \
-       register s64 x0 asm ("x0") = i;                                 \
-       register atomic64_t *x1 asm ("x1") = v;                         \
-                                                                       \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
-       /* LL/SC */                                                     \
-       __LL_SC_ATOMIC64(fetch_and##name)                               \
-       __nops(1),                                                      \
-       /* LSE atomics */                                               \
+       asm volatile(                                                   \
        "       mvn     %[i], %[i]\n"                                   \
-       "       ldclr" #mb "    %[i], %[i], %[v]")                      \
-       : [i] "+&r" (x0), [v] "+Q" (v->counter)                         \
-       : "r" (x1)                                                      \
-       : __LL_SC_CLOBBERS, ##cl);                                      \
+       "       ldclr" #mb "    %[i], %[i], %[v]"                       \
+       : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
+       : "r" (v)                                                       \
+       : cl);                                                          \
                                                                        \
-       return x0;                                                      \
+       return i;                                                       \
 }
 
 ATOMIC64_FETCH_OP_AND(_relaxed,   )
@@ -335,42 +248,29 @@ ATOMIC64_FETCH_OP_AND(        , al, "memory")
 
 #undef ATOMIC64_FETCH_OP_AND
 
-static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
+static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 {
-       register s64 x0 asm ("x0") = i;
-       register atomic64_t *x1 asm ("x1") = v;
-
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       __LL_SC_ATOMIC64(sub)
-       __nops(1),
-       /* LSE atomics */
+       asm volatile(
        "       neg     %[i], %[i]\n"
-       "       stadd   %[i], %[v]")
-       : [i] "+&r" (x0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : __LL_SC_CLOBBERS);
+       "       stadd   %[i], %[v]"
+       : [i] "+&r" (i), [v] "+Q" (v->counter)
+       : "r" (v));
 }
 
 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)                                \
-static inline s64 arch_atomic64_sub_return##name(s64 i, atomic64_t *v) \
+static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)       \
 {                                                                      \
-       register s64 x0 asm ("x0") = i;                                 \
-       register atomic64_t *x1 asm ("x1") = v;                         \
+       unsigned long tmp;                                              \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
-       /* LL/SC */                                                     \
-       __LL_SC_ATOMIC64(sub_return##name)                              \
-       __nops(2),                                                      \
-       /* LSE atomics */                                               \
+       asm volatile(                                                   \
        "       neg     %[i], %[i]\n"                                   \
-       "       ldadd" #mb "    %[i], x30, %[v]\n"                      \
-       "       add     %[i], %[i], x30")                               \
-       : [i] "+&r" (x0), [v] "+Q" (v->counter)                         \
-       : "r" (x1)                                                      \
-       : __LL_SC_CLOBBERS, ##cl);                                      \
+       "       ldadd" #mb "    %[i], %x[tmp], %[v]\n"                  \
+       "       add     %[i], %[i], %x[tmp]"                            \
+       : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)       \
+       : "r" (v)                                                       \
+       : cl);                                                          \
                                                                        \
-       return x0;                                                      \
+       return i;                                                       \
 }
 
 ATOMIC64_OP_SUB_RETURN(_relaxed,   )
@@ -381,23 +281,16 @@ ATOMIC64_OP_SUB_RETURN(        , al, "memory")
 #undef ATOMIC64_OP_SUB_RETURN
 
 #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)                         \
-static inline s64 arch_atomic64_fetch_sub##name(s64 i, atomic64_t *v)  \
+static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)        \
 {                                                                      \
-       register s64 x0 asm ("x0") = i;                                 \
-       register atomic64_t *x1 asm ("x1") = v;                         \
-                                                                       \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
-       /* LL/SC */                                                     \
-       __LL_SC_ATOMIC64(fetch_sub##name)                               \
-       __nops(1),                                                      \
-       /* LSE atomics */                                               \
+       asm volatile(                                                   \
        "       neg     %[i], %[i]\n"                                   \
-       "       ldadd" #mb "    %[i], %[i], %[v]")                      \
-       : [i] "+&r" (x0), [v] "+Q" (v->counter)                         \
-       : "r" (x1)                                                      \
-       : __LL_SC_CLOBBERS, ##cl);                                      \
+       "       ldadd" #mb "    %[i], %[i], %[v]"                       \
+       : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
+       : "r" (v)                                                       \
+       : cl);                                                          \
                                                                        \
-       return x0;                                                      \
+       return i;                                                       \
 }
 
 ATOMIC64_FETCH_OP_SUB(_relaxed,   )
@@ -407,54 +300,44 @@ ATOMIC64_FETCH_OP_SUB(        , al, "memory")
 
 #undef ATOMIC64_FETCH_OP_SUB
 
-static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
 {
-       register long x0 asm ("x0") = (long)v;
-
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       __LL_SC_ATOMIC64(dec_if_positive)
-       __nops(6),
-       /* LSE atomics */
-       "1:     ldr     x30, %[v]\n"
-       "       subs    %[ret], x30, #1\n"
+       unsigned long tmp;
+
+       asm volatile(
+       "1:     ldr     %x[tmp], %[v]\n"
+       "       subs    %[ret], %x[tmp], #1\n"
        "       b.lt    2f\n"
-       "       casal   x30, %[ret], %[v]\n"
-       "       sub     x30, x30, #1\n"
-       "       sub     x30, x30, %[ret]\n"
-       "       cbnz    x30, 1b\n"
-       "2:")
-       : [ret] "+&r" (x0), [v] "+Q" (v->counter)
+       "       casal   %x[tmp], %[ret], %[v]\n"
+       "       sub     %x[tmp], %x[tmp], #1\n"
+       "       sub     %x[tmp], %x[tmp], %[ret]\n"
+       "       cbnz    %x[tmp], 1b\n"
+       "2:"
+       : [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
        :
-       : __LL_SC_CLOBBERS, "cc", "memory");
+       : "cc", "memory");
 
-       return x0;
+       return (long)v;
 }
 
-#undef __LL_SC_ATOMIC64
-
-#define __LL_SC_CMPXCHG(op)    __LL_SC_CALL(__cmpxchg_case_##op)
-
 #define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)                    \
-static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr,      \
+static inline u##sz __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
                                              u##sz old,                \
                                              u##sz new)                \
 {                                                                      \
        register unsigned long x0 asm ("x0") = (unsigned long)ptr;      \
        register u##sz x1 asm ("x1") = old;                             \
        register u##sz x2 asm ("x2") = new;                             \
+       unsigned long tmp;                                              \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
-       /* LL/SC */                                                     \
-       __LL_SC_CMPXCHG(name##sz)                                       \
-       __nops(2),                                                      \
-       /* LSE atomics */                                               \
-       "       mov     " #w "30, %" #w "[old]\n"                       \
-       "       cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n"        \
-       "       mov     %" #w "[ret], " #w "30")                        \
-       : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)             \
+       asm volatile(                                                   \
+       "       mov     %" #w "[tmp], %" #w "[old]\n"                   \
+       "       cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"    \
+       "       mov     %" #w "[ret], %" #w "[tmp]"                     \
+       : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr),            \
+         [tmp] "=&r" (tmp)                                             \
        : [old] "r" (x1), [new] "r" (x2)                                \
-       : __LL_SC_CLOBBERS, ##cl);                                      \
+       : cl);                                                          \
                                                                        \
        return x0;                                                      \
 }
@@ -476,13 +359,10 @@ __CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
 __CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
 __CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")
 
-#undef __LL_SC_CMPXCHG
 #undef __CMPXCHG_CASE
 
-#define __LL_SC_CMPXCHG_DBL(op)        __LL_SC_CALL(__cmpxchg_double##op)
-
 #define __CMPXCHG_DBL(name, mb, cl...)                                 \
-static inline long __cmpxchg_double##name(unsigned long old1,          \
+static inline long __lse__cmpxchg_double##name(unsigned long old1,     \
                                         unsigned long old2,            \
                                         unsigned long new1,            \
                                         unsigned long new2,            \
@@ -496,20 +376,16 @@ static inline long __cmpxchg_double##name(unsigned long old1,             \
        register unsigned long x3 asm ("x3") = new2;                    \
        register unsigned long x4 asm ("x4") = (unsigned long)ptr;      \
                                                                        \
-       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
-       /* LL/SC */                                                     \
-       __LL_SC_CMPXCHG_DBL(name)                                       \
-       __nops(3),                                                      \
-       /* LSE atomics */                                               \
+       asm volatile(                                                   \
        "       casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
        "       eor     %[old1], %[old1], %[oldval1]\n"                 \
        "       eor     %[old2], %[old2], %[oldval2]\n"                 \
-       "       orr     %[old1], %[old1], %[old2]")                     \
+       "       orr     %[old1], %[old1], %[old2]"                      \
        : [old1] "+&r" (x0), [old2] "+&r" (x1),                         \
          [v] "+Q" (*(unsigned long *)ptr)                              \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),             \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)              \
-       : __LL_SC_CLOBBERS, ##cl);                                      \
+       : cl);                                                          \
                                                                        \
        return x0;                                                      \
 }
@@ -517,7 +393,6 @@ static inline long __cmpxchg_double##name(unsigned long old1,               \
 __CMPXCHG_DBL(   ,   )
 __CMPXCHG_DBL(_mb, al, "memory")
 
-#undef __LL_SC_CMPXCHG_DBL
 #undef __CMPXCHG_DBL
 
 #endif /* __ASM_ATOMIC_LSE_H */
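
With the out-of-line LL/SC fallback (and its fixed x0/x1 operands and w30/x30 scratch register) gone from <asm/atomic_lse.h>, every helper above is a plain inline asm over compiler-allocated registers, and the *_return variants use a local tmp instead of clobbering the link register. For reference, ATOMIC_OP_ADD_RETURN(_relaxed, ) expands to roughly the following (hand-expanded, with the empty clobber list omitted):

    static inline int __lse_atomic_add_return_relaxed(int i, atomic_t *v)
    {
            u32 tmp;

            asm volatile(
            /* LDADD atomically adds i to the counter, old value lands in tmp, */
            "       ldadd   %w[i], %w[tmp], %[v]\n"
            /* so old + i is the newly stored value that add_return must return. */
            "       add     %w[i], %w[i], %w[tmp]"
            : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
            : "r" (v));

            return i;
    }
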
index 64eeaa41e7caa2c76083ffdafd993abc7df17f4f..43da6dd295920798debddb2854563a909ba5e19b 100644 (file)
@@ -78,7 +78,7 @@ static inline u32 cache_type_cwg(void)
        return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
 }
 
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_mostly __section(.data..read_mostly)
 
 static inline int cache_line_size_of_cpu(void)
 {
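
The <asm/cache.h> tweak is purely mechanical: __read_mostly now goes through the kernel's __section() helper instead of spelling out the attribute, and users are unaffected. For example (the variable is invented for illustration):

    /* Lands in .data..read_mostly, away from frequently written data. */
    static int example_threshold __read_mostly = 16;
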
index 7a299a20f6dcc558219d452287767d64cd9c73e0..a1398f2f9994fcdc0e110731feffa550ab765964 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/build_bug.h>
 #include <linux/compiler.h>
 
-#include <asm/atomic.h>
 #include <asm/barrier.h>
 #include <asm/lse.h>
 
@@ -104,6 +103,50 @@ __XCHG_GEN(_mb)
 #define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
 #define arch_xchg(...)         __xchg_wrapper( _mb, __VA_ARGS__)
 
+#define __CMPXCHG_CASE(name, sz)                       \
+static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr,      \
+                                             u##sz old,                \
+                                             u##sz new)                \
+{                                                                      \
+       return __lse_ll_sc_body(_cmpxchg_case_##name##sz,               \
+                               ptr, old, new);                         \
+}
+
+__CMPXCHG_CASE(    ,  8)
+__CMPXCHG_CASE(    , 16)
+__CMPXCHG_CASE(    , 32)
+__CMPXCHG_CASE(    , 64)
+__CMPXCHG_CASE(acq_,  8)
+__CMPXCHG_CASE(acq_, 16)
+__CMPXCHG_CASE(acq_, 32)
+__CMPXCHG_CASE(acq_, 64)
+__CMPXCHG_CASE(rel_,  8)
+__CMPXCHG_CASE(rel_, 16)
+__CMPXCHG_CASE(rel_, 32)
+__CMPXCHG_CASE(rel_, 64)
+__CMPXCHG_CASE(mb_,  8)
+__CMPXCHG_CASE(mb_, 16)
+__CMPXCHG_CASE(mb_, 32)
+__CMPXCHG_CASE(mb_, 64)
+
+#undef __CMPXCHG_CASE
+
+#define __CMPXCHG_DBL(name)                                            \
+static inline long __cmpxchg_double##name(unsigned long old1,          \
+                                        unsigned long old2,            \
+                                        unsigned long new1,            \
+                                        unsigned long new2,            \
+                                        volatile void *ptr)            \
+{                                                                      \
+       return __lse_ll_sc_body(_cmpxchg_double##name,                  \
+                               old1, old2, new1, new2, ptr);           \
+}
+
+__CMPXCHG_DBL(   )
+__CMPXCHG_DBL(_mb)
+
+#undef __CMPXCHG_DBL
+
 #define __CMPXCHG_GEN(sfx)                                             \
 static inline unsigned long __cmpxchg##sfx(volatile void *ptr,         \
                                           unsigned long old,           \
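
Each __CMPXCHG_CASE()/__CMPXCHG_DBL() instance added to <asm/cmpxchg.h> above is only a thin dispatcher built on __lse_ll_sc_body(); for instance, __CMPXCHG_CASE(mb_, 64) expands (modulo whitespace) to:

    static inline u64 __cmpxchg_case_mb_64(volatile void *ptr,
                                           u64 old,
                                           u64 new)
    {
            /* Resolves to __lse__cmpxchg_case_mb_64() when the LSE static
             * key is set, or to __ll_sc__cmpxchg_case_mb_64() otherwise. */
            return __lse_ll_sc_body(_cmpxchg_case_mb_64, ptr, old, new);
    }
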
index fb8ad4616b3bfff87e8e8e418755bbebb2174e39..b0d53a265f1d510cbb6372685b39a53de2361eed 100644 (file)
@@ -4,7 +4,6 @@
  */
 #ifndef __ASM_COMPAT_H
 #define __ASM_COMPAT_H
-#ifdef __KERNEL__
 #ifdef CONFIG_COMPAT
 
 /*
@@ -215,5 +214,4 @@ static inline int is_compat_thread(struct thread_info *thread)
 }
 
 #endif /* CONFIG_COMPAT */
-#endif /* __KERNEL__ */
 #endif /* __ASM_COMPAT_H */
index c09d633c3109f4b1560aa359a1daca89d8b8b7b0..86aabf1e019932c70a0d1964750e59905d3f0861 100644 (file)
@@ -23,6 +23,8 @@
  * @cpu_boot:  Boots a cpu into the kernel.
  * @cpu_postboot: Optionally, perform any post-boot cleanup or necesary
  *             synchronisation. Called from the cpu being booted.
+ * @cpu_can_disable: Determines whether a CPU can be disabled based on
+ *             mechanism-specific information.
  * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
  *             reason, which will cause the hot unplug to be aborted. Called
  *             from the cpu to be killed.
@@ -42,6 +44,7 @@ struct cpu_operations {
        int             (*cpu_boot)(unsigned int);
        void            (*cpu_postboot)(void);
 #ifdef CONFIG_HOTPLUG_CPU
+       bool            (*cpu_can_disable)(unsigned int cpu);
        int             (*cpu_disable)(unsigned int cpu);
        void            (*cpu_die)(unsigned int cpu);
        int             (*cpu_kill)(unsigned int cpu);
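
An enable-method that wants to veto hot-unplug implements the new cpu_can_disable hook alongside the existing ones. The sketch below is illustrative only; the backend name and the policy are invented, not part of this patch:

    #ifdef CONFIG_HOTPLUG_CPU
    /* Hypothetical policy: the boot CPU must stay online on this platform. */
    static bool example_cpu_can_disable(unsigned int cpu)
    {
            return cpu != 0;
    }
    #endif

    static const struct cpu_operations example_cpu_ops = {
            /* boot/kill hooks elided */
    #ifdef CONFIG_HOTPLUG_CPU
            .cpu_can_disable        = example_cpu_can_disable,
    #endif
    };
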
index c96ffa4722d33cba234afdc64ce4483926eba26c..9cde5d2e768f43160d7322361a9dfc2c77293b68 100644 (file)
@@ -289,9 +289,16 @@ struct arm64_cpu_capabilities {
        u16 type;
        bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
        /*
-        * Take the appropriate actions to enable this capability for this CPU.
-        * For each successfully booted CPU, this method is called for each
-        * globally detected capability.
+        * Take the appropriate actions to configure this capability
+        * for this CPU. If the capability is detected by the kernel
+        * this will be called on all the CPUs in the system,
+        * including the hotplugged CPUs, regardless of whether the
+        * capability is available on that specific CPU. This is
+        * useful for some capabilities (e.g, working around CPU
+        * errata), where all the CPUs must take some action (e.g,
+        * changing system control/configuration). Thus, if an action
+        * is required only if the CPU has the capability, then the
+        * routine must check it before taking any action.
         */
        void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
        union {
@@ -363,21 +370,6 @@ cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
        return false;
 }
 
-/*
- * Take appropriate action for all matching entries in the shared capability
- * entry.
- */
-static inline void
-cpucap_multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
-{
-       const struct arm64_cpu_capabilities *caps;
-
-       for (caps = entry->match_list; caps->matches; caps++)
-               if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
-                   caps->cpu_enable)
-                       caps->cpu_enable(caps);
-}
-
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
 extern struct static_key_false arm64_const_caps_ready;
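
The expanded cpu_enable() comment is the behavioural point of the <asm/cpufeature.h> change: the callback now runs on every CPU that comes online, whether or not that CPU has the capability. A minimal sketch of a conforming erratum-style callback; the workaround function is hypothetical, and the SCOPE_LOCAL_CPU re-check mirrors the pattern of the multi-entry helper removed above:

    static void example_cpu_enable_workaround(const struct arm64_cpu_capabilities *cap)
    {
            /*
             * Called on all CPUs, including ones without the erratum,
             * so only act when this particular CPU actually matches.
             */
            if (cap->matches(cap, SCOPE_LOCAL_CPU))
                    apply_example_workaround();     /* hypothetical per-CPU fixup */
    }
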
index e7d46631cc42ba1bd18adf6a279a2c1f74575e85..b1454d117cd2c69582d519f123fcaa66f5f81c44 100644 (file)
 #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
                             MIDR_ARCHITECTURE_MASK)
 
-#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max)           \
-({                                                                     \
-       u32 _model = (midr) & MIDR_CPU_MODEL_MASK;                      \
-       u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);     \
-                                                                       \
-       _model == (model) && rv >= (rv_min) && rv <= (rv_max);          \
- })
-
 #define ARM_CPU_IMP_ARM                        0x41
 #define ARM_CPU_IMP_APM                        0x50
 #define ARM_CPU_IMP_CAVIUM             0x43
@@ -159,10 +151,19 @@ struct midr_range {
 #define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r)
 #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
 
+static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
+                                          u32 rv_max)
+{
+       u32 _model = midr & MIDR_CPU_MODEL_MASK;
+       u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);
+
+       return _model == model && rv >= rv_min && rv <= rv_max;
+}
+
 static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
 {
-       return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
-                                range->rv_min, range->rv_max);
+       return midr_is_cpu_model_range(midr, range->model,
+                                      range->rv_min, range->rv_max);
 }
 
 static inline bool
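
Turning MIDR_IS_CPU_MODEL_RANGE() into the midr_is_cpu_model_range() function in <asm/cputype.h> gains type checking and single evaluation of its arguments without changing how it is used. A usage sketch, assuming read_cpuid_id(), MIDR_CORTEX_A72 and MIDR_CPU_VAR_REV() as defined elsewhere in this header:

    static bool cpu_is_early_a72(void)
    {
            /* True when the calling CPU is a Cortex-A72, r0p0 through r0p3. */
            return midr_is_cpu_model_range(read_cpuid_id(), MIDR_CORTEX_A72,
                                           MIDR_CPU_VAR_REV(0, 0),
                                           MIDR_CPU_VAR_REV(0, 3));
    }
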
index d8ec5bb881c26700d013b08ef35cffda8468fd1d..7619f473155f2fac2dabfc9ebddfcb797475f285 100644 (file)
@@ -5,8 +5,6 @@
 #ifndef __ASM_DEBUG_MONITORS_H
 #define __ASM_DEBUG_MONITORS_H
 
-#ifdef __KERNEL__
-
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <asm/brk-imm.h>
@@ -128,5 +126,4 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
 int aarch32_break_handler(struct pt_regs *regs);
 
 #endif /* __ASSEMBLY */
-#endif /* __KERNEL__ */
 #endif /* __ASM_DEBUG_MONITORS_H */
index bdcb0922a40c953d28c263cf46a01b0f46eb23f1..fb3e5044f473a36cf3b36c19686060c4bd9ad133 100644 (file)
@@ -5,8 +5,6 @@
 #ifndef __ASM_DMA_MAPPING_H
 #define __ASM_DMA_MAPPING_H
 
-#ifdef __KERNEL__
-
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
@@ -27,5 +25,4 @@ static inline bool is_device_dma_coherent(struct device *dev)
        return dev->dma_coherent;
 }
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_DMA_MAPPING_H */
index 76a14470258693743eb29a09028751e0f710a2c3..b54d3a86c44446735bfbfe4e49ab217460fc5b27 100644 (file)
@@ -79,7 +79,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
 
 /*
  * On arm64, we have to ensure that the initrd ends up in the linear region,
- * which is a 1 GB aligned region of size '1UL << (VA_BITS - 1)' that is
+ * which is a 1 GB aligned region of size '1UL << (VA_BITS_MIN - 1)' that is
  * guaranteed to cover the kernel Image.
  *
  * Since the EFI stub is part of the kernel Image, we can relax the
@@ -90,7 +90,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
 static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
                                                    unsigned long image_addr)
 {
-       return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS - 1));
+       return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1));
 }
 
 #define efi_call_early(f, ...)         sys_table_arg->boottime->f(__VA_ARGS__)
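
Worked example for the <asm/efi.h> change, assuming VA_BITS_MIN = 48: half of the smallest possible linear region is 1UL << 47 = 128 TiB, so for a kernel image placed at physical address 0x8020_0000 the stub caps the initrd at (0x8020_0000 & ~(SZ_1G - 1)) + (1UL << 47) = 0x8000_0000 + 0x8000_0000_0000 = 0x8000_8000_0000, i.e. a 128 TiB window above the 1 GiB-aligned image base. Basing the limit on VA_BITS_MIN rather than VA_BITS keeps that guarantee valid even when a kernel configured for 52-bit VAs ends up running with 48-bit VAs.
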
index 65ac184009798ca2831646fdc46a4c82018cce68..cb29253ae86b00ee09844b567620c7fb2c19e24f 100644 (file)
@@ -34,7 +34,8 @@
 #define ESR_ELx_EC_SMC64       (0x17)  /* EL2 and above */
 #define ESR_ELx_EC_SYS64       (0x18)
 #define ESR_ELx_EC_SVE         (0x19)
-/* Unallocated EC: 0x1A - 0x1E */
+#define ESR_ELx_EC_ERET                (0x1a)  /* EL2 only */
+/* Unallocated EC: 0x1b - 0x1E */
 #define ESR_ELx_EC_IMP_DEF     (0x1f)  /* EL3 only */
 #define ESR_ELx_EC_IABT_LOW    (0x20)
 #define ESR_ELx_EC_IABT_CUR    (0x21)
index ed57b760f38cf2497ad37a91041b6f806026f817..a17393ff6677425e676d0e6dd958f8862f87e913 100644 (file)
@@ -30,4 +30,6 @@ static inline u32 disr_to_esr(u64 disr)
        return esr;
 }
 
+asmlinkage void enter_from_user_mode(void);
+
 #endif /* __ASM_EXCEPTION_H */
index b6a2c352f4c3fd64726115f478d1c6a4fd60b040..59f10dd13f121e3acb156740ff6c17e0d12856da 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/stddef.h>
 #include <linux/types.h>
 
-#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
+#ifdef CONFIG_COMPAT
 /* Masks for extracting the FPSR and FPCR from the FPSCR */
 #define VFP_FPSCR_STAT_MASK    0xf800009f
 #define VFP_FPSCR_CTRL_MASK    0x07f79f00
index 6211e3105491e033f16d372916733ab88f7c6b37..6cc26a1278197a60d8f275a3b3ba0df7aba0a265 100644 (file)
@@ -5,8 +5,6 @@
 #ifndef __ASM_FUTEX_H
 #define __ASM_FUTEX_H
 
-#ifdef __KERNEL__
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 
@@ -129,5 +127,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
        return ret;
 }
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_FUTEX_H */
index db9ab760e6fdd9b239c97c954975503df9368a55..bc7aaed4b34e958386ae844a6795ba6d576bc182 100644 (file)
@@ -10,8 +10,6 @@
 #include <asm/sysreg.h>
 #include <asm/virt.h>
 
-#ifdef __KERNEL__
-
 struct arch_hw_breakpoint_ctrl {
        u32 __reserved  : 19,
        len             : 8,
@@ -156,5 +154,4 @@ static inline int get_num_wrps(void)
                                                ID_AA64DFR0_WRPS_SHIFT);
 }
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_BREAKPOINT_H */
index 7ed92626949d7d001d6f84a4402d9f419d01e637..323cb306bd2884a3ea1c6877df5829ae87deb440 100644 (file)
@@ -8,8 +8,6 @@
 #ifndef __ASM_IO_H
 #define __ASM_IO_H
 
-#ifdef __KERNEL__
-
 #include <linux/types.h>
 
 #include <asm/byteorder.h>
@@ -97,7 +95,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 ({                                                                     \
        unsigned long tmp;                                              \
                                                                        \
-       rmb();                                                          \
+       dma_rmb();                                                              \
                                                                        \
        /*                                                              \
         * Create a dummy control dependency from the IO read to any    \
@@ -111,7 +109,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 })
 
 #define __io_par(v)            __iormb(v)
-#define __iowmb()              wmb()
+#define __iowmb()              dma_wmb()
 
 /*
  * Relaxed I/O memory access primitives. These follow the Device memory
@@ -165,14 +163,13 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
  * I/O memory mapping functions.
  */
 extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
-extern void __iounmap(volatile void __iomem *addr);
+extern void iounmap(volatile void __iomem *addr);
 extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 
 #define ioremap(addr, size)            __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_nocache(addr, size)    __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size)         __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
 #define ioremap_wt(addr, size)         __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
-#define iounmap                                __iounmap
 
 /*
  * PCI configuration space mapping function.
@@ -207,5 +204,4 @@ extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
 
 extern int devmem_is_allowed(unsigned long pfn);
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_IO_H */
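
The <asm/io.h> hunk relaxes the barriers implied by the default accessors from rmb()/wmb() to dma_rmb()/dma_wmb() and folds __iounmap() into a real iounmap(); the property drivers rely on, a read completing before a subsequent delay, is still provided by the dummy control dependency in __iormb(). A typical consumer, sketched with an invented register and status bit:

    /* Hypothetical poll loop: __iormb() inside readl() orders the read
     * against the following udelay(), so the wait really starts after
     * the device has been sampled. */
    static void example_wait_ready(void __iomem *status_reg)
    {
            while (!(readl(status_reg) & EXAMPLE_STATUS_READY))
                    udelay(10);
    }
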
index 7872f260c9ee3e14755e916230be06095a936dbf..1a59f0ed1ae392ac06fe26d2da2a9274239c0432 100644 (file)
@@ -5,8 +5,6 @@
 #ifndef __ASM_IRQFLAGS_H
 #define __ASM_IRQFLAGS_H
 
-#ifdef __KERNEL__
-
 #include <asm/alternative.h>
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
@@ -128,5 +126,4 @@ static inline void arch_local_irq_restore(unsigned long flags)
                : "memory");
 }
 
-#endif
-#endif
+#endif /* __ASM_IRQFLAGS_H */
index b52aacd2c5261ada9d417f2a6de2dbb96658ff1c..b0dc4abc358911237956c24ed12a3d64d09c4474 100644 (file)
  * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
  * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
  * where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
- */
-#define KASAN_SHADOW_START      (VA_START)
-#define KASAN_SHADOW_END        (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
-
-/*
+ *
+ * KASAN_SHADOW_OFFSET:
  * This value is used to map an address to the corresponding shadow
  * address by the following formula:
  *     shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
@@ -33,8 +30,8 @@
  *      KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
  *                             (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
  */
-#define KASAN_SHADOW_OFFSET     (KASAN_SHADOW_END - (1ULL << \
-                                       (64 - KASAN_SHADOW_SCALE_SHIFT)))
+#define _KASAN_SHADOW_START(va)        (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
+#define KASAN_SHADOW_START      _KASAN_SHADOW_START(vabits_actual)
 
 void kasan_init(void);
 void kasan_copy_shadow(pgd_t *pgdir);
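
Worked example for the reworked <asm/kasan.h>: with generic KASAN (KASAN_SHADOW_SCALE_SHIFT = 3) and vabits_actual = 48, the shadow region spans 1UL << (48 - 3) = 32 TiB, one eighth of the 256 TiB TTBR1 range, ending at KASAN_SHADOW_END; booting with 52-bit VAs grows that window to 1UL << 49 = 512 TiB, while KASAN_SHADOW_END, and with it the compile-time KASAN_SHADOW_OFFSET derived from it, stays put.
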
index 8262325e2fc66ec3d42f83ae6621ea4ec7e19651..80b388278149681cbf968449bd3681d7080a8f52 100644 (file)
@@ -2,56 +2,47 @@
 #ifndef __ASM_LSE_H
 #define __ASM_LSE_H
 
+#include <asm/atomic_ll_sc.h>
+
 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
 
 #include <linux/compiler_types.h>
 #include <linux/export.h>
+#include <linux/jump_label.h>
 #include <linux/stringify.h>
 #include <asm/alternative.h>
+#include <asm/atomic_lse.h>
 #include <asm/cpucaps.h>
 
-#ifdef __ASSEMBLER__
-
-.arch_extension        lse
-
-.macro alt_lse, llsc, lse
-       alternative_insn "\llsc", "\lse", ARM64_HAS_LSE_ATOMICS
-.endm
-
-#else  /* __ASSEMBLER__ */
-
 __asm__(".arch_extension       lse");
 
-/* Move the ll/sc atomics out-of-line */
-#define __LL_SC_INLINE         notrace
-#define __LL_SC_PREFIX(x)      __ll_sc_##x
-#define __LL_SC_EXPORT(x)      EXPORT_SYMBOL(__LL_SC_PREFIX(x))
+extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+extern struct static_key_false arm64_const_caps_ready;
+
+static inline bool system_uses_lse_atomics(void)
+{
+       return (static_branch_likely(&arm64_const_caps_ready)) &&
+               static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);
+}
 
-/* Macro for constructing calls to out-of-line ll/sc atomics */
-#define __LL_SC_CALL(op)       "bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"
-#define __LL_SC_CLOBBERS       "x16", "x17", "x30"
+#define __lse_ll_sc_body(op, ...)                                      \
+({                                                                     \
+       system_uses_lse_atomics() ?                                     \
+               __lse_##op(__VA_ARGS__) :                               \
+               __ll_sc_##op(__VA_ARGS__);                              \
+})
 
 /* In-line patching at runtime */
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse)                               \
        ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
 
-#endif /* __ASSEMBLER__ */
 #else  /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
 
-#ifdef __ASSEMBLER__
-
-.macro alt_lse, llsc, lse
-       \llsc
-.endm
-
-#else  /* __ASSEMBLER__ */
+static inline bool system_uses_lse_atomics(void) { return false; }
 
-#define __LL_SC_INLINE         static inline
-#define __LL_SC_PREFIX(x)      x
-#define __LL_SC_EXPORT(x)
+#define __lse_ll_sc_body(op, ...)              __ll_sc_##op(__VA_ARGS__)
 
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse)       llsc
 
-#endif /* __ASSEMBLER__ */
 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
 #endif /* __ASM_LSE_H */
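
The hunk above replaces the out-of-line LL/SC plumbing with __lse_ll_sc_body(), which selects the LSE or LL/SC implementation of an operation at run time. A minimal userspace sketch of that dispatch pattern follows; the add helpers and the have_lse flag are illustrative stand-ins, not the kernel's atomic_lse.h / atomic_ll_sc.h definitions.

#include <stdbool.h>
#include <stdio.h>

static bool have_lse;                           /* stands in for system_uses_lse_atomics() */

static void __lse_add(int i, int *v)   { *v += i; /* would be an LSE STADD */ }
static void __ll_sc_add(int i, int *v) { *v += i; /* would be an LDXR/STXR loop */ }

#define __lse_ll_sc_body(op, ...)                                       \
({                                                                      \
        have_lse ? __lse_##op(__VA_ARGS__) : __ll_sc_##op(__VA_ARGS__); \
})

int main(void)
{
        int v = 0;

        have_lse = true;
        __lse_ll_sc_body(add, 5, &v);           /* expands to __lse_add(5, &v) */
        printf("v = %d\n", v);
        return 0;
}
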
index fb04f10a78ab35c462e251d0bacce3a05da9be32..b61b50bf68b1893024e997d268fbbb3c91153d91 100644 (file)
 
 #include <linux/compiler.h>
 #include <linux/const.h>
+#include <linux/sizes.h>
 #include <linux/types.h>
 #include <asm/bug.h>
 #include <asm/page-def.h>
-#include <linux/sizes.h>
 
 /*
  * Size of the PCI I/O space. This must remain a power of two so that
 /*
  * VMEMMAP_SIZE - allows the whole linear region to be covered by
  *                a struct page array
+ *
+ * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
+ * needs to cover the memory region from the beginning of the 52-bit
+ * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
+ * keep a constant PAGE_OFFSET and "fallback" to using the higher end
+ * of the VMEMMAP where 52-bit support is not available in hardware.
  */
-#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
+#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
+                       >> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))
 
 /*
- * PAGE_OFFSET - the virtual address of the start of the linear map (top
- *              (VA_BITS - 1))
- * KIMAGE_VADDR - the virtual address of the start of the kernel image
+ * PAGE_OFFSET - the virtual address of the start of the linear map, at the
+ *               start of the TTBR1 address space.
+ * PAGE_END - the end of the linear map, where all other kernel mappings begin.
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image.
  * VA_BITS - the maximum number of bits for virtual addresses.
- * VA_START - the first kernel virtual address.
  */
 #define VA_BITS                        (CONFIG_ARM64_VA_BITS)
-#define VA_START               (UL(0xffffffffffffffff) - \
-       (UL(1) << VA_BITS) + 1)
-#define PAGE_OFFSET            (UL(0xffffffffffffffff) - \
-       (UL(1) << (VA_BITS - 1)) + 1)
+#define _PAGE_OFFSET(va)       (-(UL(1) << (va)))
+#define PAGE_OFFSET            (_PAGE_OFFSET(VA_BITS))
 #define KIMAGE_VADDR           (MODULES_END)
-#define BPF_JIT_REGION_START   (VA_START + KASAN_SHADOW_SIZE)
+#define BPF_JIT_REGION_START   (KASAN_SHADOW_END)
 #define BPF_JIT_REGION_SIZE    (SZ_128M)
 #define BPF_JIT_REGION_END     (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
 #define MODULES_END            (MODULES_VADDR + MODULES_VSIZE)
 #define MODULES_VADDR          (BPF_JIT_REGION_END)
 #define MODULES_VSIZE          (SZ_128M)
-#define VMEMMAP_START          (PAGE_OFFSET - VMEMMAP_SIZE)
+#define VMEMMAP_START          (-VMEMMAP_SIZE - SZ_2M)
 #define PCI_IO_END             (VMEMMAP_START - SZ_2M)
 #define PCI_IO_START           (PCI_IO_END - PCI_IO_SIZE)
 #define FIXADDR_TOP            (PCI_IO_START - SZ_2M)
 
-#define KERNEL_START      _text
-#define KERNEL_END        _end
+#if VA_BITS > 48
+#define VA_BITS_MIN            (48)
+#else
+#define VA_BITS_MIN            (VA_BITS)
+#endif
+
+#define _PAGE_END(va)          (-(UL(1) << ((va) - 1)))
 
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#define KERNEL_START           _text
+#define KERNEL_END             _end
+
+#ifdef CONFIG_ARM64_VA_BITS_52
 #define MAX_USER_VA_BITS       52
 #else
 #define MAX_USER_VA_BITS       VA_BITS
  * significantly, so double the (minimum) stack size when they are in use.
  */
 #ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SIZE      (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
+#define KASAN_SHADOW_OFFSET    _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_END       ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
+                                       + KASAN_SHADOW_OFFSET)
 #define KASAN_THREAD_SHIFT     1
 #else
-#define KASAN_SHADOW_SIZE      (0)
 #define KASAN_THREAD_SHIFT     0
-#endif
+#define KASAN_SHADOW_END       (_PAGE_END(VA_BITS_MIN))
+#endif /* CONFIG_KASAN */
 
 #define MIN_THREAD_SHIFT       (14 + KASAN_THREAD_SHIFT)
 
  * 16 KB granule: 128 level 3 entries, with contiguous bit
  * 64 KB granule:  32 level 3 entries, with contiguous bit
  */
-#define SEGMENT_ALIGN                  SZ_2M
+#define SEGMENT_ALIGN          SZ_2M
 #else
 /*
  *  4 KB granule:  16 level 3 entries, with contiguous bit
  * 16 KB granule:   4 level 3 entries, without contiguous bit
  * 64 KB granule:   1 level 3 entry
  */
-#define SEGMENT_ALIGN                  SZ_64K
+#define SEGMENT_ALIGN          SZ_64K
 #endif
 
 /*
 #endif
 
 #ifndef __ASSEMBLY__
+extern u64                     vabits_actual;
+#define PAGE_END               (_PAGE_END(vabits_actual))
 
 #include <linux/bitops.h>
 #include <linux/mmdebug.h>
 
+extern s64                     physvirt_offset;
 extern s64                     memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
 #define PHYS_OFFSET            ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
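
Illustrative arithmetic for the new layout macros above, assuming a 48-bit configuration (VA_BITS == VA_BITS_MIN == 48); not part of the commit.

#include <stdio.h>

#define _PAGE_OFFSET(va)  (-(1UL << (va)))
#define _PAGE_END(va)     (-(1UL << ((va) - 1)))

int main(void)
{
        printf("PAGE_OFFSET = %#lx\n", _PAGE_OFFSET(48));  /* 0xffff000000000000 */
        printf("PAGE_END    = %#lx\n", _PAGE_END(48));     /* 0xffff800000000000 */
        /* The linear map occupies [PAGE_OFFSET, PAGE_END); vmalloc, fixmap,
         * PCI I/O and vmemmap all live above PAGE_END, so PAGE_OFFSET can stay
         * constant whether the hardware provides 48 or 52 VA bits. */
        return 0;
}
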
@@ -176,9 +194,6 @@ static inline unsigned long kaslr_offset(void)
        return kimage_vaddr - KIMAGE_VADDR;
 }
 
-/* the actual size of a user virtual address */
-extern u64                     vabits_user;
-
 /*
  * Allow all memory at the discovery stage. We will clip it later.
  */
@@ -201,24 +216,24 @@ extern u64                        vabits_user;
  * pass on to access_ok(), for instance.
  */
 #define untagged_addr(addr)    \
-       ((__typeof__(addr))sign_extend64((u64)(addr), 55))
+       ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
 
 #ifdef CONFIG_KASAN_SW_TAGS
 #define __tag_shifted(tag)     ((u64)(tag) << 56)
-#define __tag_set(addr, tag)   (__typeof__(addr))( \
-               ((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag))
 #define __tag_reset(addr)      untagged_addr(addr)
 #define __tag_get(addr)                (__u8)((u64)(addr) >> 56)
 #else
+#define __tag_shifted(tag)     0UL
+#define __tag_reset(addr)      (addr)
+#define __tag_get(addr)                0
+#endif /* CONFIG_KASAN_SW_TAGS */
+
 static inline const void *__tag_set(const void *addr, u8 tag)
 {
-       return addr;
+       u64 __addr = (u64)addr & ~__tag_shifted(0xff);
+       return (const void *)(__addr | __tag_shifted(tag));
 }
 
-#define __tag_reset(addr)      (addr)
-#define __tag_get(addr)                0
-#endif
-
 /*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
@@ -227,19 +242,18 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 
 
 /*
- * The linear kernel range starts in the middle of the virtual adddress
+ * The linear kernel range starts at the bottom of the virtual address
  * space. Testing the top bit for the start of the region is a
- * sufficient check.
+ * sufficient check and avoids having to worry about the tag.
  */
-#define __is_lm_address(addr)  (!!((addr) & BIT(VA_BITS - 1)))
+#define __is_lm_address(addr)  (!(((u64)addr) & BIT(vabits_actual - 1)))
 
-#define __lm_to_phys(addr)     (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+#define __lm_to_phys(addr)     (((addr) + physvirt_offset))
 #define __kimg_to_phys(addr)   ((addr) - kimage_voffset)
 
 #define __virt_to_phys_nodebug(x) ({                                   \
-       phys_addr_t __x = (phys_addr_t)(x);                             \
-       __is_lm_address(__x) ? __lm_to_phys(__x) :                      \
-                              __kimg_to_phys(__x);                     \
+       phys_addr_t __x = (phys_addr_t)(__tag_reset(x));                \
+       __is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x); \
 })
 
 #define __pa_symbol_nodebug(x) __kimg_to_phys((phys_addr_t)(x))
@@ -250,9 +264,9 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
 #else
 #define __virt_to_phys(x)      __virt_to_phys_nodebug(x)
 #define __phys_addr_symbol(x)  __pa_symbol_nodebug(x)
-#endif
+#endif /* CONFIG_DEBUG_VIRTUAL */
 
-#define __phys_to_virt(x)      ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
+#define __phys_to_virt(x)      ((unsigned long)((x) - physvirt_offset))
 #define __phys_to_kimg(x)      ((unsigned long)((x) + kimage_voffset))
 
 /*
@@ -286,41 +300,38 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __pa_nodebug(x)                __virt_to_phys_nodebug((unsigned long)(x))
 #define __va(x)                        ((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
-#define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
-#define sym_to_pfn(x)      __phys_to_pfn(__pa_symbol(x))
+#define virt_to_pfn(x)         __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
+#define sym_to_pfn(x)          __phys_to_pfn(__pa_symbol(x))
 
 /*
- *  virt_to_page(k)    convert a _valid_ virtual address to struct page *
- *  virt_addr_valid(k) indicates whether a virtual address is valid
+ *  virt_to_page(x)    convert a _valid_ virtual address to struct page *
+ *  virt_addr_valid(x) indicates whether a virtual address is valid
  */
 #define ARCH_PFN_OFFSET                ((unsigned long)PHYS_PFN_OFFSET)
 
 #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
-#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define _virt_addr_valid(kaddr)        pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(x)                pfn_to_page(virt_to_pfn(x))
 #else
-#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(kaddr)  (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
-
-#define page_to_virt(page)     ({                                      \
-       unsigned long __addr =                                          \
-               ((__page_to_voff(page)) | PAGE_OFFSET);                 \
-       const void *__addr_tag =                                        \
-               __tag_set((void *)__addr, page_kasan_tag(page));        \
-       ((void *)__addr_tag);                                           \
+#define page_to_virt(x)        ({                                              \
+       __typeof__(x) __page = x;                                       \
+       u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
+       u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE);                 \
+       (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
 })
 
-#define virt_to_page(vaddr)    ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
+#define virt_to_page(x)        ({                                              \
+       u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE;    \
+       u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));     \
+       (struct page *)__addr;                                          \
+})
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
 
-#define _virt_addr_valid(kaddr)        pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
-                                          + PHYS_OFFSET) >> PAGE_SHIFT)
-#endif
-#endif
+#define virt_addr_valid(addr)  ({                                      \
+       __typeof__(addr) __addr = addr;                                 \
+       __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr));      \
+})
 
-#define _virt_addr_is_linear(kaddr)    \
-       (__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
-#define virt_addr_valid(kaddr)         \
-       (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
+#endif /* !ASSEMBLY */
 
 /*
  * Given that the GIC architecture permits ITS implementations that can only be
@@ -335,4 +346,4 @@ static inline void *phys_to_virt(phys_addr_t x)
 
 #include <asm-generic/memory_model.h>
 
-#endif
+#endif /* __ASM_MEMORY_H */
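
A sketch of the vmemmap index round trip behind the new virt_to_page()/page_to_virt() definitions above. struct page is reduced to a stand-in, the base addresses are illustrative rather than values exported by the kernel, and tag handling is omitted.

#include <stdio.h>

struct page { unsigned long flags; void *lru[2]; };  /* stand-in; about 64 bytes in the real kernel */

#define PAGE_SHIFT     12
#define PAGE_SIZE      (1UL << PAGE_SHIFT)
#define PAGE_OFFSET    0xffff000000000000UL          /* illustrative 48-bit values */
#define VMEMMAP_START  0xfffffdffffe00000UL

static struct page *virt_to_page(unsigned long va)
{
        unsigned long idx = (va - PAGE_OFFSET) / PAGE_SIZE;
        return (struct page *)(VMEMMAP_START + idx * sizeof(struct page));
}

static unsigned long page_to_virt(struct page *page)
{
        unsigned long idx = ((unsigned long)page - VMEMMAP_START) / sizeof(struct page);
        return PAGE_OFFSET + idx * PAGE_SIZE;
}

int main(void)
{
        unsigned long va = PAGE_OFFSET + 5 * PAGE_SIZE;

        printf("va %#lx -> page %p -> va %#lx\n",
               va, (void *)virt_to_page(va), page_to_virt(virt_to_page(va)));
        return 0;
}
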
index fd6161336653664f0c0d1fa297c63e6f3c884cab..f217e32929193114d0cf8e5f85fd0bff35ba9cfe 100644 (file)
@@ -126,7 +126,7 @@ extern void init_mem_pgprot(void);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot, bool page_mappings_only);
-extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
+extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
 extern void mark_linear_text_alias_ro(void);
 
 #define INIT_MM_CONTEXT(name)  \
index 7ed0adb187a8aa06320dc44402a5dfe31a7d97b2..3827ff4040a3f71da95de705ac5e905b25b8a15d 100644 (file)
@@ -63,7 +63,7 @@ extern u64 idmap_ptrs_per_pgd;
 
 static inline bool __cpu_uses_extended_idmap(void)
 {
-       if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52))
+       if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
                return false;
 
        return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
@@ -95,7 +95,7 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
        isb();
 }
 
-#define cpu_set_default_tcr_t0sz()     __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
+#define cpu_set_default_tcr_t0sz()     __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
 #define cpu_set_idmap_tcr_t0sz()       __cpu_set_tcr_t0sz(idmap_t0sz)
 
 /*
index 9e690686e8aad3fac10fd838dc75d8b7e77a8cde..70b323cf83000d51c1d823c5f4c8eb8fe47bebda 100644 (file)
@@ -1,7 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __ASM_PCI_H
 #define __ASM_PCI_H
-#ifdef __KERNEL__
 
 #include <linux/types.h>
 #include <linux/slab.h>
@@ -35,5 +34,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 }
 #endif  /* CONFIG_PCI */
 
-#endif  /* __KERNEL__ */
 #endif  /* __ASM_PCI_H */
index db92950bb1a0f3a888069643dded8af7e41d6282..3df60f97da1f2c86d3287ac02c1122d46ea1ef24 100644 (file)
 #define TTBR_BADDR_MASK_52     (((UL(1) << 46) - 1) << 2)
 #endif
 
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_ARM64_VA_BITS_52
 /* Must be at least 64-byte aligned to prevent corruption of the TTBR */
 #define TTBR1_BADDR_4852_OFFSET        (((UL(1) << (52 - PGDIR_SHIFT)) - \
                                 (UL(1) << (48 - PGDIR_SHIFT))) * 8)
index e09760ece844fe55cbd39dc8c32910a7bfdb6708..470ba7ae88218272c2567e5fb22197d4ea06e8b6 100644 (file)
@@ -21,9 +21,7 @@
  *     and fixed mappings
  */
 #define VMALLOC_START          (MODULES_END)
-#define VMALLOC_END            (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
-
-#define vmemmap                        ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
+#define VMALLOC_END            (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define FIRST_USER_ADDRESS     0UL
 
@@ -35,6 +33,8 @@
 #include <linux/mm_types.h>
 #include <linux/sched.h>
 
+extern struct page *vmemmap;
+
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
 extern void __pud_error(const char *file, int line, unsigned long val);
@@ -220,8 +220,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
         * Only if the new pte is valid and kernel, otherwise TLB maintenance
         * or update_mmu_cache() have the necessary barriers.
         */
-       if (pte_valid_not_user(pte))
+       if (pte_valid_not_user(pte)) {
                dsb(ishst);
+               isb();
+       }
 }
 
 extern void __sync_icache_dcache(pte_t pteval);
@@ -484,8 +486,10 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 
        WRITE_ONCE(*pmdp, pmd);
 
-       if (pmd_valid(pmd))
+       if (pmd_valid(pmd)) {
                dsb(ishst);
+               isb();
+       }
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -543,8 +547,10 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 
        WRITE_ONCE(*pudp, pud);
 
-       if (pud_valid(pud))
+       if (pud_valid(pud)) {
                dsb(ishst);
+               isb();
+       }
 }
 
 static inline void pud_clear(pud_t *pudp)
@@ -602,6 +608,7 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 
        WRITE_ONCE(*pgdp, pgd);
        dsb(ishst);
+       isb();
 }
 
 static inline void pgd_clear(pgd_t *pgdp)
@@ -859,8 +866,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
-#define kc_vaddr_to_offset(v)  ((v) & ~VA_START)
-#define kc_offset_to_vaddr(o)  ((o) | VA_START)
+#define kc_vaddr_to_offset(v)  ((v) & ~PAGE_END)
+#define kc_offset_to_vaddr(o)  ((o) | PAGE_END)
 
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define phys_to_ttbr(addr)     (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
index d328540cb85edc5f272190e3d36481b4c1987694..7a24bad1a58b89a38390eabf909515082a3b9aaa 100644 (file)
@@ -69,7 +69,7 @@ extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
  * The EL0 pointer bits used by a pointer authentication code.
  * This is dependent on TBI0 being enabled, or bits 63:56 would also apply.
  */
-#define ptrauth_user_pac_mask()        GENMASK(54, vabits_user)
+#define ptrauth_user_pac_mask()        GENMASK(54, vabits_actual)
 
 /* Only valid for EL0 TTBR0 instruction pointers */
 static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
index 368d90a9d0e5fa893f53bad7f34e54ec33ab3353..a2ce65a0c1fa9d539e1f38a73d18ae11537f7b27 100644 (file)
@@ -9,7 +9,6 @@
 #ifndef __ASM_PROCFNS_H
 #define __ASM_PROCFNS_H
 
-#ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
 #include <asm/page.h>
@@ -25,5 +24,4 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 #include <asm/memory.h>
 
 #endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
 #endif /* __ASM_PROCFNS_H */
index 844e2964b0f5e5b0aad4b866283f24e26f794afe..c67848c55009d9744ff68deea54eb58844d1c7f1 100644 (file)
@@ -20,7 +20,6 @@
 #define NET_IP_ALIGN   0
 
 #ifndef __ASSEMBLY__
-#ifdef __KERNEL__
 
 #include <linux/build_bug.h>
 #include <linux/cache.h>
@@ -42,8 +41,8 @@
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
  */
 
-#define DEFAULT_MAP_WINDOW_64  (UL(1) << VA_BITS)
-#define TASK_SIZE_64           (UL(1) << vabits_user)
+#define DEFAULT_MAP_WINDOW_64  (UL(1) << VA_BITS_MIN)
+#define TASK_SIZE_64           (UL(1) << vabits_actual)
 
 #ifdef CONFIG_COMPAT
 #if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
@@ -283,8 +282,6 @@ static inline void spin_lock_prefetch(const void *ptr)
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
-#endif
-
 extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
 extern void __init minsigstksz_setup(void);
 
@@ -306,6 +303,14 @@ extern void __init minsigstksz_setup(void);
 /* PR_PAC_RESET_KEYS prctl */
 #define PAC_RESET_KEYS(tsk, arg)       ptrauth_prctl_reset_keys(tsk, arg)
 
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
+long set_tagged_addr_ctrl(unsigned long arg);
+long get_tagged_addr_ctrl(void);
+#define SET_TAGGED_ADDR_CTRL(arg)      set_tagged_addr_ctrl(arg)
+#define GET_TAGGED_ADDR_CTRL()         get_tagged_addr_ctrl()
+#endif
+
 /*
  * For CONFIG_GCC_PLUGIN_STACKLEAK
  *
index 1dcf63a9ac1f313975f7fd318f783acd3f21147c..fbebb411ae202c1ea22acaaf5a666fe79c7a63fe 100644 (file)
@@ -301,6 +301,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
        return regs->regs[0];
 }
 
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+       regs->regs[0] = rc;
+}
+
 /**
  * regs_get_kernel_argument() - get Nth function argument in kernel
  * @regs:      pt_regs of that context
index bd43d1cf724b175f1bc187bf83554492402cefa7..7e9f163d02ecad97d4df2992395bce32bdef2375 100644 (file)
@@ -5,7 +5,6 @@
 #ifndef __ASM_SIGNAL32_H
 #define __ASM_SIGNAL32_H
 
-#ifdef __KERNEL__
 #ifdef CONFIG_COMPAT
 #include <linux/compat.h>
 
@@ -79,5 +78,4 @@ static inline void compat_setup_restart_syscall(struct pt_regs *regs)
 {
 }
 #endif /* CONFIG_COMPAT */
-#endif /* __KERNEL__ */
 #endif /* __ASM_SIGNAL32_H */
index 06ebcfef73df965dfaa9b2bda97946218b6711d0..972d196c7714689ab46d22b6366453b7e018c721 100644 (file)
 #define SYS_FAR_EL1                    sys_reg(3, 0, 6, 0, 0)
 #define SYS_PAR_EL1                    sys_reg(3, 0, 7, 4, 0)
 
+#define SYS_PAR_EL1_F                  BIT(1)
+#define SYS_PAR_EL1_FST                        GENMASK(6, 1)
+
 /*** Statistical Profiling Extension ***/
 /* ID registers */
 #define SYS_PMSIDR_EL1                 sys_reg(3, 0, 9, 9, 7)
 #define SCTLR_EL2_RES1 ((BIT(4))  | (BIT(5))  | (BIT(11)) | (BIT(16)) | \
                         (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \
                         (BIT(29)))
-#define SCTLR_EL2_RES0 ((BIT(6))  | (BIT(7))  | (BIT(8))  | (BIT(9))  | \
-                        (BIT(10)) | (BIT(13)) | (BIT(14)) | (BIT(15)) | \
-                        (BIT(17)) | (BIT(20)) | (BIT(24)) | (BIT(26)) | \
-                        (BIT(27)) | (BIT(30)) | (BIT(31)) | \
-                        (0xffffefffUL << 32))
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
 #define ENDIAN_SET_EL2         SCTLR_ELx_EE
-#define ENDIAN_CLEAR_EL2       0
 #else
 #define ENDIAN_SET_EL2         0
-#define ENDIAN_CLEAR_EL2       SCTLR_ELx_EE
-#endif
-
-/* SCTLR_EL2 value used for the hyp-stub */
-#define SCTLR_EL2_SET  (SCTLR_ELx_IESB   | ENDIAN_SET_EL2   | SCTLR_EL2_RES1)
-#define SCTLR_EL2_CLEAR        (SCTLR_ELx_M      | SCTLR_ELx_A    | SCTLR_ELx_C   | \
-                        SCTLR_ELx_SA     | SCTLR_ELx_I    | SCTLR_ELx_WXN | \
-                        SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
-
-#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL
-#error "Inconsistent SCTLR_EL2 set/clear bits"
 #endif
 
 /* SCTLR_EL1 specific flags. */
 
 #define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \
                         (BIT(29)))
-#define SCTLR_EL1_RES0  ((BIT(6))  | (BIT(10)) | (BIT(13)) | (BIT(17)) | \
-                        (BIT(27)) | (BIT(30)) | (BIT(31)) | \
-                        (0xffffefffUL << 32))
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
 #define ENDIAN_SET_EL1         (SCTLR_EL1_E0E | SCTLR_ELx_EE)
-#define ENDIAN_CLEAR_EL1       0
 #else
 #define ENDIAN_SET_EL1         0
-#define ENDIAN_CLEAR_EL1       (SCTLR_EL1_E0E | SCTLR_ELx_EE)
 #endif
 
 #define SCTLR_EL1_SET  (SCTLR_ELx_M    | SCTLR_ELx_C    | SCTLR_ELx_SA   |\
                         SCTLR_EL1_DZE  | SCTLR_EL1_UCT                   |\
                         SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
                         ENDIAN_SET_EL1 | SCTLR_EL1_UCI  | SCTLR_EL1_RES1)
-#define SCTLR_EL1_CLEAR        (SCTLR_ELx_A   | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD    |\
-                        SCTLR_EL1_UMA | SCTLR_ELx_WXN     | ENDIAN_CLEAR_EL1 |\
-                        SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI  | SCTLR_EL1_RES0)
-
-#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL
-#error "Inconsistent SCTLR_EL1 set/clear bits"
-#endif
 
 /* id_aa64isar0 */
 #define ID_AA64ISAR0_TS_SHIFT          52
index 180b34ec59650a9bcbca37ae4c7fbba373251e09..f0cec4160136e9cc62233ee43b01bb152fe5891c 100644 (file)
@@ -8,8 +8,6 @@
 #ifndef __ASM_THREAD_INFO_H
 #define __ASM_THREAD_INFO_H
 
-#ifdef __KERNEL__
-
 #include <linux/compiler.h>
 
 #ifndef __ASSEMBLY__
@@ -59,29 +57,18 @@ void arch_release_task_struct(struct task_struct *tsk);
 
 #endif
 
-/*
- * thread information flags:
- *  TIF_SYSCALL_TRACE  - syscall trace active
- *  TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
- *  TIF_SYSCALL_AUDIT  - syscall auditing
- *  TIF_SECCOMP                - syscall secure computing
- *  TIF_SYSCALL_EMU     - syscall emulation active
- *  TIF_SIGPENDING     - signal pending
- *  TIF_NEED_RESCHED   - rescheduling necessary
- *  TIF_NOTIFY_RESUME  - callback before returning to user
- */
-#define TIF_SIGPENDING         0
-#define TIF_NEED_RESCHED       1
+#define TIF_SIGPENDING         0       /* signal pending */
+#define TIF_NEED_RESCHED       1       /* rescheduling necessary */
 #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
 #define TIF_FOREIGN_FPSTATE    3       /* CPU's FP state is not current's */
 #define TIF_UPROBE             4       /* uprobe breakpoint or singlestep */
 #define TIF_FSCHECK            5       /* Check FS is USER_DS on return */
 #define TIF_NOHZ               7
-#define TIF_SYSCALL_TRACE      8
-#define TIF_SYSCALL_AUDIT      9
-#define TIF_SYSCALL_TRACEPOINT 10
-#define TIF_SECCOMP            11
-#define TIF_SYSCALL_EMU                12
+#define TIF_SYSCALL_TRACE      8       /* syscall trace active */
+#define TIF_SYSCALL_AUDIT      9       /* syscall auditing */
+#define TIF_SYSCALL_TRACEPOINT 10      /* syscall tracepoint for ftrace */
+#define TIF_SECCOMP            11      /* syscall secure computing */
+#define TIF_SYSCALL_EMU                12      /* syscall emulation active */
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
 #define TIF_FREEZE             19
 #define TIF_RESTORE_SIGMASK    20
@@ -90,6 +77,7 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define TIF_SVE                        23      /* Scalable Vector Extension in use */
 #define TIF_SVE_VL_INHERIT     24      /* Inherit sve_vl_onexec across exec */
 #define TIF_SSBD               25      /* Wants SSB mitigation */
+#define TIF_TAGGED_ADDR                26      /* Allow tagged user addresses */
 
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
@@ -121,5 +109,4 @@ void arch_release_task_struct(struct task_struct *tsk);
        .addr_limit     = KERNEL_DS,                                    \
 }
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_THREAD_INFO_H */
index 8af7a85f76bdb0b61637d5f4b18bb976d487820d..bc39490647259aeaf54c6bc9645dbaea3d07bd4f 100644 (file)
@@ -251,6 +251,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
        dsb(ishst);
        __tlbi(vaae1is, addr);
        dsb(ish);
+       isb();
 }
 #endif
 
index 5a1c32260c1f6885e2c4215828296343b3892ffb..097d6bfac0b74b93b842959d724d67d3cf8cd8a3 100644 (file)
@@ -62,6 +62,10 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
 {
        unsigned long ret, limit = current_thread_info()->addr_limit;
 
+       if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
+           test_thread_flag(TIF_TAGGED_ADDR))
+               addr = untagged_addr(addr);
+
        __chk_user_ptr(addr);
        asm volatile(
        // A + B <= C + 1 for all A,B,C, in four easy steps:
@@ -215,7 +219,8 @@ static inline void uaccess_enable_not_uao(void)
 
 /*
  * Sanitise a uaccess pointer such that it becomes NULL if above the
- * current addr_limit.
+ * current addr_limit. In case the pointer is tagged (has the top byte set),
+ * untag the pointer before checking.
  */
 #define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
 static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
@@ -223,10 +228,11 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
        void __user *safe_ptr;
 
        asm volatile(
-       "       bics    xzr, %1, %2\n"
+       "       bics    xzr, %3, %2\n"
        "       csel    %0, %1, xzr, eq\n"
        : "=&r" (safe_ptr)
-       : "r" (ptr), "r" (current_thread_info()->addr_limit)
+       : "r" (ptr), "r" (current_thread_info()->addr_limit),
+         "r" (untagged_addr(ptr))
        : "cc");
 
        csdb();
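
For reference, the untagging that __range_ok() now performs for tasks that have opted in (TIF_TAGGED_ADDR) is a sign extension from bit 55, which drops the tag held in bits 63:56. A small userspace illustration, not the kernel's sign_extend64() helper:

#include <stdio.h>

static unsigned long untagged_addr(unsigned long addr)
{
        /* sign-extend from bit 55: clears the tag in bits 63:56 */
        return (unsigned long)((long)(addr << 8) >> 8);
}

int main(void)
{
        unsigned long tagged = 0x5a00aaaabbbb0000UL;   /* tag 0x5a, address 0x0000aaaabbbb0000 */

        printf("%#lx -> %#lx\n", tagged, untagged_addr(tagged));
        return 0;
}
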
index 9c15e0a063017ad27677b958b85c36ce329873d3..07468428fd2907a6e9453e840da0d95694fcf541 100644 (file)
@@ -5,8 +5,6 @@
 #ifndef __ASM_VDSO_H
 #define __ASM_VDSO_H
 
-#ifdef __KERNEL__
-
 /*
  * Default link address for the vDSO.
  * Since we randomise the VDSO mapping, there's little point in trying
@@ -28,6 +26,4 @@
 
 #endif /* !__ASSEMBLY__ */
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASM_VDSO_H */
index ba6dbc3de86480129727fda5fa150ee5b1df2f30..1f38bf330a6efe4d89803b7a86c4417f3cf33862 100644 (file)
@@ -5,8 +5,6 @@
 #ifndef __ASM_VDSO_DATAPAGE_H
 #define __ASM_VDSO_DATAPAGE_H
 
-#ifdef __KERNEL__
-
 #ifndef __ASSEMBLY__
 
 struct vdso_data {
@@ -32,6 +30,4 @@ struct vdso_data {
 
 #endif /* !__ASSEMBLY__ */
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASM_VDSO_DATAPAGE_H */
diff --git a/arch/arm64/include/uapi/asm/stat.h b/arch/arm64/include/uapi/asm/stat.h
deleted file mode 100644 (file)
index 313325f..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#include <asm-generic/stat.h>
index b1fdc486aed8239c681acdcab1d98550bbdf60d9..9323bcc40a58a7c34d2ec502c35fad4633a5d118 100644 (file)
@@ -894,7 +894,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
        u32 midr = read_cpuid_id();
 
        /* Cavium ThunderX pass 1.x and 2.x */
-       return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
+       return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
                MIDR_CPU_VAR_REV(0, 0),
                MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
 }
index d1048173fd8a1bdcf77b607def93b4d629dae71c..e4d6af2fdec716ae36028f8e060dda92fb5127d2 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/cpu_pm.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/psci.h>
 
 #include <asm/cpuidle.h>
 #include <asm/cpu_ops.h>
@@ -46,17 +47,58 @@ int arm_cpuidle_suspend(int index)
 
 #define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags))
 
+static int psci_acpi_cpu_init_idle(unsigned int cpu)
+{
+       int i, count;
+       struct acpi_lpi_state *lpi;
+       struct acpi_processor *pr = per_cpu(processors, cpu);
+
+       /*
+        * If the PSCI cpu_suspend function hook has not been initialized
+        * idle states must not be enabled, so bail out
+        */
+       if (!psci_ops.cpu_suspend)
+               return -EOPNOTSUPP;
+
+       if (unlikely(!pr || !pr->flags.has_lpi))
+               return -EINVAL;
+
+       count = pr->power.count - 1;
+       if (count <= 0)
+               return -ENODEV;
+
+       for (i = 0; i < count; i++) {
+               u32 state;
+
+               lpi = &pr->power.lpi_states[i + 1];
+               /*
+                * Only bits[31:0] represent a PSCI power_state while
+                * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
+                */
+               state = lpi->address;
+               if (!psci_power_state_is_valid(state)) {
+                       pr_warn("Invalid PSCI power state %#x\n", state);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
 int acpi_processor_ffh_lpi_probe(unsigned int cpu)
 {
-       return arm_cpuidle_init(cpu);
+       return psci_acpi_cpu_init_idle(cpu);
 }
 
 int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
 {
+       u32 state = lpi->address;
+
        if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
-               return CPU_PM_CPU_IDLE_ENTER_RETENTION(arm_cpuidle_suspend,
-                                               lpi->index);
+               return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(psci_cpu_suspend_enter,
+                                               lpi->index, state);
        else
-               return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, lpi->index);
+               return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
+                                            lpi->index, state);
 }
 #endif
index 876055e373525999e1775dbd2a1bc482c9359b62..05933c065732b8cc5dac245d2baea0f1ccd4147f 100644 (file)
@@ -33,7 +33,7 @@
 DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
 static struct cpuinfo_arm64 boot_cpu_data;
 
-static char *icache_policy_str[] = {
+static const char *icache_policy_str[] = {
        [0 ... ICACHE_POLICY_PIPT]      = "RESERVED/UNKNOWN",
        [ICACHE_POLICY_VIPT]            = "VIPT",
        [ICACHE_POLICY_PIPT]            = "PIPT",
index 320a30dbe35efb02e4f7d5bd52c82189e1f4400c..84a822748c84e58c85fc19a62d63053b97f0b2c2 100644 (file)
@@ -30,9 +30,9 @@
  * Context tracking subsystem.  Used to instrument transitions
  * between user and kernel mode.
  */
-       .macro ct_user_exit
+       .macro ct_user_exit_irqoff
 #ifdef CONFIG_CONTEXT_TRACKING
-       bl      context_tracking_user_exit
+       bl      enter_from_user_mode
 #endif
        .endm
 
@@ -792,8 +792,8 @@ el0_cp15:
        /*
         * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
         */
+       ct_user_exit_irqoff
        enable_daif
-       ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_cp15instr
@@ -805,8 +805,8 @@ el0_da:
         * Data abort handling
         */
        mrs     x26, far_el1
+       ct_user_exit_irqoff
        enable_daif
-       ct_user_exit
        clear_address_tag x0, x26
        mov     x1, x25
        mov     x2, sp
@@ -818,11 +818,11 @@ el0_ia:
         */
        mrs     x26, far_el1
        gic_prio_kentry_setup tmp=x0
+       ct_user_exit_irqoff
        enable_da_f
 #ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
 #endif
-       ct_user_exit
        mov     x0, x26
        mov     x1, x25
        mov     x2, sp
@@ -832,8 +832,8 @@ el0_fpsimd_acc:
        /*
         * Floating Point or Advanced SIMD access
         */
+       ct_user_exit_irqoff
        enable_daif
-       ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_fpsimd_acc
@@ -842,8 +842,8 @@ el0_sve_acc:
        /*
         * Scalable Vector Extension access
         */
+       ct_user_exit_irqoff
        enable_daif
-       ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_sve_acc
@@ -852,8 +852,8 @@ el0_fpsimd_exc:
        /*
         * Floating Point, Advanced SIMD or SVE exception
         */
+       ct_user_exit_irqoff
        enable_daif
-       ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_fpsimd_exc
@@ -868,11 +868,11 @@ el0_sp_pc:
         * Stack or PC alignment exception handling
         */
        gic_prio_kentry_setup tmp=x0
+       ct_user_exit_irqoff
        enable_da_f
 #ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
 #endif
-       ct_user_exit
        mov     x0, x26
        mov     x1, x25
        mov     x2, sp
@@ -882,8 +882,8 @@ el0_undef:
        /*
         * Undefined instruction
         */
+       ct_user_exit_irqoff
        enable_daif
-       ct_user_exit
        mov     x0, sp
        bl      do_undefinstr
        b       ret_to_user
@@ -891,8 +891,8 @@ el0_sys:
        /*
         * System instructions, for trapped cache maintenance instructions
         */
+       ct_user_exit_irqoff
        enable_daif
-       ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_sysinstr
@@ -902,17 +902,18 @@ el0_dbg:
         * Debug exception handling
         */
        tbnz    x24, #0, el0_inv                // EL0 only
+       mrs     x24, far_el1
        gic_prio_kentry_setup tmp=x3
-       mrs     x0, far_el1
+       ct_user_exit_irqoff
+       mov     x0, x24
        mov     x1, x25
        mov     x2, sp
        bl      do_debug_exception
        enable_da_f
-       ct_user_exit
        b       ret_to_user
 el0_inv:
+       ct_user_exit_irqoff
        enable_daif
-       ct_user_exit
        mov     x0, sp
        mov     x1, #BAD_SYNC
        mov     x2, x25
@@ -925,13 +926,13 @@ el0_irq:
        kernel_entry 0
 el0_irq_naked:
        gic_prio_irq_setup pmr=x20, tmp=x0
+       ct_user_exit_irqoff
        enable_da_f
 
 #ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
 #endif
 
-       ct_user_exit
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        tbz     x22, #55, 1f
        bl      do_el0_irq_bp_hardening
@@ -958,13 +959,14 @@ ENDPROC(el1_error)
 el0_error:
        kernel_entry 0
 el0_error_naked:
-       mrs     x1, esr_el1
+       mrs     x25, esr_el1
        gic_prio_kentry_setup tmp=x2
+       ct_user_exit_irqoff
        enable_dbg
        mov     x0, sp
+       mov     x1, x25
        bl      do_serror
        enable_da_f
-       ct_user_exit
        b       ret_to_user
 ENDPROC(el0_error)
 
index 2cdacd1c141b97295192e9685ffca9ef32765939..989b1944cb719bb5afcd1f4fe1547ba4e2511d4e 100644 (file)
@@ -102,6 +102,8 @@ pe_header:
         *  x23        stext() .. start_kernel()  physical misalignment/KASLR offset
         *  x28        __create_page_tables()     callee preserved temp register
         *  x19/x20    __primary_switch()         callee preserved temp registers
+        *  x24        __primary_switch() .. relocate_kernel()
+        *                                        current RELR displacement
         */
 ENTRY(stext)
        bl      preserve_boot_args
@@ -308,15 +310,15 @@ __create_page_tables:
        adrp    x0, idmap_pg_dir
        adrp    x3, __idmap_text_start          // __pa(__idmap_text_start)
 
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_ARM64_VA_BITS_52
        mrs_s   x6, SYS_ID_AA64MMFR2_EL1
        and     x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
        mov     x5, #52
        cbnz    x6, 1f
 #endif
-       mov     x5, #VA_BITS
+       mov     x5, #VA_BITS_MIN
 1:
-       adr_l   x6, vabits_user
+       adr_l   x6, vabits_actual
        str     x5, [x6]
        dmb     sy
        dc      ivac, x6                // Invalidate potentially stale cache line
@@ -724,14 +726,22 @@ __secondary_switched:
 
        adr_l   x0, secondary_data
        ldr     x1, [x0, #CPU_BOOT_STACK]       // get secondary_data.stack
+       cbz     x1, __secondary_too_slow
        mov     sp, x1
        ldr     x2, [x0, #CPU_BOOT_TASK]
+       cbz     x2, __secondary_too_slow
        msr     sp_el0, x2
        mov     x29, #0
        mov     x30, #0
        b       secondary_start_kernel
 ENDPROC(__secondary_switched)
 
+__secondary_too_slow:
+       wfe
+       wfi
+       b       __secondary_too_slow
+ENDPROC(__secondary_too_slow)
+
 /*
  * The booting CPU updates the failed status @__early_cpu_boot_status,
  * with MMU turned off.
@@ -772,7 +782,7 @@ ENTRY(__enable_mmu)
        phys_to_ttbr x1, x1
        phys_to_ttbr x2, x2
        msr     ttbr0_el1, x2                   // load TTBR0
-       offset_ttbr1 x1
+       offset_ttbr1 x1, x3
        msr     ttbr1_el1, x1                   // load TTBR1
        isb
        msr     sctlr_el1, x0
@@ -789,8 +799,8 @@ ENTRY(__enable_mmu)
 ENDPROC(__enable_mmu)
 
 ENTRY(__cpu_secondary_check52bitva)
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
-       ldr_l   x0, vabits_user
+#ifdef CONFIG_ARM64_VA_BITS_52
+       ldr_l   x0, vabits_actual
        cmp     x0, #52
        b.ne    2f
 
@@ -834,14 +844,93 @@ __relocate_kernel:
 
 0:     cmp     x9, x10
        b.hs    1f
-       ldp     x11, x12, [x9], #24
-       ldr     x13, [x9, #-8]
-       cmp     w12, #R_AARCH64_RELATIVE
+       ldp     x12, x13, [x9], #24
+       ldr     x14, [x9, #-8]
+       cmp     w13, #R_AARCH64_RELATIVE
        b.ne    0b
-       add     x13, x13, x23                   // relocate
-       str     x13, [x11, x23]
+       add     x14, x14, x23                   // relocate
+       str     x14, [x12, x23]
        b       0b
-1:     ret
+
+1:
+#ifdef CONFIG_RELR
+       /*
+        * Apply RELR relocations.
+        *
+        * RELR is a compressed format for storing relative relocations. The
+        * encoded sequence of entries looks like:
+        * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
+        *
+        * i.e. start with an address, followed by any number of bitmaps. The
+        * address entry encodes 1 relocation. The subsequent bitmap entries
+        * encode up to 63 relocations each, at subsequent offsets following
+        * the last address entry.
+        *
+        * The bitmap entries must have 1 in the least significant bit. The
+        * assumption here is that an address cannot have 1 in lsb. Odd
+        * addresses are not supported. Any odd addresses are stored in the RELA
+        * section, which is handled above.
+        *
+        * Excluding the least significant bit in the bitmap, each non-zero
+        * bit in the bitmap represents a relocation to be applied to
+        * a corresponding machine word that follows the base address
+        * word. The second least significant bit represents the machine
+        * word immediately following the initial address, and each bit
+        * that follows represents the next word, in linear order. As such,
+        * a single bitmap can encode up to 63 relocations in a 64-bit object.
+        *
+        * In this implementation we store the address of the next RELR table
+        * entry in x9, the address being relocated by the current address or
+        * bitmap entry in x13 and the address being relocated by the current
+        * bit in x14.
+        *
+        * Because addends are stored in place in the binary, RELR relocations
+        * cannot be applied idempotently. We use x24 to keep track of the
+        * currently applied displacement so that we can correctly relocate if
+        * __relocate_kernel is called twice with non-zero displacements (i.e.
+        * if there is both a physical misalignment and a KASLR displacement).
+        */
+       ldr     w9, =__relr_offset              // offset to reloc table
+       ldr     w10, =__relr_size               // size of reloc table
+       add     x9, x9, x11                     // __va(.relr)
+       add     x10, x9, x10                    // __va(.relr) + sizeof(.relr)
+
+       sub     x15, x23, x24                   // delta from previous offset
+       cbz     x15, 7f                         // nothing to do if unchanged
+       mov     x24, x23                        // save new offset
+
+2:     cmp     x9, x10
+       b.hs    7f
+       ldr     x11, [x9], #8
+       tbnz    x11, #0, 3f                     // branch to handle bitmaps
+       add     x13, x11, x23
+       ldr     x12, [x13]                      // relocate address entry
+       add     x12, x12, x15
+       str     x12, [x13], #8                  // adjust to start of bitmap
+       b       2b
+
+3:     mov     x14, x13
+4:     lsr     x11, x11, #1
+       cbz     x11, 6f
+       tbz     x11, #0, 5f                     // skip bit if not set
+       ldr     x12, [x14]                      // relocate bit
+       add     x12, x12, x15
+       str     x12, [x14]
+
+5:     add     x14, x14, #8                    // move to next bit's address
+       b       4b
+
+6:     /*
+        * Move to the next bitmap's address. 8 is the word size, and 63 is the
+        * number of significant bits in a bitmap entry.
+        */
+       add     x13, x13, #(8 * 63)
+       b       2b
+
+7:
+#endif
+       ret
+
 ENDPROC(__relocate_kernel)
 #endif
 
@@ -854,6 +943,9 @@ __primary_switch:
        adrp    x1, init_pg_dir
        bl      __enable_mmu
 #ifdef CONFIG_RELOCATABLE
+#ifdef CONFIG_RELR
+       mov     x24, #0                         // no RELR displacement yet
+#endif
        bl      __relocate_kernel
 #ifdef CONFIG_RANDOMIZE_BASE
        ldr     x8, =__primary_switched
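
The comment block inside __relocate_kernel above describes the RELR encoding in prose. A compact C rendering of the same walk, purely for illustration: it assumes the table is applied once to a pristine image, whereas the assembly also tracks the previously applied displacement (x24) so that it can safely run twice.

#include <stddef.h>
#include <stdint.h>

/* relr points at the RELR entries; delta is the load displacement to apply. */
static void apply_relr(const uint64_t *relr, size_t nr_entries, uint64_t delta)
{
        uint64_t *where = NULL;         /* a valid table always starts with an address entry */

        for (size_t i = 0; i < nr_entries; i++) {
                uint64_t entry = relr[i];

                if (!(entry & 1)) {                     /* address entry: one relocation */
                        where = (uint64_t *)(entry + delta);
                        *where++ += delta;
                } else {                                /* bitmap entry: up to 63 more */
                        for (int bit = 1; bit < 64; bit++)
                                if (entry & (1UL << bit))
                                        where[bit - 1] += delta;
                        where += 63;                    /* 63 words covered per bitmap */
                }
        }
}
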
index 2f4a2ce7264b8f9c7441305727feca6b98259965..38bcd4d4e43bb1314f103f5ac477e1143c019240 100644 (file)
  * Even switching to our copied tables will cause a changed output address at
  * each stage of the walk.
  */
-.macro break_before_make_ttbr_switch zero_page, page_table, tmp
+.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
        phys_to_ttbr \tmp, \zero_page
        msr     ttbr1_el1, \tmp
        isb
        tlbi    vmalle1
        dsb     nsh
        phys_to_ttbr \tmp, \page_table
-       offset_ttbr1 \tmp
+       offset_ttbr1 \tmp, \tmp2
        msr     ttbr1_el1, \tmp
        isb
 .endm
@@ -70,7 +70,7 @@ ENTRY(swsusp_arch_suspend_exit)
         * We execute from ttbr0, change ttbr1 to our copied linear map tables
         * with a break-before-make via the zero page
         */
-       break_before_make_ttbr_switch   x5, x0, x6
+       break_before_make_ttbr_switch   x5, x0, x6, x8
 
        mov     x21, x1
        mov     x30, x2
@@ -101,7 +101,7 @@ ENTRY(swsusp_arch_suspend_exit)
        dsb     ish             /* wait for PoU cleaning to finish */
 
        /* switch to the restored kernels page tables */
-       break_before_make_ttbr_switch   x25, x21, x6
+       break_before_make_ttbr_switch   x25, x21, x6, x8
 
        ic      ialluis
        dsb     ish
index 9341fcc6e809bc678872fa4574666a85ada5ec6f..e0a7fce0e01c1d5533a6dd19fed7431dd3fa3ed5 100644 (file)
@@ -496,7 +496,7 @@ int swsusp_arch_resume(void)
                rc = -ENOMEM;
                goto out;
        }
-       rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+       rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END);
        if (rc)
                goto out;
 
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
new file mode 100644 (file)
index 0000000..25a2a9b
--- /dev/null
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linker script variables to be set after section resolution, as
+ * ld.lld does not like variables assigned before SECTIONS is processed.
+ */
+#ifndef __ARM64_KERNEL_IMAGE_VARS_H
+#define __ARM64_KERNEL_IMAGE_VARS_H
+
+#ifndef LINKER_SCRIPT
+#error This file should only be included in vmlinux.lds.S
+#endif
+
+#ifdef CONFIG_EFI
+
+__efistub_stext_offset = stext - _text;
+
+/*
+ * The EFI stub has its own symbol namespace prefixed by __efistub_, to
+ * isolate it from the kernel proper. The following symbols are legally
+ * accessed by the stub, so provide some aliases to make them accessible.
+ * Only include data symbols here, or text symbols of functions that are
+ * guaranteed to be safe when executed at another offset than they were
+ * linked at. The routines below are all implemented in assembler in a
+ * position independent manner
+ */
+__efistub_memcmp               = __pi_memcmp;
+__efistub_memchr               = __pi_memchr;
+__efistub_memcpy               = __pi_memcpy;
+__efistub_memmove              = __pi_memmove;
+__efistub_memset               = __pi_memset;
+__efistub_strlen               = __pi_strlen;
+__efistub_strnlen              = __pi_strnlen;
+__efistub_strcmp               = __pi_strcmp;
+__efistub_strncmp              = __pi_strncmp;
+__efistub_strrchr              = __pi_strrchr;
+__efistub___flush_dcache_area  = __pi___flush_dcache_area;
+
+#ifdef CONFIG_KASAN
+__efistub___memcpy             = __pi_memcpy;
+__efistub___memmove            = __pi_memmove;
+__efistub___memset             = __pi_memset;
+#endif
+
+__efistub__text                        = _text;
+__efistub__end                 = _end;
+__efistub__edata               = _edata;
+__efistub_screen_info          = screen_info;
+
+#endif
+
+#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
index 2b85c0d6fa3d12df5a471302f5facb3a2a1ee7c1..c7d38c660372c4a1494e35fac2a9d3a8b1c453e8 100644 (file)
        DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET);      \
        DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
 
-#ifdef CONFIG_EFI
-
-/*
- * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
- * https://github.com/ClangBuiltLinux/linux/issues/561
- */
-__efistub_stext_offset = ABSOLUTE(stext - _text);
-
-/*
- * The EFI stub has its own symbol namespace prefixed by __efistub_, to
- * isolate it from the kernel proper. The following symbols are legally
- * accessed by the stub, so provide some aliases to make them accessible.
- * Only include data symbols here, or text symbols of functions that are
- * guaranteed to be safe when executed at another offset than they were
- * linked at. The routines below are all implemented in assembler in a
- * position independent manner
- */
-__efistub_memcmp               = __pi_memcmp;
-__efistub_memchr               = __pi_memchr;
-__efistub_memcpy               = __pi_memcpy;
-__efistub_memmove              = __pi_memmove;
-__efistub_memset               = __pi_memset;
-__efistub_strlen               = __pi_strlen;
-__efistub_strnlen              = __pi_strnlen;
-__efistub_strcmp               = __pi_strcmp;
-__efistub_strncmp              = __pi_strncmp;
-__efistub_strrchr              = __pi_strrchr;
-__efistub___flush_dcache_area  = __pi___flush_dcache_area;
-
-#ifdef CONFIG_KASAN
-__efistub___memcpy             = __pi_memcpy;
-__efistub___memmove            = __pi_memmove;
-__efistub___memset             = __pi_memset;
-#endif
-
-__efistub__text                        = _text;
-__efistub__end                 = _end;
-__efistub__edata               = _edata;
-__efistub_screen_info          = screen_info;
-
-#endif
-
 #endif /* __ARM64_KERNEL_IMAGE_H */
index 84b059ed04fc2b461ff7ba4e8182d4be4cd11012..d801a7094076e40b3dc1606d3b1b6f012f89dbf3 100644 (file)
@@ -26,7 +26,7 @@
 #define AARCH64_INSN_N_BIT     BIT(22)
 #define AARCH64_INSN_LSL_12    BIT(22)
 
-static int aarch64_insn_encoding_class[] = {
+static const int aarch64_insn_encoding_class[] = {
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
index 708051655ad9c597138419af816583c9f5ed2299..416f537bf6142818c15267be2cc54c82716201c2 100644 (file)
@@ -62,9 +62,6 @@ out:
        return default_cmdline;
 }
 
-extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
-                                      pgprot_t prot);
-
 /*
  * This routine will be executed with the kernel mapped at its default virtual
  * address, and if it returns successfully, the kernel will be remapped, and
@@ -93,7 +90,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
         * attempt at mapping the FDT in setup_machine()
         */
        early_fixmap_init();
-       fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+       fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
        if (!fdt)
                return 0;
 
@@ -116,15 +113,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
        /*
         * OK, so we are proceeding with KASLR enabled. Calculate a suitable
         * kernel image offset from the seed. Let's place the kernel in the
-        * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of
+        * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
         * the lower and upper quarters to avoid colliding with other
         * allocations.
         * Even if we could randomize at page granularity for 16k and 64k pages,
         * let's always round to 2 MB so we don't interfere with the ability to
         * map using contiguous PTEs
         */
-       mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
-       offset = BIT(VA_BITS - 3) + (seed & mask);
+       mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
+       offset = BIT(VA_BITS_MIN - 3) + (seed & mask);
 
        /* use the top 16 bits to randomize the linear region */
        memstart_offset_seed = seed >> 48;
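
Worked numbers for the offset calculation above, taking VA_BITS_MIN == 48 and an arbitrary seed; illustrative only.

#include <stdio.h>

#define SZ_2M          0x200000UL
#define VA_BITS_MIN    48

int main(void)
{
        unsigned long seed   = 0x123456789abcdef0UL;            /* example value */
        unsigned long mask   = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
        unsigned long offset = (1UL << (VA_BITS_MIN - 3)) + (seed & mask);

        /* mask   == 0x00003fffffe00000: 2MB-aligned offsets below 2^46
         * offset == 0x0000200000000000 + (seed & mask)
         *        -> always in [2^45, 2^45 + 2^46), the middle half of a
         *           2^47-byte window, and always a multiple of 2MB.     */
        printf("mask = %#lx, offset = %#lx\n", mask, offset);
        return 0;
}
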
index 2514fd6f12cbd15797963755f2ca70827a6ce294..29a9428486a5778e229d83aaa464644f33c29c09 100644 (file)
@@ -84,7 +84,7 @@ static void *image_load(struct kimage *image,
 
        kbuf.buffer = kernel;
        kbuf.bufsz = kernel_len;
-       kbuf.mem = 0;
+       kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
        kbuf.memsz = le64_to_cpu(h->image_size);
        text_offset = le64_to_cpu(h->text_offset);
        kbuf.buf_align = MIN_KIMG_ALIGN;
index 58871333737a4e5458d723dcb0fd6e5e609548da..7b08bf9499b6b5a338d68d3f8baa08741e720c53 100644 (file)
@@ -27,6 +27,8 @@
 #define FDT_PROP_INITRD_END    "linux,initrd-end"
 #define FDT_PROP_BOOTARGS      "bootargs"
 #define FDT_PROP_KASLR_SEED    "kaslr-seed"
+#define FDT_PROP_RNG_SEED      "rng-seed"
+#define RNG_SEED_SIZE          128
 
 const struct kexec_file_ops * const kexec_file_loaders[] = {
        &kexec_image_ops,
@@ -102,6 +104,19 @@ static int setup_dtb(struct kimage *image,
                                FDT_PROP_KASLR_SEED);
        }
 
+       /* add rng-seed */
+       if (rng_is_initialized()) {
+               u8 rng_seed[RNG_SEED_SIZE];
+               get_random_bytes(rng_seed, RNG_SEED_SIZE);
+               ret = fdt_setprop(dtb, off, FDT_PROP_RNG_SEED, rng_seed,
+                               RNG_SEED_SIZE);
+               if (ret)
+                       goto out;
+       } else {
+               pr_notice("RNG is not initialised: omitting \"%s\" property\n",
+                               FDT_PROP_RNG_SEED);
+       }
+
 out:
        if (ret)
                return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL;
@@ -110,7 +125,8 @@ out:
 }
 
 /*
- * More space needed so that we can add initrd, bootargs and kaslr-seed.
+ * More space needed so that we can add initrd, bootargs, kaslr-seed, and
+ * rng-seed.
  */
 #define DTB_EXTRA_SPACE 0x1000
 
@@ -177,7 +193,7 @@ int load_other_segments(struct kimage *image,
        if (initrd) {
                kbuf.buffer = initrd;
                kbuf.bufsz = initrd_len;
-               kbuf.mem = 0;
+               kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
                kbuf.memsz = initrd_len;
                kbuf.buf_align = 0;
                /* within 1GB-aligned window of up to 32GB in size */
@@ -204,7 +220,7 @@ int load_other_segments(struct kimage *image,
        dtb_len = fdt_totalsize(dtb);
        kbuf.buffer = dtb;
        kbuf.bufsz = dtb_len;
-       kbuf.mem = 0;
+       kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
        kbuf.memsz = dtb_len;
        /* not across 2MB boundary */
        kbuf.buf_align = SZ_2M;
index 044c0ae4d6c85eac6db5000facc151b805e4b05e..b182442b87a32fcf4836f2179466c50508a6372d 100644 (file)
@@ -302,7 +302,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
                /* sort by type, symbol index and addend */
                sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);
 
-               if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
+               if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
                        core_plts += count_plts(syms, rels, numrels,
                                                sechdrs[i].sh_info, dstsec);
                else
index 96e90e270042de1846e19c077325fcd4db276e25..a0b4f1bca4917e0fea04beac112e4814de1980ec 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/of.h>
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
+#include <linux/smp.h>
 
 /* ARMv8 Cortex-A53 specific event types. */
 #define ARMV8_A53_PERFCTR_PREF_LINEFILL                                0xC2
@@ -157,7 +158,6 @@ armv8pmu_events_sysfs_show(struct device *dev,
        return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
 }
 
-#define ARMV8_EVENT_ATTR_RESOLVE(m) #m
 #define ARMV8_EVENT_ATTR(name, config) \
        PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
                       config, armv8pmu_events_sysfs_show)
index f674f28df663b29aa26f4856aa837c14e1da4234..03689c0beb34863ee501274a959efce24c067d88 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
+#include <linux/sysctl.h>
 #include <linux/unistd.h>
 #include <linux/user.h>
 #include <linux/delay.h>
@@ -38,6 +39,7 @@
 #include <trace/events/power.h>
 #include <linux/percpu.h>
 #include <linux/thread_info.h>
+#include <linux/prctl.h>
 
 #include <asm/alternative.h>
 #include <asm/arch_gicv3.h>
@@ -307,11 +309,18 @@ static void tls_thread_flush(void)
        }
 }
 
+static void flush_tagged_addr_state(void)
+{
+       if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
+               clear_thread_flag(TIF_TAGGED_ADDR);
+}
+
 void flush_thread(void)
 {
        fpsimd_flush_thread();
        tls_thread_flush();
        flush_ptrace_hw_breakpoint(current);
+       flush_tagged_addr_state();
 }
 
 void release_thread(struct task_struct *dead_task)
@@ -565,3 +574,70 @@ void arch_setup_new_exec(void)
 
        ptrauth_thread_init_user(current);
 }
+
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+/*
+ * Control the relaxed ABI allowing tagged user addresses into the kernel.
+ */
+static unsigned int tagged_addr_disabled;
+
+long set_tagged_addr_ctrl(unsigned long arg)
+{
+       if (is_compat_task())
+               return -EINVAL;
+       if (arg & ~PR_TAGGED_ADDR_ENABLE)
+               return -EINVAL;
+
+       /*
+        * Do not allow the enabling of the tagged address ABI if globally
+        * disabled via sysctl abi.tagged_addr_disabled.
+        */
+       if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
+               return -EINVAL;
+
+       update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
+
+       return 0;
+}
+
+long get_tagged_addr_ctrl(void)
+{
+       if (is_compat_task())
+               return -EINVAL;
+
+       if (test_thread_flag(TIF_TAGGED_ADDR))
+               return PR_TAGGED_ADDR_ENABLE;
+
+       return 0;
+}
+
+/*
+ * Global sysctl to disable the tagged user addresses support. This control
+ * only prevents the tagged address ABI enabling via prctl() and does not
+ * disable it for tasks that already opted in to the relaxed ABI.
+ */
+static int zero;
+static int one = 1;
+
+static struct ctl_table tagged_addr_sysctl_table[] = {
+       {
+               .procname       = "tagged_addr_disabled",
+               .mode           = 0644,
+               .data           = &tagged_addr_disabled,
+               .maxlen         = sizeof(int),
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
+       { }
+};
+
+static int __init tagged_addr_init(void)
+{
+       if (!register_sysctl("abi", tagged_addr_sysctl_table))
+               return -EINVAL;
+       return 0;
+}
+
+core_initcall(tagged_addr_init);
+#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
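
The block above wires the opt-in up to prctl(): PR_SET_TAGGED_ADDR_CTRL sets TIF_TAGGED_ADDR unless the abi.tagged_addr_disabled sysctl forbids it, and PR_GET_TAGGED_ADDR_CTRL reports the current state. A minimal user-space sketch of the opt-in follows; the prctl constants are the uapi values, while the tag value and buffer handling are invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL         /* fallback for older libc headers */
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
#endif

int main(void)
{
        /* Fails with EINVAL if abi.tagged_addr_disabled has been set. */
        if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) {
                perror("PR_SET_TAGGED_ADDR_CTRL");
                return 1;
        }
        printf("ctrl = %ld\n", prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));

        char *buf = malloc(16);
        if (!buf)
                return 1;
        strcpy(buf, "hello\n");

        /* With the ABI enabled, this task may pass a pointer carrying a
         * non-zero top byte into system calls (arm64 with this series). */
        char *tagged = (char *)((uintptr_t)buf | (0x2aULL << 56));
        write(STDOUT_FILENO, tagged, 6);

        free(buf);
        return 0;
}

Note also the flush_tagged_addr_state() call added to flush_thread() above, which drops the opt-in again when the task execs a new image.
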
index 85ee7d07889e785c6128a09e1b04dbe5639695eb..c9f72b2665f1cc26cd7cc877fca628473be8e8bd 100644 (file)
@@ -46,6 +46,11 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+static bool cpu_psci_cpu_can_disable(unsigned int cpu)
+{
+       return !psci_tos_resident_on(cpu);
+}
+
 static int cpu_psci_cpu_disable(unsigned int cpu)
 {
        /* Fail early if we don't have CPU_OFF support */
@@ -105,14 +110,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
 
 const struct cpu_operations cpu_psci_ops = {
        .name           = "psci",
-#ifdef CONFIG_CPU_IDLE
-       .cpu_init_idle  = psci_cpu_init_idle,
-       .cpu_suspend    = psci_cpu_suspend_enter,
-#endif
        .cpu_init       = cpu_psci_cpu_init,
        .cpu_prepare    = cpu_psci_cpu_prepare,
        .cpu_boot       = cpu_psci_cpu_boot,
 #ifdef CONFIG_HOTPLUG_CPU
+       .cpu_can_disable = cpu_psci_cpu_can_disable,
        .cpu_disable    = cpu_psci_cpu_disable,
        .cpu_die        = cpu_psci_cpu_die,
        .cpu_kill       = cpu_psci_cpu_kill,
index 3cf3b135027e3fae6e26017ba8406f8a80b19f0f..21176d02e21a1c1b8e4977087717c816ba4329e5 100644 (file)
@@ -870,7 +870,7 @@ static int sve_set(struct task_struct *target,
                goto out;
 
        /*
-        * Apart from PT_SVE_REGS_MASK, all PT_SVE_* flags are consumed by
+        * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
         * sve_set_vector_length(), which will also validate them for us:
         */
        ret = sve_set_vector_length(target, header.vl,
index 9c4bad7d71317ef927e6766a0eefaf4352e7840f..56f6645617548600de5c0fa25133753d357895e3 100644 (file)
@@ -170,9 +170,13 @@ static void __init smp_build_mpidr_hash(void)
 
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
 {
-       void *dt_virt = fixmap_remap_fdt(dt_phys);
+       int size;
+       void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
        const char *name;
 
+       if (dt_virt)
+               memblock_reserve(dt_phys, size);
+
        if (!dt_virt || !early_init_dt_scan(dt_virt)) {
                pr_crit("\n"
                        "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
@@ -184,6 +188,9 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
                        cpu_relax();
        }
 
+       /* Early fixups are done, map the FDT as read-only now */
+       fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
+
        name = of_flat_dt_get_machine_name();
        if (!name)
                return;
@@ -357,6 +364,15 @@ void __init setup_arch(char **cmdline_p)
        }
 }
 
+static inline bool cpu_can_disable(unsigned int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+       if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_can_disable)
+               return cpu_ops[cpu]->cpu_can_disable(cpu);
+#endif
+       return false;
+}
+
 static int __init topology_init(void)
 {
        int i;
@@ -366,7 +382,7 @@ static int __init topology_init(void)
 
        for_each_possible_cpu(i) {
                struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
-               cpu->hotpluggable = 1;
+               cpu->hotpluggable = cpu_can_disable(i);
                register_cpu(cpu, i);
        }
 
index 018a33e01b0ed2fdac2997b34d6065979bc343e1..dc9fe879c27932ef413dc99df73f39994855f052 100644 (file)
@@ -123,7 +123,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
                 * time out.
                 */
                wait_for_completion_timeout(&cpu_running,
-                                           msecs_to_jiffies(1000));
+                                           msecs_to_jiffies(5000));
 
                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
@@ -136,6 +136,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 
        secondary_data.task = NULL;
        secondary_data.stack = NULL;
+       __flush_dcache_area(&secondary_data, sizeof(secondary_data));
        status = READ_ONCE(secondary_data.status);
        if (ret && status) {
 
@@ -146,6 +147,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
                default:
                        pr_err("CPU%u: failed in unknown state : 0x%lx\n",
                                        cpu, status);
+                       cpus_stuck_in_kernel++;
                        break;
                case CPU_KILL_ME:
                        if (!op_cpu_kill(cpu)) {
index 76c2739ba8a483c8a77ba4ae9d1ed25cc7caf6ca..c8a3fee00c11329910ae63f6fe3ac98e0fd60fdb 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/smp_plat.h>
 
 extern void secondary_holding_pen(void);
-volatile unsigned long __section(".mmuoff.data.read")
+volatile unsigned long __section(.mmuoff.data.read)
 secondary_holding_pen_release = INVALID_HWID;
 
 static phys_addr_t cpu_release_addr[NR_CPUS];
index 6b95c91e7d6798c00034cc08eaf8b201b9dfdb04..fa9528dfd0ce3cf4331d4d05169a05146a49b0d6 100644 (file)
@@ -60,20 +60,31 @@ topology_populated:
 }
 
 #ifdef CONFIG_ACPI
+static bool __init acpi_cpu_is_threaded(int cpu)
+{
+       int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+       /*
+        * if the PPTT doesn't have thread information, assume a homogeneous
+        * machine and return the current CPU's thread state.
+        */
+       if (is_threaded < 0)
+               is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
+
+       return !!is_threaded;
+}
+
 /*
  * Propagate the topology information of the processor_topology_node tree to the
  * cpu_topology array.
  */
 int __init parse_acpi_topology(void)
 {
-       bool is_threaded;
        int cpu, topology_id;
 
        if (acpi_disabled)
                return 0;
 
-       is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
-
        for_each_possible_cpu(cpu) {
                int i, cache_id;
 
@@ -81,7 +92,7 @@ int __init parse_acpi_topology(void)
                if (topology_id < 0)
                        return topology_id;
 
-               if (is_threaded) {
+               if (acpi_cpu_is_threaded(cpu)) {
                        cpu_topology[cpu].thread_id = topology_id;
                        topology_id = find_acpi_cpu_topology(cpu, 1);
                        cpu_topology[cpu].core_id   = topology_id;
index 32893b3d9164e70ec7d0dcf11619840f07def078..34739e80211bc5f59f88be6793b0e4a8d15d67c8 100644 (file)
@@ -7,9 +7,11 @@
  */
 
 #include <linux/bug.h>
+#include <linux/context_tracking.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/kallsyms.h>
+#include <linux/kprobes.h>
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
@@ -511,7 +513,7 @@ struct sys64_hook {
        void (*handler)(unsigned int esr, struct pt_regs *regs);
 };
 
-static struct sys64_hook sys64_hooks[] = {
+static const struct sys64_hook sys64_hooks[] = {
        {
                .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
@@ -636,7 +638,7 @@ static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
        arm64_compat_skip_faulting_instruction(regs, 4);
 }
 
-static struct sys64_hook cp15_32_hooks[] = {
+static const struct sys64_hook cp15_32_hooks[] = {
        {
                .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
                .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
@@ -656,7 +658,7 @@ static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
        arm64_compat_skip_faulting_instruction(regs, 4);
 }
 
-static struct sys64_hook cp15_64_hooks[] = {
+static const struct sys64_hook cp15_64_hooks[] = {
        {
                .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
                .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
@@ -667,7 +669,7 @@ static struct sys64_hook cp15_64_hooks[] = {
 
 asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
 {
-       struct sys64_hook *hook, *hook_base;
+       const struct sys64_hook *hook, *hook_base;
 
        if (!cp15_cond_valid(esr, regs)) {
                /*
@@ -707,7 +709,7 @@ asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
 
 asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
 {
-       struct sys64_hook *hook;
+       const struct sys64_hook *hook;
 
        for (hook = sys64_hooks; hook->handler; hook++)
                if ((hook->esr_mask & esr) == hook->esr_val) {
@@ -744,6 +746,7 @@ static const char *esr_class_str[] = {
        [ESR_ELx_EC_SMC64]              = "SMC (AArch64)",
        [ESR_ELx_EC_SYS64]              = "MSR/MRS (AArch64)",
        [ESR_ELx_EC_SVE]                = "SVE",
+       [ESR_ELx_EC_ERET]               = "ERET/ERETAA/ERETAB",
        [ESR_ELx_EC_IMP_DEF]            = "EL3 IMP DEF",
        [ESR_ELx_EC_IABT_LOW]           = "IABT (lower EL)",
        [ESR_ELx_EC_IABT_CUR]           = "IABT (current EL)",
@@ -900,6 +903,13 @@ asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
                nmi_exit();
 }
 
+asmlinkage void enter_from_user_mode(void)
+{
+       CT_WARN_ON(ct_state() != CONTEXT_USER);
+       user_exit_irqoff();
+}
+NOKPROBE_SYMBOL(enter_from_user_mode);
+
 void __pte_error(const char *file, int line, unsigned long val)
 {
        pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
index 7fa00837490787c35a5554430f01812418d76dcc..aa76f725966853ec8f2c6418bbc3fccf80920f16 100644 (file)
@@ -200,6 +200,15 @@ SECTIONS
        __rela_offset   = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
        __rela_size     = SIZEOF(.rela.dyn);
 
+#ifdef CONFIG_RELR
+       .relr.dyn : ALIGN(8) {
+               *(.relr.dyn)
+       }
+
+       __relr_offset   = ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
+       __relr_size     = SIZEOF(.relr.dyn);
+#endif
+
        . = ALIGN(SEGMENT_ALIGN);
        __initdata_end = .;
        __init_end = .;
@@ -245,6 +254,8 @@ SECTIONS
        HEAD_SYMBOLS
 }
 
+#include "image-vars.h"
+
 /*
  * The HYP init code and ID map text can't be longer than a page each,
  * and should not cross a page boundary.
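
CONFIG_RELR collects relative relocations into the much denser .relr.dyn section gathered above (with __relr_offset/__relr_size exported for the early relocation code). In the RELR encoding, an even entry is the offset of a 64-bit word to fix up, and an odd entry is a bitmap whose bits 1..63 mark up to 63 further words following the previous address entry. The decoder below is a hypothetical user-space sketch of that encoding in the style of other RELR consumers, not the kernel's boot path; the names are invented.

#include <stdint.h>
#include <stddef.h>

/* Apply a packed RELR table: 'delta' is the load offset added to each
 * relocated word. A well-formed table starts with an address entry. */
static void apply_relr(char *load_base, const uint64_t *relr, size_t count,
                       uint64_t delta)
{
        uint64_t *where = NULL;

        for (size_t i = 0; i < count; i++) {
                uint64_t entry = relr[i];

                if ((entry & 1) == 0) {
                        /* Address entry: relocate this word, remember position. */
                        where = (uint64_t *)(load_base + entry);
                        *where++ += delta;
                } else {
                        /* Bitmap entry: bit n set => relocate where[n - 1]. */
                        for (int n = 1; n < 64; n++)
                                if (entry & (1ULL << n))
                                        where[n - 1] += delta;
                        where += 63;
                }
        }
}

Because runs of adjacent pointers collapse into single bitmap words, a RELR table is substantially smaller than the equivalent .rela.dyn: each 8-byte bitmap can stand in for up to 63 of the 24-byte Elf64_Rela entries.
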
index adaf266d8de88f818954a1414c7f7ca729fb1a32..bd978ad71936dc7413c0d077f7d6c18cdc8f48ac 100644 (file)
@@ -264,7 +264,7 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
        tmp = read_sysreg(par_el1);
        write_sysreg(par, par_el1);
 
-       if (unlikely(tmp & 1))
+       if (unlikely(tmp & SYS_PAR_EL1_F))
                return false; /* Translation failed, back to guest */
 
        /* Convert PAR to HPFAR format */
index acd8084f1f2c1055adc0335d87644542018aa5da..2cf7d4b606c38d1f32e9772bc08716bd331950b8 100644 (file)
@@ -29,25 +29,25 @@ static void compute_layout(void)
        int kva_msb;
 
        /* Where is my RAM region? */
-       hyp_va_msb  = idmap_addr & BIT(VA_BITS - 1);
-       hyp_va_msb ^= BIT(VA_BITS - 1);
+       hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
+       hyp_va_msb ^= BIT(vabits_actual - 1);
 
        kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
                        (u64)(high_memory - 1));
 
-       if (kva_msb == (VA_BITS - 1)) {
+       if (kva_msb == (vabits_actual - 1)) {
                /*
                 * No space in the address, let's compute the mask so
-                * that it covers (VA_BITS - 1) bits, and the region
+                * that it covers (vabits_actual - 1) bits, and the region
                 * bit. The tag stays set to zero.
                 */
-               va_mask  = BIT(VA_BITS - 1) - 1;
+               va_mask  = BIT(vabits_actual - 1) - 1;
                va_mask |= hyp_va_msb;
        } else {
                /*
                 * We do have some free bits to insert a random tag.
                 * Hyp VAs are now created from kernel linear map VAs
-                * using the following formula (with V == VA_BITS):
+                * using the following formula (with V == vabits_actual):
                 *
                 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
                 *  ---------------------------------------------------------
@@ -55,7 +55,7 @@ static void compute_layout(void)
                 */
                tag_lsb = kva_msb;
                va_mask = GENMASK_ULL(tag_lsb - 1, 0);
-               tag_val = get_random_long() & GENMASK_ULL(VA_BITS - 2, tag_lsb);
+               tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
                tag_val |= hyp_va_msb;
                tag_val >>= tag_lsb;
        }
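
The compute_layout() changes above only swap the compile-time VA_BITS for the runtime vabits_actual; the masking scheme itself is unchanged. As a worked illustration, the stand-alone model below re-runs the same arithmetic with invented inputs (vabits_actual = 48, kva_msb = 40, a sample idmap address and a fixed stand-in for get_random_long()); it is not kernel code.

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)              (1ULL << (n))
#define GENMASK_ULL(h, l)       ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
        const unsigned int vabits = 48;         /* assumed vabits_actual */
        const unsigned int kva_msb = 40;        /* assumed MSB of the linear-map span */
        const uint64_t idmap_addr = 0x80000000; /* example idmap address */
        const uint64_t rnd = 0x123456789abcdef0ULL; /* get_random_long() stand-in */

        uint64_t hyp_va_msb = (idmap_addr & BIT_ULL(vabits - 1)) ^ BIT_ULL(vabits - 1);
        uint64_t va_mask, tag_val = 0;
        unsigned int tag_lsb = 0;

        if (kva_msb == vabits - 1) {
                /* No spare bits: keep the low VA bits plus the flipped region bit. */
                va_mask  = BIT_ULL(vabits - 1) - 1;
                va_mask |= hyp_va_msb;
        } else {
                /* Spare bits kva_msb..vabits-2 become a random tag. */
                tag_lsb = kva_msb;
                va_mask = GENMASK_ULL(tag_lsb - 1, 0);
                tag_val = rnd & GENMASK_ULL(vabits - 2, tag_lsb);
                tag_val |= hyp_va_msb;
                tag_val >>= tag_lsb;
        }

        /* A hyp VA is then roughly (kernel_va & va_mask) with the tag
         * inserted at tag_lsb and above. */
        printf("va_mask = %#018llx, tag_lsb = %u, tag_val = %#llx\n",
               (unsigned long long)va_mask, tag_lsb,
               (unsigned long long)tag_val);
        return 0;
}
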
index 33c2a4abda046532c733b1fcc55f90ff8fa68d7c..c21b936dc01db20c086f816e0b71fa954319a768 100644 (file)
@@ -11,25 +11,8 @@ CFLAGS_REMOVE_xor-neon.o     += -mgeneral-regs-only
 CFLAGS_xor-neon.o              += -ffreestanding
 endif
 
-# Tell the compiler to treat all general purpose registers (with the
-# exception of the IP registers, which are already handled by the caller
-# in case of a PLT) as callee-saved, which allows for efficient runtime
-# patching of the bl instruction in the caller with an atomic instruction
-# when supported by the CPU. Result and argument registers are handled
-# correctly, based on the function prototype.
-lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
-CFLAGS_atomic_ll_sc.o  := -ffixed-x1 -ffixed-x2                        \
-                  -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6          \
-                  -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9           \
-                  -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12   \
-                  -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15   \
-                  -fcall-saved-x18 -fomit-frame-pointer
-CFLAGS_REMOVE_atomic_ll_sc.o := $(CC_FLAGS_FTRACE)
-GCOV_PROFILE_atomic_ll_sc.o    := n
-KASAN_SANITIZE_atomic_ll_sc.o  := n
-KCOV_INSTRUMENT_atomic_ll_sc.o := n
-UBSAN_SANITIZE_atomic_ll_sc.o  := n
-
 lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
 
 obj-$(CONFIG_CRC32) += crc32.o
+
+obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/arm64/lib/atomic_ll_sc.c b/arch/arm64/lib/atomic_ll_sc.c
deleted file mode 100644 (file)
index b0c538b..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-#include <asm/atomic.h>
-#define __ARM64_IN_ATOMIC_IMPL
-#include <asm/atomic_ll_sc.h>
diff --git a/arch/arm64/lib/error-inject.c b/arch/arm64/lib/error-inject.c
new file mode 100644 (file)
index 0000000..ed15021
--- /dev/null
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/error-injection.h>
+#include <linux/kprobes.h>
+
+void override_function_with_return(struct pt_regs *regs)
+{
+       /*
+        * 'regs' represents the state on entry of a predefined function in
+        * the kernel/module and which is captured on a kprobe.
+        *
+        * When kprobe returns back from exception it will override the end
+        * of probed function and directly return to the predefined
+        * function's caller.
+        */
+       instruction_pointer_set(regs, procedure_link_pointer(regs));
+}
+NOKPROBE_SYMBOL(override_function_with_return);
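
override_function_with_return() is the arm64 backend for function error injection: as the comment above explains, when the kprobe on an injectable function returns, the saved PC is replaced with the link register so the probed body is skipped and the caller sees a forced return value. A hypothetical module-side sketch of declaring such an injection point follows; the function and module names are invented, and only ALLOW_ERROR_INJECTION() with its ERRNO class is the real interface.

#include <linux/module.h>
#include <linux/error-injection.h>

/* Hypothetical injection target: a failure can be forced here from user
 * space without triggering it for real, because the function is on the
 * error-injection whitelist. */
static noinline int demo_do_work(int arg)
{
        return arg;             /* real work would live here */
}
ALLOW_ERROR_INJECTION(demo_do_work, ERRNO);

static int __init demo_init(void)
{
        pr_info("demo_do_work(0) = %d\n", demo_do_work(0));
        return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");

Injection itself is then driven from user space, for instance through the fail_function fault-injection interface or bpf_override_return(), both of which end up redirecting the probed call via the helper added above.
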
index 82b3a7fdb4a68b83c1be6e5b65b7c8b2eb0bbe3c..93f9f77582ae38e32c93fbe42508cd4987ef2d85 100644 (file)
 #include <asm/pgtable-hwdef.h>
 #include <asm/ptdump.h>
 
-static const struct addr_marker address_markers[] = {
+
+enum address_markers_idx {
+       PAGE_OFFSET_NR = 0,
+       PAGE_END_NR,
 #ifdef CONFIG_KASAN
-       { KASAN_SHADOW_START,           "Kasan shadow start" },
+       KASAN_START_NR,
+#endif
+};
+
+static struct addr_marker address_markers[] = {
+       { PAGE_OFFSET,                  "Linear Mapping start" },
+       { 0 /* PAGE_END */,             "Linear Mapping end" },
+#ifdef CONFIG_KASAN
+       { 0 /* KASAN_SHADOW_START */,   "Kasan shadow start" },
        { KASAN_SHADOW_END,             "Kasan shadow end" },
 #endif
        { MODULES_VADDR,                "Modules start" },
@@ -42,7 +53,6 @@ static const struct addr_marker address_markers[] = {
        { VMEMMAP_START,                "vmemmap start" },
        { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
 #endif
-       { PAGE_OFFSET,                  "Linear mapping" },
        { -1,                           NULL },
 };
 
@@ -376,7 +386,7 @@ static void ptdump_initialize(void)
 static struct ptdump_info kernel_ptdump_info = {
        .mm             = &init_mm,
        .markers        = address_markers,
-       .base_addr      = VA_START,
+       .base_addr      = PAGE_OFFSET,
 };
 
 void ptdump_check_wx(void)
@@ -390,7 +400,7 @@ void ptdump_check_wx(void)
                .check_wx = true,
        };
 
-       walk_pgd(&st, &init_mm, VA_START);
+       walk_pgd(&st, &init_mm, PAGE_OFFSET);
        note_page(&st, 0, 0, 0);
        if (st.wx_pages || st.uxn_pages)
                pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
@@ -401,6 +411,10 @@ void ptdump_check_wx(void)
 
 static int ptdump_init(void)
 {
+       address_markers[PAGE_END_NR].start_address = PAGE_END;
+#ifdef CONFIG_KASAN
+       address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
+#endif
        ptdump_initialize();
        ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
        return 0;
index cfd65b63f36fd05f15557e872bb9e6ec96efa973..115d7a0e4b082679ed2a651c90d8393609f6d1c2 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/bitfield.h>
 #include <linux/extable.h>
 #include <linux/signal.h>
 #include <linux/mm.h>
@@ -86,8 +87,8 @@ static void mem_abort_decode(unsigned int esr)
        pr_alert("Mem abort info:\n");
 
        pr_alert("  ESR = 0x%08x\n", esr);
-       pr_alert("  Exception class = %s, IL = %u bits\n",
-                esr_get_class_string(esr),
+       pr_alert("  EC = 0x%02lx: %s, IL = %u bits\n",
+                ESR_ELx_EC(esr), esr_get_class_string(esr),
                 (esr & ESR_ELx_IL) ? 32 : 16);
        pr_alert("  SET = %lu, FnV = %lu\n",
                 (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT,
@@ -109,7 +110,7 @@ static inline bool is_ttbr0_addr(unsigned long addr)
 static inline bool is_ttbr1_addr(unsigned long addr)
 {
        /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
-       return arch_kasan_reset_tag(addr) >= VA_START;
+       return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
 }
 
 /*
@@ -138,10 +139,9 @@ static void show_pte(unsigned long addr)
                return;
        }
 
-       pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp=%016lx\n",
+       pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
                 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-                mm == &init_mm ? VA_BITS : (int)vabits_user,
-                (unsigned long)virt_to_phys(mm->pgd));
+                vabits_actual, (unsigned long)virt_to_phys(mm->pgd));
        pgdp = pgd_offset(mm, addr);
        pgd = READ_ONCE(*pgdp);
        pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
@@ -242,6 +242,34 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
        return false;
 }
 
+static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
+                                                       unsigned int esr,
+                                                       struct pt_regs *regs)
+{
+       unsigned long flags;
+       u64 par, dfsc;
+
+       if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR ||
+           (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
+               return false;
+
+       local_irq_save(flags);
+       asm volatile("at s1e1r, %0" :: "r" (addr));
+       isb();
+       par = read_sysreg(par_el1);
+       local_irq_restore(flags);
+
+       if (!(par & SYS_PAR_EL1_F))
+               return false;
+
+       /*
+        * If we got a different type of fault from the AT instruction,
+        * treat the translation fault as spurious.
+        */
+       dfsc = FIELD_PREP(SYS_PAR_EL1_FST, par);
+       return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
+}
+
 static void die_kernel_fault(const char *msg, unsigned long addr,
                             unsigned int esr, struct pt_regs *regs)
 {
@@ -270,6 +298,10 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
        if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
                return;
 
+       if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
+           "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
+               return;
+
        if (is_el1_permission_fault(addr, esr, regs)) {
                if (esr & ESR_ELx_WNR)
                        msg = "write to read-only memory";
index f3c795278def0ea01704c6b38fe3d2d140351224..45c00a54909c9b83bbf6b352a00ca1a02b5feff0 100644 (file)
 s64 memstart_addr __ro_after_init = -1;
 EXPORT_SYMBOL(memstart_addr);
 
+s64 physvirt_offset __ro_after_init;
+EXPORT_SYMBOL(physvirt_offset);
+
+struct page *vmemmap __ro_after_init;
+EXPORT_SYMBOL(vmemmap);
+
 phys_addr_t arm64_dma_phys_limit __ro_after_init;
 
 #ifdef CONFIG_KEXEC_CORE
@@ -301,7 +307,7 @@ static void __init fdt_enforce_memory_region(void)
 
 void __init arm64_memblock_init(void)
 {
-       const s64 linear_region_size = -(s64)PAGE_OFFSET;
+       const s64 linear_region_size = BIT(vabits_actual - 1);
 
        /* Handle linux,usable-memory-range property */
        fdt_enforce_memory_region();
@@ -309,19 +315,26 @@ void __init arm64_memblock_init(void)
        /* Remove memory above our supported physical address size */
        memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);
 
-       /*
-        * Ensure that the linear region takes up exactly half of the kernel
-        * virtual address space. This way, we can distinguish a linear address
-        * from a kernel/module/vmalloc address by testing a single bit.
-        */
-       BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));
-
        /*
         * Select a suitable value for the base of physical memory.
         */
        memstart_addr = round_down(memblock_start_of_DRAM(),
                                   ARM64_MEMSTART_ALIGN);
 
+       physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
+
+       vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
+
+       /*
+        * If we are running with a 52-bit kernel VA config on a system that
+        * does not support it, we have to offset our vmemmap and physvirt_offset
+        * s.t. we avoid the 52-bit portion of the direct linear map
+        */
+       if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
+               vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
+               physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
+       }
+
        /*
         * Remove the memory that we will not be able to cover with the
         * linear mapping. Take care not to clip the kernel which may be
@@ -570,8 +583,12 @@ void free_initmem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
+       unsigned long aligned_start, aligned_end;
+
+       aligned_start = __virt_to_phys(start) & PAGE_MASK;
+       aligned_end = PAGE_ALIGN(__virt_to_phys(end));
+       memblock_free(aligned_start, aligned_end - aligned_start);
        free_reserved_area((void *)start, (void *)end, 0, "initrd");
-       memblock_free(__virt_to_phys(start), end - start);
 }
 #endif
 
index fdb595a5d65f8741d00f2bd3b7f0450adee64cbd..9be71bee902ca890c911ec864b2c869724fcc57b 100644 (file)
@@ -69,7 +69,7 @@ void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
 }
 EXPORT_SYMBOL(__ioremap);
 
-void __iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *io_addr)
 {
        unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
 
@@ -80,7 +80,7 @@ void __iounmap(volatile void __iomem *io_addr)
        if (is_vmalloc_addr((void *)addr))
                vunmap((void *)addr);
 }
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
 
 void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
 {
index 6cf97b904ebb4a29c8b7c18462d2799ad9b77872..f87a32484ea82c16eb45717b81b641024f915f17 100644 (file)
@@ -156,7 +156,8 @@ asmlinkage void __init kasan_early_init(void)
 {
        BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
                KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
-       BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+       BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
+       BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
        kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
                           true);
@@ -225,10 +226,10 @@ void __init kasan_init(void)
        kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
                           early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
 
-       kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
-                                   (void *)mod_shadow_start);
+       kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
+                                  (void *)mod_shadow_start);
        kasan_populate_early_shadow((void *)kimg_shadow_end,
-                                   kasan_mem_to_shadow((void *)PAGE_OFFSET));
+                                  (void *)KASAN_SHADOW_END);
 
        if (kimg_shadow_start > mod_shadow_end)
                kasan_populate_early_shadow((void *)mod_shadow_end,
index 750a69dde39bdd76a425b4f7e4d59a093936c5b6..53dc6f24cfb72e66e6b3f77bccad4dce435c6c4b 100644 (file)
@@ -40,8 +40,9 @@
 
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
-u64 vabits_user __ro_after_init;
-EXPORT_SYMBOL(vabits_user);
+
+u64 __section(".mmuoff.data.write") vabits_actual;
+EXPORT_SYMBOL(vabits_actual);
 
 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);
@@ -398,7 +399,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
 static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
 {
-       if (virt < VMALLOC_START) {
+       if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
@@ -425,7 +426,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
                                phys_addr_t size, pgprot_t prot)
 {
-       if (virt < VMALLOC_START) {
+       if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
                pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
@@ -646,6 +647,8 @@ static void __init map_kernel(pgd_t *pgdp)
                set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
                        READ_ONCE(*pgd_offset_k(FIXADDR_START)));
        } else if (CONFIG_PGTABLE_LEVELS > 3) {
+               pgd_t *bm_pgdp;
+               pud_t *bm_pudp;
                /*
                 * The fixmap shares its top level pgd entry with the kernel
                 * mapping. This can really only occur when we are running
@@ -653,9 +656,9 @@ static void __init map_kernel(pgd_t *pgdp)
                 * entry instead.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
-               pud_populate(&init_mm,
-                            pud_set_fixmap_offset(pgdp, FIXADDR_START),
-                            lm_alias(bm_pmd));
+               bm_pgdp = pgd_offset_raw(pgdp, FIXADDR_START);
+               bm_pudp = pud_set_fixmap_offset(bm_pgdp, FIXADDR_START);
+               pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
                pud_clear_fixmap();
        } else {
                BUG();
@@ -876,7 +879,7 @@ void __set_fixmap(enum fixed_addresses idx,
        }
 }
 
-void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
 {
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        int offset;
@@ -929,19 +932,6 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
        return dt_virt;
 }
 
-void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
-{
-       void *dt_virt;
-       int size;
-
-       dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
-       if (!dt_virt)
-               return NULL;
-
-       memblock_reserve(dt_phys, size);
-       return dt_virt;
-}
-
 int __init arch_ioremap_p4d_supported(void)
 {
        return 0;
index 4f241cc7cc3b1262dfe0d083e3dc48468bd62c11..4decf16597008925206085b9e8efacc9ba537a71 100644 (file)
@@ -29,7 +29,7 @@ static __init int numa_parse_early_param(char *opt)
 {
        if (!opt)
                return -EINVAL;
-       if (!strncmp(opt, "off", 3))
+       if (str_has_prefix(opt, "off"))
                numa_off = true;
 
        return 0;
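
Both this numa= hunk and the module-PLTs hunk earlier replace an open-coded strncmp() prefix test with str_has_prefix(), which derives the length from the prefix itself and so cannot drift out of sync with a hard-coded count like the 3 or 5 used before. A user-space restatement of the helper, for illustration (the kernel version lives in include/linux/string.h and returns the prefix length on a match, 0 otherwise):

#include <stddef.h>
#include <string.h>

static size_t str_has_prefix(const char *str, const char *prefix)
{
        size_t len = strlen(prefix);

        return strncmp(str, prefix, len) == 0 ? len : 0;
}

/* e.g. !strncmp(opt, "off", 3)  ->  str_has_prefix(opt, "off") */
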
index 03c53f16ee7767bf1028bad813209ee992774f42..9ce7bd9d4d9ceaf3de79ce4ae67318f2988abdb8 100644 (file)
@@ -128,7 +128,6 @@ int set_memory_nx(unsigned long addr, int numpages)
                                        __pgprot(PTE_PXN),
                                        __pgprot(0));
 }
-EXPORT_SYMBOL_GPL(set_memory_nx);
 
 int set_memory_x(unsigned long addr, int numpages)
 {
@@ -136,7 +135,6 @@ int set_memory_x(unsigned long addr, int numpages)
                                        __pgprot(0),
                                        __pgprot(PTE_PXN));
 }
-EXPORT_SYMBOL_GPL(set_memory_x);
 
 int set_memory_valid(unsigned long addr, int numpages, int enable)
 {
index 7dbf2be470f6c01d548a8d681ad4a35bd08cefaa..a1e0592d1fbcd8833e41b2486869d7ae9898981b 100644 (file)
@@ -168,7 +168,7 @@ ENDPROC(cpu_do_switch_mm)
 .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
        adrp    \tmp1, empty_zero_page
        phys_to_ttbr \tmp2, \tmp1
-       offset_ttbr1 \tmp2
+       offset_ttbr1 \tmp2, \tmp1
        msr     ttbr1_el1, \tmp2
        isb
        tlbi    vmalle1
@@ -187,7 +187,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
 
        __idmap_cpu_set_reserved_ttbr1 x1, x3
 
-       offset_ttbr1 x0
+       offset_ttbr1 x0, x3
        msr     ttbr1_el1, x0
        isb
 
@@ -286,6 +286,15 @@ skip_pgd:
        msr     sctlr_el1, x18
        isb
 
+       /*
+        * Invalidate the local I-cache so that any instructions fetched
+        * speculatively from the PoC are discarded, since they may have
+        * been dynamically patched at the PoU.
+        */
+       ic      iallu
+       dsb     nsh
+       isb
+
        /* Set the flag to zero to indicate that we're all done */
        str     wzr, [flag_ptr]
        ret
@@ -362,7 +371,7 @@ __idmap_kpti_secondary:
        cbnz    w18, 1b
 
        /* All done, act like nothing happened */
-       offset_ttbr1 swapper_ttb
+       offset_ttbr1 swapper_ttb, x18
        msr     ttbr1_el1, swapper_ttb
        isb
        ret
@@ -438,10 +447,11 @@ ENTRY(__cpu_setup)
                        TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
        tcr_clear_errata_bits x10, x9, x5
 
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
-       ldr_l           x9, vabits_user
+#ifdef CONFIG_ARM64_VA_BITS_52
+       ldr_l           x9, vabits_actual
        sub             x9, xzr, x9
        add             x9, x9, #64
+       tcr_set_t1sz    x10, x9
 #else
        ldr_l           x9, idmap_t0sz
 #endif
index 7429a72f3f921996643c48d19dc206fe3034b7dd..92aceef63710861abc7c9605fe2c6b15105c3c97 100644 (file)
@@ -8,10 +8,8 @@
 extern void no_iommu_init(void);
 #ifdef CONFIG_INTEL_IOMMU
 extern int force_iommu, no_iommu;
-extern int iommu_pass_through;
 extern int iommu_detected;
 #else
-#define iommu_pass_through     (0)
 #define no_iommu               (1)
 #define iommu_detected         (0)
 #endif
index fe988c49f01ce6ae844355731ddc50f01fef3df1..f5d49cd3fbb01a9933f5d547682d4adbdde2617e 100644 (file)
@@ -22,8 +22,6 @@ int force_iommu __read_mostly = 1;
 int force_iommu __read_mostly;
 #endif
 
-int iommu_pass_through;
-
 static int __init pci_iommu_init(void)
 {
        if (iommu_detected)
index 902255e7b5b2ad56840561a112433e38d72ccb5c..73bf5ea9ee1bf1cf4a7631ee58e7eceb2c96534f 100644 (file)
@@ -246,9 +246,9 @@ void __init config_atari(void)
        } else if (hwreg_present(tt_palette)) {
                ATARIHW_SET(TT_SHIFTER);
                pr_cont(" TT_SHIFTER");
-       } else if (hwreg_present(&shifter.bas_hi)) {
-               if (hwreg_present(&shifter.bas_lo) &&
-                   (shifter.bas_lo = 0x0aau, shifter.bas_lo == 0x0aau)) {
+       } else if (hwreg_present(&shifter_st.bas_hi)) {
+               if (hwreg_present(&shifter_st.bas_lo) &&
+                   (shifter_st.bas_lo = 0x0aau, shifter_st.bas_lo == 0x0aau)) {
                        ATARIHW_SET(EXTD_SHIFTER);
                        pr_cont(" EXTD_SHIFTER");
                } else {
index 04e0f211afb3892502f0c542d6ed992671b4b921..9a33c1c006a1f9b8c891fe2b1545aef71958b73a 100644 (file)
@@ -125,6 +125,7 @@ CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -250,9 +251,11 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -330,7 +333,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
@@ -450,7 +452,6 @@ CONFIG_RTC_DRV_RP5C01=m
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_DAX=m
-# CONFIG_VALIDATE_FS_PARSER is not set
 CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
@@ -573,6 +574,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -582,8 +584,10 @@ CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
@@ -618,6 +622,7 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
+CONFIG_REED_SOLOMON_TEST=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -636,6 +641,7 @@ CONFIG_TEST_IDA=m
 CONFIG_TEST_VMALLOC=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
@@ -644,4 +650,5 @@ CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
+CONFIG_TEST_MEMINIT=m
 CONFIG_EARLY_PRINTK=y
index c6abbb535878cc1d1f36ce3943f42d461ba842ac..7fdbc797a05d4d657b9dba9439c30c1c570d5061 100644 (file)
@@ -121,6 +121,7 @@ CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -246,9 +247,11 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -315,7 +318,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
@@ -410,7 +412,6 @@ CONFIG_RTC_DRV_GENERIC=m
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_DAX=m
-# CONFIG_VALIDATE_FS_PARSER is not set
 CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
@@ -533,6 +534,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -542,8 +544,10 @@ CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
@@ -578,6 +582,7 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
+CONFIG_REED_SOLOMON_TEST=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -596,6 +601,7 @@ CONFIG_TEST_IDA=m
 CONFIG_TEST_VMALLOC=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
@@ -604,4 +610,5 @@ CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
+CONFIG_TEST_MEMINIT=m
 CONFIG_EARLY_PRINTK=y
index 06ae65bad17713122026946ce2c95ea73afc9300..f1763405a53966c806d7ad8cba2e1f060e581d37 100644 (file)
@@ -128,6 +128,7 @@ CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -253,9 +254,11 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -330,7 +333,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
@@ -432,7 +434,6 @@ CONFIG_RTC_DRV_GENERIC=m
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_DAX=m
-# CONFIG_VALIDATE_FS_PARSER is not set
 CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
@@ -555,6 +556,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -564,8 +566,10 @@ CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
@@ -600,6 +604,7 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
+CONFIG_REED_SOLOMON_TEST=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -618,6 +623,7 @@ CONFIG_TEST_IDA=m
 CONFIG_TEST_VMALLOC=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
@@ -626,4 +632,5 @@ CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
+CONFIG_TEST_MEMINIT=m
 CONFIG_EARLY_PRINTK=y
index 5616b94053b6463395d0ccba7c363fa59e29b3ec..91154d6acb313c72ad7ace8e2ffe1e07fcf390ab 100644 (file)
@@ -118,6 +118,7 @@ CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -243,9 +244,11 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -312,7 +315,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
@@ -403,7 +405,6 @@ CONFIG_RTC_DRV_GENERIC=m
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_DAX=m
-# CONFIG_VALIDATE_FS_PARSER is not set
 CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
@@ -526,6 +527,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -535,8 +537,10 @@ CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
@@ -571,6 +575,7 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
+CONFIG_REED_SOLOMON_TEST=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -589,6 +594,7 @@ CONFIG_TEST_IDA=m
 CONFIG_TEST_VMALLOC=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
@@ -597,4 +603,5 @@ CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
+CONFIG_TEST_MEMINIT=m
 CONFIG_EARLY_PRINTK=y
index 1106521f3b56f6df41b91b5095237998365a2cd6..c398c4a94d95c6669e2dfb4870642f5c11aab366 100644 (file)
@@ -120,6 +120,7 @@ CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -245,9 +246,11 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -314,7 +317,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
@@ -412,7 +414,6 @@ CONFIG_RTC_DRV_GENERIC=m
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_DAX=m
-# CONFIG_VALIDATE_FS_PARSER is not set
 CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
@@ -535,6 +536,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -544,8 +546,10 @@ CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
@@ -580,6 +584,7 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
+CONFIG_REED_SOLOMON_TEST=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -598,6 +603,7 @@ CONFIG_TEST_IDA=m
 CONFIG_TEST_VMALLOC=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
@@ -606,4 +612,5 @@ CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
+CONFIG_TEST_MEMINIT=m
 CONFIG_EARLY_PRINTK=y
index 226c6c063cd44cce300eab48b49ded6eb07df45f..350d004559be84310d383d954b6eca16428ae11d 100644 (file)
@@ -119,6 +119,7 @@ CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -244,9 +245,11 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -321,7 +324,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
@@ -434,7 +436,6 @@ CONFIG_RTC_DRV_GENERIC=m
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_DAX=m
-# CONFIG_VALIDATE_FS_PARSER is not set
 CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
@@ -557,6 +558,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -566,8 +568,10 @@ CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
@@ -602,6 +606,7 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
+CONFIG_REED_SOLOMON_TEST=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -620,6 +625,7 @@ CONFIG_TEST_IDA=m
 CONFIG_TEST_VMALLOC=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
@@ -628,4 +634,5 @@ CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
+CONFIG_TEST_MEMINIT=m
 CONFIG_EARLY_PRINTK=y
index 39f603417928f31e577923d650890f8206eeca63..b838dd820348ede0680513ffea8ab0a7dc59aac0 100644 (file)
@@ -139,6 +139,7 @@ CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -264,9 +265,11 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -354,7 +357,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
@@ -516,7 +518,6 @@ CONFIG_RTC_DRV_GENERIC=m
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_DAX=m
-# CONFIG_VALIDATE_FS_PARSER is not set
 CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
@@ -639,6 +640,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -648,8 +650,10 @@ CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
@@ -684,6 +688,7 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
+CONFIG_REED_SOLOMON_TEST=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -702,6 +707,7 @@ CONFIG_TEST_IDA=m
 CONFIG_TEST_VMALLOC=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
@@ -710,4 +716,5 @@ CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
+CONFIG_TEST_MEMINIT=m
 CONFIG_EARLY_PRINTK=y
index 175a607f576cc75acf66177f3f9773db73f88cb8..3f8dd61559cf06b4dfca80e5c5c5610342cbda6a 100644 (file)
@@ -117,6 +117,7 @@ CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -242,9 +243,11 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -311,7 +314,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
@@ -402,7 +404,6 @@ CONFIG_RTC_DRV_GENERIC=m
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_DAX=m
-# CONFIG_VALIDATE_FS_PARSER is not set
 CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
@@ -525,6 +526,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -534,8 +536,10 @@ CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
@@ -570,6 +574,7 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
+CONFIG_REED_SOLOMON_TEST=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -588,6 +593,7 @@ CONFIG_TEST_IDA=m
 CONFIG_TEST_VMALLOC=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
@@ -596,4 +602,5 @@ CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
+CONFIG_TEST_MEMINIT=m
 CONFIG_EARLY_PRINTK=y
index f41c34d3cdd0a893d00132884869eecdfa7ff8df..ae3b2d4f636c7562ec5160d9c43de622a735bfc1 100644 (file)
@@ -118,6 +118,7 @@ CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -243,9 +244,11 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -312,7 +315,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
@@ -403,7 +405,6 @@ CONFIG_RTC_DRV_GENERIC=m
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_DAX=m
-# CONFIG_VALIDATE_FS_PARSER is not set
 CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
@@ -526,6 +527,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -535,8 +537,10 @@ CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
@@ -571,6 +575,7 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
+CONFIG_REED_SOLOMON_TEST=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -589,6 +594,7 @@ CONFIG_TEST_IDA=m
 CONFIG_TEST_VMALLOC=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
@@ -597,4 +603,5 @@ CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
+CONFIG_TEST_MEMINIT=m
 CONFIG_EARLY_PRINTK=y
index c9d2cb0a1cf4c9709ead8ecb7241c6bc6943acf1..cd61ef14b582812f1cd052e53bcece3d0db7f522 100644 (file)
@@ -119,6 +119,7 @@ CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -244,9 +245,11 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -320,7 +323,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
@@ -421,7 +423,6 @@ CONFIG_RTC_DRV_GENERIC=m
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_DAX=m
-# CONFIG_VALIDATE_FS_PARSER is not set
 CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
@@ -544,6 +545,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -553,8 +555,10 @@ CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
@@ -589,6 +593,7 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
+CONFIG_REED_SOLOMON_TEST=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -607,6 +612,7 @@ CONFIG_TEST_IDA=m
 CONFIG_TEST_VMALLOC=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
@@ -615,4 +621,5 @@ CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
+CONFIG_TEST_MEMINIT=m
 CONFIG_EARLY_PRINTK=y
index 79a64fdd6bf06e6339ee394337a24565c841471c..151f5371cd3d42e12567638f7efe44b567a53cfe 100644 (file)
@@ -115,6 +115,7 @@ CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -240,9 +241,11 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -309,7 +312,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
@@ -405,7 +407,6 @@ CONFIG_RTC_DRV_GENERIC=m
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_DAX=m
-# CONFIG_VALIDATE_FS_PARSER is not set
 CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
@@ -528,6 +529,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -537,8 +539,10 @@ CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
@@ -573,6 +577,7 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
+CONFIG_REED_SOLOMON_TEST=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -591,6 +596,7 @@ CONFIG_TEST_IDA=m
 CONFIG_TEST_VMALLOC=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
@@ -599,3 +605,4 @@ CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
+CONFIG_TEST_MEMINIT=m
index e3402a5d165b0bbd79af9bf36330ef807fd51d1f..1dcb0ee1fe98952278113629154ab6f4fb9dd007 100644 (file)
@@ -115,6 +115,7 @@ CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -240,9 +241,11 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
+CONFIG_NF_CONNTRACK_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
 CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -309,7 +312,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
@@ -404,7 +406,6 @@ CONFIG_RTC_DRV_GENERIC=m
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_DAX=m
-# CONFIG_VALIDATE_FS_PARSER is not set
 CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
@@ -527,6 +528,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -536,8 +538,10 @@ CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
@@ -572,6 +576,7 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
+CONFIG_REED_SOLOMON_TEST=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -590,6 +595,7 @@ CONFIG_TEST_IDA=m
 CONFIG_TEST_VMALLOC=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_FIND_BIT_BENCHMARK=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_SYSCTL=m
@@ -598,4 +604,5 @@ CONFIG_TEST_STATIC_KEYS=m
 CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
+CONFIG_TEST_MEMINIT=m
 CONFIG_EARLY_PRINTK=y
index 533008262b691ad9cd7800e14b676a916a72b4b1..9a038a3edb8370dafeccee2f1d8bcb2c8bcc0dd1 100644 (file)
@@ -22,7 +22,6 @@
 
 #include <linux/types.h>
 #include <asm/bootinfo-atari.h>
-#include <asm/raw_io.h>
 #include <asm/kmap.h>
 
 extern u_long atari_mch_cookie;
@@ -132,14 +131,6 @@ extern struct atari_hw_present atari_hw_present;
  */
 
 
-#define atari_readb   raw_inb
-#define atari_writeb  raw_outb
-
-#define atari_inb_p   raw_inb
-#define atari_outb_p  raw_outb
-
-
-
 #include <linux/mm.h>
 #include <asm/cacheflush.h>
 
@@ -170,7 +161,7 @@ static inline void dma_cache_maintenance( unsigned long paddr,
 #define TT_HIGH 6
 
 #define SHF_BAS (0xffff8200)
-struct SHIFTER
+struct SHIFTER_ST
  {
        u_char pad1;
        u_char bas_hi;
@@ -187,7 +178,7 @@ struct SHIFTER
        u_char pad7;
        u_char bas_lo;
  };
-# define shifter ((*(volatile struct SHIFTER *)SHF_BAS))
+# define shifter_st ((*(volatile struct SHIFTER_ST *)SHF_BAS))
 
 #define SHF_FBAS (0xffff820e)
 struct SHIFTER_F030
index 6c03ca5bc436551e16cf9ec7019cf09414607ba0..819f611dccf28ee2bd27551eacc74305c70b3845 100644 (file)
 #include <asm-generic/iomap.h>
 
 #ifdef CONFIG_ATARI
-#include <asm/atarihw.h>
+#define atari_readb   raw_inb
+#define atari_writeb  raw_outb
+
+#define atari_inb_p   raw_inb
+#define atari_outb_p  raw_outb
 #endif
 
 
index aac7f045f7f0aa8509cf5ef288f8030de8276bce..421b6c9c769d0ab3283a51786a52ad2fbfbbdf12 100644 (file)
@@ -28,14 +28,8 @@ static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
        return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
 
-#define ioremap_nocache ioremap_nocache
-static inline void __iomem *ioremap_nocache(unsigned long physaddr,
-                                           unsigned long size)
-{
-       return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
-
-#define ioremap_uc ioremap_nocache
+#define ioremap_nocache ioremap
+#define ioremap_uc ioremap
 #define ioremap_wt ioremap_wt
 static inline void __iomem *ioremap_wt(unsigned long physaddr,
                                       unsigned long size)
@@ -43,13 +37,6 @@ static inline void __iomem *ioremap_wt(unsigned long physaddr,
        return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
 }
 
-#define ioremap_fullcache ioremap_fullcache
-static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
-                                             unsigned long size)
-{
-       return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
-}
-
 #define memset_io memset_io
 static inline void memset_io(volatile void __iomem *addr, unsigned char val,
                             int count)
index d9a08bed4b128872cbe999ebfabe2479ccb9f317..8a43babcf53a617509a620d46ee8dd85c38decb9 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include <asm/bootinfo-mac.h>
 
@@ -82,11 +83,11 @@ struct mac_model
 #define MAC_EXP_PDS_NUBUS      3 /* Accepts PDS card and/or NuBus card(s) */
 #define MAC_EXP_PDS_COMM       4 /* Accepts PDS card or Comm Slot card */
 
-#define MAC_FLOPPY_IWM         0
-#define MAC_FLOPPY_SWIM_ADDR1  1
-#define MAC_FLOPPY_SWIM_ADDR2  2
-#define MAC_FLOPPY_SWIM_IOP    3
-#define MAC_FLOPPY_AV          4
+#define MAC_FLOPPY_UNSUPPORTED 0
+#define MAC_FLOPPY_SWIM_IOP    1
+#define MAC_FLOPPY_OLD         2
+#define MAC_FLOPPY_QUADRA      3
+#define MAC_FLOPPY_LC          4
 
 extern struct mac_model *macintosh_config;
 
index 205ac75da13d6b095ebc904a2c8c11b921969775..611f73bfc87c95fe7fa0f514d38f4fb0161743eb 100644 (file)
@@ -209,7 +209,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_IWM,
+               .floppy_type    = MAC_FLOPPY_UNSUPPORTED, /* IWM */
        },
 
        /*
@@ -224,7 +224,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_IWM,
+               .floppy_type    = MAC_FLOPPY_UNSUPPORTED, /* IWM */
        }, {
                .ident          = MAC_MODEL_IIX,
                .name           = "IIx",
@@ -233,7 +233,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_IICX,
                .name           = "IIcx",
@@ -242,7 +242,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_SE30,
                .name           = "SE/30",
@@ -251,7 +251,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        },
 
        /*
@@ -269,7 +269,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_IIFX,
                .name           = "IIfx",
@@ -278,7 +278,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_IIFX,
                .scc_type       = MAC_SCC_IOP,
                .expansion_type = MAC_EXP_PDS_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_IOP,
+               .floppy_type    = MAC_FLOPPY_SWIM_IOP, /* SWIM */
        }, {
                .ident          = MAC_MODEL_IISI,
                .name           = "IIsi",
@@ -287,7 +287,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_IIVI,
                .name           = "IIvi",
@@ -296,7 +296,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_LC, /* SWIM */
        }, {
                .ident          = MAC_MODEL_IIVX,
                .name           = "IIvx",
@@ -305,7 +305,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_LC, /* SWIM */
        },
 
        /*
@@ -319,7 +319,7 @@ static struct mac_model mac_data_table[] = {
                .via_type       = MAC_VIA_IICI,
                .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_LC, /* SWIM */
        }, {
                .ident          = MAC_MODEL_CCL,
                .name           = "Color Classic",
@@ -328,7 +328,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_LC, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_CCLII,
                .name           = "Color Classic II",
@@ -337,7 +337,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_LC, /* SWIM 2 */
        },
 
        /*
@@ -352,7 +352,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_LC, /* SWIM */
        }, {
                .ident          = MAC_MODEL_LCII,
                .name           = "LC II",
@@ -361,7 +361,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_LC, /* SWIM */
        }, {
                .ident          = MAC_MODEL_LCIII,
                .name           = "LC III",
@@ -370,7 +370,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_LC, /* SWIM 2 */
        },
 
        /*
@@ -391,7 +391,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_QUADRA,
                .scc_type       = MAC_SCC_QUADRA,
                .expansion_type = MAC_EXP_PDS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR1,
+               .floppy_type    = MAC_FLOPPY_QUADRA, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_Q605_ACC,
                .name           = "Quadra 605",
@@ -400,7 +400,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_QUADRA,
                .scc_type       = MAC_SCC_QUADRA,
                .expansion_type = MAC_EXP_PDS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR1,
+               .floppy_type    = MAC_FLOPPY_QUADRA, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_Q610,
                .name           = "Quadra 610",
@@ -410,7 +410,7 @@ static struct mac_model mac_data_table[] = {
                .scc_type       = MAC_SCC_QUADRA,
                .ether_type     = MAC_ETHER_SONIC,
                .expansion_type = MAC_EXP_PDS_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR1,
+               .floppy_type    = MAC_FLOPPY_QUADRA, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_Q630,
                .name           = "Quadra 630",
@@ -420,7 +420,7 @@ static struct mac_model mac_data_table[] = {
                .ide_type       = MAC_IDE_QUADRA,
                .scc_type       = MAC_SCC_QUADRA,
                .expansion_type = MAC_EXP_PDS_COMM,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR1,
+               .floppy_type    = MAC_FLOPPY_QUADRA, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_Q650,
                .name           = "Quadra 650",
@@ -430,7 +430,7 @@ static struct mac_model mac_data_table[] = {
                .scc_type       = MAC_SCC_QUADRA,
                .ether_type     = MAC_ETHER_SONIC,
                .expansion_type = MAC_EXP_PDS_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR1,
+               .floppy_type    = MAC_FLOPPY_QUADRA, /* SWIM 2 */
        },
        /* The Q700 does have a NS Sonic */
        {
@@ -442,7 +442,7 @@ static struct mac_model mac_data_table[] = {
                .scc_type       = MAC_SCC_QUADRA,
                .ether_type     = MAC_ETHER_SONIC,
                .expansion_type = MAC_EXP_PDS_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR1,
+               .floppy_type    = MAC_FLOPPY_QUADRA, /* SWIM */
        }, {
                .ident          = MAC_MODEL_Q800,
                .name           = "Quadra 800",
@@ -452,7 +452,7 @@ static struct mac_model mac_data_table[] = {
                .scc_type       = MAC_SCC_QUADRA,
                .ether_type     = MAC_ETHER_SONIC,
                .expansion_type = MAC_EXP_PDS_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR1,
+               .floppy_type    = MAC_FLOPPY_QUADRA, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_Q840,
                .name           = "Quadra 840AV",
@@ -462,7 +462,7 @@ static struct mac_model mac_data_table[] = {
                .scc_type       = MAC_SCC_PSC,
                .ether_type     = MAC_ETHER_MACE,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_AV,
+               .floppy_type    = MAC_FLOPPY_UNSUPPORTED, /* New Age */
        }, {
                .ident          = MAC_MODEL_Q900,
                .name           = "Quadra 900",
@@ -472,7 +472,7 @@ static struct mac_model mac_data_table[] = {
                .scc_type       = MAC_SCC_IOP,
                .ether_type     = MAC_ETHER_SONIC,
                .expansion_type = MAC_EXP_PDS_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_IOP,
+               .floppy_type    = MAC_FLOPPY_SWIM_IOP, /* SWIM */
        }, {
                .ident          = MAC_MODEL_Q950,
                .name           = "Quadra 950",
@@ -482,7 +482,7 @@ static struct mac_model mac_data_table[] = {
                .scc_type       = MAC_SCC_IOP,
                .ether_type     = MAC_ETHER_SONIC,
                .expansion_type = MAC_EXP_PDS_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_IOP,
+               .floppy_type    = MAC_FLOPPY_SWIM_IOP, /* SWIM */
        },
 
        /*
@@ -497,7 +497,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_LC, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_P475,
                .name           = "Performa 475",
@@ -506,7 +506,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_QUADRA,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR1,
+               .floppy_type    = MAC_FLOPPY_QUADRA, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_P475F,
                .name           = "Performa 475",
@@ -515,7 +515,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_QUADRA,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR1,
+               .floppy_type    = MAC_FLOPPY_QUADRA, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_P520,
                .name           = "Performa 520",
@@ -524,7 +524,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_LC, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_P550,
                .name           = "Performa 550",
@@ -533,7 +533,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_LC, /* SWIM 2 */
        },
        /* These have the comm slot, and therefore possibly SONIC ethernet */
        {
@@ -544,7 +544,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_QUADRA,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS_COMM,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR1,
+               .floppy_type    = MAC_FLOPPY_QUADRA, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_P588,
                .name           = "Performa 588",
@@ -554,7 +554,7 @@ static struct mac_model mac_data_table[] = {
                .ide_type       = MAC_IDE_QUADRA,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_PDS_COMM,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR1,
+               .floppy_type    = MAC_FLOPPY_QUADRA, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_TV,
                .name           = "TV",
@@ -562,7 +562,7 @@ static struct mac_model mac_data_table[] = {
                .via_type       = MAC_VIA_IICI,
                .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_LC, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_P600,
                .name           = "Performa 600",
@@ -571,7 +571,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_LC, /* SWIM */
        },
 
        /*
@@ -588,7 +588,7 @@ static struct mac_model mac_data_table[] = {
                .scc_type       = MAC_SCC_QUADRA,
                .ether_type     = MAC_ETHER_SONIC,
                .expansion_type = MAC_EXP_PDS_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR1,
+               .floppy_type    = MAC_FLOPPY_QUADRA, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_C650,
                .name           = "Centris 650",
@@ -598,7 +598,7 @@ static struct mac_model mac_data_table[] = {
                .scc_type       = MAC_SCC_QUADRA,
                .ether_type     = MAC_ETHER_SONIC,
                .expansion_type = MAC_EXP_PDS_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR1,
+               .floppy_type    = MAC_FLOPPY_QUADRA, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_C660,
                .name           = "Centris 660AV",
@@ -608,7 +608,7 @@ static struct mac_model mac_data_table[] = {
                .scc_type       = MAC_SCC_PSC,
                .ether_type     = MAC_ETHER_MACE,
                .expansion_type = MAC_EXP_PDS_NUBUS,
-               .floppy_type    = MAC_FLOPPY_AV,
+               .floppy_type    = MAC_FLOPPY_UNSUPPORTED, /* New Age */
        },
 
        /*
@@ -624,7 +624,7 @@ static struct mac_model mac_data_table[] = {
                .via_type       = MAC_VIA_QUADRA,
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_QUADRA,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB145,
                .name           = "PowerBook 145",
@@ -632,7 +632,7 @@ static struct mac_model mac_data_table[] = {
                .via_type       = MAC_VIA_QUADRA,
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_QUADRA,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB150,
                .name           = "PowerBook 150",
@@ -641,7 +641,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_OLD,
                .ide_type       = MAC_IDE_PB,
                .scc_type       = MAC_SCC_QUADRA,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB160,
                .name           = "PowerBook 160",
@@ -649,7 +649,7 @@ static struct mac_model mac_data_table[] = {
                .via_type       = MAC_VIA_QUADRA,
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_QUADRA,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB165,
                .name           = "PowerBook 165",
@@ -657,7 +657,7 @@ static struct mac_model mac_data_table[] = {
                .via_type       = MAC_VIA_QUADRA,
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_QUADRA,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB165C,
                .name           = "PowerBook 165c",
@@ -665,7 +665,7 @@ static struct mac_model mac_data_table[] = {
                .via_type       = MAC_VIA_QUADRA,
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_QUADRA,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB170,
                .name           = "PowerBook 170",
@@ -673,7 +673,7 @@ static struct mac_model mac_data_table[] = {
                .via_type       = MAC_VIA_QUADRA,
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_QUADRA,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB180,
                .name           = "PowerBook 180",
@@ -681,7 +681,7 @@ static struct mac_model mac_data_table[] = {
                .via_type       = MAC_VIA_QUADRA,
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_QUADRA,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB180C,
                .name           = "PowerBook 180c",
@@ -689,7 +689,7 @@ static struct mac_model mac_data_table[] = {
                .via_type       = MAC_VIA_QUADRA,
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_QUADRA,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB190,
                .name           = "PowerBook 190",
@@ -698,7 +698,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_OLD,
                .ide_type       = MAC_IDE_BABOON,
                .scc_type       = MAC_SCC_QUADRA,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM 2 */
        }, {
                .ident          = MAC_MODEL_PB520,
                .name           = "PowerBook 520",
@@ -707,7 +707,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_QUADRA,
                .ether_type     = MAC_ETHER_SONIC,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM 2 */
        },
 
        /*
@@ -724,7 +724,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_DUO,
                .scc_type       = MAC_SCC_QUADRA,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB230,
                .name           = "PowerBook Duo 230",
@@ -733,7 +733,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_DUO,
                .scc_type       = MAC_SCC_QUADRA,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB250,
                .name           = "PowerBook Duo 250",
@@ -742,7 +742,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_DUO,
                .scc_type       = MAC_SCC_QUADRA,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB270C,
                .name           = "PowerBook Duo 270c",
@@ -751,7 +751,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_DUO,
                .scc_type       = MAC_SCC_QUADRA,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB280,
                .name           = "PowerBook Duo 280",
@@ -760,7 +760,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_DUO,
                .scc_type       = MAC_SCC_QUADRA,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        }, {
                .ident          = MAC_MODEL_PB280C,
                .name           = "PowerBook Duo 280c",
@@ -769,7 +769,7 @@ static struct mac_model mac_data_table[] = {
                .scsi_type      = MAC_SCSI_DUO,
                .scc_type       = MAC_SCC_QUADRA,
                .expansion_type = MAC_EXP_NUBUS,
-               .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
+               .floppy_type    = MAC_FLOPPY_OLD, /* SWIM */
        },
 
        /*
@@ -960,7 +960,7 @@ static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
 
 int __init mac_platform_init(void)
 {
-       u8 *swim_base;
+       phys_addr_t swim_base = 0;
 
        if (!MACH_IS_MAC)
                return -ENODEV;
@@ -977,22 +977,22 @@ int __init mac_platform_init(void)
         */
 
        switch (macintosh_config->floppy_type) {
-       case MAC_FLOPPY_SWIM_ADDR1:
-               swim_base = (u8 *)(VIA1_BASE + 0x1E000);
+       case MAC_FLOPPY_QUADRA:
+               swim_base = 0x5001E000;
                break;
-       case MAC_FLOPPY_SWIM_ADDR2:
-               swim_base = (u8 *)(VIA1_BASE + 0x16000);
+       case MAC_FLOPPY_OLD:
+               swim_base = 0x50016000;
                break;
-       default:
-               swim_base = NULL;
+       case MAC_FLOPPY_LC:
+               swim_base = 0x50F16000;
                break;
        }
 
        if (swim_base) {
                struct resource swim_rsrc = {
                        .flags = IORESOURCE_MEM,
-                       .start = (resource_size_t)swim_base,
-                       .end   = (resource_size_t)swim_base + 0x1FFF,
+                       .start = swim_base,
+                       .end   = swim_base + 0x1FFF,
                };
 
                platform_device_register_simple("swim", -1, &swim_rsrc, 1);
index fe61513982b4a916a8ba50b545b8c9bf207d6780..330b19fcd990317d7c34e5a986f677f54e3f2851 100644 (file)
@@ -316,6 +316,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
                                regs->uregs[0] = -EINTR;
                                break;
                        }
+                       /* Else, fall through */
                case -ERESTARTNOINTR:
                        regs->uregs[0] = regs->orig_r0;
                        regs->ipc -= 4;
@@ -360,6 +361,7 @@ static void do_signal(struct pt_regs *regs)
                switch (regs->uregs[0]) {
                case -ERESTART_RESTARTBLOCK:
                        regs->uregs[15] = __NR_restart_syscall;
+                       /* Fall through */
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
index c345b79414a96f07b9abae13eae09735cfa2dcdc..403f7e193833ad4f745b029715913f4f71e60a55 100644 (file)
@@ -39,13 +39,11 @@ endif
 uname := $(shell uname -m)
 KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64)_defconfig
 
-ifdef CONFIG_PPC64
 new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
 
 ifeq ($(new_nm),y)
 NM             := $(NM) --synthetic
 endif
-endif
 
 # BITS is used as extension for files which are available in a 32 bit
 # and a 64 bit version to simplify shared Makefiles.
diff --git a/arch/powerpc/include/asm/error-injection.h b/arch/powerpc/include/asm/error-injection.h
deleted file mode 100644 (file)
index 62fd247..0000000
--- a/arch/powerpc/include/asm/error-injection.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-
-#ifndef _ASM_ERROR_INJECTION_H
-#define _ASM_ERROR_INJECTION_H
-
-#include <linux/compiler.h>
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm-generic/error-injection.h>
-
-void override_function_with_return(struct pt_regs *regs);
-
-#endif /* _ASM_ERROR_INJECTION_H */
index 8fc4de0d22b4ce9cf8b3bbf3e6906211172d6a07..7a84c9f1778e6ade2878ea5387b58d7c91794c8f 100644 (file)
@@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
        }
 }
 
-static bool tm_active_with_fp(struct task_struct *tsk)
-{
-       return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-               (tsk->thread.ckpt_regs.msr & MSR_FP);
-}
-
-static bool tm_active_with_altivec(struct task_struct *tsk)
-{
-       return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-               (tsk->thread.ckpt_regs.msr & MSR_VEC);
-}
 #else
 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
-static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
-static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 bool strict_msr_control;
@@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
 
 static int restore_fp(struct task_struct *tsk)
 {
-       if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
+       if (tsk->thread.load_fp) {
                load_fp_state(&current->thread.fp_state);
                current->thread.load_fp++;
                return 1;
@@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 
 static int restore_altivec(struct task_struct *tsk)
 {
-       if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
-               (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
+       if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
                load_vr_state(&tsk->thread.vr_state);
                tsk->thread.used_vr = 1;
                tsk->thread.load_vec++;
@@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk)
        if (!tsk->thread.regs)
                return;
 
+       check_if_tm_restore_required(tsk);
+
        usermsr = tsk->thread.regs->msr;
 
        if ((usermsr & msr_all_available) == 0)
                return;
 
        msr_check_and_set(msr_all_available);
-       check_if_tm_restore_required(tsk);
 
        WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
 
index e99a14798ab0b833c5566243856187248a38c5fc..c4b606fe73ebcefb3b58594213f5bbea746c6257 100644 (file)
@@ -660,8 +660,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                }
                tce = be64_to_cpu(tce);
 
-               if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua))
-                       return H_PARAMETER;
+               if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
+                       ret = H_PARAMETER;
+                       goto unlock_exit;
+               }
 
                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
index f50bbeedfc66b3c4022f56776731e176e6ca51f1..b4f20f13b8604a3fa1f431c2f5c37e7c03ccbf24 100644 (file)
@@ -556,8 +556,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
 
                ua = 0;
-               if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
-                       return H_PARAMETER;
+               if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
+                       ret = H_PARAMETER;
+                       goto unlock_exit;
+               }
 
                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
index d4acf6fa0596ff8c978fa5b4f47e4268972771c1..bf60983a58c7818c5ba2d36471b37df45cf26c90 100644 (file)
@@ -630,7 +630,6 @@ static void early_init_this_mmu(void)
 #ifdef CONFIG_PPC_FSL_BOOK3E
        if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
                unsigned int num_cams;
-               int __maybe_unused cpu = smp_processor_id();
                bool map = true;
 
                /* use a quarter of the TLBCAM for bolted linear map */
index ef28e106f24723271274b4d8a1ac57b36bf13a90..344db5244547c18b53e10e1183084839fb49960b 100644 (file)
@@ -3,7 +3,8 @@
 #ifndef __ASM_IMAGE_H
 #define __ASM_IMAGE_H
 
-#define RISCV_IMAGE_MAGIC      "RISCV"
+#define RISCV_IMAGE_MAGIC      "RISCV\0\0\0"
+#define RISCV_IMAGE_MAGIC2     "RSC\x05"
 
 #define RISCV_IMAGE_FLAG_BE_SHIFT      0
 #define RISCV_IMAGE_FLAG_BE_MASK       0x1
@@ -23,7 +24,7 @@
 #define __HEAD_FLAGS           (__HEAD_FLAG(BE))
 
 #define RISCV_HEADER_VERSION_MAJOR 0
-#define RISCV_HEADER_VERSION_MINOR 1
+#define RISCV_HEADER_VERSION_MINOR 2
 
 #define RISCV_HEADER_VERSION (RISCV_HEADER_VERSION_MAJOR << 16 | \
                              RISCV_HEADER_VERSION_MINOR)
@@ -39,9 +40,8 @@
  * @version:           version
  * @res1:              reserved
  * @res2:              reserved
- * @magic:             Magic number
- * @res3:              reserved (will be used for additional RISC-V specific
- *                     header)
+ * @magic:             Magic number (RISC-V specific; deprecated)
+ * @magic2:            Magic number 2 (to match the ARM64 'magic' field pos)
  * @res4:              reserved (will be used for PE COFF offset)
  *
  * The intention is for this header format to be shared between multiple
@@ -58,7 +58,7 @@ struct riscv_image_header {
        u32 res1;
        u64 res2;
        u64 magic;
-       u32 res3;
+       u32 magic2;
        u32 res4;
 };
 #endif /* __ASSEMBLY__ */
index 86049aed8251988d8ffbeddaece177dd953173f3..15a9189f91ad27f445f96983d56ab1f3186940b4 100644 (file)
@@ -39,9 +39,9 @@ ENTRY(_start)
        .word RISCV_HEADER_VERSION
        .word 0
        .dword 0
-       .asciz RISCV_IMAGE_MAGIC
-       .word 0
+       .ascii RISCV_IMAGE_MAGIC
        .balign 4
+       .ascii RISCV_IMAGE_MAGIC2
        .word 0
 
 .global _start_kernel
index b5fd6e85657c2fb1fabf796f29deba382307400d..d1ccc168c0714604011f859926d256d4830edecb 100644 (file)
@@ -1961,6 +1961,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
        case KVM_S390_MCHK:
                irq->u.mchk.mcic = s390int->parm64;
                break;
+       case KVM_S390_INT_PFAULT_INIT:
+               irq->u.ext.ext_params = s390int->parm;
+               irq->u.ext.ext_params2 = s390int->parm64;
+               break;
+       case KVM_S390_RESTART:
+       case KVM_S390_INT_CLOCK_COMP:
+       case KVM_S390_INT_CPU_TIMER:
+               break;
+       default:
+               return -EINVAL;
        }
        return 0;
 }
index f329dcb3f44cda289d7ab9291c57a153a307d686..39cff07bf2ebef248e2f9630517e4be31ab3530c 100644 (file)
@@ -1018,6 +1018,8 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
        /* mark all the pages in active slots as dirty */
        for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
                ms = slots->memslots + slotnr;
+               if (!ms->dirty_bitmap)
+                       return -EINVAL;
                /*
                 * The second half of the bitmap is only used on x86,
                 * and would be wasted otherwise, so we put it to good
@@ -4323,7 +4325,7 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
        }
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;
-               struct kvm_s390_irq s390irq;
+               struct kvm_s390_irq s390irq = {};
 
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        return -EFAULT;
index e636728ab452e1818f3566727396945264f3af3b..955eb355c2fdea049412ecbcf81b84427e4cf406 100644 (file)
@@ -863,7 +863,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                break;
        case BPF_ALU64 | BPF_NEG: /* dst = -dst */
                /* lcgr %dst,%dst */
-               EMIT4(0xb9130000, dst_reg, dst_reg);
+               EMIT4(0xb9030000, dst_reg, dst_reg);
                break;
        /*
         * BPF_FROM_BE/LE
@@ -1049,8 +1049,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                /* llgf %w1,map.max_entries(%b2) */
                EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
                              offsetof(struct bpf_array, map.max_entries));
-               /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
-               EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+               /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
+               EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
                                  REG_W1, 0, 0xa);
 
                /*
@@ -1076,8 +1076,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                 *         goto out;
                 */
 
-               /* sllg %r1,%b3,3: %r1 = index * 8 */
-               EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+               /* llgfr %r1,%b3: %r1 = (u32) index */
+               EMIT4(0xb9160000, REG_1, BPF_REG_3);
+               /* sllg %r1,%r1,3: %r1 *= 8 */
+               EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
                /* lg %r1,prog(%b2,%r1) */
                EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
                              REG_1, offsetof(struct bpf_array, ptrs));
index ccc88926bc000b9573af14f578d74098f7c7a8ff..9f41a6f5a032da07f280c29233525694bbbf8540 100644 (file)
@@ -336,25 +336,28 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
 {
        long err;
 
+       if (!IS_ENABLED(CONFIG_SYSVIPC))
+               return -ENOSYS;
+
        /* No need for backward compatibility. We can start fresh... */
        if (call <= SEMTIMEDOP) {
                switch (call) {
                case SEMOP:
-                       err = sys_semtimedop(first, ptr,
-                                            (unsigned int)second, NULL);
+                       err = ksys_semtimedop(first, ptr,
+                                             (unsigned int)second, NULL);
                        goto out;
                case SEMTIMEDOP:
-                       err = sys_semtimedop(first, ptr, (unsigned int)second,
+                       err = ksys_semtimedop(first, ptr, (unsigned int)second,
                                (const struct __kernel_timespec __user *)
-                                            (unsigned long) fifth);
+                                             (unsigned long) fifth);
                        goto out;
                case SEMGET:
-                       err = sys_semget(first, (int)second, (int)third);
+                       err = ksys_semget(first, (int)second, (int)third);
                        goto out;
                case SEMCTL: {
-                       err = sys_semctl(first, second,
-                                        (int)third | IPC_64,
-                                        (unsigned long) ptr);
+                       err = ksys_old_semctl(first, second,
+                                             (int)third | IPC_64,
+                                             (unsigned long) ptr);
                        goto out;
                }
                default:
@@ -365,18 +368,18 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
        if (call <= MSGCTL) {
                switch (call) {
                case MSGSND:
-                       err = sys_msgsnd(first, ptr, (size_t)second,
+                       err = ksys_msgsnd(first, ptr, (size_t)second,
                                         (int)third);
                        goto out;
                case MSGRCV:
-                       err = sys_msgrcv(first, ptr, (size_t)second, fifth,
+                       err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
                                         (int)third);
                        goto out;
                case MSGGET:
-                       err = sys_msgget((key_t)first, (int)second);
+                       err = ksys_msgget((key_t)first, (int)second);
                        goto out;
                case MSGCTL:
-                       err = sys_msgctl(first, (int)second | IPC_64, ptr);
+                       err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
                        goto out;
                default:
                        err = -ENOSYS;
@@ -396,13 +399,13 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
                        goto out;
                }
                case SHMDT:
-                       err = sys_shmdt(ptr);
+                       err = ksys_shmdt(ptr);
                        goto out;
                case SHMGET:
-                       err = sys_shmget(first, (size_t)second, (int)third);
+                       err = ksys_shmget(first, (size_t)second, (int)third);
                        goto out;
                case SHMCTL:
-                       err = sys_shmctl(first, (int)second | IPC_64, ptr);
+                       err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
                        goto out;
                default:
                        err = -ENOSYS;
index d1129828c41e07c3aa2356dbe37ae87bd5b75907..c014ae3c3e488ddd1892184b379ddbc4dd175f34 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/kallsyms.h>
 #include <linux/proc_fs.h>
 #include <linux/syscore_ops.h>
-#include <linux/gpio.h>
 
 #include <mach/hardware.h>
 
index 56e748a7679f4b931b420eaec6bd63f174337e59..94df0868804bcb4b9b37f7abf4985ac95158c4f8 100644 (file)
@@ -38,6 +38,7 @@ REALMODE_CFLAGS       := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
 
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
 export REALMODE_CFLAGS
 
index 2faddeb0398afff4bd24228bbaededb18893c976..c8862696a47b949be25235531a39a65db94eb8ba 100644 (file)
@@ -72,7 +72,7 @@ static unsigned long find_trampoline_placement(void)
 
        /* Find the first usable memory region under bios_start. */
        for (i = boot_params->e820_entries - 1; i >= 0; i--) {
-               unsigned long new;
+               unsigned long new = bios_start;
 
                entry = &boot_params->e820_table[i];
 
index 62f317c9113afc0ff252ffb4b05c82b1ac158bd6..5b35b7ea5d7282d870b7d0d2a519620793deba76 100644 (file)
@@ -661,10 +661,17 @@ fail:
 
        throttle = perf_event_overflow(event, &data, &regs);
 out:
-       if (throttle)
+       if (throttle) {
                perf_ibs_stop(event, 0);
-       else
-               perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
+       } else {
+               period >>= 4;
+
+               if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
+                   (*config & IBS_OP_CNT_CTL))
+                       period |= *config & IBS_OP_CUR_CNT_RAND;
+
+               perf_ibs_enable_event(perf_ibs, hwc, period);
+       }
 
        perf_event_update_userpage(event);
 
index 648260b5f3674bdd4eaab0267faec9458e75e5bb..e4c2cb65ea50a15ef8c35a934e87f23dfdf32ee0 100644 (file)
@@ -3572,6 +3572,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
        return left;
 }
 
+static u64 nhm_limit_period(struct perf_event *event, u64 left)
+{
+       return max(left, 32ULL);
+}
+
 PMU_FORMAT_ATTR(event, "config:0-7"    );
 PMU_FORMAT_ATTR(umask, "config:8-15"   );
 PMU_FORMAT_ATTR(edge,  "config:18"     );
@@ -4606,6 +4611,7 @@ __init int intel_pmu_init(void)
                x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                x86_pmu.extra_regs = intel_nehalem_extra_regs;
+               x86_pmu.limit_period = nhm_limit_period;
 
                mem_attr = nhm_mem_events_attrs;
 
index e65d7fe6489f3e80512a7b38724c929ee647d12f..5208ba49c89a96144ba52d82cabc21ad39a743e8 100644 (file)
@@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
                 * Lower 12 bits encode the number of additional
                 * pages to flush (in addition to the 'cur' page).
                 */
-               if (diff >= HV_TLB_FLUSH_UNIT)
+               if (diff >= HV_TLB_FLUSH_UNIT) {
                        gva_list[gva_n] |= ~PAGE_MASK;
-               else if (diff)
+                       cur += HV_TLB_FLUSH_UNIT;
+               }  else if (diff) {
                        gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
+                       cur = end;
+               }
 
-               cur += HV_TLB_FLUSH_UNIT;
                gva_n++;
 
        } while (cur < end);
index 9e5f3c722c338dc116f8200b726de8d0167507c3..981fe923a59fe5983d385ee042e339aa6cfd131f 100644 (file)
@@ -70,6 +70,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
                        BOOT_PARAM_PRESERVE(eddbuf_entries),
                        BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
                        BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+                       BOOT_PARAM_PRESERVE(secure_boot),
                        BOOT_PARAM_PRESERVE(hdr),
                        BOOT_PARAM_PRESERVE(e820_table),
                        BOOT_PARAM_PRESERVE(eddbuf),
diff --git a/arch/x86/include/asm/error-injection.h b/arch/x86/include/asm/error-injection.h
deleted file mode 100644 (file)
index 47b7a12..0000000
--- a/arch/x86/include/asm/error-injection.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ERROR_INJECTION_H
-#define _ASM_ERROR_INJECTION_H
-
-#include <linux/compiler.h>
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm-generic/error-injection.h>
-
-asmlinkage void just_return_func(void);
-void override_function_with_return(struct pt_regs *regs);
-
-#endif /* _ASM_ERROR_INJECTION_H */
index 287f1f7b2e529419b0401c30c53f3ae7604dcf78..c38a66661576bf7f800de5860b16bd8bae02ccf0 100644 (file)
@@ -16,7 +16,6 @@
 #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 
 #ifndef __ASSEMBLY__
-extern void mcount(void);
 extern atomic_t modifying_ftrace_code;
 extern void __fentry__(void);
 
index baedab8ac5385f7d40ccbe7f17e583f8589bdb49..b91623d521d9f0ffeb23e825221353cba05e0ced 100644 (file)
@@ -4,7 +4,6 @@
 
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
-extern int iommu_pass_through;
 
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
index 74e88e5edd9cf42792cd6e17fa4f5f516b91eff8..bdc16b0aa7c6f8846a0c6864f3d098592542892c 100644 (file)
@@ -335,6 +335,7 @@ struct kvm_mmu_page {
        int root_count;          /* Currently serving as active root */
        unsigned int unsync_children;
        struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
+       unsigned long mmu_valid_gen;
        DECLARE_BITMAP(unsync_child_bitmap, 512);
 
 #ifdef CONFIG_X86_32
@@ -856,6 +857,7 @@ struct kvm_arch {
        unsigned long n_requested_mmu_pages;
        unsigned long n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
+       unsigned long mmu_valid_gen;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
         * Hash table of struct kvm_mmu_page.
index 1392d5e6e8d671fe7d503646399c193dfce2dafa..ee26e9215f18745085ec2e9d13a807558c8de9c9 100644 (file)
@@ -252,16 +252,20 @@ struct pebs_lbr {
 #define IBSCTL_LVT_OFFSET_VALID                (1ULL<<8)
 #define IBSCTL_LVT_OFFSET_MASK         0x0F
 
-/* ibs fetch bits/masks */
+/* IBS fetch bits/masks */
 #define IBS_FETCH_RAND_EN      (1ULL<<57)
 #define IBS_FETCH_VAL          (1ULL<<49)
 #define IBS_FETCH_ENABLE       (1ULL<<48)
 #define IBS_FETCH_CNT          0xFFFF0000ULL
 #define IBS_FETCH_MAX_CNT      0x0000FFFFULL
 
-/* ibs op bits/masks */
-/* lower 4 bits of the current count are ignored: */
-#define IBS_OP_CUR_CNT         (0xFFFF0ULL<<32)
+/*
+ * IBS op bits/masks
+ * The lower 7 bits of the current count are random bits
+ * preloaded by hardware and ignored in software
+ */
+#define IBS_OP_CUR_CNT         (0xFFF80ULL<<32)
+#define IBS_OP_CUR_CNT_RAND    (0x0007FULL<<32)
 #define IBS_OP_CNT_CTL         (1ULL<<19)
 #define IBS_OP_VAL             (1ULL<<18)
 #define IBS_OP_ENABLE          (1ULL<<17)
index 9c4435307ff89b92dde662b5d0d4adb31cb9f220..35c225ede0e4fb55476c94b6055325a64486d534 100644 (file)
@@ -444,8 +444,10 @@ __pu_label:                                                        \
 ({                                                                     \
        int __gu_err;                                                   \
        __inttype(*(ptr)) __gu_val;                                     \
+       __typeof__(ptr) __gu_ptr = (ptr);                               \
+       __typeof__(size) __gu_size = (size);                            \
        __uaccess_begin_nospec();                                       \
-       __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
+       __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT);      \
        __uaccess_end();                                                \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __builtin_expect(__gu_err, 0);                                  \
index d63e63b7d1d9581d8036a98e439185e460eaffe8..251c795b4eb3cd75991aabd6b1cffa6b3fedb8fe 100644 (file)
@@ -21,6 +21,7 @@
 #define PCI_DEVICE_ID_AMD_17H_DF_F4    0x1464
 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
 
 /* Protect the PCI config register pairs used for SMN and DF indirect access. */
 static DEFINE_MUTEX(smn_mutex);
@@ -50,6 +51,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
        {}
 };
 EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
@@ -63,6 +65,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
        {}
 };
index aa5495d0f47832fbc02d102a8fb66180a550b6e9..08fb79f377936c7725af66bf7bdeaf8f94c70c02 100644 (file)
@@ -834,6 +834,10 @@ bool __init apic_needs_pit(void)
        if (!boot_cpu_has(X86_FEATURE_APIC))
                return true;
 
+       /* Virt guests may lack ARAT, but still have DEADLINE */
+       if (!boot_cpu_has(X86_FEATURE_ARAT))
+               return true;
+
        /* Deadline timer is based on TSC so no further PIT action required */
        if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
                return false;
index afee386ff711e95132dd5c01e79317ac22bd9a56..caedd8d60d3610b8b9bc11f5669de16521b3ac2a 100644 (file)
@@ -38,32 +38,12 @@ static int bigsmp_early_logical_apicid(int cpu)
        return early_per_cpu(x86_cpu_to_apicid, cpu);
 }
 
-static inline unsigned long calculate_ldr(int cpu)
-{
-       unsigned long val, id;
-
-       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-       id = per_cpu(x86_bios_cpu_apicid, cpu);
-       val |= SET_APIC_LOGICAL_ID(id);
-
-       return val;
-}
-
 /*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
+ * bigsmp enables physical destination mode
+ * and doesn't use LDR and DFR
  */
 static void bigsmp_init_apic_ldr(void)
 {
-       unsigned long val;
-       int cpu = smp_processor_id();
-
-       apic_write(APIC_DFR, APIC_DFR_FLAT);
-       val = calculate_ldr(cpu);
-       apic_write(APIC_LDR, val);
 }
 
 static void bigsmp_setup_apic_routing(void)
index c7bb6c69f21c98cbc24c9684820ee01122a5045b..d6af97fd170a98262a4618e613dd576f3e9f9f0b 100644 (file)
@@ -2438,7 +2438,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
         * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
         * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
         */
-       return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
+       if (!ioapic_initialized)
+               return gsi_top;
+       /*
+        * For DT enabled machines ioapic_dynirq_base is irrelevant and not
+        * updated. So simply return @from if ioapic_dynirq_base == 0.
+        */
+       return ioapic_dynirq_base ? : from;
 }
 
 #ifdef CONFIG_X86_32
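The replacement return statement uses the GNU "a ?: b" operator, which evaluates a once and yields it if non-zero, otherwise b. A small standalone sketch (lower_bound() and its parameters are invented stand-ins for the function above):

#include <stdio.h>

static unsigned int lower_bound(int initialized, unsigned int dynirq_base,
				unsigned int gsi_top, unsigned int from)
{
	if (!initialized)
		return gsi_top;
	/* dynirq_base == 0 means "not relevant": fall back to @from */
	return dynirq_base ? : from;
}

int main(void)
{
	printf("%u\n", lower_bound(0, 0, 24, 8));	/* 24: not initialized */
	printf("%u\n", lower_bound(1, 0, 24, 8));	/* 8: base irrelevant */
	printf("%u\n", lower_bound(1, 48, 24, 8));	/* 48: use the base */
	return 0;
}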
index 210f1f5db5f75e3f320002a9b6660c62b7806683..87bcdc6dc2f0c54048447faac2f53e5d4aa1733e 100644 (file)
@@ -107,11 +107,11 @@ static struct severity {
         */
        MCESEV(
                AO, "Action optional: memory scrubbing error",
-               SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD_SCRUBMSK, MCI_STATUS_UC|MCACOD_SCRUB)
+               SER, MASK(MCI_UC_AR|MCACOD_SCRUBMSK, MCI_STATUS_UC|MCACOD_SCRUB)
                ),
        MCESEV(
                AO, "Action optional: last level cache writeback error",
-               SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
+               SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
                ),
 
        /* ignore OVER for UCNA */
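The two MASK() changes drop MCI_STATUS_OVER from the match mask, so these entries now apply whether or not the overflow bit is set. A simplified standalone sketch of that table-matching idea (the bit positions and the matches() helper are stand-ins, not the kernel's MASK()/MCESEV machinery):

#include <stdint.h>
#include <stdio.h>

#define MCI_STATUS_UC	(1ULL << 61)	/* assumed bit layout for illustration */
#define MCI_STATUS_OVER	(1ULL << 62)
#define MCACOD_SCRUB	0x00c0ULL

static int matches(uint64_t status, uint64_t mask, uint64_t result)
{
	return (status & mask) == result;
}

int main(void)
{
	uint64_t mask = MCI_STATUS_UC | MCACOD_SCRUB;	/* OVER not in the mask */
	uint64_t want = MCI_STATUS_UC | MCACOD_SCRUB;
	uint64_t status = MCI_STATUS_OVER | MCI_STATUS_UC | MCACOD_SCRUB;

	printf("new mask, OVER set: %d\n", matches(status, mask, want));	/* 1 */
	printf("old mask, OVER set: %d\n",
	       matches(status, mask | MCI_STATUS_OVER, want));			/* 0 */
	return 0;
}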
index f62b498b18fb7f5e7cc6766feba88827e8fce079..fa4352dce491c855ba6f0a1866390c750b4b42a9 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/dma-direct.h>
 #include <linux/dma-debug.h>
+#include <linux/iommu.h>
 #include <linux/dmar.h>
 #include <linux/export.h>
 #include <linux/memblock.h>
@@ -34,21 +35,6 @@ int no_iommu __read_mostly;
 /* Set this to 1 if there is a HW IOMMU in the system */
 int iommu_detected __read_mostly = 0;
 
-/*
- * This variable becomes 1 if iommu=pt is passed on the kernel command line.
- * If this variable is 1, IOMMU implementations do no DMA translation for
- * devices and allow every device to access to whole physical memory. This is
- * useful if a user wants to use an IOMMU only for KVM device assignment to
- * guests and not for driver dma translation.
- * It is also possible to disable by default in kernel config, and enable with
- * iommu=nopt at boot time.
- */
-#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
-int iommu_pass_through __read_mostly = 1;
-#else
-int iommu_pass_through __read_mostly;
-#endif
-
 extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
 
 void __init pci_iommu_alloc(void)
@@ -120,9 +106,9 @@ static __init int iommu_setup(char *p)
                        swiotlb = 1;
 #endif
                if (!strncmp(p, "pt", 2))
-                       iommu_pass_through = 1;
+                       iommu_set_default_passthrough(true);
                if (!strncmp(p, "nopt", 4))
-                       iommu_pass_through = 0;
+                       iommu_set_default_translated(true);
 
                gart_parse_options(p);
 
index d8359ebeea70c26bb7f621a3f8fea1ffb3fbb421..8cd745ef8c7b78662b561cb46e2d6c2beb388876 100644 (file)
@@ -508,9 +508,12 @@ struct uprobe_xol_ops {
        void    (*abort)(struct arch_uprobe *, struct pt_regs *);
 };
 
-static inline int sizeof_long(void)
+static inline int sizeof_long(struct pt_regs *regs)
 {
-       return in_ia32_syscall() ? 4 : 8;
+       /*
+        * Check registers for mode as in_xxx_syscall() does not apply here.
+        */
+       return user_64bit_mode(regs) ? 8 : 4;
 }
 
 static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
@@ -521,9 +524,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 
 static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
 {
-       unsigned long new_sp = regs->sp - sizeof_long();
+       unsigned long new_sp = regs->sp - sizeof_long(regs);
 
-       if (copy_to_user((void __user *)new_sp, &val, sizeof_long()))
+       if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
                return -EFAULT;
 
        regs->sp = new_sp;
@@ -556,7 +559,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
                long correction = utask->vaddr - utask->xol_vaddr;
                regs->ip += correction;
        } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
-               regs->sp += sizeof_long(); /* Pop incorrect return address */
+               regs->sp += sizeof_long(regs); /* Pop incorrect return address */
                if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
                        return -ERESTART;
        }
@@ -675,7 +678,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
         * "call" insn was executed out-of-line. Just restore ->sp and restart.
         * We could also restore ->ip and try to call branch_emulate_op() again.
         */
-       regs->sp += sizeof_long();
+       regs->sp += sizeof_long(regs);
        return -ERESTART;
 }
 
@@ -1056,7 +1059,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 unsigned long
 arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
 {
-       int rasize = sizeof_long(), nleft;
+       int rasize = sizeof_long(regs), nleft;
        unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
 
        if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
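The hunks above replace the parameterless sizeof_long() with sizeof_long(regs), deriving the word size from the probed task's saved register state rather than the current syscall mode. A standalone sketch of the idea (fake_regs and user_is_64bit are invented stand-ins for pt_regs and user_64bit_mode()):

#include <stdio.h>

struct fake_regs {
	int user_is_64bit;	/* stand-in for user_64bit_mode(regs) */
	unsigned long sp;
};

static int sizeof_long(const struct fake_regs *regs)
{
	return regs->user_is_64bit ? 8 : 4;
}

int main(void)
{
	struct fake_regs r64 = { .user_is_64bit = 1, .sp = 0x7fffffffe000UL };
	struct fake_regs r32 = { .user_is_64bit = 0, .sp = 0xffffd000UL };

	/* a return-address slot is one "long" of the probed task's ABI */
	printf("64-bit task pops %d bytes\n", sizeof_long(&r64));
	printf("32-bit task pops %d bytes\n", sizeof_long(&r32));
	return 0;
}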
index c10a8b10b203b706ce2b84b28e45fa0042bb1d77..fff790a3f4ee9484606d4d6aa1614715fe01ad4e 100644 (file)
@@ -1781,7 +1781,7 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
 int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
                                struct kvm_cpuid_entry2 __user *entries)
 {
-       uint16_t evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+       uint16_t evmcs_ver = 0;
        struct kvm_cpuid_entry2 cpuid_entries[] = {
                { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
                { .function = HYPERV_CPUID_INTERFACE },
@@ -1793,6 +1793,9 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
        };
        int i, nent = ARRAY_SIZE(cpuid_entries);
 
+       if (kvm_x86_ops->nested_get_evmcs_version)
+               evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+
        /* Skip NESTED_FEATURES if eVMCS is not supported */
        if (!evmcs_ver)
                --nent;
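The fix above checks the optional nested_get_evmcs_version callback before calling it, defaulting to 0 ("eVMCS not supported"). A standalone sketch of that optional-ops pattern (the x86_ops struct here is a toy stand-in, not kvm_x86_ops):

#include <stdio.h>

struct x86_ops {
	unsigned short (*nested_get_evmcs_version)(void);
};

static unsigned short query_evmcs_version(const struct x86_ops *ops)
{
	unsigned short ver = 0;

	/* optional callback: only call it when the backend provides one */
	if (ops->nested_get_evmcs_version)
		ver = ops->nested_get_evmcs_version();
	return ver;	/* 0 means "eVMCS not supported" */
}

static unsigned short vmx_evmcs_version(void)
{
	return 1;
}

int main(void)
{
	struct x86_ops svm = { .nested_get_evmcs_version = NULL };
	struct x86_ops vmx = { .nested_get_evmcs_version = vmx_evmcs_version };

	printf("svm: %u\n", query_evmcs_version(&svm));
	printf("vmx: %u\n", query_evmcs_version(&vmx));
	return 0;
}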
index 218b277bfda3fdd2b1dd6a7569fc360855f3cc53..a63964e7cec7bdd67f2b1886adefe98f03972185 100644 (file)
@@ -2095,6 +2095,12 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
        if (!direct)
                sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+
+       /*
+        * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
+        * depends on valid pages being added to the head of the list.  See
+        * comments in kvm_zap_obsolete_pages().
+        */
        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
        kvm_mod_used_mmu_pages(vcpu->kvm, +1);
        return sp;
@@ -2244,7 +2250,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 #define for_each_valid_sp(_kvm, _sp, _gfn)                             \
        hlist_for_each_entry(_sp,                                       \
          &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
-               if ((_sp)->role.invalid) {    \
+               if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) {    \
                } else
 
 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)                        \
@@ -2301,6 +2307,11 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
 static void mmu_audit_disable(void) { }
 #endif
 
+static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
+}
+
 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                         struct list_head *invalid_list)
 {
@@ -2525,6 +2536,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                if (level > PT_PAGE_TABLE_LEVEL && need_sync)
                        flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
        }
+       sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
        clear_page(sp->spt);
        trace_kvm_mmu_get_page(sp, true);
 
@@ -4233,6 +4245,13 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
                        return false;
 
                if (cached_root_available(vcpu, new_cr3, new_role)) {
+                       /*
+                        * It is possible that the cached previous root page is
+                        * obsolete because of a change in the MMU generation
+                        * number. However, changing the generation number is
+                        * accompanied by KVM_REQ_MMU_RELOAD, which will free
+                        * the root set here and allocate a new one.
+                        */
                        kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
                        if (!skip_tlb_flush) {
                                kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
@@ -5649,11 +5668,89 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
        return alloc_mmu_pages(vcpu);
 }
 
+
+static void kvm_zap_obsolete_pages(struct kvm *kvm)
+{
+       struct kvm_mmu_page *sp, *node;
+       LIST_HEAD(invalid_list);
+       int ign;
+
+restart:
+       list_for_each_entry_safe_reverse(sp, node,
+             &kvm->arch.active_mmu_pages, link) {
+               /*
+                * No obsolete valid page exists before a newly created page
+                * since active_mmu_pages is a FIFO list.
+                */
+               if (!is_obsolete_sp(kvm, sp))
+                       break;
+
+               /*
+                * Do not repeatedly zap a root page to avoid unnecessary
+                * KVM_REQ_MMU_RELOAD, otherwise we may not be able to
+                * progress:
+                *    vcpu 0                        vcpu 1
+                *                         call vcpu_enter_guest():
+                *                            1): handle KVM_REQ_MMU_RELOAD
+                *                                and require mmu-lock to
+                *                                load mmu
+                * repeat:
+                *    1): zap root page and
+                *        send KVM_REQ_MMU_RELOAD
+                *
+                *    2): if (cond_resched_lock(mmu-lock))
+                *
+                *                            2): hold mmu-lock and load mmu
+                *
+                *                            3): see KVM_REQ_MMU_RELOAD bit
+                *                                on vcpu->requests is set
+                *                                then return 1 to call
+                *                                vcpu_enter_guest() again.
+                *            goto repeat;
+                *
+                * Since we are walking the list in reverse and invalid pages
+                * are moved to the head of the list, skipping invalid pages
+                * lets us avoid walking the list indefinitely.
+                */
+               if (sp->role.invalid)
+                       continue;
+
+               if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+                       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+                       cond_resched_lock(&kvm->mmu_lock);
+                       goto restart;
+               }
+
+               if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
+                       goto restart;
+       }
+
+       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+}
+
+/*
+ * Fast invalidate all shadow pages and use lock-break technique
+ * to zap obsolete pages.
+ *
+ * It is required when a memslot is being deleted or the VM is being
+ * destroyed; in these cases, we must ensure that the KVM MMU no
+ * longer uses any resource of the slot being deleted (or of any
+ * slot) after this function returns.
+ */
+static void kvm_mmu_zap_all_fast(struct kvm *kvm)
+{
+       spin_lock(&kvm->mmu_lock);
+       kvm->arch.mmu_valid_gen++;
+
+       kvm_zap_obsolete_pages(kvm);
+       spin_unlock(&kvm->mmu_lock);
+}
+
 static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
                        struct kvm_memory_slot *slot,
                        struct kvm_page_track_notifier_node *node)
 {
-       kvm_mmu_zap_all(kvm);
+       kvm_mmu_zap_all_fast(kvm);
 }
 
 void kvm_mmu_init_vm(struct kvm *kvm)
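The new fast-zap path bumps a per-VM generation number so that every existing shadow page becomes obsolete, relying on active_mmu_pages being a FIFO list. A toy userspace sketch of that generation scheme (a plain singly linked list standing in for the kernel's list_head machinery):

#include <stdio.h>
#include <stdlib.h>

struct page {
	unsigned long gen;
	struct page *next;	/* singly linked, head = newest */
};

static struct page *head;
static unsigned long current_gen;

static void add_page(void)
{
	struct page *p = malloc(sizeof(*p));

	p->gen = current_gen;	/* stamp with the generation at creation */
	p->next = head;
	head = p;		/* newest pages sit at the head */
}

static int is_obsolete(const struct page *p)
{
	return p->gen != current_gen;
}

int main(void)
{
	add_page();
	add_page();
	current_gen++;		/* "fast zap": everything so far is obsolete */
	add_page();		/* created after the bump, stays valid */

	/* a reverse walk could stop at the first non-obsolete entry */
	for (struct page *p = head; p; p = p->next)
		printf("gen=%lu obsolete=%d\n", p->gen, is_obsolete(p));
	return 0;
}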
index e3d3b2128f2b66c031fd6a9cbb47c9b4f379b87d..e0368076a1ef90660a460bf64f56bfa7b549fd2c 100644 (file)
@@ -7128,12 +7128,6 @@ failed:
        return ret;
 }
 
-static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
-{
-       /* Not supported */
-       return 0;
-}
-
 static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
                                   uint16_t *vmcs_version)
 {
@@ -7332,7 +7326,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .mem_enc_unreg_region = svm_unregister_enc_region,
 
        .nested_enable_evmcs = nested_enable_evmcs,
-       .nested_get_evmcs_version = nested_get_evmcs_version,
+       .nested_get_evmcs_version = NULL,
 
        .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
 };
index ced9fba32598d8435ef9f1d59e963f5c3e134d20..a3cba321b5c5ddb7b2730835c01635c0307e1bb1 100644 (file)
@@ -4540,6 +4540,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
        int len;
        gva_t gva = 0;
        struct vmcs12 *vmcs12;
+       struct x86_exception e;
        short offset;
 
        if (!nested_vmx_check_permission(vcpu))
@@ -4588,7 +4589,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                                vmx_instruction_info, true, len, &gva))
                        return 1;
                /* _system ok, nested_vmx_check_permission has verified cpl=0 */
-               kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL);
+               if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
+                       kvm_inject_page_fault(vcpu, &e);
        }
 
        return nested_vmx_succeed(vcpu);
index 42ed3faa6af8364733b4900895e4b503934538ec..c030c96fc81a817f6e11e3b1580aa907b8bc63f7 100644 (file)
@@ -7797,6 +7797,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .set_nested_state = NULL,
        .get_vmcs12_pages = NULL,
        .nested_enable_evmcs = NULL,
+       .nested_get_evmcs_version = NULL,
        .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
 };
 
index 93b0bd45ac738f6fefea3c7a834758d2e5d91340..91602d310a3fbf0be916d794d0eff9c3beb1b9b2 100644 (file)
@@ -5312,6 +5312,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
        /* kvm_write_guest_virt_system can pull in tons of pages. */
        vcpu->arch.l1tf_flush_l1d = true;
 
+       /*
+        * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+        * is returned, but our callers are not ready for that and they blindly
+        * call kvm_inject_page_fault.  Ensure that they at least do not leak
+        * uninitialized kernel stack memory into cr2 and error code.
+        */
+       memset(exception, 0, sizeof(*exception));
        return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
                                           PFERR_WRITE_MASK, exception);
 }
@@ -6594,12 +6601,13 @@ restart:
                unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
                toggle_interruptibility(vcpu, ctxt->interruptibility);
                vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
-               kvm_rip_write(vcpu, ctxt->eip);
-               if (r == EMULATE_DONE && ctxt->tf)
-                       kvm_vcpu_do_singlestep(vcpu, &r);
                if (!ctxt->have_exception ||
-                   exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+                   exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+                       kvm_rip_write(vcpu, ctxt->eip);
+                       if (r == EMULATE_DONE && ctxt->tf)
+                               kvm_vcpu_do_singlestep(vcpu, &r);
                        __kvm_set_rflags(vcpu, ctxt->eflags);
+               }
 
                /*
                 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
index 6a9a77a403c928e642a754f68c168ebfa6f7b3c1..e14e95ea7338a3adaf013acb7cd848d909f4dd95 100644 (file)
@@ -516,7 +516,7 @@ static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
  */
 static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
                                          unsigned long pfn, unsigned long npg,
-                                         int warnlvl)
+                                         unsigned long lpsize, int warnlvl)
 {
        pgprotval_t forbidden, res;
        unsigned long end;
@@ -535,9 +535,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
        check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
        forbidden = res;
 
-       res = protect_kernel_text_ro(start, end);
-       check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
-       forbidden |= res;
+       /*
+        * Special case to preserve a large page. If the change spans the
+        * full large page mapping then there is no point in splitting it
+        * up. This happens with ftrace and will be removed once ftrace
+        * has switched to text_poke().
+        */
+       if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
+               res = protect_kernel_text_ro(start, end);
+               check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
+               forbidden |= res;
+       }
 
        /* Check the PFN directly */
        res = protect_pci_bios(pfn, pfn + npg - 1);
@@ -819,7 +827,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
         * extra conditional required here.
         */
        chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
-                                     CPA_CONFLICT);
+                                     psize, CPA_CONFLICT);
 
        if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
                /*
@@ -855,7 +863,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
         * protection requirement in the large page.
         */
        new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
-                                     CPA_DETECT);
+                                     psize, CPA_DETECT);
 
        /*
         * If there is a conflict, split the large page.
@@ -906,7 +914,8 @@ static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
        if (!cpa->force_static_prot)
                goto set;
 
-       prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT);
+       /* Hand in lpsize = 0 to enforce the protection mechanism */
+       prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);
 
        if (pgprot_val(prot) == pgprot_val(ref_prot))
                goto set;
@@ -1503,7 +1512,8 @@ repeat:
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
 
                cpa_inc_4k_install();
-               new_prot = static_protections(new_prot, address, pfn, 1,
+               /* Hand in lpsize = 0 to enforce the protection mechanism */
+               new_prot = static_protections(new_prot, address, pfn, 1, 0,
                                              CPA_PROTECT);
 
                new_prot = pgprot_clear_protnone_bits(new_prot);
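The new lpsize argument lets static_protections() skip the text-RO check only when the change covers exactly one aligned large page. A standalone sketch of that condition (spans_full_large_page() is an invented helper mirroring the test in the hunk above):

#include <stdio.h>

#define PAGE_SIZE	4096UL

static int spans_full_large_page(unsigned long start, unsigned long npg,
				 unsigned long lpsize)
{
	/* range is exactly lpsize long and starts on an lpsize boundary */
	return lpsize == npg * PAGE_SIZE && !(start & (lpsize - 1));
}

int main(void)
{
	unsigned long lp = 2UL << 20;	/* 2M large page */

	printf("%d\n", spans_full_large_page(0x200000, 512, lp));	/* 1 */
	printf("%d\n", spans_full_large_page(0x201000, 512, lp));	/* 0: unaligned */
	printf("%d\n", spans_full_large_page(0x200000, 256, lp));	/* 0: partial */
	return 0;
}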
index 8901a1f89cf57bd16cbf79d6625a804274c6afe4..10fb42da0007e9d3c914143ec4e427dd1eec42f5 100644 (file)
@@ -18,37 +18,40 @@ targets += purgatory.ro
 KASAN_SANITIZE := n
 KCOV_INSTRUMENT := n
 
+# These are adjustments to the compiler flags used for objects that
+# make up the standalone purgatory.ro
+
+PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
+PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
+
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
 # in turn leaves some undefined symbols like __fentry__ in purgatory and not
 # sure how to relocate those.
 ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_sha256.o         += $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_purgatory.o      += $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_string.o         += $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_kexec-purgatory.o        += $(CC_FLAGS_FTRACE)
+PURGATORY_CFLAGS_REMOVE                += $(CC_FLAGS_FTRACE)
 endif
 
 ifdef CONFIG_STACKPROTECTOR
-CFLAGS_REMOVE_sha256.o         += -fstack-protector
-CFLAGS_REMOVE_purgatory.o      += -fstack-protector
-CFLAGS_REMOVE_string.o         += -fstack-protector
-CFLAGS_REMOVE_kexec-purgatory.o        += -fstack-protector
+PURGATORY_CFLAGS_REMOVE                += -fstack-protector
 endif
 
 ifdef CONFIG_STACKPROTECTOR_STRONG
-CFLAGS_REMOVE_sha256.o         += -fstack-protector-strong
-CFLAGS_REMOVE_purgatory.o      += -fstack-protector-strong
-CFLAGS_REMOVE_string.o         += -fstack-protector-strong
-CFLAGS_REMOVE_kexec-purgatory.o        += -fstack-protector-strong
+PURGATORY_CFLAGS_REMOVE                += -fstack-protector-strong
 endif
 
 ifdef CONFIG_RETPOLINE
-CFLAGS_REMOVE_sha256.o         += $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_purgatory.o      += $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_string.o         += $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_kexec-purgatory.o        += $(RETPOLINE_CFLAGS)
+PURGATORY_CFLAGS_REMOVE                += $(RETPOLINE_CFLAGS)
 endif
 
+CFLAGS_REMOVE_purgatory.o      += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_purgatory.o             += $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_sha256.o         += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_sha256.o                        += $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_string.o         += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_string.o                        += $(PURGATORY_CFLAGS)
+
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
                $(call if_changed,ld)
 
index 8569b79e8b581e894b129871a5fac2bc35994c62..5a7551d060f250b39b345646abd0a3299a515aa0 100644 (file)
@@ -1256,12 +1256,12 @@ static int  __init arm_smmu_v3_set_proximity(struct device *dev,
 
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
        if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
-               int node = acpi_map_pxm_to_node(smmu->pxm);
+               int dev_node = acpi_map_pxm_to_node(smmu->pxm);
 
-               if (node != NUMA_NO_NODE && !node_online(node))
+               if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
                        return -EINVAL;
 
-               set_dev_node(dev, node);
+               set_dev_node(dev, dev_node);
                pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
                        smmu->base_address,
                        smmu->pxm);
index 1e7ac0bd0d3a090c42136e2b4fc213a31924516c..f31544d3656e3dfb400ce86d51ea46679eafde3b 100644 (file)
@@ -540,6 +540,44 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
        return retval;
 }
 
+/**
+ * check_acpi_cpu_flag() - Determine if CPU node has a flag set
+ * @cpu: Kernel logical CPU number
+ * @rev: The minimum PPTT revision defining the flag
+ * @flag: The flag itself
+ *
+ * Check the node representing a CPU for a given flag.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found or
+ *        the table revision isn't new enough.
+ *        1, any passed flag set
+ *        0, flag unset
+ */
+static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
+{
+       struct acpi_table_header *table;
+       acpi_status status;
+       u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+       struct acpi_pptt_processor *cpu_node = NULL;
+       int ret = -ENOENT;
+
+       status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+       if (ACPI_FAILURE(status)) {
+               acpi_pptt_warn_missing();
+               return ret;
+       }
+
+       if (table->revision >= rev)
+               cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+
+       if (cpu_node)
+               ret = (cpu_node->flags & flag) != 0;
+
+       acpi_put_table(table);
+
+       return ret;
+}
+
 /**
  * acpi_find_last_cache_level() - Determines the number of cache levels for a PE
  * @cpu: Kernel logical CPU number
@@ -604,6 +642,20 @@ int cache_setup_acpi(unsigned int cpu)
        return status;
 }
 
+/**
+ * acpi_pptt_cpu_is_thread() - Determine if CPU is a thread
+ * @cpu: Kernel logical CPU number
+ *
+ * Return: 1, a thread
+ *         0, not a thread
+ *         -ENOENT, if the PPTT doesn't exist, the CPU cannot be found or
+ *         the table revision isn't new enough.
+ */
+int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+       return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD);
+}
+
 /**
  * find_acpi_cpu_topology() - Determine a unique topology value for a given CPU
  * @cpu: Kernel logical CPU number
@@ -664,7 +716,6 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
        return ret;
 }
 
-
 /**
  * find_acpi_cpu_topology_package() - Determine a unique CPU package value
  * @cpu: Kernel logical CPU number
index 2e2efa577437e82ecdeb20d9d41b6ce63635bf16..8c37294f1d1ee4c07dacaec843a0190b0320d5fc 100644 (file)
@@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI
          make the card work).
 
 config ATM_NICSTAR_USE_IDT77105
-       bool "Use IDT77015 PHY driver (25Mbps)"
+       bool "Use IDT77105 PHY driver (25Mbps)"
        depends on ATM_NICSTAR
        help
          Support for the PHYsical layer chip in ForeRunner LE25 cards. In
index e5e1b3a01b1a3e9099f5f745bc0e3a04320d2ed2..e72843fe41dfeea565b2c0dc2722688561670228 100644 (file)
@@ -588,14 +588,6 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
        }
 
        map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
-       if (!map->debugfs) {
-               dev_warn(map->dev,
-                        "Failed to create %s debugfs directory\n", name);
-
-               kfree(map->debugfs_name);
-               map->debugfs_name = NULL;
-               return;
-       }
 
        debugfs_create_file("name", 0400, map->debugfs,
                            map, &regmap_name_fops);
@@ -672,10 +664,6 @@ void regmap_debugfs_initcall(void)
        struct regmap_debugfs_node *node, *tmp;
 
        regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
-       if (!regmap_debugfs_root) {
-               pr_warn("regmap: Failed to create debugfs root\n");
-               return;
-       }
 
        mutex_lock(&regmap_debugfs_early_lock);
        list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
index c9dc70ceca5f104a9c329f0d01fd7bd6a5c9bed7..3d64c9331a82aa61cdab5be03fca8c5c08b4aea6 100644 (file)
@@ -370,7 +370,6 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
                if (ret < 0) {
                        dev_err(map->dev, "IRQ thread failed to resume: %d\n",
                                ret);
-                       pm_runtime_put(map->dev);
                        goto exit;
                }
        }
@@ -425,8 +424,6 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
                                        dev_err(map->dev,
                                                "Failed to read IRQ status %d\n",
                                                ret);
-                                       if (chip->runtime_pm)
-                                               pm_runtime_put(map->dev);
                                        goto exit;
                                }
                        }
@@ -478,8 +475,6 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
                                dev_err(map->dev,
                                        "Failed to read IRQ status: %d\n",
                                        ret);
-                               if (chip->runtime_pm)
-                                       pm_runtime_put(map->dev);
                                goto exit;
                        }
                }
@@ -513,10 +508,10 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
                }
        }
 
+exit:
        if (chip->runtime_pm)
                pm_runtime_put(map->dev);
 
-exit:
        if (chip->handle_post_irq)
                chip->handle_post_irq(chip->irq_drv_data);
 
index 3327192bb71f73c9e4f4500203f39d8c1de01a0a..c8fb886aebd4e9ea569203c446cf537c756026d0 100644 (file)
@@ -3038,6 +3038,17 @@ again:
                }
                return true;
        case RBD_OBJ_READ_PARENT:
+               /*
+                * The parent image is read only up to the overlap -- zero-fill
+                * from the overlap to the end of the request.
+                */
+               if (!*result) {
+                       u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
+
+                       if (obj_overlap < obj_req->ex.oe_len)
+                               rbd_obj_zero_range(obj_req, obj_overlap,
+                                           obj_req->ex.oe_len - obj_overlap);
+               }
                return true;
        default:
                BUG();
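The new branch zero-fills the part of the request that lies beyond the parent overlap. A standalone sketch of that computation (zero_parent_tail() is an invented helper, not rbd code):

#include <stdio.h>
#include <string.h>

static void zero_parent_tail(char *buf, size_t req_len, size_t overlap)
{
	/* the parent image only covers [0, overlap); clear the rest */
	if (overlap < req_len)
		memset(buf + overlap, 0, req_len - overlap);
}

int main(void)
{
	char buf[8] = "AAAAAAA";	/* pretend this came from the parent */

	zero_parent_tail(buf, sizeof(buf), 4);	/* parent covers 4 bytes */
	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x ", (unsigned char)buf[i]);
	printf("\n");
	return 0;
}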
index a0e84538cec88bfb7b80ee0dae4cef69282b589f..1fa58c059cbf5c6e115ccf5c3970b1165afe89b2 100644 (file)
@@ -337,7 +337,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 
        usb_free_urb(urb);
 
-       return 0;
+       return err;
 }
 
 static int bpa10x_set_diag(struct hci_dev *hdev, bool enable)
index 5cf0734eb31bf923de8c18cb8e8ad5d23a284a2f..ba41490543040a723bbdb868626225be0612846d 100644 (file)
@@ -384,6 +384,9 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK },
        { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
 
+       /* Additional Realtek 8822CE Bluetooth devices */
+       { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },
+
        /* Silicon Wave based devices */
        { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
 
@@ -1170,10 +1173,6 @@ static int btusb_open(struct hci_dev *hdev)
        }
 
        data->intf->needs_remote_wakeup = 1;
-       /* device specific wakeup source enabled and required for USB
-        * remote wakeup while host is suspended
-        */
-       device_wakeup_enable(&data->udev->dev);
 
        if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
                goto done;
@@ -1238,7 +1237,6 @@ static int btusb_close(struct hci_dev *hdev)
                goto failed;
 
        data->intf->needs_remote_wakeup = 0;
-       device_wakeup_disable(&data->udev->dev);
        usb_autopm_put_interface(data->intf);
 
 failed:
index 9a970fd1975aab411d96912593606b5096d81180..31dec435767be3757adcfeadad1ce2cfbcad620a 100644 (file)
@@ -309,13 +309,14 @@ static void qca_wq_awake_device(struct work_struct *work)
                                            ws_awake_device);
        struct hci_uart *hu = qca->hu;
        unsigned long retrans_delay;
+       unsigned long flags;
 
        BT_DBG("hu %p wq awake device", hu);
 
        /* Vote for serial clock */
        serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
 
-       spin_lock(&qca->hci_ibs_lock);
+       spin_lock_irqsave(&qca->hci_ibs_lock, flags);
 
        /* Send wake indication to device */
        if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
@@ -327,7 +328,7 @@ static void qca_wq_awake_device(struct work_struct *work)
        retrans_delay = msecs_to_jiffies(qca->wake_retrans);
        mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
 
-       spin_unlock(&qca->hci_ibs_lock);
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
 
        /* Actually send the packets */
        hci_uart_tx_wakeup(hu);
@@ -338,12 +339,13 @@ static void qca_wq_awake_rx(struct work_struct *work)
        struct qca_data *qca = container_of(work, struct qca_data,
                                            ws_awake_rx);
        struct hci_uart *hu = qca->hu;
+       unsigned long flags;
 
        BT_DBG("hu %p wq awake rx", hu);
 
        serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
 
-       spin_lock(&qca->hci_ibs_lock);
+       spin_lock_irqsave(&qca->hci_ibs_lock, flags);
        qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
 
        /* Always acknowledge device wake up,
@@ -354,7 +356,7 @@ static void qca_wq_awake_rx(struct work_struct *work)
 
        qca->ibs_sent_wacks++;
 
-       spin_unlock(&qca->hci_ibs_lock);
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
 
        /* Actually send the packets */
        hci_uart_tx_wakeup(hu);
index 19d7b6ff2f1793b23d1e80507d733b4de3f8d9b8..20c957185af203ff6e5dbc0e466198188ed3bfb1 100644 (file)
@@ -456,6 +456,17 @@ struct hisi_lpc_acpi_cell {
        size_t pdata_size;
 };
 
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+       struct acpi_device *adev = ACPI_COMPANION(hostdev);
+       struct acpi_device *child;
+
+       device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
+
+       list_for_each_entry(child, &adev->children, node)
+               acpi_device_clear_enumerated(child);
+}
+
 /*
  * hisi_lpc_acpi_probe - probe children for ACPI FW
  * @hostdev: LPC host device pointer
@@ -555,8 +566,7 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
        return 0;
 
 fail:
-       device_for_each_child(hostdev, NULL,
-                             hisi_lpc_acpi_remove_subdev);
+       hisi_lpc_acpi_remove(hostdev);
        return ret;
 }
 
@@ -569,6 +579,10 @@ static int hisi_lpc_acpi_probe(struct device *dev)
 {
        return -ENODEV;
 }
+
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+}
 #endif // CONFIG_ACPI
 
 /*
@@ -606,24 +620,27 @@ static int hisi_lpc_probe(struct platform_device *pdev)
        range->fwnode = dev->fwnode;
        range->flags = LOGIC_PIO_INDIRECT;
        range->size = PIO_INDIRECT_SIZE;
+       range->hostdata = lpcdev;
+       range->ops = &hisi_lpc_ops;
+       lpcdev->io_host = range;
 
        ret = logic_pio_register_range(range);
        if (ret) {
                dev_err(dev, "register IO range failed (%d)!\n", ret);
                return ret;
        }
-       lpcdev->io_host = range;
 
        /* register the LPC host PIO resources */
        if (acpi_device)
                ret = hisi_lpc_acpi_probe(dev);
        else
                ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
-       if (ret)
+       if (ret) {
+               logic_pio_unregister_range(range);
                return ret;
+       }
 
-       lpcdev->io_host->hostdata = lpcdev;
-       lpcdev->io_host->ops = &hisi_lpc_ops;
+       dev_set_drvdata(dev, lpcdev);
 
        io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
        dev_info(dev, "registered range [%pa - %pa]\n",
@@ -632,6 +649,23 @@ static int hisi_lpc_probe(struct platform_device *pdev)
        return ret;
 }
 
+static int hisi_lpc_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct acpi_device *acpi_device = ACPI_COMPANION(dev);
+       struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
+       struct logic_pio_hwaddr *range = lpcdev->io_host;
+
+       if (acpi_device)
+               hisi_lpc_acpi_remove(dev);
+       else
+               of_platform_depopulate(dev);
+
+       logic_pio_unregister_range(range);
+
+       return 0;
+}
+
 static const struct of_device_id hisi_lpc_of_match[] = {
        { .compatible = "hisilicon,hip06-lpc", },
        { .compatible = "hisilicon,hip07-lpc", },
@@ -645,5 +679,6 @@ static struct platform_driver hisi_lpc_driver = {
                .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
        },
        .probe = hisi_lpc_probe,
+       .remove = hisi_lpc_remove,
 };
 builtin_platform_driver(hisi_lpc_driver);
index e6deabd8305d789a9942988f26b6aa584cae2e46..2db474ab4c6bea105a5f355f9036d3f7027b3143 100644 (file)
@@ -949,7 +949,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
                *best_mode = SYSC_IDLE_SMART_WKUP;
        else if (idlemodes & BIT(SYSC_IDLE_SMART))
                *best_mode = SYSC_IDLE_SMART;
-       else if (idlemodes & SYSC_IDLE_FORCE)
+       else if (idlemodes & BIT(SYSC_IDLE_FORCE))
                *best_mode = SYSC_IDLE_FORCE;
        else
                return -EINVAL;
@@ -1267,7 +1267,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
        SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
                   0xffff00f0, 0),
-       SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0),
+       SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
+       SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
        SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
        SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
        SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
@@ -1692,10 +1693,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata)
        if (error)
                return 0;
 
-       if (val)
-               ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
-       else
-               ddata->cfg.sysc_val = ddata->cap->sysc_mask;
+       ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
 
        return 0;
 }
@@ -2385,27 +2383,27 @@ static int sysc_probe(struct platform_device *pdev)
 
        error = sysc_init_dts_quirks(ddata);
        if (error)
-               goto unprepare;
+               return error;
 
        error = sysc_map_and_check_registers(ddata);
        if (error)
-               goto unprepare;
+               return error;
 
        error = sysc_init_sysc_mask(ddata);
        if (error)
-               goto unprepare;
+               return error;
 
        error = sysc_init_idlemodes(ddata);
        if (error)
-               goto unprepare;
+               return error;
 
        error = sysc_init_syss_mask(ddata);
        if (error)
-               goto unprepare;
+               return error;
 
        error = sysc_init_pdata(ddata);
        if (error)
-               goto unprepare;
+               return error;
 
        sysc_init_early_quirks(ddata);
 
@@ -2415,7 +2413,7 @@ static int sysc_probe(struct platform_device *pdev)
 
        error = sysc_init_resets(ddata);
        if (error)
-               return error;
+               goto unprepare;
 
        error = sysc_init_module(ddata);
        if (error)
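The BIT(SYSC_IDLE_FORCE) fix in the first hunk of this file matters because SYSC_IDLE_FORCE is a mode number, not a bitmask. A standalone sketch showing why the unwrapped test never matches (the mode values are stand-ins consistent with an enum starting at 0):

#include <stdio.h>

#define BIT(n)		(1U << (n))
#define SYSC_IDLE_FORCE	0	/* stand-in values for the idle mode numbers */
#define SYSC_IDLE_NO	1
#define SYSC_IDLE_SMART	2

int main(void)
{
	unsigned int idlemodes = BIT(SYSC_IDLE_FORCE);	/* only FORCE supported */

	printf("buggy test:   %u\n", idlemodes & SYSC_IDLE_FORCE);	/* 0: misses it */
	printf("correct test: %u\n", idlemodes & BIT(SYSC_IDLE_FORCE));	/* 1: matches */
	return 0;
}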
index 3e866885a40513596148b9c5a5a0db5add1c42ff..2794f4b3f62db6f0cd0472ff1d04c708e63e48b3 100644 (file)
@@ -573,3 +573,12 @@ config RANDOM_TRUST_CPU
        has not installed a hidden back door to compromise the CPU's
        random number generation facilities. This can also be configured
        at boot with "random.trust_cpu=on/off".
+
+config RANDOM_TRUST_BOOTLOADER
+       bool "Trust the bootloader to initialize Linux's CRNG"
+       help
+       Some bootloaders can provide entropy to increase the kernel's initial
+       device randomness. Say Y here to assume the entropy provided by the
+       bootloader is trustworthy so it will be added to the kernel's entropy
+       pool. Otherwise, say N here so it will be regarded as device input
+       that only mixes into the entropy pool.
\ No newline at end of file
index 5d5ea4ce144293e3e99cc7b7bcc68250cf12df5b..566922df4b7b551e6abdeda96b727b938696c0c9 100644 (file)
@@ -2445,3 +2445,17 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
        credit_entropy_bits(poolp, entropy);
 }
 EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
+
+/* Handle a random seed passed in by the bootloader.
+ * If the seed is trustworthy, it is credited as if it came from a hardware
+ * RNG; otherwise it is treated as device data.
+ * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
+ */
+void add_bootloader_randomness(const void *buf, unsigned int size)
+{
+       if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
+               add_hwgenerator_randomness(buf, size, size * 8);
+       else
+               add_device_randomness(buf, size);
+}
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
\ No newline at end of file
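A standalone sketch of the compile-time dispatch in add_bootloader_randomness() (TRUST_BOOTLOADER and the two helpers are invented stand-ins for the Kconfig option and the real add_*_randomness() calls):

#include <stdio.h>

#define TRUST_BOOTLOADER 1	/* stand-in for CONFIG_RANDOM_TRUST_BOOTLOADER */

static void credit_as_hwrng(const void *buf, unsigned int size)
{
	(void)buf;
	printf("credited %u bits of entropy\n", size * 8);
}

static void mix_as_device_data(const void *buf, unsigned int size)
{
	(void)buf;
	printf("mixed %u bytes without crediting entropy\n", size);
}

static void add_bootloader_seed(const void *buf, unsigned int size)
{
	if (TRUST_BOOTLOADER)
		credit_as_hwrng(buf, size);	/* trusted: counts toward init */
	else
		mix_as_device_data(buf, size);	/* untrusted: mixed in only */
}

int main(void)
{
	unsigned char seed[16] = { 0 };

	add_bootloader_seed(seed, sizeof(seed));
	return 0;
}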
index 88a3c06fc153e087640460a4c52713377729a1b5..9c37047f4b5625b8ea3fd5b3b93796729d1b4bdc 100644 (file)
@@ -164,6 +164,11 @@ config TCG_VTPM_PROXY
          /dev/vtpmX and a server-side file descriptor on which the vTPM
          can receive commands.
 
+config TCG_FTPM_TEE
+       tristate "TEE based fTPM Interface"
+       depends on TEE && OPTEE
+       help
+         This driver proxies for a firmware TPM running in a TEE.
 
 source "drivers/char/tpm/st33zp24/Kconfig"
 endif # TCG_TPM
index a01c4cab902a662bd842427e2708bbbfd8fbc660..c354cdff9c625fad7f813a269924053de0293a32 100644 (file)
@@ -33,3 +33,4 @@ obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/
 obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
 obj-$(CONFIG_TCG_CRB) += tpm_crb.o
 obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
+obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
index 4838c6a9f0f2c702e932066fba62d7a336a87f36..3d6d394a866188107f48495298dfe5a43d8e93f0 100644 (file)
@@ -287,12 +287,9 @@ static void tpm_devs_release(struct device *dev)
  * @dev: device to which the chip is associated.
  *
  * Issues a TPM2_Shutdown command prior to loss of power, as required by the
- * TPM 2.0 spec.
- * Then, calls bus- and device- specific shutdown code.
+ * TPM 2.0 spec. Then, calls bus- and device- specific shutdown code.
  *
- * XXX: This codepath relies on the fact that sysfs is not enabled for
- * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
- * has sysfs support enabled before TPM sysfs's implicit locking is fixed.
+ * Return: always 0 (i.e. success)
  */
 static int tpm_class_shutdown(struct device *dev)
 {
index d9caedda075b2f5e690b5a34f086e1902ac774b4..edfa89160010c5cf29e1d2d4e76016323a45e306 100644 (file)
@@ -329,16 +329,9 @@ static const struct attribute_group tpm_dev_group = {
 
 void tpm_sysfs_add_device(struct tpm_chip *chip)
 {
-       /* XXX: If you wish to remove this restriction, you must first update
-        * tpm_sysfs to explicitly lock chip->ops.
-        */
        if (chip->flags & TPM_CHIP_FLAG_TPM2)
                return;
 
-       /* The sysfs routines rely on an implicit tpm_try_get_ops, device_del
-        * is called before ops is null'd and the sysfs core synchronizes this
-        * removal so that no callbacks are running or can run again
-        */
        WARN_ON(chip->groups_cnt != 0);
        chip->groups[chip->groups_cnt++] = &tpm_dev_group;
 }
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
new file mode 100644 (file)
index 0000000..6640a14
--- /dev/null
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Microsoft Corporation
+ *
+ * Implements a firmware TPM as described here:
+ * https://www.microsoft.com/en-us/research/publication/ftpm-software-implementation-tpm-chip/
+ *
+ * A reference implementation is available here:
+ * https://github.com/microsoft/ms-tpm-20-ref/tree/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM
+ */
+
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/tee_drv.h>
+#include <linux/tpm.h>
+#include <linux/uuid.h>
+
+#include "tpm.h"
+#include "tpm_ftpm_tee.h"
+
+/*
+ * TA_FTPM_UUID: BC50D971-D4C9-42C4-82CB-343FB7F37896
+ *
+ * Randomly generated, and must correspond to the GUID on the TA side.
+ * Defined here in the reference implementation:
+ * https://github.com/microsoft/ms-tpm-20-ref/blob/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM/include/fTPM.h#L42
+ */
+static const uuid_t ftpm_ta_uuid =
+       UUID_INIT(0xBC50D971, 0xD4C9, 0x42C4,
+                 0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96);
+
+/**
+ * ftpm_tee_tpm_op_recv - retrieve fTPM response.
+ * @chip:      the tpm_chip description as specified in driver/char/tpm/tpm.h.
+ * @buf:       the buffer to store data.
+ * @count:     the number of bytes to read.
+ *
+ * Return:
+ *     In case of success the number of bytes received.
+ *     On failure, -errno.
+ */
+static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+       struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
+       size_t len;
+
+       len = pvt_data->resp_len;
+       if (count < len) {
+               dev_err(&chip->dev,
+                       "%s: Invalid size in recv: count=%zd, resp_len=%zd\n",
+                       __func__, count, len);
+               return -EIO;
+       }
+
+       memcpy(buf, pvt_data->resp_buf, len);
+       pvt_data->resp_len = 0;
+
+       return len;
+}
+
+/**
+ * ftpm_tee_tpm_op_send - send TPM commands through the TEE shared memory.
+ * @chip:      the tpm_chip description as specified in driver/char/tpm/tpm.h
+ * @buf:       the buffer to send.
+ * @len:       the number of bytes to send.
+ *
+ * Return:
+ *     In case of success, returns 0.
+ *     On failure, -errno
+ */
+static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+       struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
+       size_t resp_len;
+       int rc;
+       u8 *temp_buf;
+       struct tpm_header *resp_header;
+       struct tee_ioctl_invoke_arg transceive_args;
+       struct tee_param command_params[4];
+       struct tee_shm *shm = pvt_data->shm;
+
+       if (len > MAX_COMMAND_SIZE) {
+               dev_err(&chip->dev,
+                       "%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n",
+                       __func__, len);
+               return -EIO;
+       }
+
+       memset(&transceive_args, 0, sizeof(transceive_args));
+       memset(command_params, 0, sizeof(command_params));
+       pvt_data->resp_len = 0;
+
+       /* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */
+       transceive_args = (struct tee_ioctl_invoke_arg) {
+               .func = FTPM_OPTEE_TA_SUBMIT_COMMAND,
+               .session = pvt_data->session,
+               .num_params = 4,
+       };
+
+       /* Fill FTPM_OPTEE_TA_SUBMIT_COMMAND parameters */
+       command_params[0] = (struct tee_param) {
+               .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT,
+               .u.memref = {
+                       .shm = shm,
+                       .size = len,
+                       .shm_offs = 0,
+               },
+       };
+
+       temp_buf = tee_shm_get_va(shm, 0);
+       if (IS_ERR(temp_buf)) {
+               dev_err(&chip->dev, "%s: tee_shm_get_va failed for transmit\n",
+                       __func__);
+               return PTR_ERR(temp_buf);
+       }
+       memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE));
+       memcpy(temp_buf, buf, len);
+
+       command_params[1] = (struct tee_param) {
+               .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
+               .u.memref = {
+                       .shm = shm,
+                       .size = MAX_RESPONSE_SIZE,
+                       .shm_offs = MAX_COMMAND_SIZE,
+               },
+       };
+
+       rc = tee_client_invoke_func(pvt_data->ctx, &transceive_args,
+                                   command_params);
+       if ((rc < 0) || (transceive_args.ret != 0)) {
+               dev_err(&chip->dev, "%s: SUBMIT_COMMAND invoke error: 0x%x\n",
+                       __func__, transceive_args.ret);
+               return (rc < 0) ? rc : transceive_args.ret;
+       }
+
+       temp_buf = tee_shm_get_va(shm, command_params[1].u.memref.shm_offs);
+       if (IS_ERR(temp_buf)) {
+               dev_err(&chip->dev, "%s: tee_shm_get_va failed for receive\n",
+                       __func__);
+               return PTR_ERR(temp_buf);
+       }
+
+       resp_header = (struct tpm_header *)temp_buf;
+       resp_len = be32_to_cpu(resp_header->length);
+
+       /* sanity check resp_len */
+       if (resp_len < TPM_HEADER_SIZE) {
+               dev_err(&chip->dev, "%s: tpm response header too small\n",
+                       __func__);
+               return -EIO;
+       }
+       if (resp_len > MAX_RESPONSE_SIZE) {
+               dev_err(&chip->dev,
+                       "%s: resp_len=%zd exceeds MAX_RESPONSE_SIZE\n",
+                       __func__, resp_len);
+               return -EIO;
+       }
+
+       /* sanity checks look good, cache the response */
+       memcpy(pvt_data->resp_buf, temp_buf, resp_len);
+       pvt_data->resp_len = resp_len;
+
+       return 0;
+}
+
+static void ftpm_tee_tpm_op_cancel(struct tpm_chip *chip)
+{
+       /* not supported */
+}
+
+static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip)
+{
+       return 0;
+}
+
+static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+       return 0;
+}
+
+static const struct tpm_class_ops ftpm_tee_tpm_ops = {
+       .flags = TPM_OPS_AUTO_STARTUP,
+       .recv = ftpm_tee_tpm_op_recv,
+       .send = ftpm_tee_tpm_op_send,
+       .cancel = ftpm_tee_tpm_op_cancel,
+       .status = ftpm_tee_tpm_op_status,
+       .req_complete_mask = 0,
+       .req_complete_val = 0,
+       .req_canceled = ftpm_tee_tpm_req_canceled,
+};
+
+/*
+ * Check whether this driver supports the fTPM TA in the TEE instance
+ * represented by the params (ver/data) to this function.
+ */
+static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+       /*
+        * Currently this driver only supports a GP compliant, OP-TEE based fTPM TA
+        */
+       if ((ver->impl_id == TEE_IMPL_ID_OPTEE) &&
+               (ver->gen_caps & TEE_GEN_CAP_GP))
+               return 1;
+       else
+               return 0;
+}
+
+/**
+ * ftpm_tee_probe - initialize the fTPM
+ * @pdev: the platform_device description.
+ *
+ * Return:
+ *     On success, 0. On failure, -errno.
+ */
+static int ftpm_tee_probe(struct platform_device *pdev)
+{
+       int rc;
+       struct tpm_chip *chip;
+       struct device *dev = &pdev->dev;
+       struct ftpm_tee_private *pvt_data = NULL;
+       struct tee_ioctl_open_session_arg sess_arg;
+
+       pvt_data = devm_kzalloc(dev, sizeof(struct ftpm_tee_private),
+                               GFP_KERNEL);
+       if (!pvt_data)
+               return -ENOMEM;
+
+       dev_set_drvdata(dev, pvt_data);
+
+       /* Open context with TEE driver */
+       pvt_data->ctx = tee_client_open_context(NULL, ftpm_tee_match, NULL,
+                                               NULL);
+       if (IS_ERR(pvt_data->ctx)) {
+               if (PTR_ERR(pvt_data->ctx) == -ENOENT)
+                       return -EPROBE_DEFER;
+               dev_err(dev, "%s: tee_client_open_context failed\n", __func__);
+               return PTR_ERR(pvt_data->ctx);
+       }
+
+       /* Open a session with fTPM TA */
+       memset(&sess_arg, 0, sizeof(sess_arg));
+       memcpy(sess_arg.uuid, ftpm_ta_uuid.b, TEE_IOCTL_UUID_LEN);
+       sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+       sess_arg.num_params = 0;
+
+       rc = tee_client_open_session(pvt_data->ctx, &sess_arg, NULL);
+       if ((rc < 0) || (sess_arg.ret != 0)) {
+               dev_err(dev, "%s: tee_client_open_session failed, err=%x\n",
+                       __func__, sess_arg.ret);
+               rc = -EINVAL;
+               goto out_tee_session;
+       }
+       pvt_data->session = sess_arg.session;
+
+       /* Allocate dynamic shared memory with fTPM TA */
+       pvt_data->shm = tee_shm_alloc(pvt_data->ctx,
+                                     MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE,
+                                     TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+       if (IS_ERR(pvt_data->shm)) {
+               dev_err(dev, "%s: tee_shm_alloc failed\n", __func__);
+               rc = -ENOMEM;
+               goto out_shm_alloc;
+       }
+
+       /* Allocate new struct tpm_chip instance */
+       chip = tpm_chip_alloc(dev, &ftpm_tee_tpm_ops);
+       if (IS_ERR(chip)) {
+               dev_err(dev, "%s: tpm_chip_alloc failed\n", __func__);
+               rc = PTR_ERR(chip);
+               goto out_chip_alloc;
+       }
+
+       pvt_data->chip = chip;
+       pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+       /* Create a character device for the fTPM */
+       rc = tpm_chip_register(pvt_data->chip);
+       if (rc) {
+               dev_err(dev, "%s: tpm_chip_register failed with rc=%d\n",
+                       __func__, rc);
+               goto out_chip;
+       }
+
+       return 0;
+
+out_chip:
+       put_device(&pvt_data->chip->dev);
+out_chip_alloc:
+       tee_shm_free(pvt_data->shm);
+out_shm_alloc:
+       tee_client_close_session(pvt_data->ctx, pvt_data->session);
+out_tee_session:
+       tee_client_close_context(pvt_data->ctx);
+
+       return rc;
+}
+
+/**
+ * ftpm_tee_remove - remove the TPM device
+ * @pdev: the platform_device description.
+ *
+ * Return:
+ *     0 always.
+ */
+static int ftpm_tee_remove(struct platform_device *pdev)
+{
+       struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev);
+
+       /* Release the chip */
+       tpm_chip_unregister(pvt_data->chip);
+
+       /* frees chip */
+       put_device(&pvt_data->chip->dev);
+
+       /* Free the shared memory pool */
+       tee_shm_free(pvt_data->shm);
+
+       /* close the existing session with the fTPM TA */
+       tee_client_close_session(pvt_data->ctx, pvt_data->session);
+
+       /* close the context with TEE driver */
+       tee_client_close_context(pvt_data->ctx);
+
+       /* memory allocated with devm_kzalloc() is freed automatically */
+
+       return 0;
+}
+
+static const struct of_device_id of_ftpm_tee_ids[] = {
+       { .compatible = "microsoft,ftpm" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids);
+
+static struct platform_driver ftpm_tee_driver = {
+       .driver = {
+               .name = "ftpm-tee",
+               .of_match_table = of_match_ptr(of_ftpm_tee_ids),
+       },
+       .probe = ftpm_tee_probe,
+       .remove = ftpm_tee_remove,
+};
+
+module_platform_driver(ftpm_tee_driver);
+
+MODULE_AUTHOR("Thirupathaiah Annapureddy <thiruan@microsoft.com>");
+MODULE_DESCRIPTION("TPM Driver for fTPM TA in TEE");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/tpm/tpm_ftpm_tee.h b/drivers/char/tpm/tpm_ftpm_tee.h
new file mode 100644 (file)
index 0000000..f98daa7
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Microsoft Corporation
+ */
+
+#ifndef __TPM_FTPM_TEE_H__
+#define __TPM_FTPM_TEE_H__
+
+#include <linux/tee_drv.h>
+#include <linux/tpm.h>
+#include <linux/uuid.h>
+
+/* The TA function (TAF) IDs implemented in this TA */
+#define FTPM_OPTEE_TA_SUBMIT_COMMAND  (0)
+#define FTPM_OPTEE_TA_EMULATE_PPI     (1)
+
+/* max. buffer size supported by fTPM  */
+#define MAX_COMMAND_SIZE       4096
+#define MAX_RESPONSE_SIZE      4096
+
+/**
+ * struct ftpm_tee_private - fTPM's private data
+ * @chip:     struct tpm_chip instance registered with tpm framework.
+ * @session:  fTPM TA session identifier.
+ * @resp_len: cached response buffer length.
+ * @resp_buf: cached response buffer.
+ * @ctx:      TEE context handler.
+ * @shm:      Memory pool shared with fTPM TA in TEE.
+ */
+struct ftpm_tee_private {
+       struct tpm_chip *chip;
+       u32 session;
+       size_t resp_len;
+       u8 resp_buf[MAX_RESPONSE_SIZE];
+       struct tee_context *ctx;
+       struct tee_shm *shm;
+};
+
+#endif /* __TPM_FTPM_TEE_H__ */
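For orientation, the sketch below shows how the resources set up at probe time (TEE context, session and shared memory pool) are typically combined to hand a command buffer to the fTPM TA. It is a minimal, hypothetical sketch: the helper name, the single in/out memref parameter and the error handling are simplifying assumptions, not the driver's actual send path.

static int ftpm_tee_submit_sketch(struct ftpm_tee_private *pvt_data,
				  const u8 *cmd, size_t cmd_len)
{
	struct tee_ioctl_invoke_arg inv = { };
	struct tee_param param[4] = { };
	int rc;
	u8 *va;

	if (cmd_len > MAX_COMMAND_SIZE)
		return -EINVAL;

	/* Copy the command into the shared memory pool allocated at probe. */
	va = tee_shm_get_va(pvt_data->shm, 0);
	if (IS_ERR(va))
		return PTR_ERR(va);
	memcpy(va, cmd, cmd_len);

	inv.func = FTPM_OPTEE_TA_SUBMIT_COMMAND;
	inv.session = pvt_data->session;
	inv.num_params = 4;

	/* One in/out memref covering the whole pool; the rest stay TYPE_NONE. */
	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
	param[0].u.memref.shm = pvt_data->shm;
	param[0].u.memref.size = MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE;

	rc = tee_client_invoke_func(pvt_data->ctx, &inv, param);
	if (rc < 0 || inv.ret != 0)
		return -EIO;

	return 0;
}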
index c3181ea9f27132e4013c6a1c86612d296a803c3d..270f43acbb77d485939c13c55026b9a942110bb1 100644 (file)
@@ -980,6 +980,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
                        goto out_err;
                }
 
+               tpm_chip_start(chip);
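+               /* IRQ probing issues TPM commands, hence the chip is started first */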
+               chip->flags |= TPM_CHIP_FLAG_IRQ;
                if (irq) {
                        tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
                                                 irq);
@@ -989,6 +991,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
                } else {
                        tpm_tis_probe_irq(chip, intmask);
                }
+               tpm_chip_stop(chip);
        }
 
        rc = tpm_chip_register(chip);
index 48cb3d4bb7d1344afb921e1b53d7b1fcb32ad162..d8530475493cb552e308021b449aaab3d12c5382 100644 (file)
@@ -13,6 +13,16 @@ config ARM_CPUIDLE
           initialized by calling the CPU operations init idle hook
           provided by architecture code.
 
+config ARM_PSCI_CPUIDLE
+       bool "PSCI CPU idle Driver"
+       depends on ARM_PSCI_FW
+       select DT_IDLE_STATES
+       select CPU_IDLE_MULTIPLE_DRIVERS
+       help
+         Select this to enable the PSCI firmware based CPUidle driver for ARM.
+         It provides an idle driver that is capable of detecting and
+         managing idle states through the PSCI firmware interface.
+
 config ARM_BIG_LITTLE_CPUIDLE
        bool "Support for ARM big.LITTLE processors"
        depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS
index 9d7176cee3d376195a6bbe7ebbe67956e4a5ca52..40d016339b29e2e6f24da7d66af105bedb891381 100644 (file)
@@ -20,6 +20,7 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE)         += cpuidle-ux500.o
 obj-$(CONFIG_ARM_AT91_CPUIDLE)          += cpuidle-at91.o
 obj-$(CONFIG_ARM_EXYNOS_CPUIDLE)        += cpuidle-exynos.o
 obj-$(CONFIG_ARM_CPUIDLE)              += cpuidle-arm.o
+obj-$(CONFIG_ARM_PSCI_CPUIDLE)         += cpuidle-psci.o
 
 ###############################################################################
 # MIPS drivers
index 5bcd82c35dcf72fa92c49409aeb975852ff65e9d..9e5156d3962769e54f6742ea5f72a30f2bd565b7 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/slab.h>
-#include <linux/topology.h>
 
 #include <asm/cpuidle.h>
 
@@ -106,11 +105,17 @@ static int __init arm_idle_init_cpu(int cpu)
        ret = arm_cpuidle_init(cpu);
 
        /*
-        * Allow the initialization to continue for other CPUs, if the reported
-        * failure is a HW misconfiguration/breakage (-ENXIO).
+        * Allow the initialization to continue for other CPUs, if the
+        * reported failure is a HW misconfiguration/breakage (-ENXIO).
+        *
+        * Some platforms do not support idle operations
+        * (arm_cpuidle_init() returns -EOPNOTSUPP); this is not an
+        * error but a valid configuration, so do not flag it as
+        * such.
         */
        if (ret) {
-               pr_err("CPU %d failed to init idle CPU ops\n", cpu);
+               if (ret != -EOPNOTSUPP)
+                       pr_err("CPU %d failed to init idle CPU ops\n", cpu);
                ret = ret == -ENXIO ? 0 : ret;
                goto out_kfree_drv;
        }
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
new file mode 100644 (file)
index 0000000..f3c1a23
--- /dev/null
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PSCI CPU idle driver.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ */
+
+#define pr_fmt(fmt) "CPUidle PSCI: " fmt
+
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+
+#include <asm/cpuidle.h>
+
+#include "dt_idle_states.h"
+
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+
+static int psci_enter_idle_state(struct cpuidle_device *dev,
+                               struct cpuidle_driver *drv, int idx)
+{
+       u32 *state = __this_cpu_read(psci_power_state);
+
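+       /*
+        * drv->states[0] is the built-in WFI state, so the PSCI suspend
+        * parameter for DT-provided state 'idx' lives at state[idx - 1].
+        */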
+       return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
+                                          idx, state[idx - 1]);
+}
+
+static struct cpuidle_driver psci_idle_driver __initdata = {
+       .name = "psci_idle",
+       .owner = THIS_MODULE,
+       /*
+        * PSCI idle states rely on architectural WFI being
+        * represented as state index 0.
+        */
+       .states[0] = {
+               .enter                  = psci_enter_idle_state,
+               .exit_latency           = 1,
+               .target_residency       = 1,
+               .power_usage            = UINT_MAX,
+               .name                   = "WFI",
+               .desc                   = "ARM WFI",
+       }
+};
+
+static const struct of_device_id psci_idle_state_match[] __initconst = {
+       { .compatible = "arm,idle-state",
+         .data = psci_enter_idle_state },
+       { },
+};
+
+static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
+{
+       int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
+
+       if (err) {
+               pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
+               return err;
+       }
+
+       if (!psci_power_state_is_valid(*state)) {
+               pr_warn("Invalid PSCI power state %#x\n", *state);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{
+       int i, ret = 0, count = 0;
+       u32 *psci_states;
+       struct device_node *state_node;
+
+       /* Count idle states */
+       while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+                                             count))) {
+               count++;
+               of_node_put(state_node);
+       }
+
+       if (!count)
+               return -ENODEV;
+
+       psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+       if (!psci_states)
+               return -ENOMEM;
+
+       for (i = 0; i < count; i++) {
+               state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+               ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
+               of_node_put(state_node);
+
+               if (ret)
+                       goto free_mem;
+
+               pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
+       }
+
+       /* Idle states parsed correctly, initialize per-cpu pointer */
+       per_cpu(psci_power_state, cpu) = psci_states;
+       return 0;
+
+free_mem:
+       kfree(psci_states);
+       return ret;
+}
+
+static __init int psci_cpu_init_idle(unsigned int cpu)
+{
+       struct device_node *cpu_node;
+       int ret;
+
+       /*
+        * If the PSCI cpu_suspend function hook has not been initialized,
+        * idle states must not be enabled, so bail out.
+        */
+       if (!psci_ops.cpu_suspend)
+               return -EOPNOTSUPP;
+
+       cpu_node = of_cpu_device_node_get(cpu);
+       if (!cpu_node)
+               return -ENODEV;
+
+       ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+
+       of_node_put(cpu_node);
+
+       return ret;
+}
+
+static int __init psci_idle_init_cpu(int cpu)
+{
+       struct cpuidle_driver *drv;
+       struct device_node *cpu_node;
+       const char *enable_method;
+       int ret = 0;
+
+       cpu_node = of_cpu_device_node_get(cpu);
+       if (!cpu_node)
+               return -ENODEV;
+
+       /*
+        * Check whether the enable-method for the cpu is PSCI, fail
+        * if it is not.
+        */
+       enable_method = of_get_property(cpu_node, "enable-method", NULL);
+       if (!enable_method || (strcmp(enable_method, "psci")))
+               ret = -ENODEV;
+
+       of_node_put(cpu_node);
+       if (ret)
+               return ret;
+
+       drv = kmemdup(&psci_idle_driver, sizeof(*drv), GFP_KERNEL);
+       if (!drv)
+               return -ENOMEM;
+
+       drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+       /*
+        * Initialize idle states data, starting at index 1, since
+        * by default idle state 0 is the quiescent state reached
+        * by the cpu by executing the wfi instruction.
+        *
+        * If no DT idle states are detected (ret == 0), let the driver
+        * initialization fail accordingly, since there is no reason to
+        * initialize the idle driver if only wfi is supported: the
+        * default architectural back-end already executes wfi
+        * on idle entry.
+        */
+       ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
+       if (ret <= 0) {
+               ret = ret ? : -ENODEV;
+               goto out_kfree_drv;
+       }
+
+       /*
+        * Initialize PSCI idle states.
+        */
+       ret = psci_cpu_init_idle(cpu);
+       if (ret) {
+               pr_err("CPU %d failed to initialize PSCI idle states\n", cpu);
+               goto out_kfree_drv;
+       }
+
+       ret = cpuidle_register(drv, NULL);
+       if (ret)
+               goto out_kfree_drv;
+
+       return 0;
+
+out_kfree_drv:
+       kfree(drv);
+       return ret;
+}
+
+/*
+ * psci_idle_init - Initialize the PSCI cpuidle driver
+ *
+ * Initializes the PSCI cpuidle driver for all CPUs. If any CPU fails
+ * to register its cpuidle driver, roll back and unregister the
+ * drivers of all CPUs registered so far.
+ */
+static int __init psci_idle_init(void)
+{
+       int cpu, ret;
+       struct cpuidle_driver *drv;
+       struct cpuidle_device *dev;
+
+       for_each_possible_cpu(cpu) {
+               ret = psci_idle_init_cpu(cpu);
+               if (ret)
+                       goto out_fail;
+       }
+
+       return 0;
+
+out_fail:
+       while (--cpu >= 0) {
+               dev = per_cpu(cpuidle_devices, cpu);
+               drv = cpuidle_get_cpu_driver(dev);
+               cpuidle_unregister(drv);
+               kfree(drv);
+       }
+
+       return ret;
+}
+device_initcall(psci_idle_init);
index f79eede71c62d8e1d50cb7640f7ea64023b1b590..edefa669153f49c4df1591431c24d4180d3f4b7c 100644 (file)
@@ -540,6 +540,10 @@ int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
        unsigned long flags;
        unsigned int i;
 
+       /* If there's no device there's nothing to do */
+       if (!ccp)
+               return 0;
+
        spin_lock_irqsave(&ccp->cmd_lock, flags);
 
        ccp->suspending = 1;
@@ -564,6 +568,10 @@ int ccp_dev_resume(struct sp_device *sp)
        unsigned long flags;
        unsigned int i;
 
+       /* If there's no device there's nothing to do */
+       if (!ccp)
+               return 0;
+
        spin_lock_irqsave(&ccp->cmd_lock, flags);
 
        ccp->suspending = 0;
index 8101ff2f05c1c326f67c4214a2bd93f73ce9c158..67100e4e1083b49f07285d7ca2d1ea48f6a8b19f 100644 (file)
 #define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14
 #define BCM2835_DMA_CHAN_NAME_SIZE 8
 
+/**
+ * struct bcm2835_dmadev - BCM2835 DMA controller
+ * @ddev: DMA device
+ * @base: base address of register map
+ * @dma_parms: DMA parameters (to convey 1 GByte max segment size to clients)
+ * @zero_page: bus address of zero page (to detect transactions copying from
+ *     zero page and avoid accessing memory if so)
+ */
 struct bcm2835_dmadev {
        struct dma_device ddev;
        void __iomem *base;
        struct device_dma_parameters dma_parms;
+       dma_addr_t zero_page;
 };
 
 struct bcm2835_dma_cb {
@@ -687,11 +696,12 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags)
 {
+       struct bcm2835_dmadev *od = to_bcm2835_dma_dev(chan->device);
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        struct bcm2835_desc *d;
        dma_addr_t src, dst;
        u32 info = BCM2835_DMA_WAIT_RESP;
-       u32 extra = BCM2835_DMA_INT_EN;
+       u32 extra = 0;
        size_t max_len = bcm2835_dma_max_frame_length(c);
        size_t frames;
 
@@ -707,6 +717,11 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
                return NULL;
        }
 
+       if (flags & DMA_PREP_INTERRUPT)
+               extra |= BCM2835_DMA_INT_EN;
+       else
+               period_len = buf_len;
+
        /*
         * warn if buf_len is not a multiple of period_len - this may lead
         * to unexpected latencies for interrupts and thus audible clicks
@@ -732,6 +747,10 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
                dst = c->cfg.dst_addr;
                src = buf_addr;
                info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
+
+               /* non-lite channels can write zeroes w/o accessing memory */
+               if (buf_addr == od->zero_page && !c->is_lite_channel)
+                       info |= BCM2835_DMA_S_IGNORE;
        }
 
        /* calculate number of frames */
@@ -778,7 +797,10 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 
        /* stop DMA activity */
        if (c->desc) {
-               vchan_terminate_vdesc(&c->desc->vd);
+               if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT)
+                       vchan_terminate_vdesc(&c->desc->vd);
+               else
+                       vchan_vdesc_fini(&c->desc->vd);
                c->desc = NULL;
                bcm2835_dma_abort(c);
        }
@@ -831,6 +853,9 @@ static void bcm2835_dma_free(struct bcm2835_dmadev *od)
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
        }
+
+       dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
+                            DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 }
 
 static const struct of_device_id bcm2835_dma_of_match[] = {
@@ -907,11 +932,20 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
        od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
                              BIT(DMA_MEM_TO_MEM);
        od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       od->ddev.descriptor_reuse = true;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
 
        platform_set_drvdata(pdev, od);
 
+       od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,
+                                          PAGE_SIZE, DMA_TO_DEVICE,
+                                          DMA_ATTR_SKIP_CPU_SYNC);
+       if (dma_mapping_error(od->ddev.dev, od->zero_page)) {
+               dev_err(&pdev->dev, "Failed to map zero page\n");
+               return -ENOMEM;
+       }
+
        /* Request DMA channel mask from device tree */
        if (of_property_read_u32(pdev->dev.of_node,
                        "brcm,dma-channel-mask",
index 9c41a4e425759fcca6235e80c4ff42bf9fcb6c4b..1072c450c37ab89d7c78ea0f521927fcb13410ae 100644 (file)
@@ -192,6 +192,7 @@ struct rcar_dmac_chan {
  * @iomem: remapped I/O memory base
  * @n_channels: number of available channels
  * @channels: array of DMAC channels
+ * @channels_mask: bitfield of which DMA channels are managed by this driver
  * @modules: bitmask of client modules in use
  */
 struct rcar_dmac {
@@ -202,6 +203,7 @@ struct rcar_dmac {
 
        unsigned int n_channels;
        struct rcar_dmac_chan *channels;
+       unsigned int channels_mask;
 
        DECLARE_BITMAP(modules, 256);
 };
@@ -438,7 +440,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
        u16 dmaor;
 
        /* Clear all channels and enable the DMAC globally. */
-       rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
+       rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
        rcar_dmac_write(dmac, RCAR_DMAOR,
                        RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
 
@@ -814,6 +816,9 @@ static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
        for (i = 0; i < dmac->n_channels; ++i) {
                struct rcar_dmac_chan *chan = &dmac->channels[i];
 
+               if (!(dmac->channels_mask & BIT(i)))
+                       continue;
+
                /* Stop and reinitialize the channel. */
                spin_lock_irq(&chan->lock);
                rcar_dmac_chan_halt(chan);
@@ -1776,6 +1781,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
        return 0;
 }
 
+#define RCAR_DMAC_MAX_CHANNELS 32
+
 static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
 {
        struct device_node *np = dev->of_node;
@@ -1787,12 +1794,16 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
                return ret;
        }
 
-       if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
+       /* The hardware and driver don't support more than 32 bits in CHCLR */
+       if (dmac->n_channels <= 0 ||
+           dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
                dev_err(dev, "invalid number of channels %u\n",
                        dmac->n_channels);
                return -EINVAL;
        }
 
+       dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
+
        return 0;
 }
 
@@ -1802,7 +1813,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
                DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
                DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
                DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
-       unsigned int channels_offset = 0;
        struct dma_device *engine;
        struct rcar_dmac *dmac;
        struct resource *mem;
@@ -1831,10 +1841,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
         * level we can't disable it selectively, so ignore channel 0 for now if
         * the device is part of an IOMMU group.
         */
-       if (device_iommu_mapped(&pdev->dev)) {
-               dmac->n_channels--;
-               channels_offset = 1;
-       }
+       if (device_iommu_mapped(&pdev->dev))
+               dmac->channels_mask &= ~BIT(0);
 
        dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
                                      sizeof(*dmac->channels), GFP_KERNEL);
@@ -1892,8 +1900,10 @@ static int rcar_dmac_probe(struct platform_device *pdev)
        INIT_LIST_HEAD(&engine->channels);
 
        for (i = 0; i < dmac->n_channels; ++i) {
-               ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
-                                          i + channels_offset);
+               if (!(dmac->channels_mask & BIT(i)))
+                       continue;
+
+               ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
                if (ret < 0)
                        goto error;
        }
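A small worked illustration of the new channel-mask handling, with the channel count assumed purely for illustration:

/*
 * Assume n_channels = 16:
 *   channels_mask = GENMASK(15, 0)                   = 0xffff
 *   device behind an IOMMU: channels_mask &= ~BIT(0) -> 0xfffe
 *
 * Channel 0 is then skipped by the probe and stop loops above, and
 * RCAR_DMACHCLR is written with 0xfffe rather than GENMASK(n_channels - 1, 0),
 * so the channel reserved for the IOMMU workaround is left untouched.
 */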
index baac476c86224f2615fe7e2f8010d68b6e1dfdb9..525dc7338fe3b86842f4d2aa3ffd9857d34ec5f6 100644 (file)
@@ -908,6 +908,7 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        struct dma_slave_config *slave_cfg = &schan->slave_cfg;
        dma_addr_t src = 0, dst = 0;
+       dma_addr_t start_src = 0, start_dst = 0;
        struct sprd_dma_desc *sdesc;
        struct scatterlist *sg;
        u32 len = 0;
@@ -954,6 +955,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        dst = sg_dma_address(sg);
                }
 
+               if (!i) {
+                       start_src = src;
+                       start_dst = dst;
+               }
+
                /*
                 * The link-list mode needs at least 2 link-list
                 * configurations. If there is only one sg, it doesn't
@@ -970,8 +976,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                }
        }
 
-       ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
-                                dir, flags, slave_cfg);
+       ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
+                                start_dst, len, dir, flags, slave_cfg);
        if (ret) {
                kfree(sdesc);
                return NULL;
index ad2f0a4cd6a4d1e413ef619eb95ef88c0266cd59..f255056696eecc9aa1f7535bf15eebcba9782850 100644 (file)
@@ -391,8 +391,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
 
                ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
                                                 nelm * 2);
-               if (ret)
+               if (ret) {
+                       kfree(rsv_events);
                        return ret;
+               }
 
                for (i = 0; i < nelm; i++) {
                        ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
index ba27802efcd0a900609893931e2f2fc4555dac0c..d07c0d5de7a256388e1806afa4264e91f2f51e2d 100644 (file)
@@ -1540,8 +1540,10 @@ static int omap_dma_probe(struct platform_device *pdev)
 
                rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
                                      IRQF_SHARED, "omap-dma-engine", od);
-               if (rc)
+               if (rc) {
+                       omap_dma_free(od);
                        return rc;
+               }
        }
 
        if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
index 200c04ce5b0ed20361157a1c655c99bb14bee47c..2a2603bfb918547e86720dac5f0584dc57f33fdb 100644 (file)
@@ -510,4 +510,11 @@ config EDAC_ASPEED
          First, ECC must be configured in the bootloader. Then, this driver
          will expose error counters via the EDAC kernel framework.
 
+config EDAC_BLUEFIELD
+       tristate "Mellanox BlueField Memory ECC"
+       depends on ARM64 && ((MELLANOX_PLATFORM && ACPI) || COMPILE_TEST)
+       help
+         Support for error detection and correction on the
+         Mellanox BlueField SoCs.
+
 endif # EDAC
index 165ca65e1a3a55a9d0181fe5c23b71ba956704be..d265ff9311f02eb914e857d2593934da8e037f68 100644 (file)
@@ -85,3 +85,4 @@ obj-$(CONFIG_EDAC_XGENE)              += xgene_edac.o
 obj-$(CONFIG_EDAC_TI)                  += ti_edac.o
 obj-$(CONFIG_EDAC_QCOM)                        += qcom_edac.o
 obj-$(CONFIG_EDAC_ASPEED)              += aspeed_edac.o
+obj-$(CONFIG_EDAC_BLUEFIELD)           += bluefield_edac.o
index c2e693e34d434e53c801546d02700e5ee219644f..fbda4b876afd19937db8dcd7f0846006bd6b7743 100644 (file)
@@ -222,7 +222,6 @@ static unsigned long get_total_mem(void)
 static const struct of_device_id altr_sdram_ctrl_of_match[] = {
        { .compatible = "altr,sdram-edac", .data = &c5_data},
        { .compatible = "altr,sdram-edac-a10", .data = &a10_data},
-       { .compatible = "altr,sdram-edac-s10", .data = &a10_data},
        {},
 };
 MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match);
@@ -1170,6 +1169,24 @@ static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
        return 0;
 }
 
+/*********************** SDRAM EDAC Device Functions *********************/
+
+#ifdef CONFIG_EDAC_ALTERA_SDRAM
+
+static const struct edac_device_prv_data s10_sdramecc_data = {
+       .setup = altr_check_ecc_deps,
+       .ce_clear_mask = ALTR_S10_ECC_SERRPENA,
+       .ue_clear_mask = ALTR_S10_ECC_DERRPENA,
+       .ecc_enable_mask = ALTR_S10_ECC_EN,
+       .ecc_en_ofst = ALTR_S10_ECC_CTRL_SDRAM_OFST,
+       .ce_set_mask = ALTR_S10_ECC_TSERRA,
+       .ue_set_mask = ALTR_S10_ECC_TDERRA,
+       .set_err_ofst = ALTR_S10_ECC_INTTEST_OFST,
+       .ecc_irq_handler = altr_edac_a10_ecc_irq,
+       .inject_fops = &altr_edac_a10_device_inject_fops,
+};
+#endif /* CONFIG_EDAC_ALTERA_SDRAM */
+
 /*********************** OCRAM EDAC Device Functions *********************/
 
 #ifdef CONFIG_EDAC_ALTERA_OCRAM
@@ -1758,6 +1775,9 @@ static const struct of_device_id altr_edac_a10_device_of_match[] = {
 #endif
 #ifdef CONFIG_EDAC_ALTERA_SDMMC
        { .compatible = "altr,socfpga-sdmmc-ecc", .data = &a10_sdmmcecca_data },
+#endif
+#ifdef CONFIG_EDAC_ALTERA_SDRAM
+       { .compatible = "altr,sdram-edac-s10", .data = &s10_sdramecc_data },
 #endif
        {},
 };
@@ -1866,6 +1886,7 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
        struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        int irq = irq_desc_get_irq(desc);
+       unsigned long bits;
 
        dberr = (irq == edac->db_irq) ? 1 : 0;
        sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
@@ -1875,7 +1896,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
 
        regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
 
-       for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
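+       /*
+        * for_each_set_bit() walks an unsigned long, so copy the u32 status
+        * into a correctly sized local instead of casting its address.
+        */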
+       bits = irq_status;
+       for_each_set_bit(bit, &bits, 32) {
                irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
                if (irq)
                        generic_handle_irq(irq);
@@ -1889,6 +1911,10 @@ static int validate_parent_available(struct device_node *np)
        struct device_node *parent;
        int ret = 0;
 
+       /* SDRAM must be present for Linux (implied parent) */
+       if (of_device_is_compatible(np, "altr,sdram-edac-s10"))
+               return 0;
+
        /* Ensure parent device is enabled if parent node exists */
        parent = of_parse_phandle(np, "altr,ecc-parent", 0);
        if (parent && !of_device_is_available(parent))
@@ -1898,6 +1924,22 @@ static int validate_parent_available(struct device_node *np)
        return ret;
 }
 
+static int get_s10_sdram_edac_resource(struct device_node *np,
+                                      struct resource *res)
+{
+       struct device_node *parent;
+       int ret;
+
+       parent = of_parse_phandle(np, "altr,sdr-syscon", 0);
+       if (!parent)
+               return -ENODEV;
+
+       ret = of_address_to_resource(parent, 0, res);
+       of_node_put(parent);
+
+       return ret;
+}
+
 static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
                                    struct device_node *np)
 {
@@ -1925,7 +1967,11 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
        if (!devres_open_group(edac->dev, altr_edac_a10_device_add, GFP_KERNEL))
                return -ENOMEM;
 
-       rc = of_address_to_resource(np, 0, &res);
+       if (of_device_is_compatible(np, "altr,sdram-edac-s10"))
+               rc = get_s10_sdram_edac_resource(np, &res);
+       else
+               rc = of_address_to_resource(np, 0, &res);
+
        if (rc < 0) {
                edac_printk(KERN_ERR, EDAC_DEVICE,
                            "%s: no resource address\n", ecc_name);
@@ -2231,13 +2277,15 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
                    of_device_is_compatible(child, "altr,socfpga-dma-ecc") ||
                    of_device_is_compatible(child, "altr,socfpga-usb-ecc") ||
                    of_device_is_compatible(child, "altr,socfpga-qspi-ecc") ||
+#ifdef CONFIG_EDAC_ALTERA_SDRAM
+                   of_device_is_compatible(child, "altr,sdram-edac-s10") ||
+#endif
                    of_device_is_compatible(child, "altr,socfpga-sdmmc-ecc"))
 
                        altr_edac_a10_device_add(edac, child);
 
 #ifdef CONFIG_EDAC_ALTERA_SDRAM
-               else if ((of_device_is_compatible(child, "altr,sdram-edac-a10")) ||
-                        (of_device_is_compatible(child, "altr,sdram-edac-s10")))
+               else if (of_device_is_compatible(child, "altr,sdram-edac-a10"))
                        of_platform_populate(pdev->dev.of_node,
                                             altr_sdram_ctrl_of_match,
                                             NULL, &pdev->dev);
index 55654cc4bcdf35373517dd2c95d2af109d47de28..3727e72c8c2e70a98df2854608e1858bafb7d1fe 100644 (file)
@@ -289,6 +289,29 @@ struct altr_sdram_mc_data {
 #define ALTR_A10_ECC_INIT_WATCHDOG_10US      10000
 
 /************* Stratix10 Defines **************/
+#define ALTR_S10_ECC_CTRL_SDRAM_OFST      0x00
+#define ALTR_S10_ECC_EN                   BIT(0)
+
+#define ALTR_S10_ECC_ERRINTEN_OFST        0x10
+#define ALTR_S10_ECC_ERRINTENS_OFST       0x14
+#define ALTR_S10_ECC_ERRINTENR_OFST       0x18
+#define ALTR_S10_ECC_SERRINTEN            BIT(0)
+
+#define ALTR_S10_ECC_INTMODE_OFST         0x1C
+#define ALTR_S10_ECC_INTMODE              BIT(0)
+
+#define ALTR_S10_ECC_INTSTAT_OFST         0x20
+#define ALTR_S10_ECC_SERRPENA             BIT(0)
+#define ALTR_S10_ECC_DERRPENA             BIT(8)
+#define ALTR_S10_ECC_ERRPENA_MASK         (ALTR_S10_ECC_SERRPENA | \
+                                          ALTR_S10_ECC_DERRPENA)
+
+#define ALTR_S10_ECC_INTTEST_OFST         0x24
+#define ALTR_S10_ECC_TSERRA               BIT(0)
+#define ALTR_S10_ECC_TDERRA               BIT(8)
+#define ALTR_S10_ECC_TSERRB               BIT(16)
+#define ALTR_S10_ECC_TDERRB               BIT(24)
+
 #define ALTR_S10_DERR_ADDRA_OFST          0x2C
 
 /* Stratix10 ECC Manager Defines */
@@ -300,7 +323,7 @@ struct altr_sdram_mc_data {
 #define S10_SYSMGR_UE_ADDR_OFST           0x224
 
 #define S10_DDR0_IRQ_MASK                 BIT(16)
-#define S10_DBE_IRQ_MASK                  0x3FE
+#define S10_DBE_IRQ_MASK                  0x3FFFE
 
 /* Define ECC Block Offsets for peripherals */
 #define ECC_BLK_ADDRESS_OFST              0x40
index 873437be86d9c2d571953ef7624c06a0bb868f78..c1d4536ae466e7b3d683ecb0741baf7e7f653407 100644 (file)
@@ -788,51 +788,45 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
                 (dclr & BIT(15)) ?  "yes" : "no");
 }
 
-/*
- * The Address Mask should be a contiguous set of bits in the non-interleaved
- * case. So to check for CS interleaving, find the most- and least-significant
- * bits of the mask, generate a contiguous bitmask, and compare the two.
- */
-static bool f17_cs_interleaved(struct amd64_pvt *pvt, u8 ctrl, int cs)
+#define CS_EVEN_PRIMARY                BIT(0)
+#define CS_ODD_PRIMARY         BIT(1)
+#define CS_EVEN_SECONDARY      BIT(2)
+#define CS_ODD_SECONDARY       BIT(3)
+
+#define CS_EVEN                        (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
+#define CS_ODD                 (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
+
+static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
 {
-       u32 mask = pvt->csels[ctrl].csmasks[cs >> 1];
-       u32 msb = fls(mask) - 1, lsb = ffs(mask) - 1;
-       u32 test_mask = GENMASK(msb, lsb);
+       int cs_mode = 0;
 
-       edac_dbg(1, "mask=0x%08x test_mask=0x%08x\n", mask, test_mask);
+       if (csrow_enabled(2 * dimm, ctrl, pvt))
+               cs_mode |= CS_EVEN_PRIMARY;
 
-       return mask ^ test_mask;
+       if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
+               cs_mode |= CS_ODD_PRIMARY;
+
+       /* Asymmetric dual-rank DIMM support. */
+       if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
+               cs_mode |= CS_ODD_SECONDARY;
+
+       return cs_mode;
 }
 
 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
 {
-       int dimm, size0, size1, cs0, cs1;
+       int dimm, size0, size1, cs0, cs1, cs_mode;
 
        edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
 
-       for (dimm = 0; dimm < 4; dimm++) {
-               size0 = 0;
+       for (dimm = 0; dimm < 2; dimm++) {
                cs0 = dimm * 2;
-
-               if (csrow_enabled(cs0, ctrl, pvt))
-                       size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);
-
-               size1 = 0;
                cs1 = dimm * 2 + 1;
 
-               if (csrow_enabled(cs1, ctrl, pvt)) {
-                       /*
-                        * CS interleaving is only supported if both CSes have
-                        * the same amount of memory. Because they are
-                        * interleaved, it will look like both CSes have the
-                        * full amount of memory. Save the size for both as
-                        * half the amount we found on CS0, if interleaved.
-                        */
-                       if (f17_cs_interleaved(pvt, ctrl, cs1))
-                               size1 = size0 = (size0 >> 1);
-                       else
-                               size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
-               }
+               cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
+
+               size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
+               size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
 
                amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
                                cs0,    size0,
@@ -942,89 +936,119 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
        } else if (pvt->fam == 0x15 && pvt->model == 0x30) {
                pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
                pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
+       } else if (pvt->fam >= 0x17) {
+               int umc;
+
+               for_each_umc(umc) {
+                       pvt->csels[umc].b_cnt = 4;
+                       pvt->csels[umc].m_cnt = 2;
+               }
+
        } else {
                pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
                pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
        }
 }
 
+static void read_umc_base_mask(struct amd64_pvt *pvt)
+{
+       u32 umc_base_reg, umc_base_reg_sec;
+       u32 umc_mask_reg, umc_mask_reg_sec;
+       u32 base_reg, base_reg_sec;
+       u32 mask_reg, mask_reg_sec;
+       u32 *base, *base_sec;
+       u32 *mask, *mask_sec;
+       int cs, umc;
+
+       for_each_umc(umc) {
+               umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
+               umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
+
+               for_each_chip_select(cs, umc, pvt) {
+                       base = &pvt->csels[umc].csbases[cs];
+                       base_sec = &pvt->csels[umc].csbases_sec[cs];
+
+                       base_reg = umc_base_reg + (cs * 4);
+                       base_reg_sec = umc_base_reg_sec + (cs * 4);
+
+                       if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
+                               edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
+                                        umc, cs, *base, base_reg);
+
+                       if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
+                               edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
+                                        umc, cs, *base_sec, base_reg_sec);
+               }
+
+               umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
+               umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
+
+               for_each_chip_select_mask(cs, umc, pvt) {
+                       mask = &pvt->csels[umc].csmasks[cs];
+                       mask_sec = &pvt->csels[umc].csmasks_sec[cs];
+
+                       mask_reg = umc_mask_reg + (cs * 4);
+                       mask_reg_sec = umc_mask_reg_sec + (cs * 4);
+
+                       if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
+                               edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
+                                        umc, cs, *mask, mask_reg);
+
+                       if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
+                               edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
+                                        umc, cs, *mask_sec, mask_reg_sec);
+               }
+       }
+}
+
 /*
  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
  */
 static void read_dct_base_mask(struct amd64_pvt *pvt)
 {
-       int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;
+       int cs;
 
        prep_chip_selects(pvt);
 
-       if (pvt->umc) {
-               base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
-               base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
-               mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
-               mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
-       } else {
-               base_reg0 = DCSB0;
-               base_reg1 = DCSB1;
-               mask_reg0 = DCSM0;
-               mask_reg1 = DCSM1;
-       }
+       if (pvt->umc)
+               return read_umc_base_mask(pvt);
 
        for_each_chip_select(cs, 0, pvt) {
-               int reg0   = base_reg0 + (cs * 4);
-               int reg1   = base_reg1 + (cs * 4);
+               int reg0   = DCSB0 + (cs * 4);
+               int reg1   = DCSB1 + (cs * 4);
                u32 *base0 = &pvt->csels[0].csbases[cs];
                u32 *base1 = &pvt->csels[1].csbases[cs];
 
-               if (pvt->umc) {
-                       if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
-                               edac_dbg(0, "  DCSB0[%d]=0x%08x reg: 0x%x\n",
-                                        cs, *base0, reg0);
+               if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
+                       edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
+                                cs, *base0, reg0);
 
-                       if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
-                               edac_dbg(0, "  DCSB1[%d]=0x%08x reg: 0x%x\n",
-                                        cs, *base1, reg1);
-               } else {
-                       if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
-                               edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
-                                        cs, *base0, reg0);
-
-                       if (pvt->fam == 0xf)
-                               continue;
+               if (pvt->fam == 0xf)
+                       continue;
 
-                       if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
-                               edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
-                                        cs, *base1, (pvt->fam == 0x10) ? reg1
-                                                               : reg0);
-               }
+               if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
+                       edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
+                                cs, *base1, (pvt->fam == 0x10) ? reg1
+                                                       : reg0);
        }
 
        for_each_chip_select_mask(cs, 0, pvt) {
-               int reg0   = mask_reg0 + (cs * 4);
-               int reg1   = mask_reg1 + (cs * 4);
+               int reg0   = DCSM0 + (cs * 4);
+               int reg1   = DCSM1 + (cs * 4);
                u32 *mask0 = &pvt->csels[0].csmasks[cs];
                u32 *mask1 = &pvt->csels[1].csmasks[cs];
 
-               if (pvt->umc) {
-                       if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
-                               edac_dbg(0, "    DCSM0[%d]=0x%08x reg: 0x%x\n",
-                                        cs, *mask0, reg0);
-
-                       if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
-                               edac_dbg(0, "    DCSM1[%d]=0x%08x reg: 0x%x\n",
-                                        cs, *mask1, reg1);
-               } else {
-                       if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
-                               edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
-                                        cs, *mask0, reg0);
+               if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
+                       edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
+                                cs, *mask0, reg0);
 
-                       if (pvt->fam == 0xf)
-                               continue;
+               if (pvt->fam == 0xf)
+                       continue;
 
-                       if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
-                               edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
-                                        cs, *mask1, (pvt->fam == 0x10) ? reg1
-                                                               : reg0);
-               }
+               if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
+                       edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
+                                cs, *mask1, (pvt->fam == 0x10) ? reg1
+                                                       : reg0);
        }
 }
 
@@ -1556,18 +1580,58 @@ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
                return ddr3_cs_size(cs_mode, false);
 }
 
-static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
                                    unsigned int cs_mode, int csrow_nr)
 {
-       u32 base_addr = pvt->csels[umc].csbases[csrow_nr];
+       u32 addr_mask_orig, addr_mask_deinterleaved;
+       u32 msb, weight, num_zero_bits;
+       int dimm, size = 0;
 
-       /*  Each mask is used for every two base addresses. */
-       u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];
+       /* No Chip Selects are enabled. */
+       if (!cs_mode)
+               return size;
 
-       /*  Register [31:1] = Address [39:9]. Size is in kBs here. */
-       u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;
+       /* Requested size of an even CS but none are enabled. */
+       if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
+               return size;
 
-       edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);
+       /* Requested size of an odd CS but none are enabled. */
+       if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
+               return size;
+
+       /*
+        * There is one mask per DIMM, and two Chip Selects per DIMM.
+        *      CS0 and CS1 -> DIMM0
+        *      CS2 and CS3 -> DIMM1
+        */
+       dimm = csrow_nr >> 1;
+
+       /* Asymmetric dual-rank DIMM support. */
+       if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
+               addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
+       else
+               addr_mask_orig = pvt->csels[umc].csmasks[dimm];
+
+       /*
+        * The number of zero bits in the mask is equal to the number of bits
+        * in a full mask minus the number of bits in the current mask.
+        *
+        * The MSB is the number of bits in the full mask because BIT[0] is
+        * always 0.
+        */
+       msb = fls(addr_mask_orig) - 1;
+       weight = hweight_long(addr_mask_orig);
+       num_zero_bits = msb - weight;
+
+       /* Take the number of zero bits off from the top of the mask. */
+       addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
+
+       edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
+       edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
+       edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
+
+       /* Register [31:1] = Address [39:9]. Size is in kBs here. */
+       size = (addr_mask_deinterleaved >> 2) + 1;
 
        /* Return size in MBs. */
        return size >> 10;
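To make the deinterleaving arithmetic above concrete, here is a worked example with a purely hypothetical address mask value:

/*
 * Hypothetical example:
 *   addr_mask_orig          = 0xfffa  (one interleave bit cleared)
 *   msb                     = fls(0xfffa) - 1       = 15
 *   weight                  = hweight_long(0xfffa)  = 14
 *   num_zero_bits           = 15 - 14               = 1
 *   addr_mask_deinterleaved = GENMASK_ULL(14, 1)    = 0x7ffe
 *   size                    = (0x7ffe >> 2) + 1     = 0x2000 kB = 8 MB
 *
 * With no interleave bit cleared (0xfffe) the same math yields 16 MB:
 * clearing one mask bit halves the size attributed to each chip select.
 */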
@@ -2232,7 +2296,7 @@ static struct amd64_family_type family_types[] = {
                .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
                .ops = {
                        .early_channel_count    = f17_early_channel_count,
-                       .dbam_to_cs             = f17_base_addr_to_cs_size,
+                       .dbam_to_cs             = f17_addr_mask_to_cs_size,
                }
        },
        [F17_M10H_CPUS] = {
@@ -2241,7 +2305,7 @@ static struct amd64_family_type family_types[] = {
                .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
                .ops = {
                        .early_channel_count    = f17_early_channel_count,
-                       .dbam_to_cs             = f17_base_addr_to_cs_size,
+                       .dbam_to_cs             = f17_addr_mask_to_cs_size,
                }
        },
        [F17_M30H_CPUS] = {
@@ -2250,7 +2314,16 @@ static struct amd64_family_type family_types[] = {
                .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
                .ops = {
                        .early_channel_count    = f17_early_channel_count,
-                       .dbam_to_cs             = f17_base_addr_to_cs_size,
+                       .dbam_to_cs             = f17_addr_mask_to_cs_size,
+               }
+       },
+       [F17_M70H_CPUS] = {
+               .ctl_name = "F17h_M70h",
+               .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
+               .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
+               .ops = {
+                       .early_channel_count    = f17_early_channel_count,
+                       .dbam_to_cs             = f17_addr_mask_to_cs_size,
                }
        },
 };
@@ -2537,13 +2610,6 @@ static void decode_umc_error(int node_id, struct mce *m)
 
        err.channel = find_umc_channel(m);
 
-       if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
-               err.err_code = ERR_NORM_ADDR;
-               goto log_error;
-       }
-
-       error_address_to_page_and_offset(sys_addr, &err);
-
        if (!(m->status & MCI_STATUS_SYNDV)) {
                err.err_code = ERR_SYND;
                goto log_error;
@@ -2560,6 +2626,13 @@ static void decode_umc_error(int node_id, struct mce *m)
 
        err.csrow = m->synd & 0x7;
 
+       if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+               err.err_code = ERR_NORM_ADDR;
+               goto log_error;
+       }
+
+       error_address_to_page_and_offset(sys_addr, &err);
+
 log_error:
        __log_ecc_error(mci, &err, ecc_type);
 }
@@ -2809,10 +2882,12 @@ static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
        int csrow_nr = csrow_nr_orig;
        u32 cs_mode, nr_pages;
 
-       if (!pvt->umc)
+       if (!pvt->umc) {
                csrow_nr >>= 1;
-
-       cs_mode = DBAM_DIMM(csrow_nr, dbam);
+               cs_mode = DBAM_DIMM(csrow_nr, dbam);
+       } else {
+               cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
+       }
 
        nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
        nr_pages <<= 20 - PAGE_SHIFT;
@@ -2824,6 +2899,49 @@ static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
        return nr_pages;
 }
 
+static int init_csrows_df(struct mem_ctl_info *mci)
+{
+       struct amd64_pvt *pvt = mci->pvt_info;
+       enum edac_type edac_mode = EDAC_NONE;
+       enum dev_type dev_type = DEV_UNKNOWN;
+       struct dimm_info *dimm;
+       int empty = 1;
+       u8 umc, cs;
+
+       if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
+               edac_mode = EDAC_S16ECD16ED;
+               dev_type = DEV_X16;
+       } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
+               edac_mode = EDAC_S8ECD8ED;
+               dev_type = DEV_X8;
+       } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
+               edac_mode = EDAC_S4ECD4ED;
+               dev_type = DEV_X4;
+       } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
+               edac_mode = EDAC_SECDED;
+       }
+
+       for_each_umc(umc) {
+               for_each_chip_select(cs, umc, pvt) {
+                       if (!csrow_enabled(cs, umc, pvt))
+                               continue;
+
+                       empty = 0;
+                       dimm = mci->csrows[cs]->channels[umc]->dimm;
+
+                       edac_dbg(1, "MC node: %d, csrow: %d\n",
+                                       pvt->mc_node_id, cs);
+
+                       dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
+                       dimm->mtype = pvt->dram_type;
+                       dimm->edac_mode = edac_mode;
+                       dimm->dtype = dev_type;
+               }
+       }
+
+       return empty;
+}
+
 /*
  * Initialize the array of csrow attribute instances, based on the values
  * from pci config hardware registers.
@@ -2838,15 +2956,16 @@ static int init_csrows(struct mem_ctl_info *mci)
        int nr_pages = 0;
        u32 val;
 
-       if (!pvt->umc) {
-               amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
+       if (pvt->umc)
+               return init_csrows_df(mci);
 
-               pvt->nbcfg = val;
+       amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
 
-               edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
-                        pvt->mc_node_id, val,
-                        !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
-       }
+       pvt->nbcfg = val;
+
+       edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+                pvt->mc_node_id, val,
+                !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
 
        /*
         * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
@@ -2883,13 +3002,7 @@ static int init_csrows(struct mem_ctl_info *mci)
                edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
 
                /* Determine DIMM ECC mode: */
-               if (pvt->umc) {
-                       if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
-                               edac_mode = EDAC_S4ECD4ED;
-                       else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
-                               edac_mode = EDAC_SECDED;
-
-               } else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
+               if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
                        edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
                                        ? EDAC_S4ECD4ED
                                        : EDAC_SECDED;
@@ -3137,12 +3250,15 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
 static inline void
 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
 {
-       u8 i, ecc_en = 1, cpk_en = 1;
+       u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
 
        for_each_umc(i) {
                if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
                        ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
                        cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
+
+                       dev_x4  &= !!(pvt->umc[i].dimm_cfg & BIT(6));
+                       dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
                }
        }
 
@@ -3150,8 +3266,15 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
        if (ecc_en) {
                mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
 
-               if (cpk_en)
+               if (!cpk_en)
+                       return;
+
+               if (dev_x4)
                        mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+               else if (dev_x16)
+                       mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
+               else
+                       mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
        }
 }
 
@@ -3241,6 +3364,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
                        fam_type = &family_types[F17_M30H_CPUS];
                        pvt->ops = &family_types[F17_M30H_CPUS].ops;
                        break;
+               } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
+                       fam_type = &family_types[F17_M70H_CPUS];
+                       pvt->ops = &family_types[F17_M70H_CPUS].ops;
+                       break;
                }
                /* fall through */
        case 0x18:
index 8f66472f7adc221ee5e4e6787f35547c663b8930..8c3cda81e6192b3f95600bbdd6261a280d15fe50 100644 (file)
@@ -96,6 +96,7 @@
 /* Hardware limit on ChipSelect rows per MC and processors per system */
 #define NUM_CHIPSELECTS                        8
 #define DRAM_RANGES                    8
+#define NUM_CONTROLLERS                        8
 
 #define ON true
 #define OFF false
 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
 
 /*
  * Function 1 - Address Map
 #define DCSM0                          0x60
 #define DCSM1                          0x160
 
-#define csrow_enabled(i, dct, pvt)     ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
+#define csrow_enabled(i, dct, pvt)     ((pvt)->csels[(dct)].csbases[(i)]     & DCSB_CS_ENABLE)
+#define csrow_sec_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases_sec[(i)] & DCSB_CS_ENABLE)
 
 #define DRAM_CONTROL                   0x78
 
 
 /* UMC CH register offsets */
 #define UMCCH_BASE_ADDR                        0x0
+#define UMCCH_BASE_ADDR_SEC            0x10
 #define UMCCH_ADDR_MASK                        0x20
+#define UMCCH_ADDR_MASK_SEC            0x28
 #define UMCCH_ADDR_CFG                 0x30
 #define UMCCH_DIMM_CFG                 0x80
 #define UMCCH_UMC_CFG                  0x100
@@ -285,6 +291,7 @@ enum amd_families {
        F17_CPUS,
        F17_M10H_CPUS,
        F17_M30H_CPUS,
+       F17_M70H_CPUS,
        NUM_FAMILIES,
 };
 
@@ -311,9 +318,11 @@ struct dram_range {
 /* A DCT chip selects collection */
 struct chip_select {
        u32 csbases[NUM_CHIPSELECTS];
+       u32 csbases_sec[NUM_CHIPSELECTS];
        u8 b_cnt;
 
        u32 csmasks[NUM_CHIPSELECTS];
+       u32 csmasks_sec[NUM_CHIPSELECTS];
        u8 m_cnt;
 };
 
@@ -351,8 +360,8 @@ struct amd64_pvt {
        u32 dbam0;              /* DRAM Base Address Mapping reg for DCT0 */
        u32 dbam1;              /* DRAM Base Address Mapping reg for DCT1 */
 
-       /* one for each DCT */
-       struct chip_select csels[2];
+       /* one for each DCT/UMC */
+       struct chip_select csels[NUM_CONTROLLERS];
 
        /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
        struct dram_range ranges[DRAM_RANGES];
diff --git a/drivers/edac/bluefield_edac.c b/drivers/edac/bluefield_edac.c
new file mode 100644 (file)
index 0000000..e4736eb
--- /dev/null
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Bluefield-specific EDAC driver.
+ *
+ * Copyright (c) 2019 Mellanox Technologies.
+ */
+
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/edac.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "edac_module.h"
+
+#define DRIVER_NAME            "bluefield-edac"
+
+/*
+ * Mellanox BlueField EMI (External Memory Interface) register definitions.
+ */
+
+#define MLXBF_ECC_CNT 0x340
+#define MLXBF_ECC_CNT__SERR_CNT GENMASK(15, 0)
+#define MLXBF_ECC_CNT__DERR_CNT GENMASK(31, 16)
+
+#define MLXBF_ECC_ERR 0x348
+#define MLXBF_ECC_ERR__SECC BIT(0)
+#define MLXBF_ECC_ERR__DECC BIT(16)
+
+#define MLXBF_ECC_LATCH_SEL 0x354
+#define MLXBF_ECC_LATCH_SEL__START BIT(24)
+
+#define MLXBF_ERR_ADDR_0 0x358
+
+#define MLXBF_ERR_ADDR_1 0x37c
+
+#define MLXBF_SYNDROM 0x35c
+#define MLXBF_SYNDROM__DERR BIT(0)
+#define MLXBF_SYNDROM__SERR BIT(1)
+#define MLXBF_SYNDROM__SYN GENMASK(25, 16)
+
+#define MLXBF_ADD_INFO 0x364
+#define MLXBF_ADD_INFO__ERR_PRANK GENMASK(9, 8)
+
+#define MLXBF_EDAC_MAX_DIMM_PER_MC     2
+#define MLXBF_EDAC_ERROR_GRAIN         8
+
+/*
+ * Request MLNX_SIP_GET_DIMM_INFO
+ *
+ * Retrieve information about the DIMM in a given slot.
+ *
+ * Call register usage:
+ * a0: MLNX_SIP_GET_DIMM_INFO
+ * a1: (Memory controller index) << 16 | (Dimm index in memory controller)
+ * a2-7: not used.
+ *
+ * Return status:
+ * a0: MLXBF_DIMM_INFO defined below describing the DIMM.
+ * a1-3: not used.
+ */
+#define MLNX_SIP_GET_DIMM_INFO         0x82000008
+
+/* Format of the SMC response carrying the memory information */
+#define MLXBF_DIMM_INFO__SIZE_GB GENMASK_ULL(15, 0)
+#define MLXBF_DIMM_INFO__IS_RDIMM BIT(16)
+#define MLXBF_DIMM_INFO__IS_LRDIMM BIT(17)
+#define MLXBF_DIMM_INFO__IS_NVDIMM BIT(18)
+#define MLXBF_DIMM_INFO__RANKS GENMASK_ULL(23, 21)
+#define MLXBF_DIMM_INFO__PACKAGE_X GENMASK_ULL(31, 24)
+
+struct bluefield_edac_priv {
+       int dimm_ranks[MLXBF_EDAC_MAX_DIMM_PER_MC];
+       void __iomem *emi_base;
+       int dimm_per_mc;
+};
+
+static u64 smc_call1(u64 smc_op, u64 smc_arg)
+{
+       struct arm_smccc_res res;
+
+       arm_smccc_smc(smc_op, smc_arg, 0, 0, 0, 0, 0, 0, &res);
+
+       return res.a0;
+}
+
+/*
+ * Gather the ECC information from the External Memory Interface registers
+ * and report it to the edac handler.
+ */
+static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
+                                       int error_cnt,
+                                       int is_single_ecc)
+{
+       struct bluefield_edac_priv *priv = mci->pvt_info;
+       u32 dram_additional_info, err_prank, edea0, edea1;
+       u32 ecc_latch_select, dram_syndrom, serr, derr, syndrom;
+       enum hw_event_mc_err_type ecc_type;
+       u64 ecc_dimm_addr;
+       int ecc_dimm;
+
+       ecc_type = is_single_ecc ? HW_EVENT_ERR_CORRECTED :
+                                  HW_EVENT_ERR_UNCORRECTED;
+
+       /*
+        * Tell the External Memory Interface to populate the relevant
+        * registers with information about the last ECC error occurrence.
+        */
+       ecc_latch_select = MLXBF_ECC_LATCH_SEL__START;
+       writel(ecc_latch_select, priv->emi_base + MLXBF_ECC_LATCH_SEL);
+
+       /*
+        * Verify that the ECC information reported in the registers matches
+        * the type of error being reported. If not, report the error without
+        * the detailed information.
+        */
+       dram_syndrom = readl(priv->emi_base + MLXBF_SYNDROM);
+       serr = FIELD_GET(MLXBF_SYNDROM__SERR, dram_syndrom);
+       derr = FIELD_GET(MLXBF_SYNDROM__DERR, dram_syndrom);
+       syndrom = FIELD_GET(MLXBF_SYNDROM__SYN, dram_syndrom);
+
+       if ((is_single_ecc && !serr) || (!is_single_ecc && !derr)) {
+               edac_mc_handle_error(ecc_type, mci, error_cnt, 0, 0, 0,
+                                    0, 0, -1, mci->ctl_name, "");
+               return;
+       }
+
+       dram_additional_info = readl(priv->emi_base + MLXBF_ADD_INFO);
+       err_prank = FIELD_GET(MLXBF_ADD_INFO__ERR_PRANK, dram_additional_info);
+
+       ecc_dimm = (err_prank >= 2 && priv->dimm_ranks[0] <= 2) ? 1 : 0;
+
+       edea0 = readl(priv->emi_base + MLXBF_ERR_ADDR_0);
+       edea1 = readl(priv->emi_base + MLXBF_ERR_ADDR_1);
+
+       ecc_dimm_addr = ((u64)edea1 << 32) | edea0;
+
+       edac_mc_handle_error(ecc_type, mci, error_cnt,
+                            PFN_DOWN(ecc_dimm_addr),
+                            offset_in_page(ecc_dimm_addr),
+                            syndrom, ecc_dimm, 0, 0, mci->ctl_name, "");
+}
+
+static void bluefield_edac_check(struct mem_ctl_info *mci)
+{
+       struct bluefield_edac_priv *priv = mci->pvt_info;
+       u32 ecc_count, single_error_count, double_error_count, ecc_error = 0;
+
+       /*
+        * The memory controller might not be initialized by the firmware
+        * when no memory is present, which may lead to bogus register reads.
+        */
+       if (mci->edac_cap == EDAC_FLAG_NONE)
+               return;
+
+       ecc_count = readl(priv->emi_base + MLXBF_ECC_CNT);
+       single_error_count = FIELD_GET(MLXBF_ECC_CNT__SERR_CNT, ecc_count);
+       double_error_count = FIELD_GET(MLXBF_ECC_CNT__DERR_CNT, ecc_count);
+
+       if (single_error_count) {
+               ecc_error |= MLXBF_ECC_ERR__SECC;
+
+               bluefield_gather_report_ecc(mci, single_error_count, 1);
+       }
+
+       if (double_error_count) {
+               ecc_error |= MLXBF_ECC_ERR__DECC;
+
+               bluefield_gather_report_ecc(mci, double_error_count, 0);
+       }
+
+       /* Write to clear reported errors. */
+       if (ecc_count)
+               writel(ecc_error, priv->emi_base + MLXBF_ECC_ERR);
+}
+
+/* Initialize the DIMM information for the given memory controller. */
+static void bluefield_edac_init_dimms(struct mem_ctl_info *mci)
+{
+       struct bluefield_edac_priv *priv = mci->pvt_info;
+       int mem_ctrl_idx = mci->mc_idx;
+       struct dimm_info *dimm;
+       u64 smc_info, smc_arg;
+       int is_empty = 1, i;
+
+       for (i = 0; i < priv->dimm_per_mc; i++) {
+               dimm = mci->dimms[i];
+
+               smc_arg = mem_ctrl_idx << 16 | i;
+               smc_info = smc_call1(MLNX_SIP_GET_DIMM_INFO, smc_arg);
+
+               if (!FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, smc_info)) {
+                       dimm->mtype = MEM_EMPTY;
+                       continue;
+               }
+
+               is_empty = 0;
+
+               dimm->edac_mode = EDAC_SECDED;
+
+               if (FIELD_GET(MLXBF_DIMM_INFO__IS_NVDIMM, smc_info))
+                       dimm->mtype = MEM_NVDIMM;
+               else if (FIELD_GET(MLXBF_DIMM_INFO__IS_LRDIMM, smc_info))
+                       dimm->mtype = MEM_LRDDR4;
+               else if (FIELD_GET(MLXBF_DIMM_INFO__IS_RDIMM, smc_info))
+                       dimm->mtype = MEM_RDDR4;
+               else
+                       dimm->mtype = MEM_DDR4;
+
+               dimm->nr_pages =
+                       FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, smc_info) *
+                       (SZ_1G / PAGE_SIZE);
+               dimm->grain = MLXBF_EDAC_ERROR_GRAIN;
+
+               /* Mem controller for BlueField only supports x4, x8 and x16 */
+               switch (FIELD_GET(MLXBF_DIMM_INFO__PACKAGE_X, smc_info)) {
+               case 4:
+                       dimm->dtype = DEV_X4;
+                       break;
+               case 8:
+                       dimm->dtype = DEV_X8;
+                       break;
+               case 16:
+                       dimm->dtype = DEV_X16;
+                       break;
+               default:
+                       dimm->dtype = DEV_UNKNOWN;
+               }
+
+               priv->dimm_ranks[i] =
+                       FIELD_GET(MLXBF_DIMM_INFO__RANKS, smc_info);
+       }
+
+       if (is_empty)
+               mci->edac_cap = EDAC_FLAG_NONE;
+       else
+               mci->edac_cap = EDAC_FLAG_SECDED;
+}
+
+static int bluefield_edac_mc_probe(struct platform_device *pdev)
+{
+       struct bluefield_edac_priv *priv;
+       struct device *dev = &pdev->dev;
+       struct edac_mc_layer layers[1];
+       struct mem_ctl_info *mci;
+       struct resource *emi_res;
+       unsigned int mc_idx, dimm_count;
+       int rc, ret;
+
+       /* Read the MSS (Memory SubSystem) index from the ACPI table. */
+       if (device_property_read_u32(dev, "mss_number", &mc_idx)) {
+               dev_warn(dev, "bf_edac: MSS number unknown\n");
+               return -EINVAL;
+       }
+
+       /* Read the number of DIMMs per MC from the ACPI table. */
+       if (device_property_read_u32(dev, "dimm_per_mc", &dimm_count)) {
+               dev_warn(dev, "bf_edac: DIMMs per MC unknown\n");
+               return -EINVAL;
+       }
+
+       if (dimm_count > MLXBF_EDAC_MAX_DIMM_PER_MC) {
+               dev_warn(dev, "bf_edac: DIMMs per MC not valid\n");
+               return -EINVAL;
+       }
+
+       emi_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!emi_res)
+               return -EINVAL;
+
+       layers[0].type = EDAC_MC_LAYER_SLOT;
+       layers[0].size = dimm_count;
+       layers[0].is_virt_csrow = true;
+
+       mci = edac_mc_alloc(mc_idx, ARRAY_SIZE(layers), layers, sizeof(*priv));
+       if (!mci)
+               return -ENOMEM;
+
+       priv = mci->pvt_info;
+
+       priv->dimm_per_mc = dimm_count;
+       priv->emi_base = devm_ioremap_resource(dev, emi_res);
+       if (IS_ERR(priv->emi_base)) {
+               dev_err(dev, "failed to map EMI IO resource\n");
+               ret = PTR_ERR(priv->emi_base);
+               goto err;
+       }
+
+       mci->pdev = dev;
+       mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_RDDR4 |
+                        MEM_FLAG_LRDDR4 | MEM_FLAG_NVDIMM;
+       mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+
+       mci->mod_name = DRIVER_NAME;
+       mci->ctl_name = "BlueField_Memory_Controller";
+       mci->dev_name = dev_name(dev);
+       mci->edac_check = bluefield_edac_check;
+
+       /* Initialize mci with the actual populated DIMM information. */
+       bluefield_edac_init_dimms(mci);
+
+       platform_set_drvdata(pdev, mci);
+
+       /* Register with EDAC core */
+       rc = edac_mc_add_mc(mci);
+       if (rc) {
+               dev_err(dev, "failed to register with EDAC core\n");
+               ret = rc;
+               goto err;
+       }
+
+       /* Only POLL mode supported so far. */
+       edac_op_state = EDAC_OPSTATE_POLL;
+
+       return 0;
+
+err:
+       edac_mc_free(mci);
+
+       return ret;
+
+}
+
+static int bluefield_edac_mc_remove(struct platform_device *pdev)
+{
+       struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+       edac_mc_del_mc(&pdev->dev);
+       edac_mc_free(mci);
+
+       return 0;
+}
+
+static const struct acpi_device_id bluefield_mc_acpi_ids[] = {
+       {"MLNXBF08", 0},
+       {}
+};
+
+MODULE_DEVICE_TABLE(acpi, bluefield_mc_acpi_ids);
+
+static struct platform_driver bluefield_edac_mc_driver = {
+       .driver = {
+               .name = DRIVER_NAME,
+               .acpi_match_table = bluefield_mc_acpi_ids,
+       },
+       .probe = bluefield_edac_mc_probe,
+       .remove = bluefield_edac_mc_remove,
+};
+
+module_platform_driver(bluefield_edac_mc_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField memory edac driver");
+MODULE_AUTHOR("Mellanox Technologies");
+MODULE_LICENSE("GPL v2");
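
The MLXBF_DIMM_INFO__* masks above describe how the MLNX_SIP_GET_DIMM_INFO SMC response packs the DIMM attributes into a0. A small standalone sketch of that decoding, using simplified stand-ins for the kernel's GENMASK_ULL()/FIELD_GET() helpers and a made-up response value:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for GENMASK_ULL() and FIELD_GET(). */
#define MASK(h, l)     ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define GET(mask, val) (((val) & (mask)) >> __builtin_ctzll(mask))

#define DIMM_SIZE_GB  MASK(15, 0)
#define DIMM_IS_RDIMM MASK(16, 16)
#define DIMM_RANKS    MASK(23, 21)
#define DIMM_PKG_X    MASK(31, 24)

int main(void)
{
	/* Hypothetical response: 16 GB registered DIMM, 2 ranks, x8 devices. */
	uint64_t info = (16ULL << 0) | (1ULL << 16) | (2ULL << 21) | (8ULL << 24);

	printf("size=%llu GB rdimm=%llu ranks=%llu x%llu\n",
	       (unsigned long long)GET(DIMM_SIZE_GB, info),
	       (unsigned long long)GET(DIMM_IS_RDIMM, info),
	       (unsigned long long)GET(DIMM_RANKS, info),
	       (unsigned long long)GET(DIMM_PKG_X, info));
	return 0;
}
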
index 64922c8fa7e3b729ea6b7919b371fcd76515491b..e6fd079783bd27f6a75da1976f8db4edeb9e6377 100644 (file)
@@ -114,8 +114,8 @@ static const struct kernel_param_ops edac_report_ops = {
 
 module_param_cb(edac_report, &edac_report_ops, &edac_report, 0644);
 
-unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
-                                unsigned len)
+unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
+                                    unsigned int len)
 {
        struct mem_ctl_info *mci = dimm->mci;
        int i, n, count = 0;
@@ -236,9 +236,9 @@ EXPORT_SYMBOL_GPL(edac_mem_types);
  * On return, the pointer 'p' will have been incremented for use on the
  * next call to this function.
  */
-void *edac_align_ptr(void **p, unsigned size, int n_elems)
+void *edac_align_ptr(void **p, unsigned int size, int n_elems)
 {
-       unsigned align, r;
+       unsigned int align, r;
        void *ptr = *p;
 
        *p += size * n_elems;
@@ -275,38 +275,37 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems)
 
 static void _edac_mc_free(struct mem_ctl_info *mci)
 {
-       int i, chn, row;
        struct csrow_info *csr;
-       const unsigned int tot_dimms = mci->tot_dimms;
-       const unsigned int tot_channels = mci->num_cschannel;
-       const unsigned int tot_csrows = mci->nr_csrows;
+       int i, chn, row;
 
        if (mci->dimms) {
-               for (i = 0; i < tot_dimms; i++)
+               for (i = 0; i < mci->tot_dimms; i++)
                        kfree(mci->dimms[i]);
                kfree(mci->dimms);
        }
+
        if (mci->csrows) {
-               for (row = 0; row < tot_csrows; row++) {
+               for (row = 0; row < mci->nr_csrows; row++) {
                        csr = mci->csrows[row];
-                       if (csr) {
-                               if (csr->channels) {
-                                       for (chn = 0; chn < tot_channels; chn++)
-                                               kfree(csr->channels[chn]);
-                                       kfree(csr->channels);
-                               }
-                               kfree(csr);
+                       if (!csr)
+                               continue;
+
+                       if (csr->channels) {
+                               for (chn = 0; chn < mci->num_cschannel; chn++)
+                                       kfree(csr->channels[chn]);
+                               kfree(csr->channels);
                        }
+                       kfree(csr);
                }
                kfree(mci->csrows);
        }
        kfree(mci);
 }
 
-struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
-                                  unsigned n_layers,
+struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
+                                  unsigned int n_layers,
                                   struct edac_mc_layer *layers,
-                                  unsigned sz_pvt)
+                                  unsigned int sz_pvt)
 {
        struct mem_ctl_info *mci;
        struct edac_mc_layer *layer;
@@ -314,9 +313,9 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
        struct rank_info *chan;
        struct dimm_info *dimm;
        u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
-       unsigned pos[EDAC_MAX_LAYERS];
-       unsigned size, tot_dimms = 1, count = 1;
-       unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
+       unsigned int pos[EDAC_MAX_LAYERS];
+       unsigned int size, tot_dimms = 1, count = 1;
+       unsigned int tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
        void *pvt, *p, *ptr = NULL;
        int i, j, row, chn, n, len, off;
        bool per_rank = false;
@@ -1235,9 +1234,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
        if (p > e->location)
                *(p - 1) = '\0';
 
-       /* Report the error via the trace interface */
-       grain_bits = fls_long(e->grain) + 1;
+       /* Sanity-check driver-supplied grain value. */
+       if (WARN_ON_ONCE(!e->grain))
+               e->grain = 1;
 
+       grain_bits = fls_long(e->grain - 1);
+
+       /* Report the error via the trace interface */
        if (IS_ENABLED(CONFIG_RAS))
                trace_mc_event(type, e->msg, e->label, e->error_count,
                               mci->mc_idx, e->top_layer, e->mid_layer,
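
The grain_bits hunk above changes the computation so the driver-supplied grain is rounded up to a power of two: fls_long(grain - 1) yields ceil(log2(grain)), so a grain of 8 is reported as 2^3 = 8, where the old fls_long(grain) + 1 would have reported 2^5 = 32. A standalone illustration (fls_long() re-implemented here only for the example):

#include <stdio.h>

/* Userspace stand-in for the kernel's fls_long(): 1-based index of the highest set bit. */
static int fls_long(unsigned long x)
{
	return x ? (int)(8 * sizeof(long)) - __builtin_clzl(x) : 0;
}

int main(void)
{
	unsigned long grains[] = { 1, 4, 5, 8 };

	for (unsigned i = 0; i < sizeof(grains) / sizeof(grains[0]); i++) {
		unsigned long g = grains[i];

		printf("grain=%lu  old: 2^%d  new: 2^%d\n", g,
		       fls_long(g) + 1,   /* previous formula */
		       fls_long(g - 1));  /* new formula: round up to a power of two */
	}
	return 0;
}
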
index 4165e15995ad8a16975744ac5440b2db10ca4b8e..02aac5c61d0044a89546651a9b67f697869287e3 100644 (file)
@@ -122,10 +122,10 @@ do {                                                                      \
  *     On success, return a pointer to struct mem_ctl_info pointer;
  *     %NULL otherwise
  */
-struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
-                                  unsigned n_layers,
+struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
+                                  unsigned int n_layers,
                                   struct edac_mc_layer *layers,
-                                  unsigned sz_pvt);
+                                  unsigned int sz_pvt);
 
 /**
  * edac_get_owner - Return the owner's mod_name of EDAC MC
index 4386ea4b9b5af81d069b88fb240ee721a707e4d2..32d016f1ecd1b0c5f8b4042b428952e0edb187dd 100644 (file)
@@ -131,7 +131,7 @@ static const char * const edac_caps[] = {
 
 struct dev_ch_attribute {
        struct device_attribute attr;
-       int channel;
+       unsigned int channel;
 };
 
 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
@@ -200,7 +200,7 @@ static ssize_t channel_dimm_label_show(struct device *dev,
                                       char *data)
 {
        struct csrow_info *csrow = to_csrow(dev);
-       unsigned chan = to_channel(mattr);
+       unsigned int chan = to_channel(mattr);
        struct rank_info *rank = csrow->channels[chan];
 
        /* if field has not been initialized, there is nothing to send */
@@ -216,7 +216,7 @@ static ssize_t channel_dimm_label_store(struct device *dev,
                                        const char *data, size_t count)
 {
        struct csrow_info *csrow = to_csrow(dev);
-       unsigned chan = to_channel(mattr);
+       unsigned int chan = to_channel(mattr);
        struct rank_info *rank = csrow->channels[chan];
        size_t copy_count = count;
 
@@ -240,7 +240,7 @@ static ssize_t channel_ce_count_show(struct device *dev,
                                     struct device_attribute *mattr, char *data)
 {
        struct csrow_info *csrow = to_csrow(dev);
-       unsigned chan = to_channel(mattr);
+       unsigned int chan = to_channel(mattr);
        struct rank_info *rank = csrow->channels[chan];
 
        return sprintf(data, "%u\n", rank->ce_count);
@@ -278,7 +278,7 @@ static void csrow_attr_release(struct device *dev)
 {
        struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
 
-       edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
+       edac_dbg(1, "device %s released\n", dev_name(dev));
        kfree(csrow);
 }
 
@@ -414,14 +414,16 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
        dev_set_name(&csrow->dev, "csrow%d", index);
        dev_set_drvdata(&csrow->dev, csrow);
 
-       edac_dbg(0, "creating (virtual) csrow node %s\n",
-                dev_name(&csrow->dev));
-
        err = device_add(&csrow->dev);
-       if (err)
+       if (err) {
+               edac_dbg(1, "failure: create device %s\n", dev_name(&csrow->dev));
                put_device(&csrow->dev);
+               return err;
+       }
 
-       return err;
+       edac_dbg(0, "device %s created\n", dev_name(&csrow->dev));
+
+       return 0;
 }
 
 /* Create a CSROW object under specified edac_mc_device */
@@ -435,12 +437,8 @@ static int edac_create_csrow_objects(struct mem_ctl_info *mci)
                if (!nr_pages_per_csrow(csrow))
                        continue;
                err = edac_create_csrow_object(mci, mci->csrows[i], i);
-               if (err < 0) {
-                       edac_dbg(1,
-                                "failure: create csrow objects for csrow %d\n",
-                                i);
+               if (err < 0)
                        goto error;
-               }
        }
        return 0;
 
@@ -624,7 +622,7 @@ static void dimm_attr_release(struct device *dev)
 {
        struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
 
-       edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
+       edac_dbg(1, "device %s released\n", dev_name(dev));
        kfree(dimm);
 }
 
@@ -653,12 +651,21 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
        pm_runtime_forbid(&mci->dev);
 
        err = device_add(&dimm->dev);
-       if (err)
+       if (err) {
+               edac_dbg(1, "failure: create device %s\n", dev_name(&dimm->dev));
                put_device(&dimm->dev);
+               return err;
+       }
 
-       edac_dbg(0, "created rank/dimm device %s\n", dev_name(&dimm->dev));
+       if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
+               char location[80];
 
-       return err;
+               edac_dimm_info_location(dimm, location, sizeof(location));
+               edac_dbg(0, "device %s created at location %s\n",
+                       dev_name(&dimm->dev), location);
+       }
+
+       return 0;
 }
 
 /*
@@ -901,7 +908,7 @@ static void mci_attr_release(struct device *dev)
 {
        struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
 
-       edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
+       edac_dbg(1, "device %s released\n", dev_name(dev));
        kfree(mci);
 }
 
@@ -933,14 +940,15 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
        dev_set_drvdata(&mci->dev, mci);
        pm_runtime_forbid(&mci->dev);
 
-       edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
        err = device_add(&mci->dev);
        if (err < 0) {
                edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
                put_device(&mci->dev);
-               goto out;
+               return err;
        }
 
+       edac_dbg(0, "device %s created\n", dev_name(&mci->dev));
+
        /*
         * Create the dimm/rank devices
         */
@@ -950,22 +958,9 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
                if (!dimm->nr_pages)
                        continue;
 
-#ifdef CONFIG_EDAC_DEBUG
-               edac_dbg(1, "creating dimm%d, located at ", i);
-               if (edac_debug_level >= 1) {
-                       int lay;
-                       for (lay = 0; lay < mci->n_layers; lay++)
-                               printk(KERN_CONT "%s %d ",
-                                       edac_layer_name[mci->layers[lay].type],
-                                       dimm->location[lay]);
-                       printk(KERN_CONT "\n");
-               }
-#endif
                err = edac_create_dimm_object(mci, dimm, i);
-               if (err) {
-                       edac_dbg(1, "failure: create dimm %d obj\n", i);
+               if (err)
                        goto fail_unregister_dimm;
-               }
        }
 
 #ifdef CONFIG_EDAC_LEGACY_SYSFS
@@ -987,7 +982,6 @@ fail_unregister_dimm:
        }
        device_unregister(&mci->dev);
 
-out:
        return err;
 }
 
@@ -1011,14 +1005,14 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
                struct dimm_info *dimm = mci->dimms[i];
                if (dimm->nr_pages == 0)
                        continue;
-               edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
+               edac_dbg(1, "unregistering device %s\n", dev_name(&dimm->dev));
                device_unregister(&dimm->dev);
        }
 }
 
 void edac_unregister_sysfs(struct mem_ctl_info *mci)
 {
-       edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
+       edac_dbg(1, "unregistering device %s\n", dev_name(&mci->dev));
        device_unregister(&mci->dev);
 }
 
@@ -1029,7 +1023,7 @@ static void mc_attr_release(struct device *dev)
         * parent device, used to create the /sys/devices/mc sysfs node.
         * So, there are no attributes on it.
         */
-       edac_dbg(1, "Releasing device %s\n", dev_name(dev));
+       edac_dbg(1, "device %s released\n", dev_name(dev));
        kfree(dev);
 }
 
@@ -1044,10 +1038,8 @@ int __init edac_mc_sysfs_init(void)
        int err;
 
        mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
-       if (!mci_pdev) {
-               err = -ENOMEM;
-               goto out;
-       }
+       if (!mci_pdev)
+               return -ENOMEM;
 
        mci_pdev->bus = edac_get_sysfs_subsys();
        mci_pdev->type = &mc_attr_type;
@@ -1055,17 +1047,15 @@ int __init edac_mc_sysfs_init(void)
        dev_set_name(mci_pdev, "mc");
 
        err = device_add(mci_pdev);
-       if (err < 0)
-               goto out_put_device;
+       if (err < 0) {
+               edac_dbg(1, "failure: create device %s\n", dev_name(mci_pdev));
+               put_device(mci_pdev);
+               return err;
+       }
 
        edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
 
        return 0;
-
- out_put_device:
-       put_device(mci_pdev);
- out:
-       return err;
 }
 
 void edac_mc_sysfs_exit(void)
index 7f19f1c672c3aaf1cf8e22de6612ee26634a5b72..d413a0bdc9ad9da8637b811e79a81023c39c1906 100644 (file)
@@ -68,7 +68,7 @@ struct memdev_dmi_entry {
 
 struct ghes_edac_dimm_fill {
        struct mem_ctl_info *mci;
-       unsigned count;
+       unsigned int count;
 };
 
 static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
index b506eef6b146d8570f6dcba9be5205d96ad9e9f9..251f2b692785d8729d2c48e35a663dacffdadf4e 100644 (file)
@@ -417,7 +417,8 @@ static const char *i5100_err_msg(unsigned err)
 }
 
 /* convert csrow index into a rank (per channel -- 0..5) */
-static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
+static unsigned int i5100_csrow_to_rank(const struct mem_ctl_info *mci,
+                                       unsigned int csrow)
 {
        const struct i5100_priv *priv = mci->pvt_info;
 
@@ -425,7 +426,8 @@ static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
 }
 
 /* convert csrow index into a channel (0..1) */
-static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
+static unsigned int i5100_csrow_to_chan(const struct mem_ctl_info *mci,
+                                       unsigned int csrow)
 {
        const struct i5100_priv *priv = mci->pvt_info;
 
@@ -653,11 +655,11 @@ static struct pci_dev *pci_get_device_func(unsigned vendor,
        return ret;
 }
 
-static unsigned long i5100_npages(struct mem_ctl_info *mci, int csrow)
+static unsigned long i5100_npages(struct mem_ctl_info *mci, unsigned int csrow)
 {
        struct i5100_priv *priv = mci->pvt_info;
-       const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
-       const unsigned chan = i5100_csrow_to_chan(mci, csrow);
+       const unsigned int chan_rank = i5100_csrow_to_rank(mci, csrow);
+       const unsigned int chan = i5100_csrow_to_chan(mci, csrow);
        unsigned addr_lines;
 
        /* dimm present? */
@@ -852,8 +854,8 @@ static void i5100_init_csrows(struct mem_ctl_info *mci)
        for (i = 0; i < mci->tot_dimms; i++) {
                struct dimm_info *dimm;
                const unsigned long npages = i5100_npages(mci, i);
-               const unsigned chan = i5100_csrow_to_chan(mci, i);
-               const unsigned rank = i5100_csrow_to_rank(mci, i);
+               const unsigned int chan = i5100_csrow_to_chan(mci, i);
+               const unsigned int rank = i5100_csrow_to_rank(mci, i);
 
                if (!npages)
                        continue;
index ca25f8fe57ef3272ab90b0b42816bc1b0e73ebab..1ad538baaa4a95b391964e87b5b4608bbb03d9d2 100644 (file)
@@ -260,11 +260,14 @@ static u64 get_sideband_reg_base_addr(void)
        }
 }
 
+#define DNV_MCHBAR_SIZE  0x8000
+#define DNV_SB_PORT_SIZE 0x10000
 static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
 {
        struct pci_dev *pdev;
        char *base;
        u64 addr;
+       unsigned long size;
 
        if (op == 4) {
                pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
@@ -279,15 +282,17 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
                        addr = get_mem_ctrl_hub_base_addr();
                        if (!addr)
                                return -ENODEV;
+                       size = DNV_MCHBAR_SIZE;
                } else {
                        /* MMIO via sideband register base address */
                        addr = get_sideband_reg_base_addr();
                        if (!addr)
                                return -ENODEV;
                        addr += (port << 16);
+                       size = DNV_SB_PORT_SIZE;
                }
 
-               base = ioremap((resource_size_t)addr, 0x10000);
+               base = ioremap((resource_size_t)addr, size);
                if (!base)
                        return -ENODEV;
 
index f82ccd39a913f2c4b8fe726b79b8b04c52ccd3b5..84f4ff351c6290309db7afdada06fb470f3ffb82 100644 (file)
@@ -103,7 +103,7 @@ static inline bool psci_power_state_loses_context(u32 state)
        return state & mask;
 }
 
-static inline bool psci_power_state_is_valid(u32 state)
+bool psci_power_state_is_valid(u32 state)
 {
        const u32 valid_mask = psci_has_ext_power_state() ?
                               PSCI_1_0_EXT_POWER_STATE_MASK :
@@ -277,175 +277,24 @@ static int __init psci_features(u32 psci_func_id)
 }
 
 #ifdef CONFIG_CPU_IDLE
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
-
-static int psci_dt_parse_state_node(struct device_node *np, u32 *state)
-{
-       int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
-
-       if (err) {
-               pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
-               return err;
-       }
-
-       if (!psci_power_state_is_valid(*state)) {
-               pr_warn("Invalid PSCI power state %#x\n", *state);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
-{
-       int i, ret = 0, count = 0;
-       u32 *psci_states;
-       struct device_node *state_node;
-
-       /* Count idle states */
-       while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
-                                             count))) {
-               count++;
-               of_node_put(state_node);
-       }
-
-       if (!count)
-               return -ENODEV;
-
-       psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
-       if (!psci_states)
-               return -ENOMEM;
-
-       for (i = 0; i < count; i++) {
-               state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
-               ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
-               of_node_put(state_node);
-
-               if (ret)
-                       goto free_mem;
-
-               pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
-       }
-
-       /* Idle states parsed correctly, initialize per-cpu pointer */
-       per_cpu(psci_power_state, cpu) = psci_states;
-       return 0;
-
-free_mem:
-       kfree(psci_states);
-       return ret;
-}
-
-#ifdef CONFIG_ACPI
-#include <acpi/processor.h>
-
-static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
-{
-       int i, count;
-       u32 *psci_states;
-       struct acpi_lpi_state *lpi;
-       struct acpi_processor *pr = per_cpu(processors, cpu);
-
-       if (unlikely(!pr || !pr->flags.has_lpi))
-               return -EINVAL;
-
-       count = pr->power.count - 1;
-       if (count <= 0)
-               return -ENODEV;
-
-       psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
-       if (!psci_states)
-               return -ENOMEM;
-
-       for (i = 0; i < count; i++) {
-               u32 state;
-
-               lpi = &pr->power.lpi_states[i + 1];
-               /*
-                * Only bits[31:0] represent a PSCI power_state while
-                * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
-                */
-               state = lpi->address;
-               if (!psci_power_state_is_valid(state)) {
-                       pr_warn("Invalid PSCI power state %#x\n", state);
-                       kfree(psci_states);
-                       return -EINVAL;
-               }
-               psci_states[i] = state;
-       }
-       /* Idle states parsed correctly, initialize per-cpu pointer */
-       per_cpu(psci_power_state, cpu) = psci_states;
-       return 0;
-}
-#else
-static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
-{
-       return -EINVAL;
-}
-#endif
-
-int psci_cpu_init_idle(unsigned int cpu)
-{
-       struct device_node *cpu_node;
-       int ret;
-
-       /*
-        * If the PSCI cpu_suspend function hook has not been initialized
-        * idle states must not be enabled, so bail out
-        */
-       if (!psci_ops.cpu_suspend)
-               return -EOPNOTSUPP;
-
-       if (!acpi_disabled)
-               return psci_acpi_cpu_init_idle(cpu);
-
-       cpu_node = of_get_cpu_node(cpu, NULL);
-       if (!cpu_node)
-               return -ENODEV;
-
-       ret = psci_dt_cpu_init_idle(cpu_node, cpu);
-
-       of_node_put(cpu_node);
-
-       return ret;
-}
-
-static int psci_suspend_finisher(unsigned long index)
+static int psci_suspend_finisher(unsigned long state)
 {
-       u32 *state = __this_cpu_read(psci_power_state);
+       u32 power_state = state;
 
-       return psci_ops.cpu_suspend(state[index - 1],
-                                   __pa_symbol(cpu_resume));
+       return psci_ops.cpu_suspend(power_state, __pa_symbol(cpu_resume));
 }
 
-int psci_cpu_suspend_enter(unsigned long index)
+int psci_cpu_suspend_enter(u32 state)
 {
        int ret;
-       u32 *state = __this_cpu_read(psci_power_state);
-       /*
-        * idle state index 0 corresponds to wfi, should never be called
-        * from the cpu_suspend operations
-        */
-       if (WARN_ON_ONCE(!index))
-               return -EINVAL;
 
-       if (!psci_power_state_loses_context(state[index - 1]))
-               ret = psci_ops.cpu_suspend(state[index - 1], 0);
+       if (!psci_power_state_loses_context(state))
+               ret = psci_ops.cpu_suspend(state, 0);
        else
-               ret = cpu_suspend(index, psci_suspend_finisher);
+               ret = cpu_suspend(state, psci_suspend_finisher);
 
        return ret;
 }
-
-/* ARM specific CPU idle operations */
-#ifdef CONFIG_ARM
-static const struct cpuidle_ops psci_cpuidle_ops __initconst = {
-       .suspend = psci_cpu_suspend_enter,
-       .init = psci_dt_cpu_init_idle,
-};
-
-CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
-#endif
 #endif
 
 static int psci_system_suspend(unsigned long unused)
index f3659443f8c2cc14ee46cc878f977eb8f9434685..6a445397771ca027841e82b6ab93f48db877ef28 100644 (file)
@@ -228,8 +228,11 @@ out_free_cpus:
 
 static void dummy_callback(struct timer_list *unused) {}
 
-static int suspend_cpu(int index, bool broadcast)
+static int suspend_cpu(struct cpuidle_device *dev,
+                      struct cpuidle_driver *drv, int index)
 {
+       struct cpuidle_state *state = &drv->states[index];
+       bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
        int ret;
 
        arch_cpu_idle_enter();
@@ -254,11 +257,7 @@ static int suspend_cpu(int index, bool broadcast)
                }
        }
 
-       /*
-        * Replicate the common ARM cpuidle enter function
-        * (arm_enter_idle_state).
-        */
-       ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);
+       ret = state->enter(dev, drv, index);
 
        if (broadcast)
                tick_broadcast_exit();
@@ -301,9 +300,8 @@ static int suspend_test_thread(void *arg)
                 * doesn't use PSCI).
                 */
                for (index = 1; index < drv->state_count; ++index) {
-                       struct cpuidle_state *state = &drv->states[index];
-                       bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
                        int ret;
+                       struct cpuidle_state *state = &drv->states[index];
 
                        /*
                         * Set the timer to wake this CPU up in some time (which
@@ -318,7 +316,7 @@ static int suspend_test_thread(void *arg)
                        /* IRQs must be disabled during suspend operations. */
                        local_irq_disable();
 
-                       ret = suspend_cpu(index, broadcast);
+                       ret = suspend_cpu(dev, drv, index);
 
                        /*
                         * We have woken up. Re-enable IRQs to handle any
index a13f224303c699a8bc05165084e2b5ec1eb9f72f..0221dee8dd4c6b3e90f159ec024f09e4ee847390 100644 (file)
@@ -210,7 +210,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
                return -EIO;
        }
 
-       if (!IS_ERR(conf->confd)) {
+       if (conf->confd) {
                if (!gpiod_get_raw_value_cansleep(conf->confd)) {
                        dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
                        return -EIO;
@@ -289,10 +289,13 @@ static int altera_ps_probe(struct spi_device *spi)
                return PTR_ERR(conf->status);
        }
 
-       conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN);
+       conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
        if (IS_ERR(conf->confd)) {
-               dev_warn(&spi->dev, "Not using confd gpio: %ld\n",
-                        PTR_ERR(conf->confd));
+               dev_err(&spi->dev, "Failed to get confd gpio: %ld\n",
+                       PTR_ERR(conf->confd));
+               return PTR_ERR(conf->confd);
+       } else if (!conf->confd) {
+               dev_warn(&spi->dev, "Not using confd gpio");
        }
 
        /* Register manager with unique name */
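
The altera-ps-spi hunk above switches to devm_gpiod_get_optional() so a missing "confd" line is tolerated (a NULL descriptor) while a genuine lookup failure aborts the probe. A minimal kernel-style sketch of that pattern, not buildable outside a kernel tree; the helper name is illustrative only:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Fetch an optional "confd" GPIO: NULL means "not wired up", an ERR_PTR is fatal. */
static int example_get_confd(struct device *dev, struct gpio_desc **confd)
{
	*confd = devm_gpiod_get_optional(dev, "confd", GPIOD_IN);
	if (IS_ERR(*confd)) {
		dev_err(dev, "Failed to get confd gpio: %ld\n", PTR_ERR(*confd));
		return PTR_ERR(*confd);
	}

	if (!*confd)
		dev_warn(dev, "Not using confd gpio\n");

	return 0;
}
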
index 343153d47e5b640fe992bde7d31a601d78491d80..004dc03ccf09c190497a9f597f76e70b708f646c 100644 (file)
@@ -38,8 +38,7 @@
 #define SCOM_STATUS_PIB_RESP_MASK      0x00007000
 #define SCOM_STATUS_PIB_RESP_SHIFT     12
 
-#define SCOM_STATUS_ANY_ERR            (SCOM_STATUS_ERR_SUMMARY | \
-                                        SCOM_STATUS_PROTECTION | \
+#define SCOM_STATUS_ANY_ERR            (SCOM_STATUS_PROTECTION | \
                                         SCOM_STATUS_PARITY |     \
                                         SCOM_STATUS_PIB_ABORT | \
                                         SCOM_STATUS_PIB_RESP_MASK)
@@ -251,11 +250,6 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
        /* Return -EBUSY on PIB abort to force a retry */
        if (status & SCOM_STATUS_PIB_ABORT)
                return -EBUSY;
-       if (status & SCOM_STATUS_ERR_SUMMARY) {
-               fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
-                                sizeof(uint32_t));
-               return -EIO;
-       }
        return 0;
 }
 
index bb13c266c32968be8bd0864ddd2cf00266d910e7..e193c76948c412205942505aa4ea1f52e16ce5e7 100644 (file)
@@ -288,7 +288,7 @@ config GPIO_IXP4XX
        depends on ARM # For <asm/mach-types.h>
        depends on ARCH_IXP4XX
        select GPIO_GENERIC
-       select IRQ_DOMAIN
+       select GPIOLIB_IRQCHIP
        select IRQ_DOMAIN_HIERARCHY
        help
          Say yes here to support the GPIO functionality of a number of Intel
@@ -311,6 +311,13 @@ config GPIO_LPC18XX
          Select this option to enable GPIO driver for
          NXP LPC18XX/43XX devices.
 
+config GPIO_LPC32XX
+       tristate "NXP LPC32XX GPIO support"
+       depends on OF_GPIO && (ARCH_LPC32XX || COMPILE_TEST)
+       help
+         Select this option to enable GPIO driver for
+         NXP LPC32XX devices.
+
 config GPIO_LYNXPOINT
        tristate "Intel Lynxpoint GPIO support"
        depends on ACPI && X86
@@ -539,6 +546,7 @@ config GPIO_THUNDERX
        tristate "Cavium ThunderX/OCTEON-TX GPIO"
        depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
        depends on PCI_MSI
+       select GPIOLIB_IRQCHIP
        select IRQ_DOMAIN_HIERARCHY
        select IRQ_FASTEOI_HIERARCHY_HANDLERS
        help
@@ -1465,7 +1473,6 @@ endmenu
 
 config GPIO_MOCKUP
        tristate "GPIO Testing Driver"
-       depends on GPIOLIB
        select IRQ_SIM
        help
          This enables GPIO Testing driver, which provides a way to test GPIO
index a4e91175c70805508d2bfe90c567102264bb59ca..f3e051fb50e64ff1a8c7c9f05672314156805e6f 100644 (file)
@@ -67,14 +67,13 @@ obj-$(CONFIG_GPIO_IT87)                     += gpio-it87.o
 obj-$(CONFIG_GPIO_IXP4XX)              += gpio-ixp4xx.o
 obj-$(CONFIG_GPIO_JANZ_TTL)            += gpio-janz-ttl.o
 obj-$(CONFIG_GPIO_KEMPLD)              += gpio-kempld.o
-obj-$(CONFIG_ARCH_KS8695)              += gpio-ks8695.o
 obj-$(CONFIG_GPIO_LOONGSON1)           += gpio-loongson1.o
 obj-$(CONFIG_GPIO_LOONGSON)            += gpio-loongson.o
 obj-$(CONFIG_GPIO_LP3943)              += gpio-lp3943.o
 obj-$(CONFIG_GPIO_LP873X)              += gpio-lp873x.o
 obj-$(CONFIG_GPIO_LP87565)             += gpio-lp87565.o
 obj-$(CONFIG_GPIO_LPC18XX)             += gpio-lpc18xx.o
-obj-$(CONFIG_ARCH_LPC32XX)             += gpio-lpc32xx.o
+obj-$(CONFIG_GPIO_LPC32XX)             += gpio-lpc32xx.o
 obj-$(CONFIG_GPIO_LYNXPOINT)           += gpio-lynxpoint.o
 obj-$(CONFIG_GPIO_MADERA)              += gpio-madera.o
 obj-$(CONFIG_GPIO_MAX3191X)            += gpio-max3191x.o
index c07fad975049d5fe935ad5b0b3ce49ad4275c052..5640efe5e75049a3c3b44f759126dd5bab740181 100644 (file)
@@ -142,7 +142,7 @@ static const struct gpio_chip template_chip = {
 static int arizona_gpio_probe(struct platform_device *pdev)
 {
        struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
-       struct arizona_pdata *pdata = dev_get_platdata(arizona->dev);
+       struct arizona_pdata *pdata = &arizona->pdata;
        struct arizona_gpio *arizona_gpio;
        int ret;
 
@@ -177,7 +177,7 @@ static int arizona_gpio_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       if (pdata && pdata->gpio_base)
+       if (pdata->gpio_base)
                arizona_gpio->gpio_chip.base = pdata->gpio_base;
        else
                arizona_gpio->gpio_chip.base = -1;
index 13d80bfbc3b6b47cec33ffcb07c4ff6a74d3c0ce..09e53c5f3b0a462170c0d83195bb8ea34097d409 100644 (file)
@@ -52,6 +52,7 @@ struct aspeed_gpio_config {
  */
 struct aspeed_gpio {
        struct gpio_chip chip;
+       struct irq_chip irqc;
        spinlock_t lock;
        void __iomem *base;
        int irq;
@@ -661,12 +662,14 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
        struct gpio_chip *gc = irq_desc_get_handler_data(desc);
        struct irq_chip *ic = irq_desc_get_chip(desc);
        struct aspeed_gpio *data = gpiochip_get_data(gc);
-       unsigned int i, p, girq;
+       unsigned int i, p, girq, banks;
        unsigned long reg;
+       struct aspeed_gpio *gpio = gpiochip_get_data(gc);
 
        chained_irq_enter(ic, desc);
 
-       for (i = 0; i < ARRAY_SIZE(aspeed_gpio_banks); i++) {
+       banks = DIV_ROUND_UP(gpio->chip.ngpio, 32);
+       for (i = 0; i < banks; i++) {
                const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
 
                reg = ioread32(bank_reg(data, bank, reg_irq_status));
@@ -681,16 +684,11 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
        chained_irq_exit(ic, desc);
 }
 
-static struct irq_chip aspeed_gpio_irqchip = {
-       .name           = "aspeed-gpio",
-       .irq_ack        = aspeed_gpio_irq_ack,
-       .irq_mask       = aspeed_gpio_irq_mask,
-       .irq_unmask     = aspeed_gpio_irq_unmask,
-       .irq_set_type   = aspeed_gpio_set_type,
-};
-
-static void set_irq_valid_mask(struct aspeed_gpio *gpio)
+static void aspeed_init_irq_valid_mask(struct gpio_chip *gc,
+                                      unsigned long *valid_mask,
+                                      unsigned int ngpios)
 {
+       struct aspeed_gpio *gpio = gpiochip_get_data(gc);
        const struct aspeed_bank_props *props = gpio->config->props;
 
        while (!is_bank_props_sentinel(props)) {
@@ -701,42 +699,16 @@ static void set_irq_valid_mask(struct aspeed_gpio *gpio)
                for_each_clear_bit(offset, &input, 32) {
                        unsigned int i = props->bank * 32 + offset;
 
-                       if (i >= gpio->config->nr_gpios)
+                       if (i >= gpio->chip.ngpio)
                                break;
 
-                       clear_bit(i, gpio->chip.irq.valid_mask);
+                       clear_bit(i, valid_mask);
                }
 
                props++;
        }
 }
 
-static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
-               struct platform_device *pdev)
-{
-       int rc;
-
-       rc = platform_get_irq(pdev, 0);
-       if (rc < 0)
-               return rc;
-
-       gpio->irq = rc;
-
-       set_irq_valid_mask(gpio);
-
-       rc = gpiochip_irqchip_add(&gpio->chip, &aspeed_gpio_irqchip,
-                       0, handle_bad_irq, IRQ_TYPE_NONE);
-       if (rc) {
-               dev_info(&pdev->dev, "Could not add irqchip\n");
-               return rc;
-       }
-
-       gpiochip_set_chained_irqchip(&gpio->chip, &aspeed_gpio_irqchip,
-                                    gpio->irq, aspeed_gpio_irq_handler);
-
-       return 0;
-}
-
 static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
                                        unsigned int offset, bool enable)
 {
@@ -1040,10 +1012,10 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
        unsigned long flags;
 
        if (!gpio->cf_copro_bankmap)
-               gpio->cf_copro_bankmap = kzalloc(gpio->config->nr_gpios >> 3, GFP_KERNEL);
+               gpio->cf_copro_bankmap = kzalloc(gpio->chip.ngpio >> 3, GFP_KERNEL);
        if (!gpio->cf_copro_bankmap)
                return -ENOMEM;
-       if (offset < 0 || offset > gpio->config->nr_gpios)
+       if (offset < 0 || offset > gpio->chip.ngpio)
                return -EINVAL;
        bindex = offset >> 3;
 
@@ -1088,7 +1060,7 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
        if (!gpio->cf_copro_bankmap)
                return -ENXIO;
 
-       if (offset < 0 || offset > gpio->config->nr_gpios)
+       if (offset < 0 || offset > gpio->chip.ngpio)
                return -EINVAL;
        bindex = offset >> 3;
 
@@ -1141,9 +1113,25 @@ static const struct aspeed_gpio_config ast2500_config =
        /* 232 for simplicity, actual number is 228 (4-GPIO hole in GPIOAB) */
        { .nr_gpios = 232, .props = ast2500_bank_props, };
 
+static const struct aspeed_bank_props ast2600_bank_props[] = {
+       /*     input      output   */
+       {5, 0xffffffff,  0x0000ffff}, /* U/V/W/X */
+       {6, 0xffff0000,  0x0fff0000}, /* Y/Z */
+       { },
+};
+
+static const struct aspeed_gpio_config ast2600_config =
+       /*
+        * The ast2600 has two controllers, one with 208 GPIOs and one with
+        * 36 GPIOs. We expect ngpios to be set in the device tree; this value
+        * is only a fallback.
+        */
+       { .nr_gpios = 208, .props = ast2600_bank_props, };
+
 static const struct of_device_id aspeed_gpio_of_table[] = {
        { .compatible = "aspeed,ast2400-gpio", .data = &ast2400_config, },
        { .compatible = "aspeed,ast2500-gpio", .data = &ast2500_config, },
+       { .compatible = "aspeed,ast2600-gpio", .data = &ast2600_config, },
        {}
 };
 MODULE_DEVICE_TABLE(of, aspeed_gpio_of_table);
@@ -1152,7 +1140,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
 {
        const struct of_device_id *gpio_id;
        struct aspeed_gpio *gpio;
-       int rc, i, banks;
+       int rc, i, banks, err;
+       u32 ngpio;
 
        gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
        if (!gpio)
@@ -1178,7 +1167,10 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
        gpio->config = gpio_id->data;
 
        gpio->chip.parent = &pdev->dev;
-       gpio->chip.ngpio = gpio->config->nr_gpios;
+       err = of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpio);
+       gpio->chip.ngpio = (u16) ngpio;
+       if (err)
+               gpio->chip.ngpio = gpio->config->nr_gpios;
        gpio->chip.direction_input = aspeed_gpio_dir_in;
        gpio->chip.direction_output = aspeed_gpio_dir_out;
        gpio->chip.get_direction = aspeed_gpio_get_direction;
@@ -1189,10 +1181,9 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
        gpio->chip.set_config = aspeed_gpio_set_config;
        gpio->chip.label = dev_name(&pdev->dev);
        gpio->chip.base = -1;
-       gpio->chip.irq.need_valid_mask = true;
 
        /* Allocate a cache of the output registers */
-       banks = gpio->config->nr_gpios >> 5;
+       banks = DIV_ROUND_UP(gpio->chip.ngpio, 32);
        gpio->dcache = devm_kcalloc(&pdev->dev,
                                    banks, sizeof(u32), GFP_KERNEL);
        if (!gpio->dcache)
@@ -1212,16 +1203,42 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
                aspeed_gpio_change_cmd_source(gpio, bank, 3, GPIO_CMDSRC_ARM);
        }
 
-       rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
-       if (rc < 0)
-               return rc;
+       /* Optionally set up an irqchip if there is an IRQ */
+       rc = platform_get_irq(pdev, 0);
+       if (rc > 0) {
+               struct gpio_irq_chip *girq;
+
+               gpio->irq = rc;
+               girq = &gpio->chip.irq;
+               girq->chip = &gpio->irqc;
+               girq->chip->name = dev_name(&pdev->dev);
+               girq->chip->irq_ack = aspeed_gpio_irq_ack;
+               girq->chip->irq_mask = aspeed_gpio_irq_mask;
+               girq->chip->irq_unmask = aspeed_gpio_irq_unmask;
+               girq->chip->irq_set_type = aspeed_gpio_set_type;
+               girq->parent_handler = aspeed_gpio_irq_handler;
+               girq->num_parents = 1;
+               girq->parents = devm_kcalloc(&pdev->dev, 1,
+                                            sizeof(*girq->parents),
+                                            GFP_KERNEL);
+               if (!girq->parents)
+                       return -ENOMEM;
+               girq->parents[0] = gpio->irq;
+               girq->default_type = IRQ_TYPE_NONE;
+               girq->handler = handle_bad_irq;
+               girq->init_valid_mask = aspeed_init_irq_valid_mask;
+       }
 
        gpio->offset_timer =
                devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
        if (!gpio->offset_timer)
                return -ENOMEM;
 
-       return aspeed_gpio_setup_irqs(gpio, pdev);
+       rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
+       if (rc < 0)
+               return rc;
+
+       return 0;
 }
 
 static struct platform_driver aspeed_gpio_driver = {
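
Two things change in the aspeed probe above: the GPIO count now prefers an "ngpios" device-tree property (falling back to the per-SoC table entry), and the irqchip is described through struct gpio_irq_chip before the gpiochip is registered. A small kernel-style sketch of the ngpios fallback; the helper name is illustrative:

#include <linux/of.h>

/* Prefer the DT "ngpios" property; fall back to the compatible-specific default. */
static u16 example_ngpio(struct device_node *np, u16 fallback)
{
	u32 ngpio;

	if (of_property_read_u32(np, "ngpios", &ngpio))
		return fallback;

	return (u16)ngpio;
}
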
index fd85605d2dabbf802b6b1a0c7c2b24a5ffedea54..0c1ead12d8839036382392a8d8f35c4131646fd2 100644 (file)
@@ -36,7 +36,7 @@ static int bd70528_set_debounce(struct bd70528_gpio *bdgpio,
                break;
        default:
                dev_err(bdgpio->chip.dev,
-                       "Invalid debouce value %u\n", debounce);
+                       "Invalid debounce value %u\n", debounce);
                return -EINVAL;
        }
        return regmap_update_bits(bdgpio->chip.regmap, GPIO_IN_REG(offset),
@@ -153,7 +153,7 @@ static int bd70528_gpio_get_i(struct bd70528_gpio *bdgpio, unsigned int offset)
 
 static int bd70528_gpio_get(struct gpio_chip *chip, unsigned int offset)
 {
-       int ret = -EINVAL;
+       int ret;
        struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
 
        /*
index af936dcca6596f2e60fb66f5615ff80210e1905d..05e3f99ae59cfba62ee1dd2f91aac538ea406ab2 100644 (file)
@@ -636,10 +636,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
 
        if (of_property_read_bool(np, "interrupt-controller")) {
                priv->parent_irq = platform_get_irq(pdev, 0);
-               if (priv->parent_irq <= 0) {
-                       dev_err(dev, "Couldn't get IRQ");
+               if (priv->parent_irq <= 0)
                        return -ENOENT;
-               }
        } else {
                priv->parent_irq = -ENOENT;
        }
index 712ae212b0b4eb65b35403e56eb8072e28b27c0d..a4d3239d25944dd772a7d1d75498331d7acce3d6 100644 (file)
@@ -214,27 +214,33 @@ static int cdns_gpio_probe(struct platform_device *pdev)
                goto err_revert_dir;
        }
 
-       ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gc, cgpio);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
-               goto err_disable_clk;
-       }
-
        /*
-        * irq_chip support
+        * Optional irq_chip support
         */
        irq = platform_get_irq(pdev, 0);
        if (irq >= 0) {
-               ret = gpiochip_irqchip_add(&cgpio->gc, &cdns_gpio_irqchip,
-                                          0, handle_level_irq,
-                                          IRQ_TYPE_NONE);
-               if (ret) {
-                       dev_err(&pdev->dev, "Could not add irqchip, %d\n",
-                               ret);
+               struct gpio_irq_chip *girq;
+
+               girq = &cgpio->gc.irq;
+               girq->chip = &cdns_gpio_irqchip;
+               girq->parent_handler = cdns_gpio_irq_handler;
+               girq->num_parents = 1;
+               girq->parents = devm_kcalloc(&pdev->dev, 1,
+                                            sizeof(*girq->parents),
+                                            GFP_KERNEL);
+               if (!girq->parents) {
+                       ret = -ENOMEM;
                        goto err_disable_clk;
                }
-               gpiochip_set_chained_irqchip(&cgpio->gc, &cdns_gpio_irqchip,
-                                            irq, cdns_gpio_irq_handler);
+               girq->parents[0] = irq;
+               girq->default_type = IRQ_TYPE_NONE;
+               girq->handler = handle_level_irq;
+       }
+
+       ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gc, cgpio);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+               goto err_disable_clk;
        }
 
        cgpio->bypass_orig = ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE);
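
The cdns hunk above (like the aspeed one earlier) moves from gpiochip_irqchip_add()/gpiochip_set_chained_irqchip() to filling in struct gpio_irq_chip before the gpiochip is registered, letting the core wire up the chained interrupt during devm_gpiochip_add_data(). A kernel-style sketch of that setup under the same assumptions, with placeholder irqchip and handler arguments:

#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Describe a single chained parent IRQ in gc->irq; the core registers it at add time. */
static int example_setup_girq(struct platform_device *pdev, struct gpio_chip *gc,
			      struct irq_chip *irqchip,
			      void (*parent_handler)(struct irq_desc *desc))
{
	struct gpio_irq_chip *girq = &gc->irq;
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	girq->chip = irqchip;
	girq->parent_handler = parent_handler;
	girq->num_parents = 1;
	girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
				     GFP_KERNEL);
	if (!girq->parents)
		return -ENOMEM;
	girq->parents[0] = irq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_level_irq;

	return 0; /* devm_gpiochip_add_data() then registers the irqchip as well */
}
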
index 8cbc94d0d424ea682f6056c4804bea24690f7537..ff19a8ad5663a2bd3dd76b9a10a4dcb0aa54cd33 100644 (file)
@@ -137,7 +137,6 @@ static int creg_gpio_probe(struct platform_device *pdev)
        const struct of_device_id *match;
        struct device *dev = &pdev->dev;
        struct creg_gpio *hcg;
-       struct resource *mem;
        u32 ngpios;
        int ret;
 
@@ -145,8 +144,7 @@ static int creg_gpio_probe(struct platform_device *pdev)
        if (!hcg)
                return -ENOMEM;
 
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       hcg->regs = devm_ioremap_resource(dev, mem);
+       hcg->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hcg->regs))
                return PTR_ERR(hcg->regs);
 
index 3108be5e208cf65106506124a3e4cc55eb69ea74..92e127e74813438847a97924e36197d26176d6da 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 
 #include "gpiolib.h"
+#include "gpiolib-acpi.h"
 
 #define GPIO_SWPORTA_DR                0x00
 #define GPIO_SWPORTA_DDR       0x04
index 7b9ac4a12c20be762b17b3d30d0cf62f7d94afdd..fe7a73f52329b7b2b40d0cfcdf8df37623e992e3 100644 (file)
@@ -584,10 +584,8 @@ static int sprd_eic_probe(struct platform_device *pdev)
        sprd_eic->type = pdata->type;
 
        sprd_eic->irq = platform_get_irq(pdev, 0);
-       if (sprd_eic->irq < 0) {
-               dev_err(&pdev->dev, "Failed to get EIC interrupt.\n");
+       if (sprd_eic->irq < 0)
                return sprd_eic->irq;
-       }
 
        for (i = 0; i < SPRD_EIC_MAX_BANK; i++) {
                /*
index a87951293aaac50f72dc9ea2cbb86e2851a79847..620f25b7efb402c1133d952cde8f5e558b95d626 100644 (file)
@@ -272,11 +272,12 @@ static int em_gio_probe(struct platform_device *pdev)
        struct resource *io[2], *irq[2];
        struct gpio_chip *gpio_chip;
        struct irq_chip *irq_chip;
-       const char *name = dev_name(&pdev->dev);
+       struct device *dev = &pdev->dev;
+       const char *name = dev_name(dev);
        unsigned int ngpios;
        int ret;
 
-       p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
+       p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;
 
@@ -290,27 +291,27 @@ static int em_gio_probe(struct platform_device *pdev)
        irq[1] = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
 
        if (!io[0] || !io[1] || !irq[0] || !irq[1]) {
-               dev_err(&pdev->dev, "missing IRQ or IOMEM\n");
+               dev_err(dev, "missing IRQ or IOMEM\n");
                return -EINVAL;
        }
 
-       p->base0 = devm_ioremap_nocache(&pdev->dev, io[0]->start,
+       p->base0 = devm_ioremap_nocache(dev, io[0]->start,
                                        resource_size(io[0]));
        if (!p->base0)
                return -ENOMEM;
 
-       p->base1 = devm_ioremap_nocache(&pdev->dev, io[1]->start,
+       p->base1 = devm_ioremap_nocache(dev, io[1]->start,
                                   resource_size(io[1]));
        if (!p->base1)
                return -ENOMEM;
 
-       if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
-               dev_err(&pdev->dev, "Missing ngpios OF property\n");
+       if (of_property_read_u32(dev->of_node, "ngpios", &ngpios)) {
+               dev_err(dev, "Missing ngpios OF property\n");
                return -EINVAL;
        }
 
        gpio_chip = &p->gpio_chip;
-       gpio_chip->of_node = pdev->dev.of_node;
+       gpio_chip->of_node = dev->of_node;
        gpio_chip->direction_input = em_gio_direction_input;
        gpio_chip->get = em_gio_get;
        gpio_chip->direction_output = em_gio_direction_output;
@@ -319,7 +320,7 @@ static int em_gio_probe(struct platform_device *pdev)
        gpio_chip->request = em_gio_request;
        gpio_chip->free = em_gio_free;
        gpio_chip->label = name;
-       gpio_chip->parent = &pdev->dev;
+       gpio_chip->parent = dev;
        gpio_chip->owner = THIS_MODULE;
        gpio_chip->base = -1;
        gpio_chip->ngpio = ngpios;
@@ -333,33 +334,33 @@ static int em_gio_probe(struct platform_device *pdev)
        irq_chip->irq_release_resources = em_gio_irq_relres;
        irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
 
-       p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, ngpios, 0,
+       p->irq_domain = irq_domain_add_simple(dev->of_node, ngpios, 0,
                                              &em_gio_irq_domain_ops, p);
        if (!p->irq_domain) {
-               dev_err(&pdev->dev, "cannot initialize irq domain\n");
+               dev_err(dev, "cannot initialize irq domain\n");
                return -ENXIO;
        }
 
-       ret = devm_add_action_or_reset(&pdev->dev, em_gio_irq_domain_remove,
+       ret = devm_add_action_or_reset(dev, em_gio_irq_domain_remove,
                                       p->irq_domain);
        if (ret)
                return ret;
 
-       if (devm_request_irq(&pdev->dev, irq[0]->start,
+       if (devm_request_irq(dev, irq[0]->start,
                             em_gio_irq_handler, 0, name, p)) {
-               dev_err(&pdev->dev, "failed to request low IRQ\n");
+               dev_err(dev, "failed to request low IRQ\n");
                return -ENOENT;
        }
 
-       if (devm_request_irq(&pdev->dev, irq[1]->start,
+       if (devm_request_irq(dev, irq[1]->start,
                             em_gio_irq_handler, 0, name, p)) {
-               dev_err(&pdev->dev, "failed to request high IRQ\n");
+               dev_err(dev, "failed to request high IRQ\n");
                return -ENOENT;
        }
 
-       ret = devm_gpiochip_add_data(&pdev->dev, gpio_chip, p);
+       ret = devm_gpiochip_add_data(dev, gpio_chip, p);
        if (ret) {
-               dev_err(&pdev->dev, "failed to add GPIO controller\n");
+               dev_err(dev, "failed to add GPIO controller\n");
                return ret;
        }
 
index a90870a60c1518e726d98cb292a3d542dcf7fa0c..226da8df6f1000ce41afd2666d6e0118fbc64b45 100644 (file)
@@ -269,56 +269,6 @@ static struct irq_chip ep93xx_gpio_irq_chip = {
        .irq_set_type   = ep93xx_gpio_irq_type,
 };
 
-static int ep93xx_gpio_init_irq(struct platform_device *pdev,
-                               struct ep93xx_gpio *epg)
-{
-       int ab_parent_irq = platform_get_irq(pdev, 0);
-       struct device *dev = &pdev->dev;
-       int gpio_irq;
-       int ret;
-       int i;
-
-       /* The A bank */
-       ret = gpiochip_irqchip_add(&epg->gc[0], &ep93xx_gpio_irq_chip,
-                                   64, handle_level_irq,
-                                   IRQ_TYPE_NONE);
-       if (ret) {
-               dev_err(dev, "Could not add irqchip 0\n");
-               return ret;
-       }
-       gpiochip_set_chained_irqchip(&epg->gc[0], &ep93xx_gpio_irq_chip,
-                                    ab_parent_irq,
-                                    ep93xx_gpio_ab_irq_handler);
-
-       /* The B bank */
-       ret = gpiochip_irqchip_add(&epg->gc[1], &ep93xx_gpio_irq_chip,
-                                   72, handle_level_irq,
-                                   IRQ_TYPE_NONE);
-       if (ret) {
-               dev_err(dev, "Could not add irqchip 1\n");
-               return ret;
-       }
-       gpiochip_set_chained_irqchip(&epg->gc[1], &ep93xx_gpio_irq_chip,
-                                    ab_parent_irq,
-                                    ep93xx_gpio_ab_irq_handler);
-
-       /* The F bank */
-       for (i = 0; i < 8; i++) {
-               gpio_irq = EP93XX_GPIO_F_IRQ_BASE + i;
-               irq_set_chip_data(gpio_irq, &epg->gc[5]);
-               irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip,
-                                        handle_level_irq);
-               irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
-       }
-
-       for (i = 1; i <= 8; i++)
-               irq_set_chained_handler_and_data(platform_get_irq(pdev, i),
-                                                ep93xx_gpio_f_irq_handler,
-                                                &epg->gc[i]);
-       return 0;
-}
-
-
 /*************************************************************************
  * gpiolib interface for EP93xx on-chip GPIOs
  *************************************************************************/
@@ -328,26 +278,33 @@ struct ep93xx_gpio_bank {
        int             dir;
        int             base;
        bool            has_irq;
+       bool            has_hierarchical_irq;
+       unsigned int    irq_base;
 };
 
-#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _has_irq) \
+#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _has_irq, _has_hier, _irq_base) \
        {                                                       \
                .label          = _label,                       \
                .data           = _data,                        \
                .dir            = _dir,                         \
                .base           = _base,                        \
                .has_irq        = _has_irq,                     \
+               .has_hierarchical_irq = _has_hier,              \
+               .irq_base       = _irq_base,                    \
        }
 
 static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
-       EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true), /* Bank A has 8 IRQs */
-       EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true), /* Bank B has 8 IRQs */
-       EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false),
-       EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false),
-       EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false),
-       EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, true), /* Bank F has 8 IRQs */
-       EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false),
-       EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false),
+       /* Bank A has 8 IRQs */
+       EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true, false, 64),
+       /* Bank B has 8 IRQs */
+       EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true, false, 72),
+       EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false, false, 0),
+       EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false, false, 0),
+       EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false, false, 0),
+       /* Bank F has 8 IRQs */
+       EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, false, true, 0),
+       EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false, false, 0),
+       EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false, false, 0),
 };
 
 static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
@@ -369,12 +326,15 @@ static int ep93xx_gpio_f_to_irq(struct gpio_chip *gc, unsigned offset)
        return EP93XX_GPIO_F_IRQ_BASE + offset;
 }
 
-static int ep93xx_gpio_add_bank(struct gpio_chip *gc, struct device *dev,
+static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
+                               struct platform_device *pdev,
                                struct ep93xx_gpio *epg,
                                struct ep93xx_gpio_bank *bank)
 {
        void __iomem *data = epg->base + bank->data;
        void __iomem *dir = epg->base + bank->dir;
+       struct device *dev = &pdev->dev;
+       struct gpio_irq_chip *girq;
        int err;
 
        err = bgpio_init(gc, dev, 1, data, NULL, NULL, dir, NULL, 0);
@@ -384,8 +344,59 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc, struct device *dev,
        gc->label = bank->label;
        gc->base = bank->base;
 
-       if (bank->has_irq)
+       girq = &gc->irq;
+       if (bank->has_irq || bank->has_hierarchical_irq) {
                gc->set_config = ep93xx_gpio_set_config;
+               girq->chip = &ep93xx_gpio_irq_chip;
+       }
+
+       if (bank->has_irq) {
+               int ab_parent_irq = platform_get_irq(pdev, 0);
+
+               girq->parent_handler = ep93xx_gpio_ab_irq_handler;
+               girq->num_parents = 1;
+               girq->parents = devm_kcalloc(dev, 1,
+                                            sizeof(*girq->parents),
+                                            GFP_KERNEL);
+               if (!girq->parents)
+                       return -ENOMEM;
+               girq->default_type = IRQ_TYPE_NONE;
+               girq->handler = handle_level_irq;
+               girq->parents[0] = ab_parent_irq;
+               girq->first = bank->irq_base;
+       }
+
+       /* Only bank F has especially funky IRQ handling */
+       if (bank->has_hierarchical_irq) {
+               int gpio_irq;
+               int i;
+
+               /*
+                * FIXME: convert this to use hierarchical IRQ support!
+                * this requires fixing the root irqchip to be hierarchical.
+                */
+               girq->parent_handler = ep93xx_gpio_f_irq_handler;
+               girq->num_parents = 8;
+               girq->parents = devm_kcalloc(dev, 8,
+                                            sizeof(*girq->parents),
+                                            GFP_KERNEL);
+               if (!girq->parents)
+                       return -ENOMEM;
+               /* Pick resources 1..8 for these IRQs */
+               for (i = 1; i <= 8; i++)
+                       girq->parents[i - 1] = platform_get_irq(pdev, i);
+               for (i = 0; i < 8; i++) {
+                       gpio_irq = EP93XX_GPIO_F_IRQ_BASE + i;
+                       irq_set_chip_data(gpio_irq, &epg->gc[5]);
+                       irq_set_chip_and_handler(gpio_irq,
+                                                &ep93xx_gpio_irq_chip,
+                                                handle_level_irq);
+                       irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
+               }
+               girq->default_type = IRQ_TYPE_NONE;
+               girq->handler = handle_level_irq;
+               gc->to_irq = ep93xx_gpio_f_to_irq;
+       }
 
        return devm_gpiochip_add_data(dev, gc, epg);
 }
@@ -407,16 +418,11 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
                struct gpio_chip *gc = &epg->gc[i];
                struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
 
-               if (ep93xx_gpio_add_bank(gc, &pdev->dev, epg, bank))
+               if (ep93xx_gpio_add_bank(gc, pdev, epg, bank))
                        dev_warn(&pdev->dev, "Unable to add gpio bank %s\n",
                                 bank->label);
-               /* Only bank F has especially funky IRQ handling */
-               if (i == 5)
-                       gc->to_irq = ep93xx_gpio_f_to_irq;
        }
 
-       ep93xx_gpio_init_irq(pdev, epg);
-
        return 0;
 }
 
index 250e71f3e68825833bca14c5b04116618be997a6..fbddb166242874d3669ed83ba6cddc2dec225be8 100644 (file)
@@ -290,16 +290,14 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
        girq->num_parents = 1;
        girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
                                     GFP_KERNEL);
-       if (!girq->parents)
-               return -ENOMEM;
+       if (!girq->parents) {
+               ret = -ENOMEM;
+               goto dis_clk;
+       }
        girq->default_type = IRQ_TYPE_NONE;
        girq->handler = handle_bad_irq;
        girq->parents[0] = irq;
 
-       ret = devm_gpiochip_add_data(dev, &g->gc, g);
-       if (ret)
-               goto dis_clk;
-
        /* Disable, unmask and clear all interrupts */
        writel(0x0, g->base + GPIO_INT_EN);
        writel(0x0, g->base + GPIO_INT_MASK);
@@ -308,6 +306,10 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
        /* Clear any use of debounce */
        writel(0x0, g->base + GPIO_DEBOUNCE_EN);
 
+       ret = devm_gpiochip_add_data(dev, &g->gc, g);
+       if (ret)
+               goto dis_clk;
+
        platform_set_drvdata(pdev, g);
        dev_info(dev, "FTGPIO010 @%p registered\n", g->base);
 
index 0937b605e134276882ec58f99a2cc9001f60e715..08234e64993a951879d21bfc22b8f3debe1a1e74 100644 (file)
@@ -408,8 +408,6 @@ static int grgpio_probe(struct platform_device *ofdev)
                                 * Continue without irq functionality for that
                                 * gpio line
                                 */
-                               dev_err(priv->dev,
-                                       "Failed to get irq for offset %d\n", i);
                                continue;
                        }
                        priv->uirqs[lirq->index].uirq = ret;
index e5fa00f8145f3e41eba8f48f851d832b42d88ac8..4a17599f6d44d23db22eb18d6ef9c5a409c10706 100644 (file)
@@ -244,43 +244,45 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
                ngpios = 32;
        hlwd->gpioc.ngpio = ngpios;
 
-       res = devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc, hlwd);
-       if (res)
-               return res;
-
        /* Mask and ack all interrupts */
        iowrite32be(0, hlwd->regs + HW_GPIOB_INTMASK);
        iowrite32be(0xffffffff, hlwd->regs + HW_GPIOB_INTFLAG);
 
        /*
         * If this GPIO controller is not marked as an interrupt controller in
-        * the DT, return.
+        * the DT, skip interrupt support.
         */
-       if (!of_property_read_bool(pdev->dev.of_node, "interrupt-controller"))
-               return 0;
-
-       hlwd->irq = platform_get_irq(pdev, 0);
-       if (hlwd->irq < 0) {
-               dev_info(&pdev->dev, "platform_get_irq returned %d\n",
-                        hlwd->irq);
-               return hlwd->irq;
+       if (of_property_read_bool(pdev->dev.of_node, "interrupt-controller")) {
+               struct gpio_irq_chip *girq;
+
+               hlwd->irq = platform_get_irq(pdev, 0);
+               if (hlwd->irq < 0) {
+                       dev_info(&pdev->dev, "platform_get_irq returned %d\n",
+                                hlwd->irq);
+                       return hlwd->irq;
+               }
+
+               hlwd->irqc.name = dev_name(&pdev->dev);
+               hlwd->irqc.irq_mask = hlwd_gpio_irq_mask;
+               hlwd->irqc.irq_unmask = hlwd_gpio_irq_unmask;
+               hlwd->irqc.irq_enable = hlwd_gpio_irq_enable;
+               hlwd->irqc.irq_set_type = hlwd_gpio_irq_set_type;
+
+               girq = &hlwd->gpioc.irq;
+               girq->chip = &hlwd->irqc;
+               girq->parent_handler = hlwd_gpio_irqhandler;
+               girq->num_parents = 1;
+               girq->parents = devm_kcalloc(&pdev->dev, 1,
+                                            sizeof(*girq->parents),
+                                            GFP_KERNEL);
+               if (!girq->parents)
+                       return -ENOMEM;
+               girq->parents[0] = hlwd->irq;
+               girq->default_type = IRQ_TYPE_NONE;
+               girq->handler = handle_level_irq;
        }
 
-       hlwd->irqc.name = dev_name(&pdev->dev);
-       hlwd->irqc.irq_mask = hlwd_gpio_irq_mask;
-       hlwd->irqc.irq_unmask = hlwd_gpio_irq_unmask;
-       hlwd->irqc.irq_enable = hlwd_gpio_irq_enable;
-       hlwd->irqc.irq_set_type = hlwd_gpio_irq_set_type;
-
-       res = gpiochip_irqchip_add(&hlwd->gpioc, &hlwd->irqc, 0,
-                                  handle_level_irq, IRQ_TYPE_NONE);
-       if (res)
-               return res;
-
-       gpiochip_set_chained_irqchip(&hlwd->gpioc, &hlwd->irqc,
-                                    hlwd->irq, hlwd_gpio_irqhandler);
-
-       return 0;
+       return devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc, hlwd);
 }
 
 static const struct of_device_id hlwd_gpio_match[] = {
index 9d3ac51a765c1405b4fe0ec68e2beec9b2dd1dfd..6eb56f7ab9c94e73dbb1aeb761511450620d105c 100644 (file)
@@ -118,20 +118,6 @@ static void egpio_handler(struct irq_desc *desc)
        }
 }
 
-int htc_egpio_get_wakeup_irq(struct device *dev)
-{
-       struct egpio_info *ei = dev_get_drvdata(dev);
-
-       /* Read current pins. */
-       u16 readval = egpio_readw(ei, ei->ack_register);
-       /* Ack/unmask interrupts. */
-       ack_irqs(ei);
-       /* Return first set pin. */
-       readval &= ei->irqs_enabled;
-       return ei->irq_start + ffs(readval) - 1;
-}
-EXPORT_SYMBOL(htc_egpio_get_wakeup_irq);
-
 static inline int egpio_pos(struct egpio_info *ei, int bit)
 {
        return bit >> ei->reg_shift;
index 4e803baf980e63fb34820238ebd3a8ceb6c8d6d8..4d835f9089df8b95892270b7756604202123e21e 100644 (file)
@@ -329,6 +329,7 @@ static int intel_gpio_probe(struct pci_dev *pdev,
        u32 gpio_base;
        u32 irq_base;
        int retval;
+       struct gpio_irq_chip *girq;
        struct intel_mid_gpio_ddata *ddata =
                                (struct intel_mid_gpio_ddata *)id->driver_data;
 
@@ -369,6 +370,22 @@ static int intel_gpio_probe(struct pci_dev *pdev,
 
        spin_lock_init(&priv->lock);
 
+       girq = &priv->chip.irq;
+       girq->chip = &intel_mid_irqchip;
+       girq->parent_handler = intel_mid_irq_handler;
+       girq->num_parents = 1;
+       girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
+                                    sizeof(*girq->parents),
+                                    GFP_KERNEL);
+       if (!girq->parents)
+               return -ENOMEM;
+       girq->parents[0] = pdev->irq;
+       girq->first = irq_base;
+       girq->default_type = IRQ_TYPE_NONE;
+       girq->handler = handle_simple_irq;
+
+       intel_mid_irq_init_hw(priv);
+
        pci_set_drvdata(pdev, priv);
        retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
        if (retval) {
@@ -376,24 +393,6 @@ static int intel_gpio_probe(struct pci_dev *pdev,
                return retval;
        }
 
-       retval = gpiochip_irqchip_add(&priv->chip,
-                                     &intel_mid_irqchip,
-                                     irq_base,
-                                     handle_simple_irq,
-                                     IRQ_TYPE_NONE);
-       if (retval) {
-               dev_err(&pdev->dev,
-                       "could not connect irqchip to gpiochip\n");
-               return retval;
-       }
-
-       intel_mid_irq_init_hw(priv);
-
-       gpiochip_set_chained_irqchip(&priv->chip,
-                                    &intel_mid_irqchip,
-                                    pdev->irq,
-                                    intel_mid_irq_handler);
-
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_allow(&pdev->dev);
 
index 670c2a85a35bffc1e01d944220058e79882b64dc..b3b050604e0be02ab165fc2a7ce6a15a66e14ad8 100644 (file)
@@ -47,7 +47,6 @@
  * @dev: containing device for this instance
  * @fwnode: the fwnode for this GPIO chip
  * @gc: gpiochip for this instance
- * @domain: irqdomain for this chip instance
  * @base: remapped I/O-memory base
  * @irq_edge: Each bit represents an IRQ: 1: edge-triggered,
  * 0: level triggered
@@ -56,48 +55,22 @@ struct ixp4xx_gpio {
        struct device *dev;
        struct fwnode_handle *fwnode;
        struct gpio_chip gc;
-       struct irq_domain *domain;
        void __iomem *base;
        unsigned long long irq_edge;
 };
 
-/**
- * struct ixp4xx_gpio_map - IXP4 GPIO to parent IRQ map
- * @gpio_offset: offset of the IXP4 GPIO line
- * @parent_hwirq: hwirq on the parent IRQ controller
- */
-struct ixp4xx_gpio_map {
-       int gpio_offset;
-       int parent_hwirq;
-};
-
-/* GPIO lines 0..12 have corresponding IRQs, GPIOs 13..15 have no IRQs */
-const struct ixp4xx_gpio_map ixp4xx_gpiomap[] = {
-       { .gpio_offset = 0, .parent_hwirq = 6 },
-       { .gpio_offset = 1, .parent_hwirq = 7 },
-       { .gpio_offset = 2, .parent_hwirq = 19 },
-       { .gpio_offset = 3, .parent_hwirq = 20 },
-       { .gpio_offset = 4, .parent_hwirq = 21 },
-       { .gpio_offset = 5, .parent_hwirq = 22 },
-       { .gpio_offset = 6, .parent_hwirq = 23 },
-       { .gpio_offset = 7, .parent_hwirq = 24 },
-       { .gpio_offset = 8, .parent_hwirq = 25 },
-       { .gpio_offset = 9, .parent_hwirq = 26 },
-       { .gpio_offset = 10, .parent_hwirq = 27 },
-       { .gpio_offset = 11, .parent_hwirq = 28 },
-       { .gpio_offset = 12, .parent_hwirq = 29 },
-};
-
 static void ixp4xx_gpio_irq_ack(struct irq_data *d)
 {
-       struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct ixp4xx_gpio *g = gpiochip_get_data(gc);
 
        __raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
 }
 
 static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
 {
-       struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct ixp4xx_gpio *g = gpiochip_get_data(gc);
 
        /* ACK when unmasking if not edge-triggered */
        if (!(g->irq_edge & BIT(d->hwirq)))
@@ -108,7 +81,8 @@ static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
 
 static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
-       struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct ixp4xx_gpio *g = gpiochip_get_data(gc);
        int line = d->hwirq;
        unsigned long flags;
        u32 int_style;
@@ -187,122 +161,31 @@ static struct irq_chip ixp4xx_gpio_irqchip = {
        .irq_set_type = ixp4xx_gpio_irq_set_type,
 };
 
-static int ixp4xx_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+                                            unsigned int child,
+                                            unsigned int child_type,
+                                            unsigned int *parent,
+                                            unsigned int *parent_type)
 {
-       struct ixp4xx_gpio *g = gpiochip_get_data(gc);
-       struct irq_fwspec fwspec;
-
-       fwspec.fwnode = g->fwnode;
-       fwspec.param_count = 2;
-       fwspec.param[0] = offset;
-       fwspec.param[1] = IRQ_TYPE_NONE;
-
-       return irq_create_fwspec_mapping(&fwspec);
-}
+       /* All these interrupts are level high in the CPU */
+       *parent_type = IRQ_TYPE_LEVEL_HIGH;
 
-static int ixp4xx_gpio_irq_domain_translate(struct irq_domain *domain,
-                                           struct irq_fwspec *fwspec,
-                                           unsigned long *hwirq,
-                                           unsigned int *type)
-{
-       int ret;
-
-       /* We support standard DT translation */
-       if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
-               return irq_domain_translate_twocell(domain, fwspec,
-                                                   hwirq, type);
+       /* GPIO lines 0..12 have dedicated IRQs */
+       if (child == 0) {
+               *parent = 6;
+               return 0;
        }
-
-       /* This goes away when we transition to DT */
-       if (is_fwnode_irqchip(fwspec->fwnode)) {
-               ret = irq_domain_translate_twocell(domain, fwspec,
-                                                  hwirq, type);
-               if (ret)
-                       return ret;
-               WARN_ON(*type == IRQ_TYPE_NONE);
+       if (child == 1) {
+               *parent = 7;
                return 0;
        }
-       return -EINVAL;
-}
-
-static int ixp4xx_gpio_irq_domain_alloc(struct irq_domain *d,
-                                       unsigned int irq, unsigned int nr_irqs,
-                                       void *data)
-{
-       struct ixp4xx_gpio *g = d->host_data;
-       irq_hw_number_t hwirq;
-       unsigned int type = IRQ_TYPE_NONE;
-       struct irq_fwspec *fwspec = data;
-       int ret;
-       int i;
-
-       ret = ixp4xx_gpio_irq_domain_translate(d, fwspec, &hwirq, &type);
-       if (ret)
-               return ret;
-
-       dev_dbg(g->dev, "allocate IRQ %d..%d, hwirq %lu..%lu\n",
-               irq, irq + nr_irqs - 1,
-               hwirq, hwirq + nr_irqs - 1);
-
-       for (i = 0; i < nr_irqs; i++) {
-               struct irq_fwspec parent_fwspec;
-               const struct ixp4xx_gpio_map *map;
-               int j;
-
-               /* Not all lines support IRQs */
-               for (j = 0; j < ARRAY_SIZE(ixp4xx_gpiomap); j++) {
-                       map = &ixp4xx_gpiomap[j];
-                       if (map->gpio_offset == hwirq)
-                               break;
-               }
-               if (j == ARRAY_SIZE(ixp4xx_gpiomap)) {
-                       dev_err(g->dev, "can't look up hwirq %lu\n", hwirq);
-                       return -EINVAL;
-               }
-               dev_dbg(g->dev, "found parent hwirq %u\n", map->parent_hwirq);
-
-               /*
-                * We set handle_bad_irq because the .set_type() should
-                * always be invoked and set the right type of handler.
-                */
-               irq_domain_set_info(d,
-                                   irq + i,
-                                   hwirq + i,
-                                   &ixp4xx_gpio_irqchip,
-                                   g,
-                                   handle_bad_irq,
-                                   NULL, NULL);
-               irq_set_probe(irq + i);
-
-               /*
-                * Create a IRQ fwspec to send up to the parent irqdomain:
-                * specify the hwirq we address on the parent and tie it
-                * all together up the chain.
-                */
-               parent_fwspec.fwnode = d->parent->fwnode;
-               parent_fwspec.param_count = 2;
-               parent_fwspec.param[0] = map->parent_hwirq;
-               /* This parent only handles asserted level IRQs */
-               parent_fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
-               dev_dbg(g->dev, "alloc_irqs_parent for %d parent hwirq %d\n",
-                       irq + i, map->parent_hwirq);
-               ret = irq_domain_alloc_irqs_parent(d, irq + i, 1,
-                                                  &parent_fwspec);
-               if (ret)
-                       dev_err(g->dev,
-                               "failed to allocate parent hwirq %d for hwirq %lu\n",
-                               map->parent_hwirq, hwirq);
+       if (child >= 2 && child <= 12) {
+               *parent = child + 17;
+               return 0;
        }
-
-       return 0;
+       return -EINVAL;
 }
 
-static const struct irq_domain_ops ixp4xx_gpio_irqdomain_ops = {
-       .translate = ixp4xx_gpio_irq_domain_translate,
-       .alloc = ixp4xx_gpio_irq_domain_alloc,
-       .free = irq_domain_free_irqs_common,
-};
-
 static int ixp4xx_gpio_probe(struct platform_device *pdev)
 {
        unsigned long flags;
@@ -311,8 +194,8 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
        struct irq_domain *parent;
        struct resource *res;
        struct ixp4xx_gpio *g;
+       struct gpio_irq_chip *girq;
        int ret;
-       int i;
 
        g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL);
        if (!g)
@@ -321,9 +204,36 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        g->base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(g->base)) {
-               dev_err(dev, "ioremap error\n");
+       if (IS_ERR(g->base))
                return PTR_ERR(g->base);
+
+       /*
+        * When we convert to device tree we will simply look up the
+        * parent irqdomain using irq_find_host(parent) as parent comes
+        * from IRQCHIP_DECLARE(), then use of_node_to_fwnode() to get
+        * the fwnode. For now we need this boardfile style code.
+        */
+       if (np) {
+               struct device_node *irq_parent;
+
+               irq_parent = of_irq_find_parent(np);
+               if (!irq_parent) {
+                       dev_err(dev, "no IRQ parent node\n");
+                       return -ENODEV;
+               }
+               parent = irq_find_host(irq_parent);
+               if (!parent) {
+                       dev_err(dev, "no IRQ parent domain\n");
+                       return -ENODEV;
+               }
+               g->fwnode = of_node_to_fwnode(np);
+       } else {
+               parent = ixp4xx_get_irq_domain();
+               g->fwnode = irq_domain_alloc_fwnode(&res->start);
+               if (!g->fwnode) {
+                       dev_err(dev, "no domain base\n");
+                       return -ENODEV;
+               }
        }
 
        /*
@@ -360,7 +270,6 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
                dev_err(dev, "unable to init generic GPIO\n");
                return ret;
        }
-       g->gc.to_irq = ixp4xx_gpio_to_irq;
        g->gc.ngpio = 16;
        g->gc.label = "IXP4XX_GPIO_CHIP";
        /*
@@ -372,86 +281,22 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
        g->gc.parent = &pdev->dev;
        g->gc.owner = THIS_MODULE;
 
+       girq = &g->gc.irq;
+       girq->chip = &ixp4xx_gpio_irqchip;
+       girq->fwnode = g->fwnode;
+       girq->parent_domain = parent;
+       girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq;
+       girq->handler = handle_bad_irq;
+       girq->default_type = IRQ_TYPE_NONE;
+
        ret = devm_gpiochip_add_data(dev, &g->gc, g);
        if (ret) {
                dev_err(dev, "failed to add SoC gpiochip\n");
                return ret;
        }
 
-       /*
-        * When we convert to device tree we will simply look up the
-        * parent irqdomain using irq_find_host(parent) as parent comes
-        * from IRQCHIP_DECLARE(), then use of_node_to_fwnode() to get
-        * the fwnode. For now we need this boardfile style code.
-        */
-       if (np) {
-               struct device_node *irq_parent;
-
-               irq_parent = of_irq_find_parent(np);
-               if (!irq_parent) {
-                       dev_err(dev, "no IRQ parent node\n");
-                       return -ENODEV;
-               }
-               parent = irq_find_host(irq_parent);
-               if (!parent) {
-                       dev_err(dev, "no IRQ parent domain\n");
-                       return -ENODEV;
-               }
-               g->fwnode = of_node_to_fwnode(np);
-       } else {
-               parent = ixp4xx_get_irq_domain();
-               g->fwnode = irq_domain_alloc_fwnode(g->base);
-               if (!g->fwnode) {
-                       dev_err(dev, "no domain base\n");
-                       return -ENODEV;
-               }
-       }
-       g->domain = irq_domain_create_hierarchy(parent,
-                                               IRQ_DOMAIN_FLAG_HIERARCHY,
-                                               ARRAY_SIZE(ixp4xx_gpiomap),
-                                               g->fwnode,
-                                               &ixp4xx_gpio_irqdomain_ops,
-                                               g);
-       if (!g->domain) {
-               irq_domain_free_fwnode(g->fwnode);
-               dev_err(dev, "no hierarchical irq domain\n");
-               return ret;
-       }
-
-       /*
-        * After adding OF support, this is no longer needed: irqs
-        * will be allocated for the respective fwnodes.
-        */
-       if (!np) {
-               for (i = 0; i < ARRAY_SIZE(ixp4xx_gpiomap); i++) {
-                       const struct ixp4xx_gpio_map *map = &ixp4xx_gpiomap[i];
-                       struct irq_fwspec fwspec;
-
-                       fwspec.fwnode = g->fwnode;
-                       /* This is the hwirq for the GPIO line side of things */
-                       fwspec.param[0] = map->gpio_offset;
-                       fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
-                       fwspec.param_count = 2;
-                       ret = __irq_domain_alloc_irqs(g->domain,
-                                                     -1, /* just pick something */
-                                                     1,
-                                                     NUMA_NO_NODE,
-                                                     &fwspec,
-                                                     false,
-                                                     NULL);
-                       if (ret < 0) {
-                               irq_domain_free_fwnode(g->fwnode);
-                               dev_err(dev,
-                                       "can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
-                                       map->gpio_offset, map->parent_hwirq,
-                                       ret);
-                               return ret;
-                       }
-               }
-       }
-
        platform_set_drvdata(pdev, g);
-       dev_info(dev, "IXP4 GPIO @%p registered\n", g->base);
+       dev_info(dev, "IXP4 GPIO registered\n");
 
        return 0;
 }
diff --git a/drivers/gpio/gpio-ks8695.c b/drivers/gpio/gpio-ks8695.c
deleted file mode 100644 (file)
index a0f87c1..0000000
--- a/drivers/gpio/gpio-ks8695.c
+++ /dev/null
@@ -1,284 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/mach-ks8695/gpio.c
- *
- * Copyright (C) 2006 Andrew Victor
- * Updated to GPIOLIB, Copyright 2008 Simtec Electronics
- *                     Daniel Silverstone <dsilvers@simtec.co.uk>
- */
-#include <linux/gpio/driver.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/module.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <asm/mach/irq.h>
-
-#include <mach/regs-gpio.h>
-#include <mach/gpio-ks8695.h>
-
-/*
- * Configure a GPIO line for either GPIO function, or its internal
- * function (Interrupt, Timer, etc).
- */
-static void ks8695_gpio_mode(unsigned int pin, short gpio)
-{
-       unsigned int enable[] = { IOPC_IOEINT0EN, IOPC_IOEINT1EN, IOPC_IOEINT2EN, IOPC_IOEINT3EN, IOPC_IOTIM0EN, IOPC_IOTIM1EN };
-       unsigned long x, flags;
-
-       if (pin > KS8695_GPIO_5)        /* only GPIO 0..5 have internal functions */
-               return;
-
-       local_irq_save(flags);
-
-       x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPC);
-       if (gpio)                       /* GPIO: set bit to 0 */
-               x &= ~enable[pin];
-       else                            /* Internal function: set bit to 1 */
-               x |= enable[pin];
-       __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPC);
-
-       local_irq_restore(flags);
-}
-
-
-static unsigned short gpio_irq[] = { KS8695_IRQ_EXTERN0, KS8695_IRQ_EXTERN1, KS8695_IRQ_EXTERN2, KS8695_IRQ_EXTERN3 };
-
-/*
- * Configure GPIO pin as external interrupt source.
- */
-int ks8695_gpio_interrupt(unsigned int pin, unsigned int type)
-{
-       unsigned long x, flags;
-
-       if (pin > KS8695_GPIO_3)        /* only GPIO 0..3 can generate IRQ */
-               return -EINVAL;
-
-       local_irq_save(flags);
-
-       /* set pin as input */
-       x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
-       x &= ~IOPM(pin);
-       __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPM);
-
-       local_irq_restore(flags);
-
-       /* Set IRQ triggering type */
-       irq_set_irq_type(gpio_irq[pin], type);
-
-       /* enable interrupt mode */
-       ks8695_gpio_mode(pin, 0);
-
-       return 0;
-}
-EXPORT_SYMBOL(ks8695_gpio_interrupt);
-
-
-
-/* .... Generic GPIO interface .............................................. */
-
-/*
- * Configure the GPIO line as an input.
- */
-static int ks8695_gpio_direction_input(struct gpio_chip *gc, unsigned int pin)
-{
-       unsigned long x, flags;
-
-       if (pin > KS8695_GPIO_15)
-               return -EINVAL;
-
-       /* set pin to GPIO mode */
-       ks8695_gpio_mode(pin, 1);
-
-       local_irq_save(flags);
-
-       /* set pin as input */
-       x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
-       x &= ~IOPM(pin);
-       __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPM);
-
-       local_irq_restore(flags);
-
-       return 0;
-}
-
-
-/*
- * Configure the GPIO line as an output, with default state.
- */
-static int ks8695_gpio_direction_output(struct gpio_chip *gc,
-                                       unsigned int pin, int state)
-{
-       unsigned long x, flags;
-
-       if (pin > KS8695_GPIO_15)
-               return -EINVAL;
-
-       /* set pin to GPIO mode */
-       ks8695_gpio_mode(pin, 1);
-
-       local_irq_save(flags);
-
-       /* set line state */
-       x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
-       if (state)
-               x |= IOPD(pin);
-       else
-               x &= ~IOPD(pin);
-       __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPD);
-
-       /* set pin as output */
-       x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
-       x |= IOPM(pin);
-       __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPM);
-
-       local_irq_restore(flags);
-
-       return 0;
-}
-
-
-/*
- * Set the state of an output GPIO line.
- */
-static void ks8695_gpio_set_value(struct gpio_chip *gc,
-                                 unsigned int pin, int state)
-{
-       unsigned long x, flags;
-
-       if (pin > KS8695_GPIO_15)
-               return;
-
-       local_irq_save(flags);
-
-       /* set output line state */
-       x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
-       if (state)
-               x |= IOPD(pin);
-       else
-               x &= ~IOPD(pin);
-       __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPD);
-
-       local_irq_restore(flags);
-}
-
-
-/*
- * Read the state of a GPIO line.
- */
-static int ks8695_gpio_get_value(struct gpio_chip *gc, unsigned int pin)
-{
-       unsigned long x;
-
-       if (pin > KS8695_GPIO_15)
-               return -EINVAL;
-
-       x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
-       return (x & IOPD(pin)) != 0;
-}
-
-
-/*
- * Map GPIO line to IRQ number.
- */
-static int ks8695_gpio_to_irq(struct gpio_chip *gc, unsigned int pin)
-{
-       if (pin > KS8695_GPIO_3)        /* only GPIO 0..3 can generate IRQ */
-               return -EINVAL;
-
-       return gpio_irq[pin];
-}
-
-/* GPIOLIB interface */
-
-static struct gpio_chip ks8695_gpio_chip = {
-       .label                  = "KS8695",
-       .direction_input        = ks8695_gpio_direction_input,
-       .direction_output       = ks8695_gpio_direction_output,
-       .get                    = ks8695_gpio_get_value,
-       .set                    = ks8695_gpio_set_value,
-       .to_irq                 = ks8695_gpio_to_irq,
-       .base                   = 0,
-       .ngpio                  = 16,
-       .can_sleep              = false,
-};
-
-/* Register the GPIOs */
-void ks8695_register_gpios(void)
-{
-       if (gpiochip_add_data(&ks8695_gpio_chip, NULL))
-               printk(KERN_ERR "Unable to register core GPIOs\n");
-}
-
-/* .... Debug interface ..................................................... */
-
-#ifdef CONFIG_DEBUG_FS
-
-static int ks8695_gpio_show(struct seq_file *s, void *unused)
-{
-       unsigned int enable[] = { IOPC_IOEINT0EN, IOPC_IOEINT1EN, IOPC_IOEINT2EN, IOPC_IOEINT3EN, IOPC_IOTIM0EN, IOPC_IOTIM1EN };
-       unsigned int intmask[] = { IOPC_IOEINT0TM, IOPC_IOEINT1TM, IOPC_IOEINT2TM, IOPC_IOEINT3TM };
-       unsigned long mode, ctrl, data;
-       int i;
-
-       mode = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
-       ctrl = __raw_readl(KS8695_GPIO_VA + KS8695_IOPC);
-       data = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
-
-       seq_printf(s, "Pin\tI/O\tFunction\tState\n\n");
-
-       for (i = KS8695_GPIO_0; i <= KS8695_GPIO_15 ; i++) {
-               seq_printf(s, "%i:\t", i);
-
-               seq_printf(s, "%s\t", (mode & IOPM(i)) ? "Output" : "Input");
-
-               if (i <= KS8695_GPIO_3) {
-                       if (ctrl & enable[i]) {
-                               seq_printf(s, "EXT%i ", i);
-
-                               switch ((ctrl & intmask[i]) >> (4 * i)) {
-                               case IOPC_TM_LOW:
-                                       seq_printf(s, "(Low)");         break;
-                               case IOPC_TM_HIGH:
-                                       seq_printf(s, "(High)");        break;
-                               case IOPC_TM_RISING:
-                                       seq_printf(s, "(Rising)");      break;
-                               case IOPC_TM_FALLING:
-                                       seq_printf(s, "(Falling)");     break;
-                               case IOPC_TM_EDGE:
-                                       seq_printf(s, "(Edges)");       break;
-                               }
-                       } else
-                               seq_printf(s, "GPIO\t");
-               } else if (i <= KS8695_GPIO_5) {
-                       if (ctrl & enable[i])
-                               seq_printf(s, "TOUT%i\t", i - KS8695_GPIO_4);
-                       else
-                               seq_printf(s, "GPIO\t");
-               } else {
-                       seq_printf(s, "GPIO\t");
-               }
-
-               seq_printf(s, "\t");
-
-               seq_printf(s, "%i\n", (data & IOPD(i)) ? 1 : 0);
-       }
-       return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ks8695_gpio);
-
-static int __init ks8695_gpio_debugfs_init(void)
-{
-       /* /sys/kernel/debug/ks8695_gpio */
-       debugfs_create_file("ks8695_gpio", S_IFREG | S_IRUGO, NULL, NULL,
-                               &ks8695_gpio_fops);
-       return 0;
-}
-postcore_initcall(ks8695_gpio_debugfs_init);
-
-#endif
index 24885b3db3d504d203b5985a694175603eab82a1..4e626c4235c2e161f704ebe39902d2753bd9c9f5 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/module.h>
 
-#include <mach/hardware.h>
-#include <mach/platform.h>
-
-#define LPC32XX_GPIO_P3_INP_STATE              _GPREG(0x000)
-#define LPC32XX_GPIO_P3_OUTP_SET               _GPREG(0x004)
-#define LPC32XX_GPIO_P3_OUTP_CLR               _GPREG(0x008)
-#define LPC32XX_GPIO_P3_OUTP_STATE             _GPREG(0x00C)
-#define LPC32XX_GPIO_P2_DIR_SET                        _GPREG(0x010)
-#define LPC32XX_GPIO_P2_DIR_CLR                        _GPREG(0x014)
-#define LPC32XX_GPIO_P2_DIR_STATE              _GPREG(0x018)
-#define LPC32XX_GPIO_P2_INP_STATE              _GPREG(0x01C)
-#define LPC32XX_GPIO_P2_OUTP_SET               _GPREG(0x020)
-#define LPC32XX_GPIO_P2_OUTP_CLR               _GPREG(0x024)
-#define LPC32XX_GPIO_P2_MUX_SET                        _GPREG(0x028)
-#define LPC32XX_GPIO_P2_MUX_CLR                        _GPREG(0x02C)
-#define LPC32XX_GPIO_P2_MUX_STATE              _GPREG(0x030)
-#define LPC32XX_GPIO_P0_INP_STATE              _GPREG(0x040)
-#define LPC32XX_GPIO_P0_OUTP_SET               _GPREG(0x044)
-#define LPC32XX_GPIO_P0_OUTP_CLR               _GPREG(0x048)
-#define LPC32XX_GPIO_P0_OUTP_STATE             _GPREG(0x04C)
-#define LPC32XX_GPIO_P0_DIR_SET                        _GPREG(0x050)
-#define LPC32XX_GPIO_P0_DIR_CLR                        _GPREG(0x054)
-#define LPC32XX_GPIO_P0_DIR_STATE              _GPREG(0x058)
-#define LPC32XX_GPIO_P1_INP_STATE              _GPREG(0x060)
-#define LPC32XX_GPIO_P1_OUTP_SET               _GPREG(0x064)
-#define LPC32XX_GPIO_P1_OUTP_CLR               _GPREG(0x068)
-#define LPC32XX_GPIO_P1_OUTP_STATE             _GPREG(0x06C)
-#define LPC32XX_GPIO_P1_DIR_SET                        _GPREG(0x070)
-#define LPC32XX_GPIO_P1_DIR_CLR                        _GPREG(0x074)
-#define LPC32XX_GPIO_P1_DIR_STATE              _GPREG(0x078)
+#define LPC32XX_GPIO_P3_INP_STATE              (0x000)
+#define LPC32XX_GPIO_P3_OUTP_SET               (0x004)
+#define LPC32XX_GPIO_P3_OUTP_CLR               (0x008)
+#define LPC32XX_GPIO_P3_OUTP_STATE             (0x00C)
+#define LPC32XX_GPIO_P2_DIR_SET                        (0x010)
+#define LPC32XX_GPIO_P2_DIR_CLR                        (0x014)
+#define LPC32XX_GPIO_P2_DIR_STATE              (0x018)
+#define LPC32XX_GPIO_P2_INP_STATE              (0x01C)
+#define LPC32XX_GPIO_P2_OUTP_SET               (0x020)
+#define LPC32XX_GPIO_P2_OUTP_CLR               (0x024)
+#define LPC32XX_GPIO_P2_MUX_SET                        (0x028)
+#define LPC32XX_GPIO_P2_MUX_CLR                        (0x02C)
+#define LPC32XX_GPIO_P2_MUX_STATE              (0x030)
+#define LPC32XX_GPIO_P0_INP_STATE              (0x040)
+#define LPC32XX_GPIO_P0_OUTP_SET               (0x044)
+#define LPC32XX_GPIO_P0_OUTP_CLR               (0x048)
+#define LPC32XX_GPIO_P0_OUTP_STATE             (0x04C)
+#define LPC32XX_GPIO_P0_DIR_SET                        (0x050)
+#define LPC32XX_GPIO_P0_DIR_CLR                        (0x054)
+#define LPC32XX_GPIO_P0_DIR_STATE              (0x058)
+#define LPC32XX_GPIO_P1_INP_STATE              (0x060)
+#define LPC32XX_GPIO_P1_OUTP_SET               (0x064)
+#define LPC32XX_GPIO_P1_OUTP_CLR               (0x068)
+#define LPC32XX_GPIO_P1_OUTP_STATE             (0x06C)
+#define LPC32XX_GPIO_P1_DIR_SET                        (0x070)
+#define LPC32XX_GPIO_P1_DIR_CLR                        (0x074)
+#define LPC32XX_GPIO_P1_DIR_STATE              (0x078)
 
 #define GPIO012_PIN_TO_BIT(x)                  (1 << (x))
 #define GPIO3_PIN_TO_BIT(x)                    (1 << ((x) + 25))
 #define LPC32XX_GPO_P3_GRP     (LPC32XX_GPI_P3_GRP + LPC32XX_GPI_P3_MAX)
 
 struct gpio_regs {
-       void __iomem *inp_state;
-       void __iomem *outp_state;
-       void __iomem *outp_set;
-       void __iomem *outp_clr;
-       void __iomem *dir_set;
-       void __iomem *dir_clr;
+       unsigned long inp_state;
+       unsigned long outp_state;
+       unsigned long outp_set;
+       unsigned long outp_clr;
+       unsigned long dir_set;
+       unsigned long dir_clr;
 };
 
 /*
@@ -165,16 +162,27 @@ static struct gpio_regs gpio_grp_regs_p3 = {
 struct lpc32xx_gpio_chip {
        struct gpio_chip        chip;
        struct gpio_regs        *gpio_grp;
+       void __iomem            *reg_base;
 };
 
+static inline u32 gpreg_read(struct lpc32xx_gpio_chip *group, unsigned long offset)
+{
+       return __raw_readl(group->reg_base + offset);
+}
+
+static inline void gpreg_write(struct lpc32xx_gpio_chip *group, u32 val, unsigned long offset)
+{
+       __raw_writel(val, group->reg_base + offset);
+}
+
 static void __set_gpio_dir_p012(struct lpc32xx_gpio_chip *group,
        unsigned pin, int input)
 {
        if (input)
-               __raw_writel(GPIO012_PIN_TO_BIT(pin),
+               gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
                        group->gpio_grp->dir_clr);
        else
-               __raw_writel(GPIO012_PIN_TO_BIT(pin),
+               gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
                        group->gpio_grp->dir_set);
 }
 
@@ -184,19 +192,19 @@ static void __set_gpio_dir_p3(struct lpc32xx_gpio_chip *group,
        u32 u = GPIO3_PIN_TO_BIT(pin);
 
        if (input)
-               __raw_writel(u, group->gpio_grp->dir_clr);
+               gpreg_write(group, u, group->gpio_grp->dir_clr);
        else
-               __raw_writel(u, group->gpio_grp->dir_set);
+               gpreg_write(group, u, group->gpio_grp->dir_set);
 }
 
 static void __set_gpio_level_p012(struct lpc32xx_gpio_chip *group,
        unsigned pin, int high)
 {
        if (high)
-               __raw_writel(GPIO012_PIN_TO_BIT(pin),
+               gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
                        group->gpio_grp->outp_set);
        else
-               __raw_writel(GPIO012_PIN_TO_BIT(pin),
+               gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
                        group->gpio_grp->outp_clr);
 }
 
@@ -206,31 +214,31 @@ static void __set_gpio_level_p3(struct lpc32xx_gpio_chip *group,
        u32 u = GPIO3_PIN_TO_BIT(pin);
 
        if (high)
-               __raw_writel(u, group->gpio_grp->outp_set);
+               gpreg_write(group, u, group->gpio_grp->outp_set);
        else
-               __raw_writel(u, group->gpio_grp->outp_clr);
+               gpreg_write(group, u, group->gpio_grp->outp_clr);
 }
 
 static void __set_gpo_level_p3(struct lpc32xx_gpio_chip *group,
        unsigned pin, int high)
 {
        if (high)
-               __raw_writel(GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_set);
+               gpreg_write(group, GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_set);
        else
-               __raw_writel(GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_clr);
+               gpreg_write(group, GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_clr);
 }
 
 static int __get_gpio_state_p012(struct lpc32xx_gpio_chip *group,
        unsigned pin)
 {
-       return GPIO012_PIN_IN_SEL(__raw_readl(group->gpio_grp->inp_state),
+       return GPIO012_PIN_IN_SEL(gpreg_read(group, group->gpio_grp->inp_state),
                pin);
 }
 
 static int __get_gpio_state_p3(struct lpc32xx_gpio_chip *group,
        unsigned pin)
 {
-       int state = __raw_readl(group->gpio_grp->inp_state);
+       int state = gpreg_read(group, group->gpio_grp->inp_state);
 
        /*
         * P3 GPIO pin input mapping is not contiguous, GPIOP3-0..4 is mapped
@@ -242,13 +250,13 @@ static int __get_gpio_state_p3(struct lpc32xx_gpio_chip *group,
 static int __get_gpi_state_p3(struct lpc32xx_gpio_chip *group,
        unsigned pin)
 {
-       return GPI3_PIN_IN_SEL(__raw_readl(group->gpio_grp->inp_state), pin);
+       return GPI3_PIN_IN_SEL(gpreg_read(group, group->gpio_grp->inp_state), pin);
 }
 
 static int __get_gpo_state_p3(struct lpc32xx_gpio_chip *group,
        unsigned pin)
 {
-       return GPO3_PIN_IN_SEL(__raw_readl(group->gpio_grp->outp_state), pin);
+       return GPO3_PIN_IN_SEL(gpreg_read(group, group->gpio_grp->outp_state), pin);
 }
 
 /*
@@ -497,12 +505,18 @@ static int lpc32xx_of_xlate(struct gpio_chip *gc,
 static int lpc32xx_gpio_probe(struct platform_device *pdev)
 {
        int i;
+       void __iomem *reg_base;
+
+       reg_base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(reg_base))
+               return PTR_ERR(reg_base);
 
        for (i = 0; i < ARRAY_SIZE(lpc32xx_gpiochip); i++) {
                if (pdev->dev.of_node) {
                        lpc32xx_gpiochip[i].chip.of_xlate = lpc32xx_of_xlate;
                        lpc32xx_gpiochip[i].chip.of_gpio_n_cells = 3;
                        lpc32xx_gpiochip[i].chip.of_node = pdev->dev.of_node;
+                       lpc32xx_gpiochip[i].reg_base = reg_base;
                }
                devm_gpiochip_add_data(&pdev->dev, &lpc32xx_gpiochip[i].chip,
                                  &lpc32xx_gpiochip[i]);
@@ -527,3 +541,7 @@ static struct platform_driver lpc32xx_gpio_driver = {
 };
 
 module_platform_driver(lpc32xx_gpio_driver);
+
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GPIO driver for LPC32xx SoC");
index 31b4a091ab60aeded67e06607020006e0d49092d..6bb9741ad036affb084a47937dfaa73a5f691b37 100644 (file)
@@ -358,25 +358,30 @@ static int lp_gpio_probe(struct platform_device *pdev)
        gc->can_sleep = false;
        gc->parent = dev;
 
-       ret = devm_gpiochip_add_data(dev, gc, lg);
-       if (ret) {
-               dev_err(dev, "failed adding lp-gpio chip\n");
-               return ret;
-       }
-
        /* set up interrupts  */
        if (irq_rc && irq_rc->start) {
+               struct gpio_irq_chip *girq;
+
+               girq = &gc->irq;
+               girq->chip = &lp_irqchip;
+               girq->parent_handler = lp_gpio_irq_handler;
+               girq->num_parents = 1;
+               girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
+                                            sizeof(*girq->parents),
+                                            GFP_KERNEL);
+               if (!girq->parents)
+                       return -ENOMEM;
+               girq->parents[0] = (unsigned)irq_rc->start;
+               girq->default_type = IRQ_TYPE_NONE;
+               girq->handler = handle_simple_irq;
+
                lp_gpio_irq_init_hw(lg);
-               ret = gpiochip_irqchip_add(gc, &lp_irqchip, 0,
-                                          handle_simple_irq, IRQ_TYPE_NONE);
-               if (ret) {
-                       dev_err(dev, "failed to add irqchip\n");
-                       return ret;
-               }
+       }
 
-               gpiochip_set_chained_irqchip(gc, &lp_irqchip,
-                                            (unsigned)irq_rc->start,
-                                            lp_gpio_irq_handler);
+       ret = devm_gpiochip_add_data(dev, gc, lg);
+       if (ret) {
+               dev_err(dev, "failed adding lp-gpio chip\n");
+               return ret;
        }
 
        pm_runtime_enable(dev);
index 4dbc837d12155446bd977982d6de35eedd5a56d6..7086f8b5388fd712dc7c8198a7f1fe83fc280a4b 100644 (file)
@@ -120,7 +120,7 @@ static const struct gpio_chip madera_gpio_chip = {
 static int madera_gpio_probe(struct platform_device *pdev)
 {
        struct madera *madera = dev_get_drvdata(pdev->dev.parent);
-       struct madera_pdata *pdata = dev_get_platdata(madera->dev);
+       struct madera_pdata *pdata = &madera->pdata;
        struct madera_gpio *madera_gpio;
        int ret;
 
@@ -136,6 +136,9 @@ static int madera_gpio_probe(struct platform_device *pdev)
        madera_gpio->gpio_chip.parent = pdev->dev.parent;
 
        switch (madera->type) {
+       case CS47L15:
+               madera_gpio->gpio_chip.ngpio = CS47L15_NUM_GPIOS;
+               break;
        case CS47L35:
                madera_gpio->gpio_chip.ngpio = CS47L35_NUM_GPIOS;
                break;
@@ -147,13 +150,18 @@ static int madera_gpio_probe(struct platform_device *pdev)
        case CS47L91:
                madera_gpio->gpio_chip.ngpio = CS47L90_NUM_GPIOS;
                break;
+       case CS42L92:
+       case CS47L92:
+       case CS47L93:
+               madera_gpio->gpio_chip.ngpio = CS47L92_NUM_GPIOS;
+               break;
        default:
                dev_err(&pdev->dev, "Unknown chip variant %d\n", madera->type);
                return -EINVAL;
        }
 
        /* We want to be usable on systems that don't use devicetree or acpi */
-       if (pdata && pdata->gpio_base)
+       if (pdata->gpio_base)
                madera_gpio->gpio_chip.base = pdata->gpio_base;
        else
                madera_gpio->gpio_chip.base = -1;
index b7d89e30131e2db1bc541bd4fa4b30c32cafe422..47d05e357e61a05b742fd75d892fa3d3e5506af0 100644 (file)
@@ -270,10 +270,8 @@ static int max77620_gpio_probe(struct platform_device *pdev)
        int ret;
 
        gpio_irq = platform_get_irq(pdev, 0);
-       if (gpio_irq <= 0) {
-               dev_err(&pdev->dev, "GPIO irq not available %d\n", gpio_irq);
+       if (gpio_irq <= 0)
                return -ENODEV;
-       }
 
        mgpio = devm_kzalloc(&pdev->dev, sizeof(*mgpio), GFP_KERNEL);
        if (!mgpio)
index 3f03f4e8956c0b04d34873950a5825479fd5ada2..3075f2513c6f08981c4d4de772d19029d52248af 100644 (file)
@@ -188,3 +188,4 @@ module_platform_driver(max77650_gpio_driver);
 MODULE_DESCRIPTION("MAXIM 77650/77651 GPIO driver");
 MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:max77650-gpio");
index 8f466993cd241cfbda0effba3b10efed3a991768..501e89548f533bb796589a2bcaeeccc9d19bfc32 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 
 #include "gpiolib.h"
+#include "gpiolib-acpi.h"
 
 /*
  * Only first 8bits of a register correspond to each pin,
index 3302125e5265110722d131c7979a8a5d4da0e081..4f27ddfe1e2f07363cec6ab7d20d65f819be61f1 100644 (file)
@@ -397,6 +397,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
 {
        const struct mrfld_gpio_pinrange *range;
        const char *pinctrl_dev_name;
+       struct gpio_irq_chip *girq;
        struct mrfld_gpio *priv;
        u32 gpio_base, irq_base;
        void __iomem *base;
@@ -444,6 +445,21 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
 
        raw_spin_lock_init(&priv->lock);
 
+       girq = &priv->chip.irq;
+       girq->chip = &mrfld_irqchip;
+       girq->parent_handler = mrfld_irq_handler;
+       girq->num_parents = 1;
+       girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
+                                    sizeof(*girq->parents),
+                                    GFP_KERNEL);
+       if (!girq->parents)
+               return -ENOMEM;
+       girq->parents[0] = pdev->irq;
+       girq->default_type = IRQ_TYPE_NONE;
+       girq->handler = handle_bad_irq;
+
+       mrfld_irq_init_hw(priv);
+
        pci_set_drvdata(pdev, priv);
        retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
        if (retval) {
@@ -465,18 +481,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
                }
        }
 
-       retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base,
-                                     handle_bad_irq, IRQ_TYPE_NONE);
-       if (retval) {
-               dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n");
-               return retval;
-       }
-
-       mrfld_irq_init_hw(priv);
-
-       gpiochip_set_chained_irqchip(&priv->chip, &mrfld_irqchip, pdev->irq,
-                                    mrfld_irq_handler);
-
        return 0;
 }
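
The Merrifield hunk above is one instance of the conversion this series applies across many drivers: the cascaded interrupt is described in struct gpio_irq_chip before the chip is registered, and gpiolib performs the setup that gpiochip_irqchip_add() and gpiochip_set_chained_irqchip() used to do afterwards. The minimal sketch below is illustrative only and not part of the commit; all "foo_"-prefixed names are hypothetical.

/* Illustrative sketch only -- not part of this commit; "foo_" names are
 * hypothetical.  It assumes a driver-private struct embedding a gpio_chip,
 * plus an irq_chip and a chained flow handler defined elsewhere.
 */
#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_gpio {
	struct gpio_chip chip;
};

static int foo_gpio_register(struct platform_device *pdev,
			     struct foo_gpio *priv,
			     struct irq_chip *foo_irqchip,
			     irq_flow_handler_t foo_parent_handler)
{
	struct gpio_irq_chip *girq = &priv->chip.irq;
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	girq->chip = foo_irqchip;
	girq->parent_handler = foo_parent_handler;	/* chained parent IRQ */
	girq->num_parents = 1;
	girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
				     GFP_KERNEL);
	if (!girq->parents)
		return -ENOMEM;
	girq->parents[0] = irq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_bad_irq;

	/* gpiolib wires up the irqchip as part of chip registration */
	return devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
}

Because the fields are in place before devm_gpiochip_add_data(), the irqchip is set up in that single call, which is why several drivers in this series can also drop their irqchip-related error paths.
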
 
index f1a9c0544e3f7f5fc63c8f87298c5630c3254e4b..213aedc97dc2e7d37c04194b889d95acb7763df5 100644 (file)
@@ -309,6 +309,7 @@ static const struct file_operations gpio_mockup_debugfs_ops = {
        .read = gpio_mockup_debugfs_read,
        .write = gpio_mockup_debugfs_write,
        .llseek = no_llseek,
+       .release = single_release,
 };
 
 static void gpio_mockup_debugfs_setup(struct device *dev,
index c8673a5d941223c0b27e3a4062f2d7685e0ed178..16a47de29c94c43bbcbceb0063510e77a50df753 100644 (file)
@@ -32,6 +32,7 @@
 #define GPIO_IMR               0x10
 #define GPIO_ICR               0x14
 #define GPIO_ICR2              0x18
+#define GPIO_IBE               0x18
 
 struct mpc8xxx_gpio_chip {
        struct gpio_chip        gc;
@@ -45,6 +46,27 @@ struct mpc8xxx_gpio_chip {
        unsigned int irqn;
 };
 
+/* The GPIO Input Buffer Enable register (GPIO_IBE) is used to
+ * control the input enable of each individual GPIO port.
+ * When an individual GPIO port's direction is set to
+ * input (GPIO_GPDIR[DRn=0]), the associated input enable must be
+ * set (GPIOxGPIE[IEn]=1) to propagate the port value to the GPIO
+ * Data Register.
+ */
+static int ls1028a_gpio_dir_in_init(struct gpio_chip *gc)
+{
+       unsigned long flags;
+       struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
+
+       spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+       gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
+
+       spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+       return 0;
+}
+
 /*
  * This hardware has a big endian bit assignment such that GPIO line 0 is
  * connected to bit 31, line 1 to bit 30 ... line 31 to bit 0.
@@ -261,6 +283,7 @@ static const struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
 };
 
 struct mpc8xxx_gpio_devtype {
+       int (*gpio_dir_in_init)(struct gpio_chip *chip);
        int (*gpio_dir_out)(struct gpio_chip *, unsigned int, int);
        int (*gpio_get)(struct gpio_chip *, unsigned int);
        int (*irq_set_type)(struct irq_data *, unsigned int);
@@ -271,6 +294,10 @@ static const struct mpc8xxx_gpio_devtype mpc512x_gpio_devtype = {
        .irq_set_type = mpc512x_irq_set_type,
 };
 
+static const struct mpc8xxx_gpio_devtype ls1028a_gpio_devtype = {
+       .gpio_dir_in_init = ls1028a_gpio_dir_in_init,
+};
+
 static const struct mpc8xxx_gpio_devtype mpc5125_gpio_devtype = {
        .gpio_dir_out = mpc5125_gpio_dir_out,
        .irq_set_type = mpc512x_irq_set_type,
@@ -291,6 +318,8 @@ static const struct of_device_id mpc8xxx_gpio_ids[] = {
        { .compatible = "fsl,mpc5121-gpio", .data = &mpc512x_gpio_devtype, },
        { .compatible = "fsl,mpc5125-gpio", .data = &mpc5125_gpio_devtype, },
        { .compatible = "fsl,pq3-gpio",     },
+       { .compatible = "fsl,ls1028a-gpio", .data = &ls1028a_gpio_devtype, },
+       { .compatible = "fsl,ls1088a-gpio", .data = &ls1028a_gpio_devtype, },
        { .compatible = "fsl,qoriq-gpio",   },
        {}
 };
@@ -376,6 +405,9 @@ static int mpc8xxx_probe(struct platform_device *pdev)
        /* ack and mask all irqs */
        gc->write_reg(mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
        gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR, 0);
+       /* enable input buffer */
+       if (devtype->gpio_dir_in_init)
+               devtype->gpio_dir_in_init(gc);
 
        irq_set_chained_handler_and_data(mpc8xxx_gc->irqn,
                                         mpc8xxx_gpio_irq_cascade, mpc8xxx_gc);
index 79654fb2e50f2d55c73bce8e029345aa7a77f79c..d1d785f983a79164af6d8d05d4cc968a78355068 100644 (file)
@@ -241,13 +241,6 @@ mediatek_gpio_bank_probe(struct device *dev,
        if (!rg->chip.label)
                return -ENOMEM;
 
-       ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
-       if (ret < 0) {
-               dev_err(dev, "Could not register gpio %d, ret=%d\n",
-                       rg->chip.ngpio, ret);
-               return ret;
-       }
-
        rg->irq_chip.name = dev_name(dev);
        rg->irq_chip.parent_device = dev;
        rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
@@ -256,8 +249,10 @@ mediatek_gpio_bank_probe(struct device *dev,
        rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
 
        if (mtk->gpio_irq) {
+               struct gpio_irq_chip *girq;
+
                /*
-                * Manually request the irq here instead of passing
+                * Directly request the irq here instead of passing
                 * a flow-handler to gpiochip_set_chained_irqchip,
                 * because the irq is shared.
                 */
@@ -271,15 +266,21 @@ mediatek_gpio_bank_probe(struct device *dev,
                        return ret;
                }
 
-               ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
-                                          0, handle_simple_irq, IRQ_TYPE_NONE);
-               if (ret) {
-                       dev_err(dev, "failed to add gpiochip_irqchip\n");
-                       return ret;
-               }
+               girq = &rg->chip.irq;
+               girq->chip = &rg->irq_chip;
+               /* This will let us handle the parent IRQ in the driver */
+               girq->parent_handler = NULL;
+               girq->num_parents = 0;
+               girq->parents = NULL;
+               girq->default_type = IRQ_TYPE_NONE;
+               girq->handler = handle_simple_irq;
+       }
 
-               gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
-                                            mtk->gpio_irq, NULL);
+       ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
+       if (ret < 0) {
+               dev_err(dev, "Could not register gpio %d, ret=%d\n",
+                       rg->chip.ngpio, ret);
+               return ret;
        }
 
        /* set polarity to low for all gpios */
index b2813580c5825a08d7f05b45885b8798f44b17fa..7907a875586626d4592ca2d71ffedfd72131f684 100644 (file)
@@ -435,12 +435,9 @@ static int mxc_gpio_probe(struct platform_device *pdev)
                return port->irq;
 
        /* the controller clock is optional */
-       port->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(port->clk)) {
-               if (PTR_ERR(port->clk) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
-               port->clk = NULL;
-       }
+       port->clk = devm_clk_get_optional(&pdev->dev, NULL);
+       if (IS_ERR(port->clk))
+               return PTR_ERR(port->clk);
 
        err = clk_prepare_enable(port->clk);
        if (err) {
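
The mxc change above relies on the semantics of devm_clk_get_optional(): when the firmware describes no clock it returns NULL rather than an error, real failures (including -EPROBE_DEFER) still come back as error pointers, and clk_prepare_enable(NULL) is a no-op that returns 0. A hedged sketch of the resulting call pattern follows; it is not part of the commit and the "foo_" names are hypothetical.

/* Illustrative sketch, not part of the commit; "foo_" names are hypothetical. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int foo_enable_optional_clk(struct device *foo_dev, struct clk **out)
{
	struct clk *clk = devm_clk_get_optional(foo_dev, NULL);

	if (IS_ERR(clk))		/* real error, including -EPROBE_DEFER */
		return PTR_ERR(clk);

	*out = clk;			/* may be NULL: "no clock" is fine */
	return clk_prepare_enable(clk);	/* NULL clk is a no-op, returns 0 */
}
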
index 378b206d2dc9495dc32c3ffc6fdf06e35262a31e..de5d1383f28da9538336a16e67a127be14f8f9a2 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/bits.h>
 #include <linux/gpio/driver.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
@@ -28,9 +29,9 @@
 #define PCA953X_INVERT         0x02
 #define PCA953X_DIRECTION      0x03
 
-#define REG_ADDR_MASK          0x3f
-#define REG_ADDR_EXT           0x40
-#define REG_ADDR_AI            0x80
+#define REG_ADDR_MASK          GENMASK(5, 0)
+#define REG_ADDR_EXT           BIT(6)
+#define REG_ADDR_AI            BIT(7)
 
 #define PCA957X_IN             0x00
 #define PCA957X_INVRT          0x01
 #define PCAL6524_OUT_INDCONF   0x2c
 #define PCAL6524_DEBOUNCE      0x2d
 
-#define PCA_GPIO_MASK          0x00FF
+#define PCA_GPIO_MASK          GENMASK(7, 0)
 
-#define PCAL_GPIO_MASK         0x1f
-#define PCAL_PINCTRL_MASK      0x60
+#define PCAL_GPIO_MASK         GENMASK(4, 0)
+#define PCAL_PINCTRL_MASK      GENMASK(6, 5)
 
-#define PCA_INT                        0x0100
-#define PCA_PCAL               0x0200
+#define PCA_INT                        BIT(8)
+#define PCA_PCAL               BIT(9)
 #define PCA_LATCH_INT          (PCA_PCAL | PCA_INT)
-#define PCA953X_TYPE           0x1000
-#define PCA957X_TYPE           0x2000
-#define PCA_TYPE_MASK          0xF000
+#define PCA953X_TYPE           BIT(12)
+#define PCA957X_TYPE           BIT(13)
+#define PCA_TYPE_MASK          GENMASK(15, 12)
 
 #define PCA_CHIP_TYPE(x)       ((x) & PCA_TYPE_MASK)
 
@@ -565,7 +566,7 @@ static void pca953x_irq_mask(struct irq_data *d)
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
 
-       chip->irq_mask[d->hwirq / BANK_SZ] &= ~(1 << (d->hwirq % BANK_SZ));
+       chip->irq_mask[d->hwirq / BANK_SZ] &= ~BIT(d->hwirq % BANK_SZ);
 }
 
 static void pca953x_irq_unmask(struct irq_data *d)
@@ -573,7 +574,7 @@ static void pca953x_irq_unmask(struct irq_data *d)
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
 
-       chip->irq_mask[d->hwirq / BANK_SZ] |= 1 << (d->hwirq % BANK_SZ);
+       chip->irq_mask[d->hwirq / BANK_SZ] |= BIT(d->hwirq % BANK_SZ);
 }
 
 static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -604,10 +605,9 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
        u8 new_irqs;
        int level, i;
        u8 invert_irq_mask[MAX_BANK];
-       int reg_direction[MAX_BANK];
+       u8 reg_direction[MAX_BANK];
 
-       regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
-                        NBANK(chip));
+       pca953x_read_regs(chip, chip->regs->direction, reg_direction);
 
        if (chip->driver_data & PCA_PCAL) {
                /* Enable latch on interrupt-enabled inputs */
@@ -641,7 +641,7 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
        int bank_nb = d->hwirq / BANK_SZ;
-       u8 mask = 1 << (d->hwirq % BANK_SZ);
+       u8 mask = BIT(d->hwirq % BANK_SZ);
 
        if (!(type & IRQ_TYPE_EDGE_BOTH)) {
                dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
@@ -666,7 +666,7 @@ static void pca953x_irq_shutdown(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
-       u8 mask = 1 << (d->hwirq % BANK_SZ);
+       u8 mask = BIT(d->hwirq % BANK_SZ);
 
        chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
        chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask;
@@ -679,7 +679,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
        bool pending_seen = false;
        bool trigger_seen = false;
        u8 trigger[MAX_BANK];
-       int reg_direction[MAX_BANK];
+       u8 reg_direction[MAX_BANK];
        int ret, i;
 
        if (chip->driver_data & PCA_PCAL) {
@@ -710,8 +710,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
                return false;
 
        /* Remove output pins from the equation */
-       regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
-                        NBANK(chip));
+       pca953x_read_regs(chip, chip->regs->direction, reg_direction);
        for (i = 0; i < NBANK(chip); i++)
                cur_stat[i] &= reg_direction[i];
 
@@ -768,7 +767,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
 {
        struct i2c_client *client = chip->client;
        struct irq_chip *irq_chip = &chip->irq_chip;
-       int reg_direction[MAX_BANK];
+       u8 reg_direction[MAX_BANK];
        int ret, i;
 
        if (!client->irq)
@@ -789,8 +788,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
         * interrupt.  We have to rely on the previous read for
         * this purpose.
         */
-       regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
-                        NBANK(chip));
+       pca953x_read_regs(chip, chip->regs->direction, reg_direction);
        for (i = 0; i < NBANK(chip); i++)
                chip->irq_stat[i] &= reg_direction[i];
        mutex_init(&chip->irq_lock);
@@ -849,12 +847,12 @@ static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
 
        ret = regcache_sync_region(chip->regmap, chip->regs->output,
                                   chip->regs->output + NBANK(chip));
-       if (ret != 0)
+       if (ret)
                goto out;
 
        ret = regcache_sync_region(chip->regmap, chip->regs->direction,
                                   chip->regs->direction + NBANK(chip));
-       if (ret != 0)
+       if (ret)
                goto out;
 
        /* set platform specific polarity inversion */
@@ -949,19 +947,15 @@ static int pca953x_probe(struct i2c_client *client,
        if (i2c_id) {
                chip->driver_data = i2c_id->driver_data;
        } else {
-               const struct acpi_device_id *acpi_id;
-               struct device *dev = &client->dev;
-
-               chip->driver_data = (uintptr_t)of_device_get_match_data(dev);
-               if (!chip->driver_data) {
-                       acpi_id = acpi_match_device(pca953x_acpi_ids, dev);
-                       if (!acpi_id) {
-                               ret = -ENODEV;
-                               goto err_exit;
-                       }
-
-                       chip->driver_data = acpi_id->driver_data;
+               const void *match;
+
+               match = device_get_match_data(&client->dev);
+               if (!match) {
+                       ret = -ENODEV;
+                       goto err_exit;
                }
+
+               chip->driver_data = (uintptr_t)match;
        }
 
        i2c_set_clientdata(client, chip);
@@ -1041,8 +1035,7 @@ static int pca953x_remove(struct i2c_client *client)
                ret = pdata->teardown(client, chip->gpio_chip.base,
                                chip->gpio_chip.ngpio, pdata->context);
                if (ret < 0)
-                       dev_err(&client->dev, "%s failed, %d\n",
-                                       "teardown", ret);
+                       dev_err(&client->dev, "teardown failed, %d\n", ret);
        } else {
                ret = 0;
        }
@@ -1064,14 +1057,14 @@ static int pca953x_regcache_sync(struct device *dev)
         */
        ret = regcache_sync_region(chip->regmap, chip->regs->direction,
                                   chip->regs->direction + NBANK(chip));
-       if (ret != 0) {
+       if (ret) {
                dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
                return ret;
        }
 
        ret = regcache_sync_region(chip->regmap, chip->regs->output,
                                   chip->regs->output + NBANK(chip));
-       if (ret != 0) {
+       if (ret) {
                dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
                return ret;
        }
@@ -1080,7 +1073,7 @@ static int pca953x_regcache_sync(struct device *dev)
        if (chip->driver_data & PCA_PCAL) {
                ret = regcache_sync_region(chip->regmap, PCAL953X_IN_LATCH,
                                           PCAL953X_IN_LATCH + NBANK(chip));
-               if (ret != 0) {
+               if (ret) {
                        dev_err(dev, "Failed to sync INT latch registers: %d\n",
                                ret);
                        return ret;
@@ -1088,7 +1081,7 @@ static int pca953x_regcache_sync(struct device *dev)
 
                ret = regcache_sync_region(chip->regmap, PCAL953X_INT_MASK,
                                           PCAL953X_INT_MASK + NBANK(chip));
-               if (ret != 0) {
+               if (ret) {
                        dev_err(dev, "Failed to sync INT mask registers: %d\n",
                                ret);
                        return ret;
@@ -1120,7 +1113,7 @@ static int pca953x_resume(struct device *dev)
 
        if (!atomic_read(&chip->wakeup_path)) {
                ret = regulator_enable(chip->regulator);
-               if (ret != 0) {
+               if (ret) {
                        dev_err(dev, "Failed to enable regulator: %d\n", ret);
                        return 0;
                }
@@ -1133,7 +1126,7 @@ static int pca953x_resume(struct device *dev)
                return ret;
 
        ret = regcache_sync(chip->regmap);
-       if (ret != 0) {
+       if (ret) {
                dev_err(dev, "Failed to restore register map: %d\n", ret);
                return ret;
        }
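
The pca953x probe rework above uses device_get_match_data(), which resolves driver data through whichever firmware interface matched the device (OF match table or ACPI ID table), so the separate of_device_get_match_data()/acpi_match_device() branches disappear. A minimal illustrative sketch, not part of the commit; the "bar_" names are hypothetical.

/* Illustrative sketch, not part of the commit; "bar_" names are hypothetical. */
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/property.h>

static int bar_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	unsigned long driver_data;

	if (id) {
		/* matched through the plain I2C id table */
		driver_data = id->driver_data;
	} else {
		/* matched through OF or ACPI: one call covers both */
		const void *match = device_get_match_data(&client->dev);

		if (!match)
			return -ENODEV;
		driver_data = (uintptr_t)match;
	}

	dev_set_drvdata(&client->dev, (void *)driver_data);
	return 0;
}
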
index 1d99293096f27ceac6f1a45e4144fbd1fc148040..3f3d9a94b7093bda5cc4ca456b2c142efd34d8f6 100644 (file)
@@ -409,8 +409,7 @@ static int pch_gpio_probe(struct pci_dev *pdev,
 
 static int __maybe_unused pch_gpio_suspend(struct device *dev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct pch_gpio *chip = pci_get_drvdata(pdev);
+       struct pch_gpio *chip = dev_get_drvdata(dev);
        unsigned long flags;
 
        spin_lock_irqsave(&chip->spinlock, flags);
@@ -422,8 +421,7 @@ static int __maybe_unused pch_gpio_suspend(struct device *dev)
 
 static int __maybe_unused pch_gpio_resume(struct device *dev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct pch_gpio *chip = pci_get_drvdata(pdev);
+       struct pch_gpio *chip = dev_get_drvdata(dev);
        unsigned long flags;
 
        spin_lock_irqsave(&chip->spinlock, flags);
index 24228cf79afc151ea60afebf4a426f9e94325f93..05000cace9b2415cd7cd003bcddc9109b61d226d 100644 (file)
@@ -305,10 +305,8 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
        mutex_init(&pmic_eic->buslock);
 
        pmic_eic->irq = platform_get_irq(pdev, 0);
-       if (pmic_eic->irq < 0) {
-               dev_err(&pdev->dev, "Failed to get PMIC EIC interrupt.\n");
+       if (pmic_eic->irq < 0)
                return pmic_eic->irq;
-       }
 
        pmic_eic->map = dev_get_regmap(pdev->dev.parent, NULL);
        if (!pmic_eic->map)
index f5c8b3a351d593cc7347cc516f8196bc3c68ac1d..d7314d39ab65ba25d00ff2d1ae00f79b679ce6e8 100644 (file)
@@ -226,10 +226,8 @@ static int sprd_gpio_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        sprd_gpio->irq = platform_get_irq(pdev, 0);
-       if (sprd_gpio->irq < 0) {
-               dev_err(&pdev->dev, "Failed to get GPIO interrupt.\n");
+       if (sprd_gpio->irq < 0)
                return sprd_gpio->irq;
-       }
 
        sprd_gpio->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(sprd_gpio->base))
index dbf9cbe36b2bd5523db5eec8193155ef68dbb8d3..994d542daf53fca19537e17dcbae4ff1b7cbe2cc 100644 (file)
@@ -429,6 +429,23 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
        return IRQ_HANDLED;
 }
 
+static void stmpe_init_irq_valid_mask(struct gpio_chip *gc,
+                                     unsigned long *valid_mask,
+                                     unsigned int ngpios)
+{
+       struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
+       int i;
+
+       if (!stmpe_gpio->norequest_mask)
+               return;
+
+       /* Forbid unused lines to be mapped as IRQs */
+       for (i = 0; i < sizeof(u32); i++) {
+               if (stmpe_gpio->norequest_mask & BIT(i))
+                       clear_bit(i, valid_mask);
+       }
+}
+
 static int stmpe_gpio_probe(struct platform_device *pdev)
 {
        struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
@@ -454,14 +471,21 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
        stmpe_gpio->chip.parent = &pdev->dev;
        stmpe_gpio->chip.of_node = np;
        stmpe_gpio->chip.base = -1;
+       /*
+        * REVISIT: this makes sure the valid mask gets allocated and
+        * filled in when adding the gpio_chip, but the rest of the
+        * gpio_irqchip is still filled in using the old method
+        * in gpiochip_irqchip_add_nested(), so clean this up once we
+        * get the gpio_irqchip to initialize while adding the
+        * gpio_chip also for threaded irqchips.
+        */
+       stmpe_gpio->chip.irq.init_valid_mask = stmpe_init_irq_valid_mask;
 
        if (IS_ENABLED(CONFIG_DEBUG_FS))
                 stmpe_gpio->chip.dbg_show = stmpe_dbg_show;
 
        of_property_read_u32(np, "st,norequest-mask",
                        &stmpe_gpio->norequest_mask);
-       if (stmpe_gpio->norequest_mask)
-               stmpe_gpio->chip.irq.need_valid_mask = true;
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
@@ -487,14 +511,6 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
                        goto out_disable;
                }
-               if (stmpe_gpio->norequest_mask) {
-                       int i;
-
-                       /* Forbid unused lines to be mapped as IRQs */
-                       for (i = 0; i < sizeof(u32); i++)
-                               if (stmpe_gpio->norequest_mask & BIT(i))
-                                       clear_bit(i, stmpe_gpio->chip.irq.valid_mask);
-               }
                ret =  gpiochip_irqchip_add_nested(&stmpe_gpio->chip,
                                                   &stmpe_gpio_irq_chip,
                                                   0,
index bd1f3f775ce95ecca0c1cafb515a26f93c7ffb65..5e375186f90ef77408e7040114c81f5798154b1b 100644 (file)
@@ -171,10 +171,8 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
                struct irq_chip_generic *gc;
 
                ret = platform_get_irq(pdev, 0);
-               if (ret < 0) {
-                       dev_err(dev, "No interrupt specified.\n");
+               if (ret < 0)
                        return ret;
-               }
 
                tb10x_gpio->gc.to_irq   = tb10x_gpio_to_irq;
                tb10x_gpio->irq         = ret;
index 0f59161a4701d12a384379618690f4a741536911..8a01d3694b2817ebf43685b43f88e6e883c54f2a 100644 (file)
@@ -624,10 +624,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
 
        for (i = 0; i < tgi->bank_count; i++) {
                ret = platform_get_irq(pdev, i);
-               if (ret < 0) {
-                       dev_err(&pdev->dev, "Missing IRQ resource: %d\n", ret);
+               if (ret < 0)
                        return ret;
-               }
 
                bank = &tgi->bank_info[i];
                bank->bank = i;
index 715371b5102a5c6a8663649aa2539f709e804a26..ddad5c7ea61769446ef9d73e291ec42b6b2e7c08 100644 (file)
@@ -53,7 +53,6 @@ struct thunderx_line {
 struct thunderx_gpio {
        struct gpio_chip        chip;
        u8 __iomem              *register_base;
-       struct irq_domain       *irqd;
        struct msix_entry       *msix_entries;  /* per line MSI-X */
        struct thunderx_line    *line_entries;  /* per line irq info */
        raw_spinlock_t          lock;
@@ -283,54 +282,60 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
        }
 }
 
-static void thunderx_gpio_irq_ack(struct irq_data *data)
+static void thunderx_gpio_irq_ack(struct irq_data *d)
 {
-       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
 
        writeq(GPIO_INTR_INTR,
-              txline->txgpio->register_base + intr_reg(txline->line));
+              txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
 }
 
-static void thunderx_gpio_irq_mask(struct irq_data *data)
+static void thunderx_gpio_irq_mask(struct irq_data *d)
 {
-       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
 
        writeq(GPIO_INTR_ENA_W1C,
-              txline->txgpio->register_base + intr_reg(txline->line));
+              txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
 }
 
-static void thunderx_gpio_irq_mask_ack(struct irq_data *data)
+static void thunderx_gpio_irq_mask_ack(struct irq_data *d)
 {
-       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
 
        writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR,
-              txline->txgpio->register_base + intr_reg(txline->line));
+              txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
 }
 
-static void thunderx_gpio_irq_unmask(struct irq_data *data)
+static void thunderx_gpio_irq_unmask(struct irq_data *d)
 {
-       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
 
        writeq(GPIO_INTR_ENA_W1S,
-              txline->txgpio->register_base + intr_reg(txline->line));
+              txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
 }
 
-static int thunderx_gpio_irq_set_type(struct irq_data *data,
+static int thunderx_gpio_irq_set_type(struct irq_data *d,
                                      unsigned int flow_type)
 {
-       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
-       struct thunderx_gpio *txgpio = txline->txgpio;
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+       struct thunderx_line *txline =
+               &txgpio->line_entries[irqd_to_hwirq(d)];
        u64 bit_cfg;
 
-       irqd_set_trigger_type(data, flow_type);
+       irqd_set_trigger_type(d, flow_type);
 
        bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN;
 
        if (flow_type & IRQ_TYPE_EDGE_BOTH) {
-               irq_set_handler_locked(data, handle_fasteoi_ack_irq);
+               irq_set_handler_locked(d, handle_fasteoi_ack_irq);
                bit_cfg |= GPIO_BIT_CFG_INT_TYPE;
        } else {
-               irq_set_handler_locked(data, handle_fasteoi_mask_irq);
+               irq_set_handler_locked(d, handle_fasteoi_mask_irq);
        }
 
        raw_spin_lock(&txgpio->lock);
@@ -359,33 +364,6 @@ static void thunderx_gpio_irq_disable(struct irq_data *data)
        irq_chip_disable_parent(data);
 }
 
-static int thunderx_gpio_irq_request_resources(struct irq_data *data)
-{
-       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
-       struct thunderx_gpio *txgpio = txline->txgpio;
-       int r;
-
-       r = gpiochip_lock_as_irq(&txgpio->chip, txline->line);
-       if (r)
-               return r;
-
-       r = irq_chip_request_resources_parent(data);
-       if (r)
-               gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
-
-       return r;
-}
-
-static void thunderx_gpio_irq_release_resources(struct irq_data *data)
-{
-       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
-       struct thunderx_gpio *txgpio = txline->txgpio;
-
-       irq_chip_release_resources_parent(data);
-
-       gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
-}
-
 /*
  * Interrupts are chained from underlying MSI-X vectors.  We have
  * these irq_chip functions to be able to handle level triggering
@@ -402,48 +380,22 @@ static struct irq_chip thunderx_gpio_irq_chip = {
        .irq_unmask             = thunderx_gpio_irq_unmask,
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_affinity       = irq_chip_set_affinity_parent,
-       .irq_request_resources  = thunderx_gpio_irq_request_resources,
-       .irq_release_resources  = thunderx_gpio_irq_release_resources,
        .irq_set_type           = thunderx_gpio_irq_set_type,
 
        .flags                  = IRQCHIP_SET_TYPE_MASKED
 };
 
-static int thunderx_gpio_irq_translate(struct irq_domain *d,
-                                      struct irq_fwspec *fwspec,
-                                      irq_hw_number_t *hwirq,
-                                      unsigned int *type)
+static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+                                              unsigned int child,
+                                              unsigned int child_type,
+                                              unsigned int *parent,
+                                              unsigned int *parent_type)
 {
-       struct thunderx_gpio *txgpio = d->host_data;
-
-       if (WARN_ON(fwspec->param_count < 2))
-               return -EINVAL;
-       if (fwspec->param[0] >= txgpio->chip.ngpio)
-               return -EINVAL;
-       *hwirq = fwspec->param[0];
-       *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
-       return 0;
-}
-
-static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq,
-                                  unsigned int nr_irqs, void *arg)
-{
-       struct thunderx_line *txline = arg;
+       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
 
-       return irq_domain_set_hwirq_and_chip(d, virq, txline->line,
-                                            &thunderx_gpio_irq_chip, txline);
-}
-
-static const struct irq_domain_ops thunderx_gpio_irqd_ops = {
-       .alloc          = thunderx_gpio_irq_alloc,
-       .translate      = thunderx_gpio_irq_translate
-};
-
-static int thunderx_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
-{
-       struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
-
-       return irq_find_mapping(txgpio->irqd, offset);
+       *parent = txgpio->base_msi + (2 * child);
+       *parent_type = IRQ_TYPE_LEVEL_HIGH;
+       return 0;
 }
 
 static int thunderx_gpio_probe(struct pci_dev *pdev,
@@ -453,6 +405,7 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
        struct device *dev = &pdev->dev;
        struct thunderx_gpio *txgpio;
        struct gpio_chip *chip;
+       struct gpio_irq_chip *girq;
        int ngpio, i;
        int err = 0;
 
@@ -497,8 +450,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
        }
 
        txgpio->msix_entries = devm_kcalloc(dev,
-                                         ngpio, sizeof(struct msix_entry),
-                                         GFP_KERNEL);
+                                           ngpio, sizeof(struct msix_entry),
+                                           GFP_KERNEL);
        if (!txgpio->msix_entries) {
                err = -ENOMEM;
                goto out;
@@ -539,27 +492,6 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
        if (err < 0)
                goto out;
 
-       /*
-        * Push GPIO specific irqdomain on hierarchy created as a side
-        * effect of the pci_enable_msix()
-        */
-       txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
-                                                  0, 0, of_node_to_fwnode(dev->of_node),
-                                                  &thunderx_gpio_irqd_ops, txgpio);
-       if (!txgpio->irqd) {
-               err = -ENOMEM;
-               goto out;
-       }
-
-       /* Push on irq_data and the domain for each line. */
-       for (i = 0; i < ngpio; i++) {
-               err = irq_domain_push_irq(txgpio->irqd,
-                                         txgpio->msix_entries[i].vector,
-                                         &txgpio->line_entries[i]);
-               if (err < 0)
-                       dev_err(dev, "irq_domain_push_irq: %d\n", err);
-       }
-
        chip->label = KBUILD_MODNAME;
        chip->parent = dev;
        chip->owner = THIS_MODULE;
@@ -574,11 +506,28 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
        chip->set = thunderx_gpio_set;
        chip->set_multiple = thunderx_gpio_set_multiple;
        chip->set_config = thunderx_gpio_set_config;
-       chip->to_irq = thunderx_gpio_to_irq;
+       girq = &chip->irq;
+       girq->chip = &thunderx_gpio_irq_chip;
+       girq->fwnode = of_node_to_fwnode(dev->of_node);
+       girq->parent_domain =
+               irq_get_irq_data(txgpio->msix_entries[0].vector)->domain;
+       girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq;
+       girq->handler = handle_bad_irq;
+       girq->default_type = IRQ_TYPE_NONE;
+
        err = devm_gpiochip_add_data(dev, chip, txgpio);
        if (err)
                goto out;
 
+       /* Push on irq_data and the domain for each line. */
+       for (i = 0; i < ngpio; i++) {
+               err = irq_domain_push_irq(chip->irq.domain,
+                                         txgpio->msix_entries[i].vector,
+                                         chip);
+               if (err < 0)
+                       dev_err(dev, "irq_domain_push_irq: %d\n", err);
+       }
+
        dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n",
                 ngpio, chip->base);
        return 0;
@@ -593,10 +542,10 @@ static void thunderx_gpio_remove(struct pci_dev *pdev)
        struct thunderx_gpio *txgpio = pci_get_drvdata(pdev);
 
        for (i = 0; i < txgpio->chip.ngpio; i++)
-               irq_domain_pop_irq(txgpio->irqd,
+               irq_domain_pop_irq(txgpio->chip.irq.domain,
                                   txgpio->msix_entries[i].vector);
 
-       irq_domain_remove(txgpio->irqd);
+       irq_domain_remove(txgpio->chip.irq.domain);
 
        pci_set_drvdata(pdev, NULL);
 }
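
In the ThunderX conversion above the driver no longer builds its own irq_domain hierarchy; it only tells gpiolib how a GPIO offset (the child hwirq) maps onto a hwirq in the parent MSI-X domain, and gpiolib creates the hierarchical domain while adding the chip. The sketch below shows the shape of that callback and setup; it is illustrative only, the "baz_" names are hypothetical, and the 2 * child spacing is an assumption made for the example.

/* Illustrative sketch, not part of the commit; "baz_" names and the
 * 2 * child hwirq spacing are assumptions for the example only.
 */
#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

static int baz_child_to_parent_hwirq(struct gpio_chip *gc,
				     unsigned int child,
				     unsigned int child_type,
				     unsigned int *parent,
				     unsigned int *parent_type)
{
	*parent = 2 * child;			/* assumed hwirq layout */
	*parent_type = IRQ_TYPE_LEVEL_HIGH;
	return 0;
}

static void baz_setup_hier_irq(struct gpio_chip *chip,
			       struct irq_chip *baz_irqchip,
			       struct irq_domain *parent_domain,
			       struct fwnode_handle *fwnode)
{
	struct gpio_irq_chip *girq = &chip->irq;

	girq->chip = baz_irqchip;
	girq->fwnode = fwnode;
	girq->parent_domain = parent_domain;
	girq->child_to_parent_hwirq = baz_child_to_parent_hwirq;
	girq->handler = handle_bad_irq;
	girq->default_type = IRQ_TYPE_NONE;
	/* gpiochip_add_data() then creates the hierarchical domain */
}
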
index d5880db7f9d47bae4db8267bf6dd7af1fb655f30..a3109bcaa0ac289911a2fe0b7d9f5882c5afd5e7 100644 (file)
@@ -214,11 +214,23 @@ static const struct dev_pm_ops tqmx86_gpio_dev_pm_ops = {
                           tqmx86_gpio_runtime_resume, NULL)
 };
 
+static void tqmx86_init_irq_valid_mask(struct gpio_chip *chip,
+                                      unsigned long *valid_mask,
+                                      unsigned int ngpios)
+{
+       /* Only GPIOs 4-7 are valid for interrupts. Clear the others */
+       clear_bit(0, valid_mask);
+       clear_bit(1, valid_mask);
+       clear_bit(2, valid_mask);
+       clear_bit(3, valid_mask);
+}
+
 static int tqmx86_gpio_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct tqmx86_gpio_data *gpio;
        struct gpio_chip *chip;
+       struct gpio_irq_chip *girq;
        void __iomem *io_base;
        struct resource *res;
        int ret, irq;
@@ -259,17 +271,10 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
        chip->get = tqmx86_gpio_get;
        chip->set = tqmx86_gpio_set;
        chip->ngpio = TQMX86_NGPIO;
-       chip->irq.need_valid_mask = true;
        chip->parent = pdev->dev.parent;
 
        pm_runtime_enable(&pdev->dev);
 
-       ret = devm_gpiochip_add_data(dev, chip, gpio);
-       if (ret) {
-               dev_err(dev, "Could not register GPIO chip\n");
-               goto out_pm_dis;
-       }
-
        if (irq) {
                struct irq_chip *irq_chip = &gpio->irq_chip;
                u8 irq_status;
@@ -287,23 +292,28 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
                irq_status = tqmx86_gpio_read(gpio, TQMX86_GPIIS);
                tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
 
-               ret = gpiochip_irqchip_add(chip, irq_chip,
-                                          0, handle_simple_irq,
-                                          IRQ_TYPE_EDGE_BOTH);
-               if (ret) {
-                       dev_err(dev, "Could not add irq chip\n");
+               girq = &chip->irq;
+               girq->chip = irq_chip;
+               girq->parent_handler = tqmx86_gpio_irq_handler;
+               girq->num_parents = 1;
+               girq->parents = devm_kcalloc(&pdev->dev, 1,
+                                            sizeof(*girq->parents),
+                                            GFP_KERNEL);
+               if (!girq->parents) {
+                       ret = -ENOMEM;
                        goto out_pm_dis;
                }
-
-               gpiochip_set_chained_irqchip(chip, irq_chip,
-                                            irq, tqmx86_gpio_irq_handler);
+               girq->parents[0] = irq;
+               girq->default_type = IRQ_TYPE_NONE;
+               girq->handler = handle_simple_irq;
+               girq->init_valid_mask = tqmx86_init_irq_valid_mask;
        }
 
-       /* Only GPIOs 4-7 are valid for interrupts. Clear the others */
-       clear_bit(0, chip->irq.valid_mask);
-       clear_bit(1, chip->irq.valid_mask);
-       clear_bit(2, chip->irq.valid_mask);
-       clear_bit(3, chip->irq.valid_mask);
+       ret = devm_gpiochip_add_data(dev, chip, gpio);
+       if (ret) {
+               dev_err(dev, "Could not register GPIO chip\n");
+               goto out_pm_dis;
+       }
 
        dev_info(dev, "GPIO functionality initialized with %d pins\n",
                 chip->ngpio);
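
Both the stmpe and tqmx86 hunks replace direct manipulation of chip->irq.valid_mask (and the need_valid_mask flag) with an init_valid_mask callback that gpiolib invokes while adding the chip. A minimal sketch of that callback follows, illustrative only; the "qux_" name and the rule that lines 0-3 cannot raise interrupts are assumptions for the example.

/* Illustrative sketch, not part of the commit; "qux_" is hypothetical and
 * the "lines 0-3 have no interrupt" rule is an assumed example policy.
 */
#include <linux/bitops.h>
#include <linux/gpio/driver.h>

static void qux_init_irq_valid_mask(struct gpio_chip *gc,
				    unsigned long *valid_mask,
				    unsigned int ngpios)
{
	unsigned int i;

	/* gpiolib hands in a mask with all ngpios bits set; clear the
	 * offsets that must never be mapped as IRQs.
	 */
	for (i = 0; i < 4 && i < ngpios; i++)
		clear_bit(i, valid_mask);
}

/* wired up before registration: chip->irq.init_valid_mask = qux_init_irq_valid_mask; */
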
index 7ba668db171ba5854825b4b6178a5428aeeda2d2..58776f2d69ff84e3c05c90d28a69c2a2248e115f 100644 (file)
@@ -243,6 +243,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
        struct device_node *np = dev->of_node;
        struct vf610_gpio_port *port;
        struct gpio_chip *gc;
+       struct gpio_irq_chip *girq;
        struct irq_chip *ic;
        int i;
        int ret;
@@ -318,10 +319,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
        ic->irq_set_type = vf610_gpio_irq_set_type;
        ic->irq_set_wake = vf610_gpio_irq_set_wake;
 
-       ret = devm_gpiochip_add_data(dev, gc, port);
-       if (ret < 0)
-               return ret;
-
        /* Mask all GPIO interrupts */
        for (i = 0; i < gc->ngpio; i++)
                vf610_gpio_writel(0, port->base + PORT_PCR(i));
@@ -329,15 +326,20 @@ static int vf610_gpio_probe(struct platform_device *pdev)
        /* Clear the interrupt status register for all GPIO's */
        vf610_gpio_writel(~0, port->base + PORT_ISFR);
 
-       ret = gpiochip_irqchip_add(gc, ic, 0, handle_edge_irq, IRQ_TYPE_NONE);
-       if (ret) {
-               dev_err(dev, "failed to add irqchip\n");
-               return ret;
-       }
-       gpiochip_set_chained_irqchip(gc, ic, port->irq,
-                                    vf610_gpio_irq_handler);
+       girq = &gc->irq;
+       girq->chip = ic;
+       girq->parent_handler = vf610_gpio_irq_handler;
+       girq->num_parents = 1;
+       girq->parents = devm_kcalloc(&pdev->dev, 1,
+                                    sizeof(*girq->parents),
+                                    GFP_KERNEL);
+       if (!girq->parents)
+               return -ENOMEM;
+       girq->parents[0] = port->irq;
+       girq->default_type = IRQ_TYPE_NONE;
+       girq->handler = handle_edge_irq;
 
-       return 0;
+       return devm_gpiochip_add_data(dev, gc, port);
 }
 
 static struct platform_driver vf610_gpio_driver = {
index 9b604f13e3029d9411c9cba18c90bdac4c2d89f7..c301c1d56dd289c358727c98b5e4db5229caeda8 100644 (file)
@@ -79,7 +79,7 @@ MODULE_PARM_DESC(gpioa_freq,
 /* ----- begin of gpio a chip -------------------------------------------- */
 
 static int vprbrd_gpioa_get(struct gpio_chip *chip,
-               unsigned offset)
+               unsigned int offset)
 {
        int ret, answer, error = 0;
        struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
@@ -129,7 +129,7 @@ static int vprbrd_gpioa_get(struct gpio_chip *chip,
 }
 
 static void vprbrd_gpioa_set(struct gpio_chip *chip,
-               unsigned offset, int value)
+               unsigned int offset, int value)
 {
        int ret;
        struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
@@ -170,7 +170,7 @@ static void vprbrd_gpioa_set(struct gpio_chip *chip,
 }
 
 static int vprbrd_gpioa_direction_input(struct gpio_chip *chip,
-                       unsigned offset)
+                       unsigned int offset)
 {
        int ret;
        struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
@@ -207,7 +207,7 @@ static int vprbrd_gpioa_direction_input(struct gpio_chip *chip,
 }
 
 static int vprbrd_gpioa_direction_output(struct gpio_chip *chip,
-                       unsigned offset, int value)
+                       unsigned int offset, int value)
 {
        int ret;
        struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
@@ -251,8 +251,8 @@ static int vprbrd_gpioa_direction_output(struct gpio_chip *chip,
 
 /* ----- begin of gpio b chip -------------------------------------------- */
 
-static int vprbrd_gpiob_setdir(struct vprbrd *vb, unsigned offset,
-       unsigned dir)
+static int vprbrd_gpiob_setdir(struct vprbrd *vb, unsigned int offset,
+       unsigned int dir)
 {
        struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
        int ret;
@@ -273,7 +273,7 @@ static int vprbrd_gpiob_setdir(struct vprbrd *vb, unsigned offset,
 }
 
 static int vprbrd_gpiob_get(struct gpio_chip *chip,
-               unsigned offset)
+               unsigned int offset)
 {
        int ret;
        u16 val;
@@ -305,7 +305,7 @@ static int vprbrd_gpiob_get(struct gpio_chip *chip,
 }
 
 static void vprbrd_gpiob_set(struct gpio_chip *chip,
-               unsigned offset, int value)
+               unsigned int offset, int value)
 {
        int ret;
        struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
@@ -338,7 +338,7 @@ static void vprbrd_gpiob_set(struct gpio_chip *chip,
 }
 
 static int vprbrd_gpiob_direction_input(struct gpio_chip *chip,
-                       unsigned offset)
+                       unsigned int offset)
 {
        int ret;
        struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
@@ -359,7 +359,7 @@ static int vprbrd_gpiob_direction_input(struct gpio_chip *chip,
 }
 
 static int vprbrd_gpiob_direction_output(struct gpio_chip *chip,
-                       unsigned offset, int value)
+                       unsigned int offset, int value)
 {
        int ret;
        struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
index 38c01912c7b2313aa917dd48dc03bc23ae137d79..25d86441666edde84bb16fd4884f34604ae56060 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/acpi.h>
 
 #include "gpiolib.h"
+#include "gpiolib-acpi.h"
 
 /* Common property names */
 #define XGENE_NIRQ_PROPERTY            "apm,nr-irqs"
index 54d3359444f356659576d34053cfa25b6f3cad24..d7b16bb9e4e4c515051e5b725416c395873eca37 100644 (file)
@@ -290,6 +290,7 @@ MODULE_DEVICE_TABLE(of, xlp_gpio_of_ids);
 static int xlp_gpio_probe(struct platform_device *pdev)
 {
        struct gpio_chip *gc;
+       struct gpio_irq_chip *girq;
        struct xlp_gpio_priv *priv;
        void __iomem *gpio_base;
        int irq_base, irq, err;
@@ -395,27 +396,27 @@ static int xlp_gpio_probe(struct platform_device *pdev)
                irq_base = 0;
        }
 
+       girq = &gc->irq;
+       girq->chip = &xlp_gpio_irq_chip;
+       girq->parent_handler = xlp_gpio_generic_handler;
+       girq->num_parents = 1;
+       girq->parents = devm_kcalloc(&pdev->dev, 1,
+                                    sizeof(*girq->parents),
+                                    GFP_KERNEL);
+       if (!girq->parents)
+               return -ENOMEM;
+       girq->parents[0] = irq;
+       girq->first = irq_base;
+       girq->default_type = IRQ_TYPE_NONE;
+       girq->handler = handle_level_irq;
+
        err = gpiochip_add_data(gc, priv);
        if (err < 0)
                return err;
 
-       err = gpiochip_irqchip_add(gc, &xlp_gpio_irq_chip, irq_base,
-                               handle_level_irq, IRQ_TYPE_NONE);
-       if (err) {
-               dev_err(&pdev->dev, "Could not connect irqchip to gpiochip!\n");
-               goto out_gpio_remove;
-       }
-
-       gpiochip_set_chained_irqchip(gc, &xlp_gpio_irq_chip, irq,
-                       xlp_gpio_generic_handler);
-
        dev_info(&pdev->dev, "registered %d GPIOs\n", gc->ngpio);
 
        return 0;
-
-out_gpio_remove:
-       gpiochip_remove(gc);
-       return err;
 }
 
 #ifdef CONFIG_ACPI
index 8637adb6bc205c16c6ed3768f5ae9d6cd39e0b1a..98cbaf0e415e7dc9759f8a69e75066e523ef267c 100644 (file)
@@ -215,6 +215,7 @@ static int zx_gpio_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct zx_gpio *chip;
+       struct gpio_irq_chip *girq;
        int irq, id, ret;
 
        chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
@@ -242,32 +243,30 @@ static int zx_gpio_probe(struct platform_device *pdev)
        chip->gc.parent = dev;
        chip->gc.owner = THIS_MODULE;
 
-       ret = gpiochip_add_data(&chip->gc, chip);
-       if (ret)
-               return ret;
-
        /*
         * irq_chip support
         */
        writew_relaxed(0xffff, chip->base + ZX_GPIO_IM);
        writew_relaxed(0, chip->base + ZX_GPIO_IE);
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "invalid IRQ\n");
-               gpiochip_remove(&chip->gc);
-               return -ENODEV;
-       }
+       if (irq < 0)
+               return irq;
+       girq = &chip->gc.irq;
+       girq->chip = &zx_irqchip;
+       girq->parent_handler = zx_irq_handler;
+       girq->num_parents = 1;
+       girq->parents = devm_kcalloc(&pdev->dev, 1,
+                                    sizeof(*girq->parents),
+                                    GFP_KERNEL);
+       if (!girq->parents)
+               return -ENOMEM;
+       girq->parents[0] = irq;
+       girq->default_type = IRQ_TYPE_NONE;
+       girq->handler = handle_simple_irq;
 
-       ret = gpiochip_irqchip_add(&chip->gc, &zx_irqchip,
-                                  0, handle_simple_irq,
-                                  IRQ_TYPE_NONE);
-       if (ret) {
-               dev_err(dev, "could not add irqchip\n");
-               gpiochip_remove(&chip->gc);
+       ret = gpiochip_add_data(&chip->gc, chip);
+       if (ret)
                return ret;
-       }
-       gpiochip_set_chained_irqchip(&chip->gc, &zx_irqchip,
-                                    irq, zx_irq_handler);
 
        platform_set_drvdata(pdev, chip);
        dev_info(dev, "ZX GPIO chip registered\n");
index f241b6c13dbeef7d17b0fdd28075874a429e4fea..cd475ff4bcadbace686d7dc85d4e3ea9985e0589 100644 (file)
@@ -830,6 +830,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
        int ret, bank_num;
        struct zynq_gpio *gpio;
        struct gpio_chip *chip;
+       struct gpio_irq_chip *girq;
        const struct of_device_id *match;
 
        gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
@@ -849,10 +850,8 @@ static int zynq_gpio_probe(struct platform_device *pdev)
                return PTR_ERR(gpio->base_addr);
 
        gpio->irq = platform_get_irq(pdev, 0);
-       if (gpio->irq < 0) {
-               dev_err(&pdev->dev, "invalid IRQ\n");
+       if (gpio->irq < 0)
                return gpio->irq;
-       }
 
        /* configure the gpio chip */
        chip = &gpio->chip;
@@ -887,34 +886,38 @@ static int zynq_gpio_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_pm_dis;
 
-       /* report a bug if gpio chip registration fails */
-       ret = gpiochip_add_data(chip, gpio);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to add gpio chip\n");
-               goto err_pm_put;
-       }
-
        /* disable interrupts for all banks */
        for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++)
                writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
                               ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
 
-       ret = gpiochip_irqchip_add(chip, &zynq_gpio_edge_irqchip, 0,
-                                  handle_level_irq, IRQ_TYPE_NONE);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to add irq chip\n");
-               goto err_rm_gpiochip;
+       /* Set up the GPIO irqchip */
+       girq = &chip->irq;
+       girq->chip = &zynq_gpio_edge_irqchip;
+       girq->parent_handler = zynq_gpio_irqhandler;
+       girq->num_parents = 1;
+       girq->parents = devm_kcalloc(&pdev->dev, 1,
+                                    sizeof(*girq->parents),
+                                    GFP_KERNEL);
+       if (!girq->parents) {
+               ret = -ENOMEM;
+               goto err_pm_put;
        }
+       girq->parents[0] = gpio->irq;
+       girq->default_type = IRQ_TYPE_NONE;
+       girq->handler = handle_level_irq;
 
-       gpiochip_set_chained_irqchip(chip, &zynq_gpio_edge_irqchip, gpio->irq,
-                                    zynq_gpio_irqhandler);
+       /* report a bug if gpio chip registration fails */
+       ret = gpiochip_add_data(chip, gpio);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to add gpio chip\n");
+               goto err_pm_put;
+       }
 
        pm_runtime_put(&pdev->dev);
 
        return 0;
 
-err_rm_gpiochip:
-       gpiochip_remove(chip);
 err_pm_put:
        pm_runtime_put(&pdev->dev);
 err_pm_dis:
index 39f2f9035c11df2b7c0d7446745a0c83cf7e0cc2..609ed16ae9333bd25f154e3f4f399d6cfeda5234 100644 (file)
@@ -7,6 +7,7 @@
  *          Mika Westerberg <mika.westerberg@linux.intel.com>
  */
 
+#include <linux/dmi.h>
 #include <linux/errno.h>
 #include <linux/gpio/consumer.h>
 #include <linux/gpio/driver.h>
 #include <linux/pinctrl/pinctrl.h>
 
 #include "gpiolib.h"
+#include "gpiolib-acpi.h"
+
+static int run_edge_events_on_boot = -1;
+module_param(run_edge_events_on_boot, int, 0444);
+MODULE_PARM_DESC(run_edge_events_on_boot,
+                "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
 
 /**
  * struct acpi_gpio_event - ACPI GPIO event handler data
@@ -170,10 +177,13 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
        event->irq_requested = true;
 
        /* Make sure we trigger the initial state of edge-triggered IRQs */
-       value = gpiod_get_raw_value_cansleep(event->desc);
-       if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-           ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
-               event->handler(event->irq, event);
+       if (run_edge_events_on_boot &&
+           (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
+               value = gpiod_get_raw_value_cansleep(event->desc);
+               if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+                   ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+                       event->handler(event->irq, event);
+       }
 }
 
 static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
@@ -382,6 +392,13 @@ int acpi_dev_add_driver_gpios(struct acpi_device *adev,
 }
 EXPORT_SYMBOL_GPL(acpi_dev_add_driver_gpios);
 
+void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
+{
+       if (adev)
+               adev->driver_gpios = NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_remove_driver_gpios);
+
 static void devm_acpi_dev_release_driver_gpios(struct device *dev, void *res)
 {
        acpi_dev_remove_driver_gpios(ACPI_COMPANION(dev));
@@ -720,6 +737,16 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
        return ret ? ERR_PTR(ret) : lookup.desc;
 }
 
+static bool acpi_can_fallback_to_crs(struct acpi_device *adev,
+                                    const char *con_id)
+{
+       /* Never allow fallback if the device has properties */
+       if (acpi_dev_has_props(adev) || adev->driver_gpios)
+               return false;
+
+       return con_id == NULL;
+}
+
 struct gpio_desc *acpi_find_gpio(struct device *dev,
                                 const char *con_id,
                                 unsigned int idx,
@@ -1256,15 +1283,6 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
        return count ? count : -ENOENT;
 }
 
-bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
-{
-       /* Never allow fallback if the device has properties */
-       if (acpi_dev_has_props(adev) || adev->driver_gpios)
-               return false;
-
-       return con_id == NULL;
-}
-
 /* Run deferred acpi_gpiochip_request_irqs() */
 static int acpi_gpio_handle_deferred_request_irqs(void)
 {
@@ -1283,3 +1301,28 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
 late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
+
+static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
+               }
+       },
+       {} /* Terminating entry */
+};
+
+static int acpi_gpio_setup_params(void)
+{
+       if (run_edge_events_on_boot < 0) {
+               if (dmi_check_system(run_edge_events_on_boot_blacklist))
+                       run_edge_events_on_boot = 0;
+               else
+                       run_edge_events_on_boot = 1;
+       }
+
+       return 0;
+}
+
+/* Directly after dmi_setup() which runs as core_initcall() */
+postcore_initcall(acpi_gpio_setup_params);
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
new file mode 100644 (file)
index 0000000..1c6d65c
--- /dev/null
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ACPI helpers for GPIO API
+ *
+ * Copyright (C) 2012,2019 Intel Corporation
+ */
+
+#ifndef GPIOLIB_ACPI_H
+#define GPIOLIB_ACPI_H
+
+struct acpi_device;
+
+/**
+ * struct acpi_gpio_info - ACPI GPIO specific information
+ * @adev: reference to ACPI device which consumes GPIO resource
+ * @flags: GPIO initialization flags
+ * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
+ * @pin_config: pin bias as provided by ACPI
+ * @polarity: interrupt polarity as provided by ACPI
+ * @triggering: triggering type as provided by ACPI
+ * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
+ */
+struct acpi_gpio_info {
+       struct acpi_device *adev;
+       enum gpiod_flags flags;
+       bool gpioint;
+       int pin_config;
+       int polarity;
+       int triggering;
+       unsigned int quirks;
+};
+
+#ifdef CONFIG_ACPI
+void acpi_gpiochip_add(struct gpio_chip *chip);
+void acpi_gpiochip_remove(struct gpio_chip *chip);
+
+void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
+void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
+
+int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
+                                struct acpi_gpio_info *info);
+int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+                                       struct acpi_gpio_info *info);
+
+struct gpio_desc *acpi_find_gpio(struct device *dev,
+                                const char *con_id,
+                                unsigned int idx,
+                                enum gpiod_flags *dflags,
+                                unsigned long *lookupflags);
+struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
+                                     const char *propname, int index,
+                                     struct acpi_gpio_info *info);
+
+int acpi_gpio_count(struct device *dev, const char *con_id);
+#else
+static inline void acpi_gpiochip_add(struct gpio_chip *chip) { }
+static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { }
+
+static inline void
+acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
+
+static inline void
+acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
+
+static inline int
+acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info)
+{
+       return 0;
+}
+static inline int
+acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+                                   struct acpi_gpio_info *info)
+{
+       return 0;
+}
+
+static inline struct gpio_desc *
+acpi_find_gpio(struct device *dev, const char *con_id,
+              unsigned int idx, enum gpiod_flags *dflags,
+              unsigned long *lookupflags)
+{
+       return ERR_PTR(-ENOENT);
+}
+static inline struct gpio_desc *
+acpi_node_get_gpiod(struct fwnode_handle *fwnode, const char *propname,
+                   int index, struct acpi_gpio_info *info)
+{
+       return ERR_PTR(-ENXIO);
+}
+static inline int acpi_gpio_count(struct device *dev, const char *con_id)
+{
+       return -ENODEV;
+}
+#endif
+
+#endif /* GPIOLIB_ACPI_H */
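
The new header above pairs real declarations with static inline stubs under CONFIG_ACPI, so gpiolib core code can call these helpers unconditionally. A small hedged sketch of a caller, not part of the commit; the "quux_" name is hypothetical.

/* Illustrative sketch, not part of the commit; "quux_" is hypothetical. */
#include <linux/err.h>
#include <linux/gpio/consumer.h>

#include "gpiolib-acpi.h"

static struct gpio_desc *quux_find_acpi_gpio(struct device *dev,
					     const char *con_id,
					     enum gpiod_flags *dflags,
					     unsigned long *lookupflags)
{
	struct gpio_desc *desc;

	/* With CONFIG_ACPI=n this resolves to the static inline stub and
	 * simply returns ERR_PTR(-ENOENT); the caller needs no #ifdef.
	 */
	desc = acpi_find_gpio(dev, con_id, 0, dflags, lookupflags);
	if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
		return NULL;	/* not found here; try another lookup path */

	return desc;		/* valid descriptor or a hard error */
}
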
index 0acc2cc6e868fdefec661540d97513959e00575e..98e3c20d9730e66ad46bff8317e96fb4996eda32 100644 (file)
@@ -59,7 +59,7 @@ struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
 {
        return devm_gpiod_get_index(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL(devm_gpiod_get);
+EXPORT_SYMBOL_GPL(devm_gpiod_get);
 
 /**
  * devm_gpiod_get_optional - Resource-managed gpiod_get_optional()
@@ -77,7 +77,7 @@ struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
 {
        return devm_gpiod_get_index_optional(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL(devm_gpiod_get_optional);
+EXPORT_SYMBOL_GPL(devm_gpiod_get_optional);
 
 /**
  * devm_gpiod_get_index - Resource-managed gpiod_get_index()
@@ -127,7 +127,7 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
 
        return desc;
 }
-EXPORT_SYMBOL(devm_gpiod_get_index);
+EXPORT_SYMBOL_GPL(devm_gpiod_get_index);
 
 /**
  * devm_gpiod_get_from_of_node() - obtain a GPIO from an OF node
@@ -182,7 +182,7 @@ struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
 
        return desc;
 }
-EXPORT_SYMBOL(devm_gpiod_get_from_of_node);
+EXPORT_SYMBOL_GPL(devm_gpiod_get_from_of_node);
 
 /**
  * devm_fwnode_get_index_gpiod_from_child - get a GPIO descriptor from a
@@ -239,7 +239,7 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
 
        return desc;
 }
-EXPORT_SYMBOL(devm_fwnode_get_index_gpiod_from_child);
+EXPORT_SYMBOL_GPL(devm_fwnode_get_index_gpiod_from_child);
 
 /**
  * devm_gpiod_get_index_optional - Resource-managed gpiod_get_index_optional()
@@ -268,7 +268,7 @@ struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev,
 
        return desc;
 }
-EXPORT_SYMBOL(devm_gpiod_get_index_optional);
+EXPORT_SYMBOL_GPL(devm_gpiod_get_index_optional);
 
 /**
  * devm_gpiod_get_array - Resource-managed gpiod_get_array()
@@ -303,7 +303,7 @@ struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
 
        return descs;
 }
-EXPORT_SYMBOL(devm_gpiod_get_array);
+EXPORT_SYMBOL_GPL(devm_gpiod_get_array);
 
 /**
  * devm_gpiod_get_array_optional - Resource-managed gpiod_get_array_optional()
@@ -328,7 +328,7 @@ devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
 
        return descs;
 }
-EXPORT_SYMBOL(devm_gpiod_get_array_optional);
+EXPORT_SYMBOL_GPL(devm_gpiod_get_array_optional);
 
 /**
  * devm_gpiod_put - Resource-managed gpiod_put()
@@ -344,7 +344,7 @@ void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
        WARN_ON(devres_release(dev, devm_gpiod_release, devm_gpiod_match,
                &desc));
 }
-EXPORT_SYMBOL(devm_gpiod_put);
+EXPORT_SYMBOL_GPL(devm_gpiod_put);
 
 /**
  * devm_gpiod_unhinge - Remove resource management from a gpio descriptor
@@ -374,7 +374,7 @@ void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc)
        /* Anything else we should warn about */
        WARN_ON(ret);
 }
-EXPORT_SYMBOL(devm_gpiod_unhinge);
+EXPORT_SYMBOL_GPL(devm_gpiod_unhinge);
 
 /**
  * devm_gpiod_put_array - Resource-managed gpiod_put_array()
@@ -390,7 +390,7 @@ void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
        WARN_ON(devres_release(dev, devm_gpiod_release_array,
                               devm_gpiod_match_array, &descs));
 }
-EXPORT_SYMBOL(devm_gpiod_put_array);
+EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
 
 
 
@@ -444,7 +444,7 @@ int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
 
        return 0;
 }
-EXPORT_SYMBOL(devm_gpio_request);
+EXPORT_SYMBOL_GPL(devm_gpio_request);
 
 /**
  *     devm_gpio_request_one - request a single GPIO with initial setup
@@ -474,7 +474,7 @@ int devm_gpio_request_one(struct device *dev, unsigned gpio,
 
        return 0;
 }
-EXPORT_SYMBOL(devm_gpio_request_one);
+EXPORT_SYMBOL_GPL(devm_gpio_request_one);
 
 /**
  *      devm_gpio_free - free a GPIO
@@ -492,4 +492,4 @@ void devm_gpio_free(struct device *dev, unsigned int gpio)
        WARN_ON(devres_release(dev, devm_gpio_release, devm_gpio_match,
                &gpio));
 }
-EXPORT_SYMBOL(devm_gpio_free);
+EXPORT_SYMBOL_GPL(devm_gpio_free);
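These resource-managed helpers are now exported GPL-only; consumers are otherwise unchanged. A minimal consumer sketch, assuming a hypothetical platform driver and <linux/gpio/consumer.h>:

    static int foo_probe(struct platform_device *pdev)
    {
            struct gpio_desc *reset;

            /* "reset" maps to "reset-gpios"/"reset-gpio" in DT, or an ACPI/board mapping */
            reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
            if (IS_ERR(reset))
                    return PTR_ERR(reset);

            gpiod_set_value(reset, 1);      /* logical value, honours active-low */
            return 0;
    }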
index 9762dd6d99fa85181d542703449f7f12a5432b58..1eea2c6c2e1d827453dd16dc4c7831f5bd7d600c 100644 (file)
 #include <linux/gpio/machine.h>
 
 #include "gpiolib.h"
+#include "gpiolib-of.h"
+
+/*
+ * This is used by external users of of_gpio_count() from <linux/of_gpio.h>
+ *
+ * FIXME: get rid of those external users by converting them to GPIO
+ * descriptors and let them all use gpiod_get_count()
+ */
+int of_gpio_get_count(struct device *dev, const char *con_id)
+{
+       int ret;
+       char propname[32];
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
+               if (con_id)
+                       snprintf(propname, sizeof(propname), "%s-%s",
+                                con_id, gpio_suffixes[i]);
+               else
+                       snprintf(propname, sizeof(propname), "%s",
+                                gpio_suffixes[i]);
+
+               ret = of_gpio_named_count(dev->of_node, propname);
+               if (ret > 0)
+                       break;
+       }
+       return ret ? ret : -ENOENT;
+}
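of_gpio_get_count() is what gpiod_count() now calls for DT devices, so a con_id is expanded with the usual suffixes. A consumer-side sketch with an invented "led" con_id:

    static int example_count_leds(struct device *dev)
    {
            /* counts "led-gpios", then "led-gpio"; -ENOENT if neither property exists */
            return gpiod_count(dev, "led");
    }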
 
 static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
 {
@@ -53,6 +81,23 @@ static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
        return gpiochip_get_desc(chip, ret);
 }
 
+/**
+ * of_gpio_need_valid_mask() - figure out if the OF GPIO driver needs
+ * to set the .valid_mask
+ * @dev: the device for the GPIO provider
+ * @return: true if the valid mask needs to be set
+ */
+bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
+{
+       int size;
+       struct device_node *np = gc->of_node;
+
+       size = of_property_count_u32_elems(np,  "gpio-reserved-ranges");
+       if (size > 0 && size % 2 == 0)
+               return true;
+       return false;
+}
+
 static void of_gpio_flags_quirks(struct device_node *np,
                                 const char *propname,
                                 enum of_gpio_flags *flags,
@@ -178,7 +223,7 @@ static void of_gpio_flags_quirks(struct device_node *np,
  * value on the error condition. If @flags is not NULL the function also fills
  * in flags for the GPIO.
  */
-struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
+static struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
                     const char *propname, int index, enum of_gpio_flags *flags)
 {
        struct of_phandle_args gpiospec;
@@ -229,7 +274,76 @@ int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
        else
                return desc_to_gpio(desc);
 }
-EXPORT_SYMBOL(of_get_named_gpio_flags);
+EXPORT_SYMBOL_GPL(of_get_named_gpio_flags);
+
+/**
+ * gpiod_get_from_of_node() - obtain a GPIO from an OF node
+ * @node:      handle of the OF node
+ * @propname:  name of the DT property representing the GPIO
+ * @index:     index of the GPIO to obtain for the consumer
+ * @dflags:    GPIO initialization flags
+ * @label:     label to attach to the requested GPIO
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+ * provided @dflags.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+                                        const char *propname, int index,
+                                        enum gpiod_flags dflags,
+                                        const char *label)
+{
+       unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+       struct gpio_desc *desc;
+       enum of_gpio_flags flags;
+       bool active_low = false;
+       bool single_ended = false;
+       bool open_drain = false;
+       bool transitory = false;
+       int ret;
+
+       desc = of_get_named_gpiod_flags(node, propname,
+                                       index, &flags);
+
+       if (!desc || IS_ERR(desc)) {
+               return desc;
+       }
+
+       active_low = flags & OF_GPIO_ACTIVE_LOW;
+       single_ended = flags & OF_GPIO_SINGLE_ENDED;
+       open_drain = flags & OF_GPIO_OPEN_DRAIN;
+       transitory = flags & OF_GPIO_TRANSITORY;
+
+       ret = gpiod_request(desc, label);
+       if (ret == -EBUSY && (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
+               return desc;
+       if (ret)
+               return ERR_PTR(ret);
+
+       if (active_low)
+               lflags |= GPIO_ACTIVE_LOW;
+
+       if (single_ended) {
+               if (open_drain)
+                       lflags |= GPIO_OPEN_DRAIN;
+               else
+                       lflags |= GPIO_OPEN_SOURCE;
+       }
+
+       if (transitory)
+               lflags |= GPIO_TRANSITORY;
+
+       ret = gpiod_configure_flags(desc, propname, lflags, dflags);
+       if (ret < 0) {
+               gpiod_put(desc);
+               return ERR_PTR(ret);
+       }
+
+       return desc;
+}
+EXPORT_SYMBOL_GPL(gpiod_get_from_of_node);
 
 /*
  * The SPI GPIO bindings happened before we managed to establish that GPIO
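gpiod_get_from_of_node() keeps its semantics after the move into gpiolib-of.c; it remains the way to request a GPIO by raw OF node rather than by consumer device. A usage sketch with made-up property and label names:

    static struct gpio_desc *example_get_enable(struct device_node *child)
    {
            return gpiod_get_from_of_node(child, "enable-gpios", 0,
                                          GPIOD_OUT_HIGH, "example-enable");
    }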
@@ -324,6 +438,19 @@ static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *
        return desc;
 }
 
+static struct gpio_desc *of_find_arizona_gpio(struct device *dev,
+                                             const char *con_id,
+                                             enum of_gpio_flags *of_flags)
+{
+       if (!IS_ENABLED(CONFIG_MFD_ARIZONA))
+               return ERR_PTR(-ENOENT);
+
+       if (!con_id || strcmp(con_id, "wlf,reset"))
+               return ERR_PTR(-ENOENT);
+
+       return of_get_named_gpiod_flags(dev->of_node, con_id, 0, of_flags);
+}
+
 struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
                               unsigned int idx, unsigned long *flags)
 {
@@ -343,36 +470,30 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
 
                desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
                                                &of_flags);
-               /*
-                * -EPROBE_DEFER in our case means that we found a
-                * valid GPIO property, but no controller has been
-                * registered so far.
-                *
-                * This means we don't need to look any further for
-                * alternate name conventions, and we should really
-                * preserve the return code for our user to be able to
-                * retry probing later.
-                */
-               if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER)
-                       return desc;
 
-               if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
+               if (!IS_ERR(desc) || PTR_ERR(desc) != -ENOENT)
                        break;
        }
 
-       /* Special handling for SPI GPIOs if used */
-       if (IS_ERR(desc))
+       if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+               /* Special handling for SPI GPIOs if used */
                desc = of_find_spi_gpio(dev, con_id, &of_flags);
-       if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) {
+       }
+
+       if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
                /* This quirk looks up flags and all */
                desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
                if (!IS_ERR(desc))
                        return desc;
        }
 
-       /* Special handling for regulator GPIOs if used */
-       if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
+       if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+               /* Special handling for regulator GPIOs if used */
                desc = of_find_regulator_gpio(dev, con_id, &of_flags);
+       }
+
+       if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
+               desc = of_find_arizona_gpio(dev, con_id, &of_flags);
 
        if (IS_ERR(desc))
                return desc;
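The quirk chain above now only falls through on -ENOENT, so -EPROBE_DEFER (and any other error) is propagated immediately. A further quirk inside gpiolib-of.c would follow the same pattern as of_find_arizona_gpio(); this sketch uses an invented Kconfig symbol and binding name:

    static struct gpio_desc *of_find_example_gpio(struct device *dev,
                                                  const char *con_id,
                                                  enum of_gpio_flags *of_flags)
    {
            if (!IS_ENABLED(CONFIG_EXAMPLE))
                    return ERR_PTR(-ENOENT);        /* quirk compiled out */

            if (!con_id || strcmp(con_id, "example,enable"))
                    return ERR_PTR(-ENOENT);        /* not our binding, keep walking the chain */

            return of_get_named_gpiod_flags(dev->of_node, con_id, 0, of_flags);
    }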
@@ -523,8 +644,9 @@ static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
  * GPIO chips. This function performs only one sanity check: whether GPIO
  * is less than ngpios (that is specified in the gpio_chip).
  */
-int of_gpio_simple_xlate(struct gpio_chip *gc,
-                        const struct of_phandle_args *gpiospec, u32 *flags)
+static int of_gpio_simple_xlate(struct gpio_chip *gc,
+                               const struct of_phandle_args *gpiospec,
+                               u32 *flags)
 {
        /*
         * We're discouraging gpio_cells < 2, since that way you'll have to
@@ -548,7 +670,6 @@ int of_gpio_simple_xlate(struct gpio_chip *gc,
 
        return gpiospec->args[0];
 }
-EXPORT_SYMBOL(of_gpio_simple_xlate);
 
 /**
  * of_mm_gpiochip_add_data - Add memory mapped GPIO chip (bank)
@@ -605,7 +726,7 @@ err0:
        pr_err("%pOF: GPIO chip registration failed with status %d\n", np, ret);
        return ret;
 }
-EXPORT_SYMBOL(of_mm_gpiochip_add_data);
+EXPORT_SYMBOL_GPL(of_mm_gpiochip_add_data);
 
 /**
  * of_mm_gpiochip_remove - Remove memory mapped GPIO chip (bank)
@@ -622,7 +743,7 @@ void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc)
        iounmap(mm_gc->regs);
        kfree(gc->label);
 }
-EXPORT_SYMBOL(of_mm_gpiochip_remove);
+EXPORT_SYMBOL_GPL(of_mm_gpiochip_remove);
 
 static void of_gpiochip_init_valid_mask(struct gpio_chip *chip)
 {
@@ -734,7 +855,7 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip) { return 0; }
 
 int of_gpiochip_add(struct gpio_chip *chip)
 {
-       int status;
+       int ret;
 
        if (!chip->of_node)
                return 0;
@@ -749,9 +870,9 @@ int of_gpiochip_add(struct gpio_chip *chip)
 
        of_gpiochip_init_valid_mask(chip);
 
-       status = of_gpiochip_add_pin_range(chip);
-       if (status)
-               return status;
+       ret = of_gpiochip_add_pin_range(chip);
+       if (ret)
+               return ret;
 
        /* If the chip defines names itself, these take precedence */
        if (!chip->names)
@@ -760,13 +881,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
 
        of_node_get(chip->of_node);
 
-       status = of_gpiochip_scan_gpios(chip);
-       if (status) {
+       ret = of_gpiochip_scan_gpios(chip);
+       if (ret) {
                of_node_put(chip->of_node);
                gpiochip_remove_pin_ranges(chip);
        }
 
-       return status;
+       return ret;
 }
 
 void of_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
new file mode 100644 (file)
index 0000000..9768831
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef GPIOLIB_OF_H
+#define GPIOLIB_OF_H
+
+struct gpio_chip;
+enum of_gpio_flags;
+
+#ifdef CONFIG_OF_GPIO
+struct gpio_desc *of_find_gpio(struct device *dev,
+                              const char *con_id,
+                              unsigned int idx,
+                              unsigned long *lookupflags);
+int of_gpiochip_add(struct gpio_chip *gc);
+void of_gpiochip_remove(struct gpio_chip *gc);
+int of_gpio_get_count(struct device *dev, const char *con_id);
+bool of_gpio_need_valid_mask(const struct gpio_chip *gc);
+#else
+static inline struct gpio_desc *of_find_gpio(struct device *dev,
+                                            const char *con_id,
+                                            unsigned int idx,
+                                            unsigned long *lookupflags)
+{
+       return ERR_PTR(-ENOENT);
+}
+static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
+static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
+static inline int of_gpio_get_count(struct device *dev, const char *con_id)
+{
+       return 0;
+}
+static inline bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
+{
+       return false;
+}
+#endif /* CONFIG_OF_GPIO */
+
+#endif /* GPIOLIB_OF_H */
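Like gpiolib-acpi.h, this private header provides no-op stubs so that gpiolib.c can call the OF helpers without #ifdef CONFIG_OF_GPIO. A sketch of the resulting call-site shape, using a hypothetical wrapper that mirrors gpiochip_alloc_valid_mask() below:

    static bool example_needs_valid_mask(struct gpio_chip *gc)
    {
            /* the !CONFIG_OF_GPIO stub simply returns false */
            return of_gpio_need_valid_mask(gc) || gc->init_valid_mask;
    }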
index cca749010cd00afe966ac43ae91ae35b756092aa..822988818efcb201926344e77de9724076a4b232 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/gpio.h>
-#include <linux/of_gpio.h>
 #include <linux/idr.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
@@ -30,6 +29,8 @@
 #include <uapi/linux/gpio.h>
 
 #include "gpiolib.h"
+#include "gpiolib-of.h"
+#include "gpiolib-acpi.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/gpio.h>
@@ -213,7 +214,7 @@ int gpiod_get_direction(struct gpio_desc *desc)
 {
        struct gpio_chip *chip;
        unsigned offset;
-       int status;
+       int ret;
 
        chip = gpiod_to_chip(desc);
        offset = gpio_chip_hwgpio(desc);
@@ -221,17 +222,17 @@ int gpiod_get_direction(struct gpio_desc *desc)
        if (!chip->get_direction)
                return -ENOTSUPP;
 
-       status = chip->get_direction(chip, offset);
-       if (status > 0) {
+       ret = chip->get_direction(chip, offset);
+       if (ret > 0) {
                /* GPIOF_DIR_IN, or other positive */
-               status = 1;
+               ret = 1;
                clear_bit(FLAG_IS_OUT, &desc->flags);
        }
-       if (status == 0) {
+       if (ret == 0) {
                /* GPIOF_DIR_OUT */
                set_bit(FLAG_IS_OUT, &desc->flags);
        }
-       return status;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(gpiod_get_direction);
 
@@ -350,7 +351,7 @@ static unsigned long *gpiochip_allocate_mask(struct gpio_chip *chip)
 {
        unsigned long *p;
 
-       p = kmalloc_array(BITS_TO_LONGS(chip->ngpio), sizeof(*p), GFP_KERNEL);
+       p = bitmap_alloc(chip->ngpio, GFP_KERNEL);
        if (!p)
                return NULL;
 
@@ -360,38 +361,31 @@ static unsigned long *gpiochip_allocate_mask(struct gpio_chip *chip)
        return p;
 }
 
-static int gpiochip_alloc_valid_mask(struct gpio_chip *gpiochip)
+static int gpiochip_alloc_valid_mask(struct gpio_chip *gc)
 {
-#ifdef CONFIG_OF_GPIO
-       int size;
-       struct device_node *np = gpiochip->of_node;
-
-       size = of_property_count_u32_elems(np,  "gpio-reserved-ranges");
-       if (size > 0 && size % 2 == 0)
-               gpiochip->need_valid_mask = true;
-#endif
-
-       if (!gpiochip->need_valid_mask)
+       if (!(of_gpio_need_valid_mask(gc) || gc->init_valid_mask))
                return 0;
 
-       gpiochip->valid_mask = gpiochip_allocate_mask(gpiochip);
-       if (!gpiochip->valid_mask)
+       gc->valid_mask = gpiochip_allocate_mask(gc);
+       if (!gc->valid_mask)
                return -ENOMEM;
 
        return 0;
 }
 
-static int gpiochip_init_valid_mask(struct gpio_chip *gpiochip)
+static int gpiochip_init_valid_mask(struct gpio_chip *gc)
 {
-       if (gpiochip->init_valid_mask)
-               return gpiochip->init_valid_mask(gpiochip);
+       if (gc->init_valid_mask)
+               return gc->init_valid_mask(gc,
+                                          gc->valid_mask,
+                                          gc->ngpio);
 
        return 0;
 }
 
 static void gpiochip_free_valid_mask(struct gpio_chip *gpiochip)
 {
-       kfree(gpiochip->valid_mask);
+       bitmap_free(gpiochip->valid_mask);
        gpiochip->valid_mask = NULL;
 }
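The ->init_valid_mask() callback now receives the mask and line count instead of poking at chip fields itself; gc->irq.init_valid_mask (further below) takes the same arguments. A driver-side sketch under that assumption, with an invented reserved range:

    static int foo_init_valid_mask(struct gpio_chip *gc,
                                   unsigned long *valid_mask,
                                   unsigned int ngpios)
    {
            /* lines 4..7 are not routed on this (hypothetical) package */
            bitmap_clear(valid_mask, 4, 4);
            return 0;
    }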
 
@@ -535,6 +529,14 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
        if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
                return -EINVAL;
 
+       /*
+        * Do not allow both INPUT & OUTPUT flags to be set as they are
+        * contradictory.
+        */
+       if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
+           (lflags & GPIOHANDLE_REQUEST_OUTPUT))
+               return -EINVAL;
+
        /*
         * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
         * the hardware actually supports enabling both at the same time the
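For userspace this means a line-handle request asking for both directions at once is now refused up front. A sketch against the v1 character-device ABI, assuming an already-open /dev/gpiochipN descriptor plus <linux/gpio.h> and <sys/ioctl.h>:

    static int example_request_both(int chip_fd)
    {
            struct gpiohandle_request req = {
                    .lines = 1,
                    .lineoffsets = { 3 },
                    .flags = GPIOHANDLE_REQUEST_INPUT | GPIOHANDLE_REQUEST_OUTPUT,
            };

            /* previously accepted; now returns -1 with errno == EINVAL */
            return ioctl(chip_fd, GPIO_GET_LINEHANDLE_IOCTL, &req);
    }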
@@ -857,7 +859,7 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
        }
 
        ret = kfifo_put(&le->events, ge);
-       if (ret != 0)
+       if (ret)
                wake_up_poll(&le->wait, EPOLLIN);
 
        return IRQ_HANDLED;
@@ -926,7 +928,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
        }
 
        /* This is just wrong: we don't look for events on output lines */
-       if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+       if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
+           (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+           (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
                ret = -EINVAL;
                goto out_free_label;
        }
@@ -940,10 +944,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 
        if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
                set_bit(FLAG_ACTIVE_LOW, &desc->flags);
-       if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
-               set_bit(FLAG_OPEN_DRAIN, &desc->flags);
-       if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
-               set_bit(FLAG_OPEN_SOURCE, &desc->flags);
 
        ret = gpiod_direction_input(desc);
        if (ret)
@@ -1176,21 +1176,21 @@ static void gpiodevice_release(struct device *dev)
 
 static int gpiochip_setup_dev(struct gpio_device *gdev)
 {
-       int status;
+       int ret;
 
        cdev_init(&gdev->chrdev, &gpio_fileops);
        gdev->chrdev.owner = THIS_MODULE;
        gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
 
-       status = cdev_device_add(&gdev->chrdev, &gdev->dev);
-       if (status)
-               return status;
+       ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
+       if (ret)
+               return ret;
 
        chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
                 MAJOR(gpio_devt), gdev->id);
 
-       status = gpiochip_sysfs_register(gdev);
-       if (status)
+       ret = gpiochip_sysfs_register(gdev);
+       if (ret)
                goto err_remove_device;
 
        /* From this point, the .release() function cleans up gpio_device */
@@ -1203,7 +1203,7 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
 
 err_remove_device:
        cdev_device_del(&gdev->chrdev, &gdev->dev);
-       return status;
+       return ret;
 }
 
 static void gpiochip_machine_hog(struct gpio_chip *chip, struct gpiod_hog *hog)
@@ -1244,13 +1244,13 @@ static void machine_gpiochip_add(struct gpio_chip *chip)
 static void gpiochip_setup_devs(void)
 {
        struct gpio_device *gdev;
-       int err;
+       int ret;
 
        list_for_each_entry(gdev, &gpio_devices, list) {
-               err = gpiochip_setup_dev(gdev);
-               if (err)
+               ret = gpiochip_setup_dev(gdev);
+               if (ret)
                        pr_err("%s: Failed to initialize gpio device (%d)\n",
-                              dev_name(&gdev->dev), err);
+                              dev_name(&gdev->dev), ret);
        }
 }
 
@@ -1259,7 +1259,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
                               struct lock_class_key *request_key)
 {
        unsigned long   flags;
-       int             status = 0;
+       int             ret = 0;
        unsigned        i;
        int             base = chip->base;
        struct gpio_device *gdev;
@@ -1289,7 +1289,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
        gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL);
        if (gdev->id < 0) {
-               status = gdev->id;
+               ret = gdev->id;
                goto err_free_gdev;
        }
        dev_set_name(&gdev->dev, "gpiochip%d", gdev->id);
@@ -1305,13 +1305,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
        gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
        if (!gdev->descs) {
-               status = -ENOMEM;
+               ret = -ENOMEM;
                goto err_free_ida;
        }
 
        if (chip->ngpio == 0) {
                chip_err(chip, "tried to insert a GPIO chip with zero lines\n");
-               status = -EINVAL;
+               ret = -EINVAL;
                goto err_free_descs;
        }
 
@@ -1321,7 +1321,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
        gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL);
        if (!gdev->label) {
-               status = -ENOMEM;
+               ret = -ENOMEM;
                goto err_free_descs;
        }
 
@@ -1340,7 +1340,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
        if (base < 0) {
                base = gpiochip_find_base(chip->ngpio);
                if (base < 0) {
-                       status = base;
+                       ret = base;
                        spin_unlock_irqrestore(&gpio_lock, flags);
                        goto err_free_label;
                }
@@ -1354,8 +1354,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
        }
        gdev->base = base;
 
-       status = gpiodev_add_to_list(gdev);
-       if (status) {
+       ret = gpiodev_add_to_list(gdev);
+       if (ret) {
                spin_unlock_irqrestore(&gpio_lock, flags);
                goto err_free_label;
        }
@@ -1369,20 +1369,20 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
        INIT_LIST_HEAD(&gdev->pin_ranges);
 #endif
 
-       status = gpiochip_set_desc_names(chip);
-       if (status)
+       ret = gpiochip_set_desc_names(chip);
+       if (ret)
                goto err_remove_from_list;
 
-       status = gpiochip_alloc_valid_mask(chip);
-       if (status)
+       ret = gpiochip_alloc_valid_mask(chip);
+       if (ret)
                goto err_remove_from_list;
 
-       status = of_gpiochip_add(chip);
-       if (status)
+       ret = of_gpiochip_add(chip);
+       if (ret)
                goto err_free_gpiochip_mask;
 
-       status = gpiochip_init_valid_mask(chip);
-       if (status)
+       ret = gpiochip_init_valid_mask(chip);
+       if (ret)
                goto err_remove_of_chip;
 
        for (i = 0; i < chip->ngpio; i++) {
@@ -1405,12 +1405,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
        machine_gpiochip_add(chip);
 
-       status = gpiochip_irqchip_init_valid_mask(chip);
-       if (status)
+       ret = gpiochip_irqchip_init_valid_mask(chip);
+       if (ret)
                goto err_remove_acpi_chip;
 
-       status = gpiochip_add_irqchip(chip, lock_key, request_key);
-       if (status)
+       ret = gpiochip_add_irqchip(chip, lock_key, request_key);
+       if (ret)
                goto err_remove_irqchip_mask;
 
        /*
@@ -1422,8 +1422,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
         * Otherwise, defer until later.
         */
        if (gpiolib_initialized) {
-               status = gpiochip_setup_dev(gdev);
-               if (status)
+               ret = gpiochip_setup_dev(gdev);
+               if (ret)
                        goto err_remove_irqchip;
        }
        return 0;
@@ -1453,9 +1453,9 @@ err_free_gdev:
        /* failures here can mean systems won't boot... */
        pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
               gdev->base, gdev->base + gdev->ngpio - 1,
-              chip->label ? : "generic", status);
+              chip->label ? : "generic", ret);
        kfree(gdev);
-       return status;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key);
 
@@ -1621,21 +1621,25 @@ static struct gpio_chip *find_chip_by_name(const char *name)
  * The following is irqchip helper code for gpiochips.
  */
 
-static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
+static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc)
 {
-       if (!gpiochip->irq.need_valid_mask)
+       struct gpio_irq_chip *girq = &gc->irq;
+
+       if (!girq->init_valid_mask)
                return 0;
 
-       gpiochip->irq.valid_mask = gpiochip_allocate_mask(gpiochip);
-       if (!gpiochip->irq.valid_mask)
+       girq->valid_mask = gpiochip_allocate_mask(gc);
+       if (!girq->valid_mask)
                return -ENOMEM;
 
+       girq->init_valid_mask(gc, girq->valid_mask, gc->ngpio);
+
        return 0;
 }
 
 static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
 {
-       kfree(gpiochip->irq.valid_mask);
+       bitmap_free(gpiochip->irq.valid_mask);
        gpiochip->irq.valid_mask = NULL;
 }
 
@@ -1735,6 +1739,273 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
 }
 EXPORT_SYMBOL_GPL(gpiochip_set_nested_irqchip);
 
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+
+/**
+ * gpiochip_set_hierarchical_irqchip() - connects a hierarchical irqchip
+ * to a gpiochip
+ * @gc: the gpiochip to set the irqchip hierarchical handler to
+ * @irqchip: the irqchip to handle this level of the hierarchy, the interrupt
+ * will then percolate up to the parent
+ */
+static void gpiochip_set_hierarchical_irqchip(struct gpio_chip *gc,
+                                             struct irq_chip *irqchip)
+{
+       /* DT will deal with mapping each IRQ as we go along */
+       if (is_of_node(gc->irq.fwnode))
+               return;
+
+       /*
+        * This is for legacy and boardfile "irqchip" fwnodes: allocate
+        * irqs upfront instead of dynamically since we don't have the
+        * dynamic type of allocation that hardware description languages
+        * provide. Once all GPIO drivers using board files are gone from
+        * the kernel we can delete this code, but for a transitional period
+        * it is necessary to keep this around.
+        */
+       if (is_fwnode_irqchip(gc->irq.fwnode)) {
+               int i;
+               int ret;
+
+               for (i = 0; i < gc->ngpio; i++) {
+                       struct irq_fwspec fwspec;
+                       unsigned int parent_hwirq;
+                       unsigned int parent_type;
+                       struct gpio_irq_chip *girq = &gc->irq;
+
+                       /*
+                        * We call the child to parent translation function
+                        * only to check if the child IRQ is valid or not.
+                        * Just pick the rising edge type here as that is what
+                        * we likely need to support.
+                        */
+                       ret = girq->child_to_parent_hwirq(gc, i,
+                                                         IRQ_TYPE_EDGE_RISING,
+                                                         &parent_hwirq,
+                                                         &parent_type);
+                       if (ret) {
+                               chip_err(gc, "skip set-up on hwirq %d\n",
+                                        i);
+                               continue;
+                       }
+
+                       fwspec.fwnode = gc->irq.fwnode;
+                       /* This is the hwirq for the GPIO line side of things */
+                       fwspec.param[0] = girq->child_offset_to_irq(gc, i);
+                       /* Just pick something */
+                       fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+                       fwspec.param_count = 2;
+                       ret = __irq_domain_alloc_irqs(gc->irq.domain,
+                                                     /* just pick something */
+                                                     -1,
+                                                     1,
+                                                     NUMA_NO_NODE,
+                                                     &fwspec,
+                                                     false,
+                                                     NULL);
+                       if (ret < 0) {
+                               chip_err(gc,
+                                        "can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
+                                        i, parent_hwirq,
+                                        ret);
+                       }
+               }
+       }
+
+       chip_err(gc, "%s unknown fwnode type proceed anyway\n", __func__);
+
+       return;
+}
+
+static int gpiochip_hierarchy_irq_domain_translate(struct irq_domain *d,
+                                                  struct irq_fwspec *fwspec,
+                                                  unsigned long *hwirq,
+                                                  unsigned int *type)
+{
+       /* We support standard DT translation */
+       if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+               return irq_domain_translate_twocell(d, fwspec, hwirq, type);
+       }
+
+       /* This is for board files and others not using DT */
+       if (is_fwnode_irqchip(fwspec->fwnode)) {
+               int ret;
+
+               ret = irq_domain_translate_twocell(d, fwspec, hwirq, type);
+               if (ret)
+                       return ret;
+               WARN_ON(*type == IRQ_TYPE_NONE);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
+                                              unsigned int irq,
+                                              unsigned int nr_irqs,
+                                              void *data)
+{
+       struct gpio_chip *gc = d->host_data;
+       irq_hw_number_t hwirq;
+       unsigned int type = IRQ_TYPE_NONE;
+       struct irq_fwspec *fwspec = data;
+       struct irq_fwspec parent_fwspec;
+       unsigned int parent_hwirq;
+       unsigned int parent_type;
+       struct gpio_irq_chip *girq = &gc->irq;
+       int ret;
+
+       /*
+        * The nr_irqs parameter is always one except for PCI multi-MSI
+        * so this should not happen.
+        */
+       WARN_ON(nr_irqs != 1);
+
+       ret = gc->irq.child_irq_domain_ops.translate(d, fwspec, &hwirq, &type);
+       if (ret)
+               return ret;
+
+       chip_info(gc, "allocate IRQ %d, hwirq %lu\n", irq,  hwirq);
+
+       ret = girq->child_to_parent_hwirq(gc, hwirq, type,
+                                         &parent_hwirq, &parent_type);
+       if (ret) {
+               chip_err(gc, "can't look up hwirq %lu\n", hwirq);
+               return ret;
+       }
+       chip_info(gc, "found parent hwirq %u\n", parent_hwirq);
+
+       /*
+        * We set handle_bad_irq because the .set_type() should
+        * always be invoked and set the right type of handler.
+        */
+       irq_domain_set_info(d,
+                           irq,
+                           hwirq,
+                           gc->irq.chip,
+                           gc,
+                           girq->handler,
+                           NULL, NULL);
+       irq_set_probe(irq);
+
+       /*
+        * Create a IRQ fwspec to send up to the parent irqdomain:
+        * specify the hwirq we address on the parent and tie it
+        * all together up the chain.
+        */
+       parent_fwspec.fwnode = d->parent->fwnode;
+       /* This parent only handles asserted level IRQs */
+       girq->populate_parent_fwspec(gc, &parent_fwspec, parent_hwirq,
+                                    parent_type);
+       chip_info(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
+                 irq, parent_hwirq);
+       ret = irq_domain_alloc_irqs_parent(d, irq, 1, &parent_fwspec);
+       if (ret)
+               chip_err(gc,
+                        "failed to allocate parent hwirq %d for hwirq %lu\n",
+                        parent_hwirq, hwirq);
+
+       return ret;
+}
+
+static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *chip,
+                                                     unsigned int offset)
+{
+       return offset;
+}
+
+static void gpiochip_hierarchy_setup_domain_ops(struct irq_domain_ops *ops)
+{
+       ops->activate = gpiochip_irq_domain_activate;
+       ops->deactivate = gpiochip_irq_domain_deactivate;
+       ops->alloc = gpiochip_hierarchy_irq_domain_alloc;
+       ops->free = irq_domain_free_irqs_common;
+
+       /*
+        * We only allow overriding the translate() function for
+        * hierarchical chips, and this should only be done if the user
+        * really needs something other than 1:1 translation.
+        */
+       if (!ops->translate)
+               ops->translate = gpiochip_hierarchy_irq_domain_translate;
+}
+
+static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc)
+{
+       if (!gc->irq.child_to_parent_hwirq ||
+           !gc->irq.fwnode) {
+               chip_err(gc, "missing irqdomain vital data\n");
+               return -EINVAL;
+       }
+
+       if (!gc->irq.child_offset_to_irq)
+               gc->irq.child_offset_to_irq = gpiochip_child_offset_to_irq_noop;
+
+       if (!gc->irq.populate_parent_fwspec)
+               gc->irq.populate_parent_fwspec =
+                       gpiochip_populate_parent_fwspec_twocell;
+
+       gpiochip_hierarchy_setup_domain_ops(&gc->irq.child_irq_domain_ops);
+
+       gc->irq.domain = irq_domain_create_hierarchy(
+               gc->irq.parent_domain,
+               0,
+               gc->ngpio,
+               gc->irq.fwnode,
+               &gc->irq.child_irq_domain_ops,
+               gc);
+
+       if (!gc->irq.domain)
+               return -ENOMEM;
+
+       gpiochip_set_hierarchical_irqchip(gc, gc->irq.chip);
+
+       return 0;
+}
+
+static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
+{
+       return !!gc->irq.parent_domain;
+}
+
+void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
+                                            struct irq_fwspec *fwspec,
+                                            unsigned int parent_hwirq,
+                                            unsigned int parent_type)
+{
+       fwspec->param_count = 2;
+       fwspec->param[0] = parent_hwirq;
+       fwspec->param[1] = parent_type;
+}
+EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell);
+
+void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
+                                             struct irq_fwspec *fwspec,
+                                             unsigned int parent_hwirq,
+                                             unsigned int parent_type)
+{
+       fwspec->param_count = 4;
+       fwspec->param[0] = 0;
+       fwspec->param[1] = parent_hwirq;
+       fwspec->param[2] = 0;
+       fwspec->param[3] = parent_type;
+}
+EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_fourcell);
+
+#else
+
+static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc)
+{
+       return -EINVAL;
+}
+
+static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
+{
+       return false;
+}
+
+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
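A driver opts into the hierarchy support above by filling in the new gpio_irq_chip fields before registering the chip. A sketch under invented names (foo_irq_chip and a fixed +32 hwirq offset); a real driver derives the parent hwirq and IRQ type from its hardware topology:

    static int foo_child_to_parent_hwirq(struct gpio_chip *gc,
                                         unsigned int child,
                                         unsigned int child_type,
                                         unsigned int *parent,
                                         unsigned int *parent_type)
    {
            *parent = child + 32;           /* made-up fixed routing */
            *parent_type = child_type;
            return 0;
    }

    static void foo_wire_up_irqchip(struct gpio_chip *gc,
                                    struct irq_domain *parent_domain,
                                    struct device_node *np)
    {
            struct gpio_irq_chip *girq = &gc->irq;

            girq->chip = &foo_irq_chip;             /* the driver's struct irq_chip */
            girq->fwnode = of_node_to_fwnode(np);
            girq->parent_domain = parent_domain;
            girq->child_to_parent_hwirq = foo_child_to_parent_hwirq;
            girq->handler = handle_level_irq;
            girq->default_type = IRQ_TYPE_NONE;
            /*
             * A four-cell parent (e.g. a GIC) would also set
             * girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_fourcell;
             */
    }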
+
 /**
  * gpiochip_irq_map() - maps an IRQ into a GPIO irqchip
  * @d: the irqdomain used by this irqchip
@@ -1749,7 +2020,7 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
                     irq_hw_number_t hwirq)
 {
        struct gpio_chip *chip = d->host_data;
-       int err = 0;
+       int ret = 0;
 
        if (!gpiochip_irqchip_irq_valid(chip, hwirq))
                return -ENXIO;
@@ -1767,12 +2038,12 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
        irq_set_noprobe(irq);
 
        if (chip->irq.num_parents == 1)
-               err = irq_set_parent(irq, chip->irq.parents[0]);
+               ret = irq_set_parent(irq, chip->irq.parents[0]);
        else if (chip->irq.map)
-               err = irq_set_parent(irq, chip->irq.map[hwirq]);
+               ret = irq_set_parent(irq, chip->irq.map[hwirq]);
 
-       if (err < 0)
-               return err;
+       if (ret < 0)
+               return ret;
 
        /*
         * No set-up of the hardware will happen if IRQ_TYPE_NONE
@@ -1803,6 +2074,11 @@ static const struct irq_domain_ops gpiochip_domain_ops = {
        .xlate  = irq_domain_xlate_twocell,
 };
 
+/*
+ * TODO: move these activate/deactivate callbacks under the hierarchical
+ * irqchip implementation and make them static once SPMI and SSBI (all
+ * external users) are phased over.
+ */
 /**
  * gpiochip_irq_domain_activate() - Lock a GPIO to be used as an IRQ
  * @domain: The IRQ domain used by this IRQ chip
@@ -1842,10 +2118,25 @@ EXPORT_SYMBOL_GPL(gpiochip_irq_domain_deactivate);
 
 static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
 {
+       struct irq_domain *domain = chip->irq.domain;
+
        if (!gpiochip_irqchip_irq_valid(chip, offset))
                return -ENXIO;
 
-       return irq_create_mapping(chip->irq.domain, offset);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+       if (irq_domain_is_hierarchy(domain)) {
+               struct irq_fwspec spec;
+
+               spec.fwnode = domain->fwnode;
+               spec.param_count = 2;
+               spec.param[0] = chip->irq.child_offset_to_irq(chip, offset);
+               spec.param[1] = IRQ_TYPE_NONE;
+
+               return irq_create_fwspec_mapping(&spec);
+       }
+#endif
+
+       return irq_create_mapping(domain, offset);
 }
 
 static int gpiochip_irq_reqres(struct irq_data *d)
@@ -1922,7 +2213,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
                                struct lock_class_key *request_key)
 {
        struct irq_chip *irqchip = gpiochip->irq.chip;
-       const struct irq_domain_ops *ops;
+       const struct irq_domain_ops *ops = NULL;
        struct device_node *np;
        unsigned int type;
        unsigned int i;
@@ -1958,16 +2249,25 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
        gpiochip->irq.lock_key = lock_key;
        gpiochip->irq.request_key = request_key;
 
-       if (gpiochip->irq.domain_ops)
-               ops = gpiochip->irq.domain_ops;
-       else
-               ops = &gpiochip_domain_ops;
-
-       gpiochip->irq.domain = irq_domain_add_simple(np, gpiochip->ngpio,
-                                                    gpiochip->irq.first,
-                                                    ops, gpiochip);
-       if (!gpiochip->irq.domain)
-               return -EINVAL;
+       /* If a parent irqdomain is provided, let's build a hierarchy */
+       if (gpiochip_hierarchy_is_hierarchical(gpiochip)) {
+               int ret = gpiochip_hierarchy_add_domain(gpiochip);
+               if (ret)
+                       return ret;
+       } else {
+               /* Some drivers provide custom irqdomain ops */
+               if (gpiochip->irq.domain_ops)
+                       ops = gpiochip->irq.domain_ops;
+
+               if (!ops)
+                       ops = &gpiochip_domain_ops;
+               gpiochip->irq.domain = irq_domain_add_simple(np,
+                       gpiochip->ngpio,
+                       gpiochip->irq.first,
+                       ops, gpiochip);
+               if (!gpiochip->irq.domain)
+                       return -EINVAL;
+       }
 
        if (gpiochip->irq.parent_handler) {
                void *data = gpiochip->irq.parent_handler_data ?: gpiochip;
@@ -2330,7 +2630,7 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);
 static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
 {
        struct gpio_chip        *chip = desc->gdev->chip;
-       int                     status;
+       int                     ret;
        unsigned long           flags;
        unsigned                offset;
 
@@ -2348,10 +2648,10 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
 
        if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
                desc_set_label(desc, label ? : "?");
-               status = 0;
+               ret = 0;
        } else {
                kfree_const(label);
-               status = -EBUSY;
+               ret = -EBUSY;
                goto done;
        }
 
@@ -2360,12 +2660,12 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
                spin_unlock_irqrestore(&gpio_lock, flags);
                offset = gpio_chip_hwgpio(desc);
                if (gpiochip_line_is_valid(chip, offset))
-                       status = chip->request(chip, offset);
+                       ret = chip->request(chip, offset);
                else
-                       status = -EINVAL;
+                       ret = -EINVAL;
                spin_lock_irqsave(&gpio_lock, flags);
 
-               if (status < 0) {
+               if (ret < 0) {
                        desc_set_label(desc, NULL);
                        kfree_const(label);
                        clear_bit(FLAG_REQUESTED, &desc->flags);
@@ -2380,7 +2680,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
        }
 done:
        spin_unlock_irqrestore(&gpio_lock, flags);
-       return status;
+       return ret;
 }
 
 /*
@@ -2423,24 +2723,24 @@ static int validate_desc(const struct gpio_desc *desc, const char *func)
 
 int gpiod_request(struct gpio_desc *desc, const char *label)
 {
-       int status = -EPROBE_DEFER;
+       int ret = -EPROBE_DEFER;
        struct gpio_device *gdev;
 
        VALIDATE_DESC(desc);
        gdev = desc->gdev;
 
        if (try_module_get(gdev->owner)) {
-               status = gpiod_request_commit(desc, label);
-               if (status < 0)
+               ret = gpiod_request_commit(desc, label);
+               if (ret < 0)
                        module_put(gdev->owner);
                else
                        get_device(&gdev->dev);
        }
 
-       if (status)
-               gpiod_dbg(desc, "%s: status %d\n", __func__, status);
+       if (ret)
+               gpiod_dbg(desc, "%s: status %d\n", __func__, ret);
 
-       return status;
+       return ret;
 }
 
 static bool gpiod_free_commit(struct gpio_desc *desc)
@@ -2542,22 +2842,22 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
                                            enum gpiod_flags dflags)
 {
        struct gpio_desc *desc = gpiochip_get_desc(chip, hwnum);
-       int err;
+       int ret;
 
        if (IS_ERR(desc)) {
                chip_err(chip, "failed to get GPIO descriptor\n");
                return desc;
        }
 
-       err = gpiod_request_commit(desc, label);
-       if (err < 0)
-               return ERR_PTR(err);
+       ret = gpiod_request_commit(desc, label);
+       if (ret < 0)
+               return ERR_PTR(ret);
 
-       err = gpiod_configure_flags(desc, label, lflags, dflags);
-       if (err) {
+       ret = gpiod_configure_flags(desc, label, lflags, dflags);
+       if (ret) {
                chip_err(chip, "setup of own GPIO %s failed\n", label);
                gpiod_free_commit(desc);
-               return ERR_PTR(err);
+               return ERR_PTR(ret);
        }
 
        return desc;
@@ -2620,7 +2920,7 @@ static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
 int gpiod_direction_input(struct gpio_desc *desc)
 {
        struct gpio_chip        *chip;
-       int                     status = 0;
+       int                     ret = 0;
 
        VALIDATE_DESC(desc);
        chip = desc->gdev->chip;
@@ -2644,7 +2944,7 @@ int gpiod_direction_input(struct gpio_desc *desc)
         * assume we are in input mode after this.
         */
        if (chip->direction_input) {
-               status = chip->direction_input(chip, gpio_chip_hwgpio(desc));
+               ret = chip->direction_input(chip, gpio_chip_hwgpio(desc));
        } else if (chip->get_direction &&
                  (chip->get_direction(chip, gpio_chip_hwgpio(desc)) != 1)) {
                gpiod_warn(desc,
@@ -2652,7 +2952,7 @@ int gpiod_direction_input(struct gpio_desc *desc)
                           __func__);
                return -EIO;
        }
-       if (status == 0)
+       if (ret == 0)
                clear_bit(FLAG_IS_OUT, &desc->flags);
 
        if (test_bit(FLAG_PULL_UP, &desc->flags))
@@ -2662,9 +2962,9 @@ int gpiod_direction_input(struct gpio_desc *desc)
                gpio_set_config(chip, gpio_chip_hwgpio(desc),
                                PIN_CONFIG_BIAS_PULL_DOWN);
 
-       trace_gpio_direction(desc_to_gpio(desc), 1, status);
+       trace_gpio_direction(desc_to_gpio(desc), 1, ret);
 
-       return status;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(gpiod_direction_input);
 
@@ -2936,7 +3236,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
                                  struct gpio_array *array_info,
                                  unsigned long *value_bitmap)
 {
-       int err, i = 0;
+       int ret, i = 0;
 
        /*
         * Validate array_info against desc_array and its size.
@@ -2949,11 +3249,11 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
                if (!can_sleep)
                        WARN_ON(array_info->chip->can_sleep);
 
-               err = gpio_chip_get_multiple(array_info->chip,
+               ret = gpio_chip_get_multiple(array_info->chip,
                                             array_info->get_mask,
                                             value_bitmap);
-               if (err)
-                       return err;
+               if (ret)
+                       return ret;
 
                if (!raw && !bitmap_empty(array_info->invert_mask, array_size))
                        bitmap_xor(value_bitmap, value_bitmap,
@@ -3141,24 +3441,24 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value);
  */
 static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
 {
-       int err = 0;
+       int ret = 0;
        struct gpio_chip *chip = desc->gdev->chip;
        int offset = gpio_chip_hwgpio(desc);
 
        if (value) {
-               err = chip->direction_input(chip, offset);
-               if (!err)
+               ret = chip->direction_input(chip, offset);
+               if (!ret)
                        clear_bit(FLAG_IS_OUT, &desc->flags);
        } else {
-               err = chip->direction_output(chip, offset, 0);
-               if (!err)
+               ret = chip->direction_output(chip, offset, 0);
+               if (!ret)
                        set_bit(FLAG_IS_OUT, &desc->flags);
        }
-       trace_gpio_direction(desc_to_gpio(desc), value, err);
-       if (err < 0)
+       trace_gpio_direction(desc_to_gpio(desc), value, ret);
+       if (ret < 0)
                gpiod_err(desc,
                          "%s: Error in set_value for open drain err %d\n",
-                         __func__, err);
+                         __func__, ret);
 }
 
 /*
@@ -3168,24 +3468,24 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
  */
 static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
 {
-       int err = 0;
+       int ret = 0;
        struct gpio_chip *chip = desc->gdev->chip;
        int offset = gpio_chip_hwgpio(desc);
 
        if (value) {
-               err = chip->direction_output(chip, offset, 1);
-               if (!err)
+               ret = chip->direction_output(chip, offset, 1);
+               if (!ret)
                        set_bit(FLAG_IS_OUT, &desc->flags);
        } else {
-               err = chip->direction_input(chip, offset);
-               if (!err)
+               ret = chip->direction_input(chip, offset);
+               if (!ret)
                        clear_bit(FLAG_IS_OUT, &desc->flags);
        }
-       trace_gpio_direction(desc_to_gpio(desc), !value, err);
-       if (err < 0)
+       trace_gpio_direction(desc_to_gpio(desc), !value, ret);
+       if (ret < 0)
                gpiod_err(desc,
                          "%s: Error in set_value for open source err %d\n",
-                         __func__, err);
+                         __func__, ret);
 }
 
 static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
@@ -4002,27 +4302,6 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
        return desc;
 }
 
-static int dt_gpio_count(struct device *dev, const char *con_id)
-{
-       int ret;
-       char propname[32];
-       unsigned int i;
-
-       for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
-               if (con_id)
-                       snprintf(propname, sizeof(propname), "%s-%s",
-                                con_id, gpio_suffixes[i]);
-               else
-                       snprintf(propname, sizeof(propname), "%s",
-                                gpio_suffixes[i]);
-
-               ret = of_gpio_named_count(dev->of_node, propname);
-               if (ret > 0)
-                       break;
-       }
-       return ret ? ret : -ENOENT;
-}
-
 static int platform_gpio_count(struct device *dev, const char *con_id)
 {
        struct gpiod_lookup_table *table;
@@ -4055,7 +4334,7 @@ int gpiod_count(struct device *dev, const char *con_id)
        int count = -ENOENT;
 
        if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
-               count = dt_gpio_count(dev, con_id);
+               count = of_gpio_get_count(dev, con_id);
        else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev))
                count = acpi_gpio_count(dev, con_id);
 
@@ -4117,7 +4396,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
 int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
                unsigned long lflags, enum gpiod_flags dflags)
 {
-       int status;
+       int ret;
 
        if (lflags & GPIO_ACTIVE_LOW)
                set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@@ -4150,9 +4429,9 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
        else if (lflags & GPIO_PULL_DOWN)
                set_bit(FLAG_PULL_DOWN, &desc->flags);
 
-       status = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
-       if (status < 0)
-               return status;
+       ret = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
+       if (ret < 0)
+               return ret;
 
        /* No particular flag request, return here... */
        if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
@@ -4162,12 +4441,12 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
 
        /* Process flags */
        if (dflags & GPIOD_FLAGS_BIT_DIR_OUT)
-               status = gpiod_direction_output(desc,
+               ret = gpiod_direction_output(desc,
                                !!(dflags & GPIOD_FLAGS_BIT_DIR_VAL));
        else
-               status = gpiod_direction_input(desc);
+               ret = gpiod_direction_input(desc);
 
-       return status;
+       return ret;
 }
 
 /**
@@ -4191,7 +4470,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
 {
        unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
        struct gpio_desc *desc = NULL;
-       int status;
+       int ret;
        /* Maybe we have a device name, maybe not */
        const char *devname = dev ? dev_name(dev) : "?";
 
@@ -4226,9 +4505,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
         * If a connection label was passed use that, else attempt to use
         * the device name as label
         */
-       status = gpiod_request(desc, con_id ? con_id : devname);
-       if (status < 0) {
-               if (status == -EBUSY && flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
+       ret = gpiod_request(desc, con_id ? con_id : devname);
+       if (ret < 0) {
+               if (ret == -EBUSY && flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
                        /*
                         * This happens when there are several consumers for
                         * the same GPIO line: we just return here without
@@ -4241,89 +4520,20 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
                                 con_id ? con_id : devname);
                        return desc;
                } else {
-                       return ERR_PTR(status);
+                       return ERR_PTR(ret);
                }
        }
 
-       status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
-       if (status < 0) {
-               dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
-               gpiod_put(desc);
-               return ERR_PTR(status);
-       }
-
-       return desc;
-}
-EXPORT_SYMBOL_GPL(gpiod_get_index);
-
-/**
- * gpiod_get_from_of_node() - obtain a GPIO from an OF node
- * @node:      handle of the OF node
- * @propname:  name of the DT property representing the GPIO
- * @index:     index of the GPIO to obtain for the consumer
- * @dflags:    GPIO initialization flags
- * @label:     label to attach to the requested GPIO
- *
- * Returns:
- * On successful request the GPIO pin is configured in accordance with
- * provided @dflags.
- *
- * In case of error an ERR_PTR() is returned.
- */
-struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
-                                        const char *propname, int index,
-                                        enum gpiod_flags dflags,
-                                        const char *label)
-{
-       unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
-       struct gpio_desc *desc;
-       enum of_gpio_flags flags;
-       bool active_low = false;
-       bool single_ended = false;
-       bool open_drain = false;
-       bool transitory = false;
-       int ret;
-
-       desc = of_get_named_gpiod_flags(node, propname,
-                                       index, &flags);
-
-       if (!desc || IS_ERR(desc)) {
-               return desc;
-       }
-
-       active_low = flags & OF_GPIO_ACTIVE_LOW;
-       single_ended = flags & OF_GPIO_SINGLE_ENDED;
-       open_drain = flags & OF_GPIO_OPEN_DRAIN;
-       transitory = flags & OF_GPIO_TRANSITORY;
-
-       ret = gpiod_request(desc, label);
-       if (ret == -EBUSY && (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
-               return desc;
-       if (ret)
-               return ERR_PTR(ret);
-
-       if (active_low)
-               lflags |= GPIO_ACTIVE_LOW;
-
-       if (single_ended) {
-               if (open_drain)
-                       lflags |= GPIO_OPEN_DRAIN;
-               else
-                       lflags |= GPIO_OPEN_SOURCE;
-       }
-
-       if (transitory)
-               lflags |= GPIO_TRANSITORY;
-
-       ret = gpiod_configure_flags(desc, propname, lflags, dflags);
+       ret = gpiod_configure_flags(desc, con_id, lookupflags, flags);
        if (ret < 0) {
+               dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
                gpiod_put(desc);
                return ERR_PTR(ret);
        }
 
        return desc;
 }
-EXPORT_SYMBOL(gpiod_get_from_of_node);
+EXPORT_SYMBOL_GPL(gpiod_get_index);
 
 /**
  * fwnode_get_named_gpiod - obtain a GPIO from firmware node
@@ -4433,7 +4643,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
        struct gpio_chip *chip;
        struct gpio_desc *local_desc;
        int hwnum;
-       int status;
+       int ret;
 
        chip = gpiod_to_chip(desc);
        hwnum = gpio_chip_hwgpio(desc);
@@ -4441,10 +4651,10 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
        local_desc = gpiochip_request_own_desc(chip, hwnum, name,
                                               lflags, dflags);
        if (IS_ERR(local_desc)) {
-               status = PTR_ERR(local_desc);
+               ret = PTR_ERR(local_desc);
                pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n",
-                      name, chip->label, hwnum, status);
-               return status;
+                      name, chip->label, hwnum, ret);
+               return ret;
        }
 
        /* Mark GPIO as hogged so it can be identified and removed later */
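A note on the -EBUSY branch kept above: gpiod_get_index() only tolerates a busy line when the consumer passed GPIOD_FLAGS_BIT_NONEXCLUSIVE, which is how several devices may share one GPIO (regulators sharing an enable pin are the usual case). A minimal consumer-side sketch, with a hypothetical device and connection name, could look like this:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Hypothetical consumer sharing an "enable" line with other devices. */
static int example_request_shared_enable(struct device *dev)
{
	struct gpio_desc *en;

	/*
	 * With NONEXCLUSIVE set, a second request for the same line hits
	 * the -EBUSY branch in gpiod_get_index() and gets the existing
	 * descriptor back instead of an error.
	 */
	en = gpiod_get_index(dev, "enable", 0,
			     GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
	if (IS_ERR(en))
		return PTR_ERR(en);

	gpiod_set_value_cansleep(en, 1);
	return 0;
}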
index 7c52c2442173e71dd2c712a418193118399beb9b..b8b10a409c7b9c6584e6e176d9246aec5c394258 100644 (file)
@@ -16,9 +16,6 @@
 #include <linux/module.h>
 #include <linux/cdev.h>
 
-enum of_gpio_flags;
-struct acpi_device;
-
 /**
  * struct gpio_device - internal state container for GPIO devices
  * @id: numerical ID number for the GPIO chip
@@ -69,126 +66,9 @@ struct gpio_device {
 #endif
 };
 
-/**
- * struct acpi_gpio_info - ACPI GPIO specific information
- * @adev: reference to ACPI device which consumes GPIO resource
- * @flags: GPIO initialization flags
- * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
- * @pin_config: pin bias as provided by ACPI
- * @polarity: interrupt polarity as provided by ACPI
- * @triggering: triggering type as provided by ACPI
- * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
- */
-struct acpi_gpio_info {
-       struct acpi_device *adev;
-       enum gpiod_flags flags;
-       bool gpioint;
-       int pin_config;
-       int polarity;
-       int triggering;
-       unsigned int quirks;
-};
-
 /* gpio suffixes used for ACPI and device tree lookup */
 static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
 
-#ifdef CONFIG_OF_GPIO
-struct gpio_desc *of_find_gpio(struct device *dev,
-                              const char *con_id,
-                              unsigned int idx,
-                              unsigned long *lookupflags);
-struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
-                  const char *list_name, int index, enum of_gpio_flags *flags);
-int of_gpiochip_add(struct gpio_chip *gc);
-void of_gpiochip_remove(struct gpio_chip *gc);
-#else
-static inline struct gpio_desc *of_find_gpio(struct device *dev,
-                                            const char *con_id,
-                                            unsigned int idx,
-                                            unsigned long *lookupflags)
-{
-       return ERR_PTR(-ENOENT);
-}
-static inline struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
-                  const char *list_name, int index, enum of_gpio_flags *flags)
-{
-       return ERR_PTR(-ENOENT);
-}
-static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
-static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
-#endif /* CONFIG_OF_GPIO */
-
-#ifdef CONFIG_ACPI
-void acpi_gpiochip_add(struct gpio_chip *chip);
-void acpi_gpiochip_remove(struct gpio_chip *chip);
-
-void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
-void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
-
-int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
-                                struct acpi_gpio_info *info);
-int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
-                                       struct acpi_gpio_info *info);
-
-struct gpio_desc *acpi_find_gpio(struct device *dev,
-                                const char *con_id,
-                                unsigned int idx,
-                                enum gpiod_flags *dflags,
-                                unsigned long *lookupflags);
-struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
-                                     const char *propname, int index,
-                                     struct acpi_gpio_info *info);
-
-int acpi_gpio_count(struct device *dev, const char *con_id);
-
-bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id);
-#else
-static inline void acpi_gpiochip_add(struct gpio_chip *chip) { }
-static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { }
-
-static inline void
-acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
-
-static inline void
-acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
-
-static inline int
-acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info)
-{
-       return 0;
-}
-static inline int
-acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
-                                   struct acpi_gpio_info *info)
-{
-       return 0;
-}
-
-static inline struct gpio_desc *
-acpi_find_gpio(struct device *dev, const char *con_id,
-              unsigned int idx, enum gpiod_flags *dflags,
-              unsigned long *lookupflags)
-{
-       return ERR_PTR(-ENOENT);
-}
-static inline struct gpio_desc *
-acpi_node_get_gpiod(struct fwnode_handle *fwnode, const char *propname,
-                   int index, struct acpi_gpio_info *info)
-{
-       return ERR_PTR(-ENXIO);
-}
-static inline int acpi_gpio_count(struct device *dev, const char *con_id)
-{
-       return -ENODEV;
-}
-
-static inline bool acpi_can_fallback_to_crs(struct acpi_device *adev,
-                                           const char *con_id)
-{
-       return false;
-}
-#endif
-
 struct gpio_array {
        struct gpio_desc        **desc;
        unsigned int            size;
diff --git a/drivers/gpio/sgpio-aspeed.c b/drivers/gpio/sgpio-aspeed.c
new file mode 100644 (file)
index 0000000..7e99860
--- /dev/null
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2019 American Megatrends International LLC.
+ *
+ * Author: Karthikeyan Mani <karthikeyanm@amiindia.co.in>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#define MAX_NR_SGPIO                   80
+
+#define ASPEED_SGPIO_CTRL              0x54
+
+#define ASPEED_SGPIO_PINS_MASK         GENMASK(9, 6)
+#define ASPEED_SGPIO_CLK_DIV_MASK      GENMASK(31, 16)
+#define ASPEED_SGPIO_ENABLE            BIT(0)
+
+struct aspeed_sgpio {
+       struct gpio_chip chip;
+       struct clk *pclk;
+       spinlock_t lock;
+       void __iomem *base;
+       uint32_t dir_in[3];
+       int irq;
+};
+
+struct aspeed_sgpio_bank {
+       uint16_t    val_regs;
+       uint16_t    rdata_reg;
+       uint16_t    irq_regs;
+       const char  names[4][3];
+};
+
+/*
+ * Note: The "value" register returns the input value when the GPIO is
+ *      configured as an input.
+ *
+ *      The "rdata" register returns the output value when the GPIO is
+ *      configured as an output.
+ */
+static const struct aspeed_sgpio_bank aspeed_sgpio_banks[] = {
+       {
+               .val_regs = 0x0000,
+               .rdata_reg = 0x0070,
+               .irq_regs = 0x0004,
+               .names = { "A", "B", "C", "D" },
+       },
+       {
+               .val_regs = 0x001C,
+               .rdata_reg = 0x0074,
+               .irq_regs = 0x0020,
+               .names = { "E", "F", "G", "H" },
+       },
+       {
+               .val_regs = 0x0038,
+               .rdata_reg = 0x0078,
+               .irq_regs = 0x003C,
+               .names = { "I", "J" },
+       },
+};
+
+enum aspeed_sgpio_reg {
+       reg_val,
+       reg_rdata,
+       reg_irq_enable,
+       reg_irq_type0,
+       reg_irq_type1,
+       reg_irq_type2,
+       reg_irq_status,
+};
+
+#define GPIO_VAL_VALUE      0x00
+#define GPIO_IRQ_ENABLE     0x00
+#define GPIO_IRQ_TYPE0      0x04
+#define GPIO_IRQ_TYPE1      0x08
+#define GPIO_IRQ_TYPE2      0x0C
+#define GPIO_IRQ_STATUS     0x10
+
+static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
+                                    const struct aspeed_sgpio_bank *bank,
+                                    const enum aspeed_sgpio_reg reg)
+{
+       switch (reg) {
+       case reg_val:
+               return gpio->base + bank->val_regs + GPIO_VAL_VALUE;
+       case reg_rdata:
+               return gpio->base + bank->rdata_reg;
+       case reg_irq_enable:
+               return gpio->base + bank->irq_regs + GPIO_IRQ_ENABLE;
+       case reg_irq_type0:
+               return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE0;
+       case reg_irq_type1:
+               return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE1;
+       case reg_irq_type2:
+               return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE2;
+       case reg_irq_status:
+               return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
+       default:
+       /* if code reaches here, it is an error case */
+               BUG_ON(1);
+       }
+}
+
+#define GPIO_BANK(x)    ((x) >> 5)
+#define GPIO_OFFSET(x)  ((x) & 0x1f)
+#define GPIO_BIT(x)     BIT(GPIO_OFFSET(x))
+
+static const struct aspeed_sgpio_bank *to_bank(unsigned int offset)
+{
+       unsigned int bank = GPIO_BANK(offset);
+
+       WARN_ON(bank >= ARRAY_SIZE(aspeed_sgpio_banks));
+       return &aspeed_sgpio_banks[bank];
+}
+
+static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+       struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+       const struct aspeed_sgpio_bank *bank = to_bank(offset);
+       unsigned long flags;
+       enum aspeed_sgpio_reg reg;
+       bool is_input;
+       int rc = 0;
+
+       spin_lock_irqsave(&gpio->lock, flags);
+
+       is_input = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset);
+       reg = is_input ? reg_val : reg_rdata;
+       rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset));
+
+       spin_unlock_irqrestore(&gpio->lock, flags);
+
+       return rc;
+}
+
+static void sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val)
+{
+       struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+       const struct aspeed_sgpio_bank *bank = to_bank(offset);
+       void __iomem *addr;
+       u32 reg = 0;
+
+       addr = bank_reg(gpio, bank, reg_val);
+       reg = ioread32(addr);
+
+       if (val)
+               reg |= GPIO_BIT(offset);
+       else
+               reg &= ~GPIO_BIT(offset);
+
+       iowrite32(reg, addr);
+}
+
+static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+{
+       struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpio->lock, flags);
+
+       sgpio_set_value(gc, offset, val);
+
+       spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
+{
+       struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpio->lock, flags);
+       gpio->dir_in[GPIO_BANK(offset)] |= GPIO_BIT(offset);
+       spin_unlock_irqrestore(&gpio->lock, flags);
+
+       return 0;
+}
+
+static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
+{
+       struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpio->lock, flags);
+
+       gpio->dir_in[GPIO_BANK(offset)] &= ~GPIO_BIT(offset);
+       sgpio_set_value(gc, offset, val);
+
+       spin_unlock_irqrestore(&gpio->lock, flags);
+
+       return 0;
+}
+
+static int aspeed_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+       int dir_status;
+       struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpio->lock, flags);
+       dir_status = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset);
+       spin_unlock_irqrestore(&gpio->lock, flags);
+
+       return dir_status;
+
+}
+
+static void irqd_to_aspeed_sgpio_data(struct irq_data *d,
+                                       struct aspeed_sgpio **gpio,
+                                       const struct aspeed_sgpio_bank **bank,
+                                       u32 *bit, int *offset)
+{
+       struct aspeed_sgpio *internal;
+
+       *offset = irqd_to_hwirq(d);
+       internal = irq_data_get_irq_chip_data(d);
+       WARN_ON(!internal);
+
+       *gpio = internal;
+       *bank = to_bank(*offset);
+       *bit = GPIO_BIT(*offset);
+}
+
+static void aspeed_sgpio_irq_ack(struct irq_data *d)
+{
+       const struct aspeed_sgpio_bank *bank;
+       struct aspeed_sgpio *gpio;
+       unsigned long flags;
+       void __iomem *status_addr;
+       int offset;
+       u32 bit;
+
+       irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset);
+
+       status_addr = bank_reg(gpio, bank, reg_irq_status);
+
+       spin_lock_irqsave(&gpio->lock, flags);
+
+       iowrite32(bit, status_addr);
+
+       spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
+{
+       const struct aspeed_sgpio_bank *bank;
+       struct aspeed_sgpio *gpio;
+       unsigned long flags;
+       u32 reg, bit;
+       void __iomem *addr;
+       int offset;
+
+       irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset);
+       addr = bank_reg(gpio, bank, reg_irq_enable);
+
+       spin_lock_irqsave(&gpio->lock, flags);
+
+       reg = ioread32(addr);
+       if (set)
+               reg |= bit;
+       else
+               reg &= ~bit;
+
+       iowrite32(reg, addr);
+
+       spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static void aspeed_sgpio_irq_mask(struct irq_data *d)
+{
+       aspeed_sgpio_irq_set_mask(d, false);
+}
+
+static void aspeed_sgpio_irq_unmask(struct irq_data *d)
+{
+       aspeed_sgpio_irq_set_mask(d, true);
+}
+
+static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
+{
+       u32 type0 = 0;
+       u32 type1 = 0;
+       u32 type2 = 0;
+       u32 bit, reg;
+       const struct aspeed_sgpio_bank *bank;
+       irq_flow_handler_t handler;
+       struct aspeed_sgpio *gpio;
+       unsigned long flags;
+       void __iomem *addr;
+       int offset;
+
+       irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset);
+
+       switch (type & IRQ_TYPE_SENSE_MASK) {
+       case IRQ_TYPE_EDGE_BOTH:
+               type2 |= bit;
+               /* fall through */
+       case IRQ_TYPE_EDGE_RISING:
+               type0 |= bit;
+               /* fall through */
+       case IRQ_TYPE_EDGE_FALLING:
+               handler = handle_edge_irq;
+               break;
+       case IRQ_TYPE_LEVEL_HIGH:
+               type0 |= bit;
+               /* fall through */
+       case IRQ_TYPE_LEVEL_LOW:
+               type1 |= bit;
+               handler = handle_level_irq;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&gpio->lock, flags);
+
+       addr = bank_reg(gpio, bank, reg_irq_type0);
+       reg = ioread32(addr);
+       reg = (reg & ~bit) | type0;
+       iowrite32(reg, addr);
+
+       addr = bank_reg(gpio, bank, reg_irq_type1);
+       reg = ioread32(addr);
+       reg = (reg & ~bit) | type1;
+       iowrite32(reg, addr);
+
+       addr = bank_reg(gpio, bank, reg_irq_type2);
+       reg = ioread32(addr);
+       reg = (reg & ~bit) | type2;
+       iowrite32(reg, addr);
+
+       spin_unlock_irqrestore(&gpio->lock, flags);
+
+       irq_set_handler_locked(d, handler);
+
+       return 0;
+}
+
+static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
+{
+       struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+       struct irq_chip *ic = irq_desc_get_chip(desc);
+       struct aspeed_sgpio *data = gpiochip_get_data(gc);
+       unsigned int i, p, girq;
+       unsigned long reg;
+
+       chained_irq_enter(ic, desc);
+
+       for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
+               const struct aspeed_sgpio_bank *bank = &aspeed_sgpio_banks[i];
+
+               reg = ioread32(bank_reg(data, bank, reg_irq_status));
+
+               for_each_set_bit(p, &reg, 32) {
+                       girq = irq_find_mapping(gc->irq.domain, i * 32 + p);
+                       generic_handle_irq(girq);
+               }
+
+       }
+
+       chained_irq_exit(ic, desc);
+}
+
+static struct irq_chip aspeed_sgpio_irqchip = {
+       .name       = "aspeed-sgpio",
+       .irq_ack    = aspeed_sgpio_irq_ack,
+       .irq_mask   = aspeed_sgpio_irq_mask,
+       .irq_unmask = aspeed_sgpio_irq_unmask,
+       .irq_set_type   = aspeed_sgpio_set_type,
+};
+
+static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
+                                  struct platform_device *pdev)
+{
+       int rc, i;
+       const struct aspeed_sgpio_bank *bank;
+       struct gpio_irq_chip *irq;
+
+       rc = platform_get_irq(pdev, 0);
+       if (rc < 0)
+               return rc;
+
+       gpio->irq = rc;
+
+       /* Disable IRQs and clear interrupt status registers for all SGPIO pins. */
+       for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
+               bank =  &aspeed_sgpio_banks[i];
+               /* disable irq enable bits */
+               iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_enable));
+               /* clear status bits */
+               iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_status));
+       }
+
+       irq = &gpio->chip.irq;
+       irq->chip = &aspeed_sgpio_irqchip;
+       irq->handler = handle_bad_irq;
+       irq->default_type = IRQ_TYPE_NONE;
+       irq->parent_handler = aspeed_sgpio_irq_handler;
+       irq->parent_handler_data = gpio;
+       irq->parents = &gpio->irq;
+       irq->num_parents = 1;
+
+       /* Configure IRQ trigger types and enable interrupts */
+       for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
+               bank = &aspeed_sgpio_banks[i];
+               /* set falling or level-low irq */
+               iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0));
+               /* trigger type is edge */
+               iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1));
+               /* dual edge trigger mode. */
+               iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_type2));
+               /* enable irq */
+               iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_enable));
+       }
+
+       return 0;
+}
+
+static const struct of_device_id aspeed_sgpio_of_table[] = {
+       { .compatible = "aspeed,ast2400-sgpio" },
+       { .compatible = "aspeed,ast2500-sgpio" },
+       {}
+};
+
+MODULE_DEVICE_TABLE(of, aspeed_sgpio_of_table);
+
+static int __init aspeed_sgpio_probe(struct platform_device *pdev)
+{
+       struct aspeed_sgpio *gpio;
+       u32 nr_gpios, sgpio_freq, sgpio_clk_div;
+       int rc;
+       unsigned long apb_freq;
+
+       gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+       if (!gpio)
+               return -ENOMEM;
+
+       gpio->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(gpio->base))
+               return PTR_ERR(gpio->base);
+
+       rc = of_property_read_u32(pdev->dev.of_node, "ngpios", &nr_gpios);
+       if (rc < 0) {
+               dev_err(&pdev->dev, "Could not read ngpios property\n");
+               return -EINVAL;
+       } else if (nr_gpios > MAX_NR_SGPIO) {
+               dev_err(&pdev->dev, "Number of GPIOs exceeds the maximum of %d: %d\n",
+                       MAX_NR_SGPIO, nr_gpios);
+               return -EINVAL;
+       }
+
+       rc = of_property_read_u32(pdev->dev.of_node, "bus-frequency", &sgpio_freq);
+       if (rc < 0) {
+               dev_err(&pdev->dev, "Could not read bus-frequency property\n");
+               return -EINVAL;
+       }
+
+       gpio->pclk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(gpio->pclk)) {
+               dev_err(&pdev->dev, "devm_clk_get failed\n");
+               return PTR_ERR(gpio->pclk);
+       }
+
+       apb_freq = clk_get_rate(gpio->pclk);
+
+       /*
+        * From the datasheet,
+        *      SGPIO period = 1/PCLK * 2 * (GPIO254[31:16] + 1)
+        *      period = 2 * (GPIO254[31:16] + 1) / PCLK
+        *      frequency = 1 / (2 * (GPIO254[31:16] + 1) / PCLK)
+        *      frequency = PCLK / (2 * (GPIO254[31:16] + 1))
+        *      frequency * 2 * (GPIO254[31:16] + 1) = PCLK
+        *      GPIO254[31:16] = PCLK / (frequency * 2) - 1
+        */
+       if (sgpio_freq == 0)
+               return -EINVAL;
+
+       sgpio_clk_div = (apb_freq / (sgpio_freq * 2)) - 1;
+
+       if (sgpio_clk_div > (1 << 16) - 1)
+               return -EINVAL;
+
+       iowrite32(FIELD_PREP(ASPEED_SGPIO_CLK_DIV_MASK, sgpio_clk_div) |
+                 FIELD_PREP(ASPEED_SGPIO_PINS_MASK, (nr_gpios / 8)) |
+                 ASPEED_SGPIO_ENABLE,
+                 gpio->base + ASPEED_SGPIO_CTRL);
+
+       spin_lock_init(&gpio->lock);
+
+       gpio->chip.parent = &pdev->dev;
+       gpio->chip.ngpio = nr_gpios;
+       gpio->chip.direction_input = aspeed_sgpio_dir_in;
+       gpio->chip.direction_output = aspeed_sgpio_dir_out;
+       gpio->chip.get_direction = aspeed_sgpio_get_direction;
+       gpio->chip.request = NULL;
+       gpio->chip.free = NULL;
+       gpio->chip.get = aspeed_sgpio_get;
+       gpio->chip.set = aspeed_sgpio_set;
+       gpio->chip.set_config = NULL;
+       gpio->chip.label = dev_name(&pdev->dev);
+       gpio->chip.base = -1;
+
+       /* set all SGPIO pins as input (1). */
+       memset(gpio->dir_in, 0xff, sizeof(gpio->dir_in));
+
+       aspeed_sgpio_setup_irqs(gpio, pdev);
+
+       rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
+       if (rc < 0)
+               return rc;
+
+       return 0;
+}
+
+static struct platform_driver aspeed_sgpio_driver = {
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .of_match_table = aspeed_sgpio_of_table,
+       },
+};
+
+module_platform_driver_probe(aspeed_sgpio_driver, aspeed_sgpio_probe);
+MODULE_DESCRIPTION("Aspeed Serial GPIO Driver");
+MODULE_LICENSE("GPL");
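As a quick check of the divider formula quoted from the datasheet in the probe above (the clock numbers below are assumptions for illustration, not datasheet values): a 24 MHz APB clock with a requested 1 MHz bus-frequency gives GPIO254[31:16] = 24000000 / (2 * 1000000) - 1 = 11, which fits the 16-bit field. The same sketch also shows how the GPIO_BANK/GPIO_OFFSET/GPIO_BIT macros split a linear offset:

#include <stdio.h>

/* Standalone sketch of the divider and bank/bit arithmetic used by the
 * driver; the input values are made up for the example. */
int main(void)
{
	unsigned long apb_freq = 24000000;	/* assumed PCLK rate */
	unsigned int sgpio_freq = 1000000;	/* assumed "bus-frequency" */
	unsigned int offset = 37;		/* arbitrary GPIO offset */

	unsigned int clk_div = apb_freq / (sgpio_freq * 2) - 1;

	printf("clk_div = %u (must fit in 16 bits)\n", clk_div);	/* 11 */
	printf("bank = %u, bit mask = 0x%x\n",
	       offset >> 5, 1u << (offset & 0x1f));	/* bank 1, 0x20 */
	return 0;
}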
index 9b384a94d2f34b608108f5b971d0475781787ffa..3e35a8f2c5e553e4c714deff5e838ac1510a2f59 100644 (file)
@@ -574,6 +574,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
        { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+       { 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0, 0, 0, 0, 0 },
index f539a2a927747add782a608796775299128bd184..7398b4850649bd64c45061c10df4f6faec68d31e 100644 (file)
@@ -534,21 +534,24 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
                               struct drm_sched_entity *entity)
 {
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
-       unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
-       struct dma_fence *other = centity->fences[idx];
+       struct dma_fence *other;
+       unsigned idx;
+       long r;
 
-       if (other) {
-               signed long r;
-               r = dma_fence_wait(other, true);
-               if (r < 0) {
-                       if (r != -ERESTARTSYS)
-                               DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+       spin_lock(&ctx->ring_lock);
+       idx = centity->sequence & (amdgpu_sched_jobs - 1);
+       other = dma_fence_get(centity->fences[idx]);
+       spin_unlock(&ctx->ring_lock);
 
-                       return r;
-               }
-       }
+       if (!other)
+               return 0;
 
-       return 0;
+       r = dma_fence_wait(other, true);
+       if (r < 0 && r != -ERESTARTSYS)
+               DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+
+       dma_fence_put(other);
+       return r;
 }
 
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
index 4ea67f94cae2417ad0269e187fe7809ca3b200bf..c066e1d3f981da65d7d19f2c9615a121c1aa19f0 100644 (file)
@@ -596,14 +596,14 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
        case CHIP_VEGA20:
                break;
        case CHIP_RAVEN:
-               if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
-                       break;
-               if ((adev->gfx.rlc_fw_version != 106 &&
-                    adev->gfx.rlc_fw_version < 531) ||
-                   (adev->gfx.rlc_fw_version == 53815) ||
-                   (adev->gfx.rlc_feature_version < 1) ||
-                   !adev->gfx.rlc.is_rlc_v2_1)
+               if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
+                   ((adev->gfx.rlc_fw_version != 106 &&
+                     adev->gfx.rlc_fw_version < 531) ||
+                    (adev->gfx.rlc_fw_version == 53815) ||
+                    (adev->gfx.rlc_feature_version < 1) ||
+                    !adev->gfx.rlc.is_rlc_v2_1))
                        adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
                if (adev->pm.pp_feature & PP_GFXOFF_MASK)
                        adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
                                AMD_PG_SUPPORT_CP |
index f27c6fbb192ed20133982db6df494893939f482a..90c4e87ac5ada9b09acc99d934df065e04a949d1 100644 (file)
@@ -2101,7 +2101,11 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
        if (ret)
                return ret;
 
-       *query = metrics_table.CurrSocketPower << 8;
+       /* SMU firmware 40.46 reports socket power in AverageSocketPower */
+       if (hwmgr->smu_version == 0x282e00)
+               *query = metrics_table.AverageSocketPower << 8;
+       else
+               *query = metrics_table.CurrSocketPower << 8;
 
        return ret;
 }
@@ -2349,12 +2353,16 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
                data->dpm_table.soc_table.dpm_state.soft_max_level =
                data->dpm_table.soc_table.dpm_levels[soft_level].value;
 
-       ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+       ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+                                                FEATURE_DPM_UCLK_MASK |
+                                                FEATURE_DPM_SOCCLK_MASK);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload boot level to highest!",
                        return ret);
 
-       ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+       ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+                                                FEATURE_DPM_UCLK_MASK |
+                                                FEATURE_DPM_SOCCLK_MASK);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload dpm max level to highest!",
                        return ret);
@@ -2387,12 +2395,16 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
                data->dpm_table.soc_table.dpm_state.soft_max_level =
                data->dpm_table.soc_table.dpm_levels[soft_level].value;
 
-       ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+       ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+                                                FEATURE_DPM_UCLK_MASK |
+                                                FEATURE_DPM_SOCCLK_MASK);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload boot level to highest!",
                        return ret);
 
-       ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+       ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+                                                FEATURE_DPM_UCLK_MASK |
+                                                FEATURE_DPM_SOCCLK_MASK);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload dpm max level to highest!",
                        return ret);
@@ -2403,14 +2415,54 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 
 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 {
+       struct vega20_hwmgr *data =
+                       (struct vega20_hwmgr *)(hwmgr->backend);
+       uint32_t soft_min_level, soft_max_level;
        int ret = 0;
 
-       ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+       /* gfxclk soft min/max settings */
+       soft_min_level =
+               vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+       soft_max_level =
+               vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+
+       data->dpm_table.gfx_table.dpm_state.soft_min_level =
+               data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
+       data->dpm_table.gfx_table.dpm_state.soft_max_level =
+               data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
+
+       /* uclk soft min/max settings */
+       soft_min_level =
+               vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+       soft_max_level =
+               vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+       data->dpm_table.mem_table.dpm_state.soft_min_level =
+               data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
+       data->dpm_table.mem_table.dpm_state.soft_max_level =
+               data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
+
+       /* socclk soft min/max settings */
+       soft_min_level =
+               vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
+       soft_max_level =
+               vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
+
+       data->dpm_table.soc_table.dpm_state.soft_min_level =
+               data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
+       data->dpm_table.soc_table.dpm_state.soft_max_level =
+               data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
+
+       ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+                                                FEATURE_DPM_UCLK_MASK |
+                                                FEATURE_DPM_SOCCLK_MASK);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload DPM Bootup Levels!",
                        return ret);
 
-       ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+       ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+                                                FEATURE_DPM_UCLK_MASK |
+                                                FEATURE_DPM_SOCCLK_MASK);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload DPM Max Levels!",
                        return ret);
index dd6fd1c8bf24e0db3a4d16be0f7817dade68dde5..6a14497257e43b8bc906f42b5d0c68fccdc93e1b 100644 (file)
@@ -3050,6 +3050,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu,
 
 static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
 {
+       uint32_t smu_version;
        int ret = 0;
        SmuMetrics_t metrics;
 
@@ -3060,7 +3061,15 @@ static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
        if (ret)
                return ret;
 
-       *value = metrics.CurrSocketPower << 8;
+       ret = smu_get_smc_version(smu, NULL, &smu_version);
+       if (ret)
+               return ret;
+
+       /* SMU firmware 40.46 reports socket power in AverageSocketPower */
+       if (smu_version == 0x282e00)
+               *value = metrics.AverageSocketPower << 8;
+       else
+               *value = metrics.CurrSocketPower << 8;
 
        return 0;
 }
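Both power-readback paths (the powerplay hwmgr one earlier and the swSMU one here) key off the packed SMU firmware version. Going by the "40.46" comment, 0x282e00 appears to pack major/minor/patch one byte each (0x28 = 40, 0x2e = 46); that byte layout is inferred from the comment, not from any header. A tiny decode sketch under that assumption:

#include <stdio.h>

/* Decode a packed SMU version, assuming one byte per component. */
int main(void)
{
	unsigned int v = 0x282e00;

	printf("%u.%u.%u\n",
	       (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);	/* 40.46.0 */
	return 0;
}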
index a0eabc134dd6943f482302470caac50778ae07e3..9d4d5075cc647ef6686afba1cac57539bd64e7a6 100644 (file)
@@ -127,7 +127,7 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
        pipe->of_output_port =
                of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT);
 
-       pipe->of_node = np;
+       pipe->of_node = of_node_get(np);
 
        return 0;
 }
index d50e75f0b2bdce5c5bdf3daa6dcfcdff2f5e0c2a..69d9e26c60c812769164996f68aa299127371053 100644 (file)
@@ -14,8 +14,8 @@
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_irq.h>
-#include <drm/drm_vblank.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
 
 #include "komeda_dev.h"
 #include "komeda_framebuffer.h"
@@ -147,7 +147,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
        struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
        struct komeda_plane_state *kplane_st;
        struct drm_plane_state *plane_st;
-       struct drm_framebuffer *fb;
        struct drm_plane *plane;
        struct list_head zorder_list;
        int order = 0, err;
@@ -173,7 +172,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
 
        list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
                plane_st = &kplane_st->base;
-               fb = plane_st->fb;
                plane = plane_st->plane;
 
                plane_st->normalized_zpos = order++;
@@ -206,7 +204,7 @@ static int komeda_kms_check(struct drm_device *dev,
                            struct drm_atomic_state *state)
 {
        struct drm_crtc *crtc;
-       struct drm_crtc_state *old_crtc_st, *new_crtc_st;
+       struct drm_crtc_state *new_crtc_st;
        int i, err;
 
        err = drm_atomic_helper_check_modeset(dev, state);
@@ -217,7 +215,7 @@ static int komeda_kms_check(struct drm_device *dev,
         * so need to add all affected_planes (even unchanged) to
         * drm_atomic_state.
         */
-       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) {
+       for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
                err = drm_atomic_add_affected_planes(state, crtc);
                if (err)
                        return err;
@@ -308,11 +306,11 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
                               komeda_kms_irq_handler, IRQF_SHARED,
                               drm->driver->name, drm);
        if (err)
-               goto cleanup_mode_config;
+               goto free_component_binding;
 
        err = mdev->funcs->enable_irq(mdev);
        if (err)
-               goto cleanup_mode_config;
+               goto free_component_binding;
 
        drm->irq_enabled = true;
 
@@ -320,15 +318,21 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
 
        err = drm_dev_register(drm, 0);
        if (err)
-               goto cleanup_mode_config;
+               goto free_interrupts;
 
        return kms;
 
-cleanup_mode_config:
+free_interrupts:
        drm_kms_helper_poll_fini(drm);
        drm->irq_enabled = false;
+       mdev->funcs->disable_irq(mdev);
+free_component_binding:
+       component_unbind_all(mdev->dev, drm);
+cleanup_mode_config:
        drm_mode_config_cleanup(drm);
        komeda_kms_cleanup_private_objs(kms);
+       drm->dev_private = NULL;
+       drm_dev_put(drm);
 free_kms:
        kfree(kms);
        return ERR_PTR(err);
@@ -339,13 +343,14 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
        struct drm_device *drm = &kms->base;
        struct komeda_dev *mdev = drm->dev_private;
 
-       drm->irq_enabled = false;
-       mdev->funcs->disable_irq(mdev);
        drm_dev_unregister(drm);
        drm_kms_helper_poll_fini(drm);
+       drm_atomic_helper_shutdown(drm);
+       drm->irq_enabled = false;
+       mdev->funcs->disable_irq(mdev);
        component_unbind_all(mdev->dev, drm);
-       komeda_kms_cleanup_private_objs(kms);
        drm_mode_config_cleanup(drm);
+       komeda_kms_cleanup_private_objs(kms);
        drm->dev_private = NULL;
        drm_dev_put(drm);
 }
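The attach/detach reshuffle above mostly restores the usual rule that teardown mirrors setup in reverse order, with one goto label per step that has already succeeded. A generic, self-contained sketch of that shape (the step functions are placeholders, nothing Komeda-specific):

#include <stdio.h>

static int step_a(void)   { puts("a");      return 0; }
static int step_b(void)   { puts("b");      return 0; }
static int step_c(void)   { puts("c");      return -1; }	/* simulate failure */
static void undo_b(void)  { puts("undo b"); }
static void undo_a(void)  { puts("undo a"); }

/* Each failure label unwinds exactly the steps that already succeeded. */
static int example_attach(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto fail_a;

	err = step_c();
	if (err)
		goto fail_b;

	return 0;

fail_b:
	undo_b();
fail_a:
	undo_a();
	return err;
}

int main(void)
{
	return example_attach() ? 1 : 0;
}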
index a90bcbb3cb23d6fc71a76750d69da9c1324cb098..14b683164544d8c2ea7d8d272f6f5caadaa4427f 100644 (file)
@@ -480,6 +480,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
                                   struct seq_file *sf);
 
 /* component APIs */
+extern __printf(10, 11)
 struct komeda_component *
 komeda_component_add(struct komeda_pipeline *pipe,
                     size_t comp_sz, u32 id, u32 hw_id,
index 617e1f7b8472e874250bf5a951856df9fa6de36c..2851cac94d8699883dbaa9a4e9b8f202e33da0eb 100644 (file)
@@ -148,7 +148,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
        if (!kcrtc->master->wb_layer)
                return 0;
 
-       kwb_conn = kzalloc(sizeof(*wb_conn), GFP_KERNEL);
+       kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL);
        if (!kwb_conn)
                return -ENOMEM;
 
index b0369e690f36cf67a9d60eac23f7b767dbb66074..c814bcef18a4007d90d5dcb85fd8abb6c69a985b 100644 (file)
@@ -1454,6 +1454,7 @@ static int drm_mode_parse_cmdline_refresh(const char *str, char **end_ptr,
 }
 
 static int drm_mode_parse_cmdline_extra(const char *str, int length,
+                                       bool freestanding,
                                        const struct drm_connector *connector,
                                        struct drm_cmdline_mode *mode)
 {
@@ -1462,9 +1463,15 @@ static int drm_mode_parse_cmdline_extra(const char *str, int length,
        for (i = 0; i < length; i++) {
                switch (str[i]) {
                case 'i':
+                       if (freestanding)
+                               return -EINVAL;
+
                        mode->interlace = true;
                        break;
                case 'm':
+                       if (freestanding)
+                               return -EINVAL;
+
                        mode->margins = true;
                        break;
                case 'D':
@@ -1542,6 +1549,7 @@ static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length,
                        if (extras) {
                                int ret = drm_mode_parse_cmdline_extra(end_ptr + i,
                                                                       1,
+                                                                      false,
                                                                       connector,
                                                                       mode);
                                if (ret)
@@ -1669,6 +1677,22 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
        return 0;
 }
 
+static const char * const drm_named_modes_whitelist[] = {
+       "NTSC",
+       "PAL",
+};
+
+static bool drm_named_mode_is_in_whitelist(const char *mode, unsigned int size)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++)
+               if (!strncmp(mode, drm_named_modes_whitelist[i], size))
+                       return true;
+
+       return false;
+}
+
 /**
  * drm_mode_parse_command_line_for_connector - parse command line modeline for connector
  * @mode_option: optional per connector mode option
@@ -1725,16 +1749,30 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
         * bunch of things:
         *   - We need to make sure that the first character (which
         *     would be our resolution in X) is a digit.
-        *   - However, if the X resolution is missing, then we end up
-        *     with something like x<yres>, with our first character
-        *     being an alpha-numerical character, which would be
-        *     considered a named mode.
+        *   - If not, it is either a named mode or a force on/off flag.
+        *     To distinguish between the two, we run the extra parsing
+        *     function; if that fails, we treat the string as a named
+        *     mode.
         *
         * If this isn't enough, we should add more heuristics here,
         * and matching unit-tests.
         */
-       if (!isdigit(name[0]) && name[0] != 'x')
+       if (!isdigit(name[0]) && name[0] != 'x') {
+               unsigned int namelen = strlen(name);
+
+               /*
+                * Only the force on/off options can appear on their own
+                * here, and they are all a single character.
+                */
+               if (namelen == 1) {
+                       ret = drm_mode_parse_cmdline_extra(name, namelen, true,
+                                                          connector, mode);
+                       if (!ret)
+                               return true;
+               }
+
                named_mode = true;
+       }
 
        /* Try to locate the bpp and refresh specifiers, if any */
        bpp_ptr = strchr(name, '-');
@@ -1772,6 +1810,10 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
        if (named_mode) {
                if (mode_end + 1 > DRM_DISPLAY_MODE_LEN)
                        return false;
+
+               if (!drm_named_mode_is_in_whitelist(name, mode_end))
+                       return false;
+
                strscpy(mode->name, name, mode_end + 1);
        } else {
                ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
@@ -1811,7 +1853,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
            extra_ptr != options_ptr) {
                int len = strlen(name) - (extra_ptr - name);
 
-               ret = drm_mode_parse_cmdline_extra(extra_ptr, len,
+               ret = drm_mode_parse_cmdline_extra(extra_ptr, len, false,
                                                   connector, mode);
                if (ret)
                        return false;
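Taken together, these parser changes mean a connector option on the kernel command line can now be just a bare force flag, named modes are restricted to the whitelist above, and interlace or margin flags on their own are rejected. Illustrative video= forms (the connector name is only an example):

	video=HDMI-A-1:e                force the connector on
	video=HDMI-A-1:d                force the connector off
	video=HDMI-A-1:D                force on; digital where the connector has both
	video=HDMI-A-1:NTSC             whitelisted named mode
	video=HDMI-A-1:1920x1080@60m    margins attached to a full mode
	video=HDMI-A-1:m                rejected: margins need a mode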
index 60652ebbdf610a1cfda5228147265c77fe30f438..8aa6a31e8ad07d8e02f4e88142e665ce20997c19 100644 (file)
@@ -128,7 +128,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
        limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
 
        limits.min_bpp = intel_dp_min_bpp(pipe_config);
-       limits.max_bpp = pipe_config->pipe_bpp;
+       /*
+        * FIXME: If all the streams can't fit into the link with
+        * their current pipe_bpp we should reduce pipe_bpp across
+        * the board until things start to fit. Until then we
+        * limit to <= 8bpc since that's what was hardcoded for all
+        * MST streams previously. This hack should be removed once
+        * we have the proper retry logic in place.
+        */
+       limits.max_bpp = min(pipe_config->pipe_bpp, 24);
 
        intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
 
@@ -539,7 +547,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 
        intel_attach_force_audio_property(connector);
        intel_attach_broadcast_rgb_property(connector);
-       drm_connector_attach_max_bpc_property(connector, 6, 12);
+
+       /*
+        * Reuse the prop from the SST connector because we're
+        * not allowed to create new props after device registration.
+        */
+       connector->max_bpc_property =
+               intel_dp->attached_connector->base.max_bpc_property;
+       if (connector->max_bpc_property)
+               drm_connector_attach_max_bpc_property(connector, 6, 12);
 
        return connector;
 
index ffec807b89600e6b3c63e4b222cc5f6cdcbe2317..f413904a3e960ffbc1895ee554846a024994761a 100644 (file)
@@ -541,7 +541,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
        pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
                DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
        DRM_INFO("PPS2 = 0x%08x\n", pps_val);
-       if (encoder->type == INTEL_OUTPUT_EDP) {
+       if (cpu_transcoder == TRANSCODER_EDP) {
                I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
                /*
                 * If 2 VDSC instances are needed, configure PPS for second
index 2caa594322bc3f977e6d5720bf1f4a545fd89b43..528b6167833456d9fc6c5a0f8a53197c980a80d1 100644 (file)
@@ -664,15 +664,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 
        for_each_sgt_page(page, sgt_iter, pages) {
                if (obj->mm.dirty)
-                       /*
-                        * As this may not be anonymous memory (e.g. shmem)
-                        * but exist on a real mapping, we have to lock
-                        * the page in order to dirty it -- holding
-                        * the page reference is not sufficient to
-                        * prevent the inode from being truncated.
-                        * Play safe and take the lock.
-                        */
-                       set_page_dirty_lock(page);
+                       set_page_dirty(page);
 
                mark_page_accessed(page);
                put_page(page);
index 98dfb086320fad58f95b6be36d11a58c7af1fc4d..99e8242194c00891698ce9659e9960efe0d39b4e 100644 (file)
@@ -308,11 +308,6 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
                          FLOW_CONTROL_ENABLE |
                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
-       /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
-       if (!IS_COFFEELAKE(i915))
-               WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
-                                 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
-
        /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
        /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
        WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
index f62e3397d9363e81185ace4663f14243b3e24855..bac1ee94f63fc0be121cb371cd9e38351494c322 100644 (file)
@@ -1598,6 +1598,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
        pci_set_master(pdev);
 
+       /*
+        * We don't have a max segment size, so set it to the max so sg's
+        * debugging layer doesn't complain
+        */
+       dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
        /* overlay on gen2 is broken and can't address above 1G */
        if (IS_GEN(dev_priv, 2)) {
                ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
index 94d3992b599d24a88c473839e831ab81f903427b..724627afdedc2ef70430e2bee94518ad8c0e09c8 100644 (file)
@@ -101,6 +101,9 @@ static struct _balloon_info_ bl_info;
 static void vgt_deballoon_space(struct i915_ggtt *ggtt,
                                struct drm_mm_node *node)
 {
+       if (!drm_mm_node_allocated(node))
+               return;
+
        DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
                         node->start,
                         node->start + node->size,
index e9f9e9fb9b1773a1a40d6b1e9a8a063a10e8f530..6381652a8829127800300e16a118400c3535029b 100644 (file)
@@ -656,10 +656,9 @@ static int ingenic_drm_probe(struct platform_device *pdev)
                return ret;
        }
 
-       if (panel) {
+       if (panel)
                bridge = devm_drm_panel_bridge_add(dev, panel,
-                                                  DRM_MODE_CONNECTOR_Unknown);
-       }
+                                                  DRM_MODE_CONNECTOR_DPI);
 
        priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc),
                                              &priv->dma_hwdesc_phys,
index 477c0f766663de92dfd4bf263e7e374896eef73a..b609dc030d6ca906f61ffd8bba47dce8bf1e644d 100644 (file)
@@ -342,7 +342,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
        timeout = drm_timeout_abs_to_jiffies(timeout_ns);
 
        ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
-       if (ret == 0)
+       if (ret == -ETIME)
                ret = timeout ? -ETIMEDOUT : -EBUSY;
 
        return ret;
index 84a2f243ed9bd16a59bf752f1f01602c72519c03..4695f1c8e33f4f32219102ac569462bb7516a1ea 100644 (file)
@@ -190,6 +190,9 @@ MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
 MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
 MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
 MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
 MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
@@ -210,6 +213,9 @@ MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
 MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
 MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
 MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
 MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
@@ -230,6 +236,9 @@ MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
 MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
 MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
 MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
 MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
@@ -250,3 +259,6 @@ MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
 MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
index de0f882f0f7b07d7b591df64b38f14c4d6a6fac0..14b41de44ebcda9a6c6ebda7de21817b22de2a90 100644 (file)
@@ -4,6 +4,7 @@
  * Author: Archit Taneja <archit@ti.com>
  */
 
+#include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -20,7 +21,8 @@ int omapdss_device_init_output(struct omap_dss_device *out)
 {
        struct device_node *remote_node;
 
-       remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0);
+       remote_node = of_graph_get_remote_node(out->dev->of_node,
+                                              ffs(out->of_ports) - 1, 0);
        if (!remote_node) {
                dev_dbg(out->dev, "failed to find video sink\n");
                return 0;
index 92ac995dd9c66be77484f5f634d463098607f91a..6e8145c36e933fc97dbf01214c8c93d9a4f10576 100644 (file)
@@ -222,7 +222,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
                size_t unmapped_page;
                size_t pgsize = get_pgsize(iova, len - unmapped_len);
 
-               unmapped_page = ops->unmap(ops, iova, pgsize);
+               unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
                if (!unmapped_page)
                        break;
 
@@ -247,20 +247,28 @@ static void mmu_tlb_inv_context_s1(void *cookie)
        mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
 }
 
-static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-                                    size_t granule, bool leaf, void *cookie)
-{}
-
 static void mmu_tlb_sync_context(void *cookie)
 {
        //struct panfrost_device *pfdev = cookie;
        // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
 }
 
-static const struct iommu_gather_ops mmu_tlb_ops = {
+static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
+                              void *cookie)
+{
+       mmu_tlb_sync_context(cookie);
+}
+
+static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
+                              void *cookie)
+{
+       mmu_tlb_sync_context(cookie);
+}
+
+static const struct iommu_flush_ops mmu_tlb_ops = {
        .tlb_flush_all  = mmu_tlb_inv_context_s1,
-       .tlb_add_flush  = mmu_tlb_inv_range_nosync,
-       .tlb_sync       = mmu_tlb_sync_context,
+       .tlb_flush_walk = mmu_tlb_flush_walk,
+       .tlb_flush_leaf = mmu_tlb_flush_leaf,
 };
 
 static const char *access_type_name(struct panfrost_device *pfdev,
index f33e349c4ec5b4f48db8edd7b69d11f0e4a3c83a..952201c6d821d719f6520c83462923918ebf31f4 100644 (file)
@@ -59,6 +59,11 @@ module_param_named(num_heads, qxl_num_crtc, int, 0400);
 static struct drm_driver qxl_driver;
 static struct pci_driver qxl_pci_driver;
 
+static bool is_vga(struct pci_dev *pdev)
+{
+       return pdev->class == PCI_CLASS_DISPLAY_VGA << 8;
+}
+
 static int
 qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -83,9 +88,17 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto disable_pci;
 
+       if (is_vga(pdev)) {
+               ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);
+               if (ret) {
+                       DRM_ERROR("can't get legacy vga ioports\n");
+                       goto disable_pci;
+               }
+       }
+
        ret = qxl_device_init(qdev, &qxl_driver, pdev);
        if (ret)
-               goto disable_pci;
+               goto put_vga;
 
        ret = qxl_modeset_init(qdev);
        if (ret)
@@ -105,6 +118,9 @@ modeset_cleanup:
        qxl_modeset_fini(qdev);
 unload:
        qxl_device_fini(qdev);
+put_vga:
+       if (is_vga(pdev))
+               vga_put(pdev, VGA_RSRC_LEGACY_IO);
 disable_pci:
        pci_disable_device(pdev);
 free_dev:
@@ -122,6 +138,8 @@ qxl_pci_remove(struct pci_dev *pdev)
 
        qxl_modeset_fini(qdev);
        qxl_device_fini(qdev);
+       if (is_vga(pdev))
+               vga_put(pdev, VGA_RSRC_LEGACY_IO);
 
        dev->dev_private = NULL;
        kfree(qdev);
index b45824ec7c8f2cb6eb70ea2dd24ea77246f7f09c..6d61a0eb5d64f00ee9c6adb6fabdc1ae40f9c507 100644 (file)
@@ -9,6 +9,13 @@
 
 #define cmdline_test(test)     selftest(test, test)
 
+cmdline_test(drm_cmdline_test_force_d_only)
+cmdline_test(drm_cmdline_test_force_D_only_dvi)
+cmdline_test(drm_cmdline_test_force_D_only_hdmi)
+cmdline_test(drm_cmdline_test_force_D_only_not_digital)
+cmdline_test(drm_cmdline_test_force_e_only)
+cmdline_test(drm_cmdline_test_margin_only)
+cmdline_test(drm_cmdline_test_interlace_only)
 cmdline_test(drm_cmdline_test_res)
 cmdline_test(drm_cmdline_test_res_missing_x)
 cmdline_test(drm_cmdline_test_res_missing_y)
index 14c96edb13dff89c49dce49a486752a5b3fb5129..013de9d27c35dcda2043a0e6f45082944d34fea8 100644 (file)
 
 static const struct drm_connector no_connector = {};
 
+static int drm_cmdline_test_force_e_only(void *ignored)
+{
+       struct drm_cmdline_mode mode = { };
+
+       FAIL_ON(!drm_mode_parse_command_line_for_connector("e",
+                                                          &no_connector,
+                                                          &mode));
+       FAIL_ON(mode.specified);
+       FAIL_ON(mode.refresh_specified);
+       FAIL_ON(mode.bpp_specified);
+
+       FAIL_ON(mode.rb);
+       FAIL_ON(mode.cvt);
+       FAIL_ON(mode.interlace);
+       FAIL_ON(mode.margins);
+       FAIL_ON(mode.force != DRM_FORCE_ON);
+
+       return 0;
+}
+
+static int drm_cmdline_test_force_D_only_not_digital(void *ignored)
+{
+       struct drm_cmdline_mode mode = { };
+
+       FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+                                                          &no_connector,
+                                                          &mode));
+       FAIL_ON(mode.specified);
+       FAIL_ON(mode.refresh_specified);
+       FAIL_ON(mode.bpp_specified);
+
+       FAIL_ON(mode.rb);
+       FAIL_ON(mode.cvt);
+       FAIL_ON(mode.interlace);
+       FAIL_ON(mode.margins);
+       FAIL_ON(mode.force != DRM_FORCE_ON);
+
+       return 0;
+}
+
+static const struct drm_connector connector_hdmi = {
+       .connector_type = DRM_MODE_CONNECTOR_HDMIB,
+};
+
+static int drm_cmdline_test_force_D_only_hdmi(void *ignored)
+{
+       struct drm_cmdline_mode mode = { };
+
+       FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+                                                          &connector_hdmi,
+                                                          &mode));
+       FAIL_ON(mode.specified);
+       FAIL_ON(mode.refresh_specified);
+       FAIL_ON(mode.bpp_specified);
+
+       FAIL_ON(mode.rb);
+       FAIL_ON(mode.cvt);
+       FAIL_ON(mode.interlace);
+       FAIL_ON(mode.margins);
+       FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
+
+       return 0;
+}
+
+static const struct drm_connector connector_dvi = {
+       .connector_type = DRM_MODE_CONNECTOR_DVII,
+};
+
+static int drm_cmdline_test_force_D_only_dvi(void *ignored)
+{
+       struct drm_cmdline_mode mode = { };
+
+       FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+                                                          &connector_dvi,
+                                                          &mode));
+       FAIL_ON(mode.specified);
+       FAIL_ON(mode.refresh_specified);
+       FAIL_ON(mode.bpp_specified);
+
+       FAIL_ON(mode.rb);
+       FAIL_ON(mode.cvt);
+       FAIL_ON(mode.interlace);
+       FAIL_ON(mode.margins);
+       FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
+
+       return 0;
+}
+
+static int drm_cmdline_test_force_d_only(void *ignored)
+{
+       struct drm_cmdline_mode mode = { };
+
+       FAIL_ON(!drm_mode_parse_command_line_for_connector("d",
+                                                          &no_connector,
+                                                          &mode));
+       FAIL_ON(mode.specified);
+       FAIL_ON(mode.refresh_specified);
+       FAIL_ON(mode.bpp_specified);
+
+       FAIL_ON(mode.rb);
+       FAIL_ON(mode.cvt);
+       FAIL_ON(mode.interlace);
+       FAIL_ON(mode.margins);
+       FAIL_ON(mode.force != DRM_FORCE_OFF);
+
+       return 0;
+}
+
+static int drm_cmdline_test_margin_only(void *ignored)
+{
+       struct drm_cmdline_mode mode = { };
+
+       FAIL_ON(drm_mode_parse_command_line_for_connector("m",
+                                                         &no_connector,
+                                                         &mode));
+
+       return 0;
+}
+
+static int drm_cmdline_test_interlace_only(void *ignored)
+{
+       struct drm_cmdline_mode mode = { };
+
+       FAIL_ON(drm_mode_parse_command_line_for_connector("i",
+                                                         &no_connector,
+                                                         &mode));
+
+       return 0;
+}
+
 static int drm_cmdline_test_res(void *ignored)
 {
        struct drm_cmdline_mode mode = { };
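
The selftests added above all follow the same shape: feed a single-option mode string ("e", "d", "D", "m", "i") to drm_mode_parse_command_line_for_connector() and assert the resulting drm_cmdline_mode flags with FAIL_ON(), with the bare "m" and "i" strings expected to be rejected outright. A toy standalone harness showing only the FAIL_ON pattern (the parser stand-in below is invented for illustration and accepts nothing but "e"):

#include <stdio.h>
#include <stdbool.h>

#define FAIL_ON(expr)                                                   \
        do {                                                            \
                if (expr) {                                             \
                        fprintf(stderr, "check failed: %s\n", #expr);   \
                        return -1;                                      \
                }                                                       \
        } while (0)

/* stand-in parser: accepts only a bare "e", unlike the real DRM parser */
static bool parse_option(const char *opt, bool *force_on)
{
        if (opt[0] == 'e' && opt[1] == '\0') {
                *force_on = true;
                return true;
        }
        return false;           /* e.g. a lone "m" or "i" is rejected */
}

static int test_force_e_only(void)
{
        bool force_on = false;

        FAIL_ON(!parse_option("e", &force_on));
        FAIL_ON(!force_on);
        return 0;
}

int main(void)
{
        return test_force_e_only() ? 1 : 0;
}
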
index b2da31310d24c041750b92a96f91ddf8084978c7..09b526518f5a64b75288cc5738ae9ad7e6d0ff12 100644 (file)
@@ -204,6 +204,7 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
                .interruptible = false,
                .no_wait_gpu = false
        };
+       size_t max_segment;
 
        /* wtf swapping */
        if (bo->pages)
@@ -215,8 +216,13 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
        if (!bo->pages)
                goto out;
 
-       ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
-                                       nr_pages << PAGE_SHIFT, GFP_KERNEL);
+       max_segment = virtio_max_dma_size(qdev->vdev);
+       max_segment &= PAGE_MASK;
+       if (max_segment > SCATTERLIST_MAX_SEGMENT)
+               max_segment = SCATTERLIST_MAX_SEGMENT;
+       ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
+                                         nr_pages << PAGE_SHIFT,
+                                         max_segment, GFP_KERNEL);
        if (ret)
                goto out;
        return 0;
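
The virtio-gpu hunk above bounds each scatterlist segment by the device's DMA size limit: the limit is aligned down to a page boundary with PAGE_MASK and then capped at the scatterlist maximum before being passed to __sg_alloc_table_from_pages(). A standalone sketch of the clamping arithmetic only (constants are illustrative; SCATTERLIST_MAX_SEGMENT is assumed to be UINT_MAX rounded down to a page boundary, as in kernel headers of this era):

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE               4096UL
#define PAGE_MASK               (~(PAGE_SIZE - 1))
#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)

static unsigned long clamp_max_segment(unsigned long max_dma_size)
{
        unsigned long max_segment = max_dma_size & PAGE_MASK;   /* align down */

        if (max_segment > SCATTERLIST_MAX_SEGMENT)              /* then cap */
                max_segment = SCATTERLIST_MAX_SEGMENT;
        return max_segment;
}

int main(void)
{
        /* a hypothetical limit of 1 MiB + 100 bytes rounds down to 1 MiB */
        printf("%lu\n", clamp_max_segment((1UL << 20) + 100));
        return 0;
}
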
index 59e9d05ab928b49f2c6ca1f3a0cd5fbecdd56a1b..0af048d1a8156acfdb24af337f3b3479c4bb1004 100644 (file)
@@ -353,7 +353,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
                                     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
                if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
                        kfree(reply);
-
+                       reply = NULL;
                        if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
                                /* A checkpoint occurred. Retry. */
                                continue;
@@ -377,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 
                if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
                        kfree(reply);
-
+                       reply = NULL;
                        if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
                                /* A checkpoint occurred. Retry. */
                                continue;
@@ -389,10 +389,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
                break;
        }
 
-       if (retries == RETRIES) {
-               kfree(reply);
+       if (!reply)
                return -EINVAL;
-       }
 
        *msg_len = reply_len;
        *msg     = reply;
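
The vmwgfx change above is a defensive-freeing fix: reply is set to NULL immediately after each kfree() inside the retry loop, and the final "did we get anything" test switches from the retry counter to the pointer itself, so a freed buffer can never be returned or freed twice. A toy standalone version of the pattern (names and the fake validation step are made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RETRIES 3

static int recv_msg(char **msg)
{
        char *reply = NULL;
        int i;

        for (i = 0; i < RETRIES; i++) {
                reply = strdup("candidate reply");
                if (!reply)
                        return -1;
                if (i < RETRIES - 1) {          /* pretend validation failed */
                        free(reply);
                        reply = NULL;           /* the fix: never keep a dangling pointer */
                        continue;
                }
                break;
        }

        if (!reply)             /* decided by the pointer, not by i == RETRIES */
                return -1;

        *msg = reply;
        return 0;
}

int main(void)
{
        char *msg;

        if (!recv_msg(&msg)) {
                puts(msg);
                free(msg);
        }
        return 0;
}
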
index 650dd71f9724ae605761066b75ff191dc8bd7229..2ca5668bdb626a866b9b581caa0e76af23378f28 100644 (file)
@@ -246,6 +246,16 @@ config SENSORS_ADT7475
          This driver can also be built as a module. If so, the module
          will be called adt7475.
 
+config SENSORS_AS370
+       tristate "Synaptics AS370 SoC hardware monitoring driver"
+       help
+         If you say yes here you get support for the PVT sensors of
+         the Synaptics AS370 SoC
+
+         This driver can also be built as a module. If so, the module
+         will be called as370-hwmon.
+
+
 config SENSORS_ASC7621
        tristate "Andigilog aSC7621"
        depends on I2C
@@ -1382,8 +1392,8 @@ config SENSORS_SHTC1
        tristate "Sensiron humidity and temperature sensors. SHTC1 and compat."
        depends on I2C
        help
-         If you say yes here you get support for the Sensiron SHTC1 and SHTW1
-         humidity and temperature sensors.
+         If you say yes here you get support for the Sensiron SHTC1, SHTW1,
+         and SHTC3 humidity and temperature sensors.
 
          This driver can also be built as a module. If so, the module
          will be called shtc1.
@@ -1570,16 +1580,6 @@ config SENSORS_ADC128D818
          This driver can also be built as a module. If so, the module
          will be called adc128d818.
 
-config SENSORS_ADS1015
-       tristate "Texas Instruments ADS1015"
-       depends on I2C
-       help
-         If you say yes here you get support for Texas Instruments
-         ADS1015/ADS1115 12/16-bit 4-input ADC device.
-
-         This driver can also be built as a module. If so, the module
-         will be called ads1015.
-
 config SENSORS_ADS7828
        tristate "Texas Instruments ADS7828 and compatibles"
        depends on I2C
@@ -1834,17 +1834,12 @@ config SENSORS_W83795
          will be called w83795.
 
 config SENSORS_W83795_FANCTRL
-       bool "Include automatic fan control support (DANGEROUS)"
+       bool "Include automatic fan control support"
        depends on SENSORS_W83795
        help
          If you say yes here, support for automatic fan speed control
          will be included in the driver.
 
-         This part of the code wasn't carefully reviewed and tested yet,
-         so enabling this option is strongly discouraged on production
-         servers. Only developers and testers should enable it for the
-         time being.
-
          Please also note that this option will create sysfs attribute
          files which may change in the future, so you shouldn't rely
          on them being stable.
index 8db472ea04f00880b8aee3a303847d95fb733261..c86ce4d3d36b9e67f2173cceed38466c36051657 100644 (file)
@@ -35,7 +35,6 @@ obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o
 obj-$(CONFIG_SENSORS_ADM1029)  += adm1029.o
 obj-$(CONFIG_SENSORS_ADM1031)  += adm1031.o
 obj-$(CONFIG_SENSORS_ADM9240)  += adm9240.o
-obj-$(CONFIG_SENSORS_ADS1015)  += ads1015.o
 obj-$(CONFIG_SENSORS_ADS7828)  += ads7828.o
 obj-$(CONFIG_SENSORS_ADS7871)  += ads7871.o
 obj-$(CONFIG_SENSORS_ADT7X10)  += adt7x10.o
@@ -48,6 +47,7 @@ obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o
 obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
 obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o
 obj-$(CONFIG_SENSORS_ARM_SCPI) += scpi-hwmon.o
+obj-$(CONFIG_SENSORS_AS370)    += as370-hwmon.o
 obj-$(CONFIG_SENSORS_ASC7621)  += asc7621.o
 obj-$(CONFIG_SENSORS_ASPEED)   += aspeed-pwm-tacho.o
 obj-$(CONFIG_SENSORS_ATXP1)    += atxp1.o
index 6ba1a08253f0a3cc12a7657e4c31a0853c13b963..4cf25458f0b9515e419a6f0b88047bf35809a2e5 100644 (file)
@@ -681,8 +681,8 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
 
        if (resource->caps.flags & POWER_METER_CAN_CAP) {
                if (!can_cap_in_hardware()) {
-                       dev_err(&resource->acpi_dev->dev,
-                               "Ignoring unsafe software power cap!\n");
+                       dev_warn(&resource->acpi_dev->dev,
+                                "Ignoring unsafe software power cap!\n");
                        goto skip_unsafe_cap;
                }
 
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
deleted file mode 100644 (file)
index 3727a37..0000000
--- a/drivers/hwmon/ads1015.c
+++ /dev/null
@@ -1,324 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * ads1015.c - lm_sensors driver for ads1015 12-bit 4-input ADC
- * (C) Copyright 2010
- * Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
- *
- * Based on the ads7828 driver by Steve Hardy.
- *
- * Datasheet available at: http://focus.ti.com/lit/ds/symlink/ads1015.pdf
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
-
-#include <linux/platform_data/ads1015.h>
-
-/* ADS1015 registers */
-enum {
-       ADS1015_CONVERSION = 0,
-       ADS1015_CONFIG = 1,
-};
-
-/* PGA fullscale voltages in mV */
-static const unsigned int fullscale_table[8] = {
-       6144, 4096, 2048, 1024, 512, 256, 256, 256 };
-
-/* Data rates in samples per second */
-static const unsigned int data_rate_table_1015[8] = {
-       128, 250, 490, 920, 1600, 2400, 3300, 3300
-};
-
-static const unsigned int data_rate_table_1115[8] = {
-       8, 16, 32, 64, 128, 250, 475, 860
-};
-
-#define ADS1015_DEFAULT_CHANNELS 0xff
-#define ADS1015_DEFAULT_PGA 2
-#define ADS1015_DEFAULT_DATA_RATE 4
-
-enum ads1015_chips {
-       ads1015,
-       ads1115,
-};
-
-struct ads1015_data {
-       struct device *hwmon_dev;
-       struct mutex update_lock; /* mutex protect updates */
-       struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
-       enum ads1015_chips id;
-};
-
-static int ads1015_read_adc(struct i2c_client *client, unsigned int channel)
-{
-       u16 config;
-       struct ads1015_data *data = i2c_get_clientdata(client);
-       unsigned int pga = data->channel_data[channel].pga;
-       unsigned int data_rate = data->channel_data[channel].data_rate;
-       unsigned int conversion_time_ms;
-       const unsigned int * const rate_table = data->id == ads1115 ?
-               data_rate_table_1115 : data_rate_table_1015;
-       int res;
-
-       mutex_lock(&data->update_lock);
-
-       /* get channel parameters */
-       res = i2c_smbus_read_word_swapped(client, ADS1015_CONFIG);
-       if (res < 0)
-               goto err_unlock;
-       config = res;
-       conversion_time_ms = DIV_ROUND_UP(1000, rate_table[data_rate]);
-
-       /* setup and start single conversion */
-       config &= 0x001f;
-       config |= (1 << 15) | (1 << 8);
-       config |= (channel & 0x0007) << 12;
-       config |= (pga & 0x0007) << 9;
-       config |= (data_rate & 0x0007) << 5;
-
-       res = i2c_smbus_write_word_swapped(client, ADS1015_CONFIG, config);
-       if (res < 0)
-               goto err_unlock;
-
-       /* wait until conversion finished */
-       msleep(conversion_time_ms);
-       res = i2c_smbus_read_word_swapped(client, ADS1015_CONFIG);
-       if (res < 0)
-               goto err_unlock;
-       config = res;
-       if (!(config & (1 << 15))) {
-               /* conversion not finished in time */
-               res = -EIO;
-               goto err_unlock;
-       }
-
-       res = i2c_smbus_read_word_swapped(client, ADS1015_CONVERSION);
-
-err_unlock:
-       mutex_unlock(&data->update_lock);
-       return res;
-}
-
-static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
-                            s16 reg)
-{
-       struct ads1015_data *data = i2c_get_clientdata(client);
-       unsigned int pga = data->channel_data[channel].pga;
-       int fullscale = fullscale_table[pga];
-       const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
-
-       return DIV_ROUND_CLOSEST(reg * fullscale, mask);
-}
-
-/* sysfs callback function */
-static ssize_t in_show(struct device *dev, struct device_attribute *da,
-                      char *buf)
-{
-       struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct i2c_client *client = to_i2c_client(dev);
-       int res;
-       int index = attr->index;
-
-       res = ads1015_read_adc(client, index);
-       if (res < 0)
-               return res;
-
-       return sprintf(buf, "%d\n", ads1015_reg_to_mv(client, index, res));
-}
-
-static const struct sensor_device_attribute ads1015_in[] = {
-       SENSOR_ATTR_RO(in0_input, in, 0),
-       SENSOR_ATTR_RO(in1_input, in, 1),
-       SENSOR_ATTR_RO(in2_input, in, 2),
-       SENSOR_ATTR_RO(in3_input, in, 3),
-       SENSOR_ATTR_RO(in4_input, in, 4),
-       SENSOR_ATTR_RO(in5_input, in, 5),
-       SENSOR_ATTR_RO(in6_input, in, 6),
-       SENSOR_ATTR_RO(in7_input, in, 7),
-};
-
-/*
- * Driver interface
- */
-
-static int ads1015_remove(struct i2c_client *client)
-{
-       struct ads1015_data *data = i2c_get_clientdata(client);
-       int k;
-
-       hwmon_device_unregister(data->hwmon_dev);
-       for (k = 0; k < ADS1015_CHANNELS; ++k)
-               device_remove_file(&client->dev, &ads1015_in[k].dev_attr);
-       return 0;
-}
-
-#ifdef CONFIG_OF
-static int ads1015_get_channels_config_of(struct i2c_client *client)
-{
-       struct ads1015_data *data = i2c_get_clientdata(client);
-       struct device_node *node;
-
-       if (!client->dev.of_node
-           || !of_get_next_child(client->dev.of_node, NULL))
-               return -EINVAL;
-
-       for_each_child_of_node(client->dev.of_node, node) {
-               u32 pval;
-               unsigned int channel;
-               unsigned int pga = ADS1015_DEFAULT_PGA;
-               unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE;
-
-               if (of_property_read_u32(node, "reg", &pval)) {
-                       dev_err(&client->dev, "invalid reg on %pOF\n", node);
-                       continue;
-               }
-
-               channel = pval;
-               if (channel >= ADS1015_CHANNELS) {
-                       dev_err(&client->dev,
-                               "invalid channel index %d on %pOF\n",
-                               channel, node);
-                       continue;
-               }
-
-               if (!of_property_read_u32(node, "ti,gain", &pval)) {
-                       pga = pval;
-                       if (pga > 6) {
-                               dev_err(&client->dev, "invalid gain on %pOF\n",
-                                       node);
-                               return -EINVAL;
-                       }
-               }
-
-               if (!of_property_read_u32(node, "ti,datarate", &pval)) {
-                       data_rate = pval;
-                       if (data_rate > 7) {
-                               dev_err(&client->dev,
-                                       "invalid data_rate on %pOF\n", node);
-                               return -EINVAL;
-                       }
-               }
-
-               data->channel_data[channel].enabled = true;
-               data->channel_data[channel].pga = pga;
-               data->channel_data[channel].data_rate = data_rate;
-       }
-
-       return 0;
-}
-#endif
-
-static void ads1015_get_channels_config(struct i2c_client *client)
-{
-       unsigned int k;
-       struct ads1015_data *data = i2c_get_clientdata(client);
-       struct ads1015_platform_data *pdata = dev_get_platdata(&client->dev);
-
-       /* prefer platform data */
-       if (pdata) {
-               memcpy(data->channel_data, pdata->channel_data,
-                      sizeof(data->channel_data));
-               return;
-       }
-
-#ifdef CONFIG_OF
-       if (!ads1015_get_channels_config_of(client))
-               return;
-#endif
-
-       /* fallback on default configuration */
-       for (k = 0; k < ADS1015_CHANNELS; ++k) {
-               data->channel_data[k].enabled = true;
-               data->channel_data[k].pga = ADS1015_DEFAULT_PGA;
-               data->channel_data[k].data_rate = ADS1015_DEFAULT_DATA_RATE;
-       }
-}
-
-static int ads1015_probe(struct i2c_client *client,
-                        const struct i2c_device_id *id)
-{
-       struct ads1015_data *data;
-       int err;
-       unsigned int k;
-
-       data = devm_kzalloc(&client->dev, sizeof(struct ads1015_data),
-                           GFP_KERNEL);
-       if (!data)
-               return -ENOMEM;
-
-       if (client->dev.of_node)
-               data->id = (enum ads1015_chips)
-                       of_device_get_match_data(&client->dev);
-       else
-               data->id = id->driver_data;
-       i2c_set_clientdata(client, data);
-       mutex_init(&data->update_lock);
-
-       /* build sysfs attribute group */
-       ads1015_get_channels_config(client);
-       for (k = 0; k < ADS1015_CHANNELS; ++k) {
-               if (!data->channel_data[k].enabled)
-                       continue;
-               err = device_create_file(&client->dev, &ads1015_in[k].dev_attr);
-               if (err)
-                       goto exit_remove;
-       }
-
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove;
-       }
-
-       return 0;
-
-exit_remove:
-       for (k = 0; k < ADS1015_CHANNELS; ++k)
-               device_remove_file(&client->dev, &ads1015_in[k].dev_attr);
-       return err;
-}
-
-static const struct i2c_device_id ads1015_id[] = {
-       { "ads1015",  ads1015},
-       { "ads1115",  ads1115},
-       { }
-};
-MODULE_DEVICE_TABLE(i2c, ads1015_id);
-
-static const struct of_device_id __maybe_unused ads1015_of_match[] = {
-       {
-               .compatible = "ti,ads1015",
-               .data = (void *)ads1015
-       },
-       {
-               .compatible = "ti,ads1115",
-               .data = (void *)ads1115
-       },
-       { },
-};
-MODULE_DEVICE_TABLE(of, ads1015_of_match);
-
-static struct i2c_driver ads1015_driver = {
-       .driver = {
-               .name = "ads1015",
-               .of_match_table = of_match_ptr(ads1015_of_match),
-       },
-       .probe = ads1015_probe,
-       .remove = ads1015_remove,
-       .id_table = ads1015_id,
-};
-
-module_i2c_driver(ads1015_driver);
-
-MODULE_AUTHOR("Dirk Eibach <eibach@gdsys.de>");
-MODULE_DESCRIPTION("ADS1015 driver");
-MODULE_LICENSE("GPL");
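
The hwmon ads1015 driver deleted above converted a raw conversion-register reading to millivolts as mV = reg * fullscale / mask, with mask 0x7fff for the 16-bit ADS1115 and 0x7ff0 for the left-aligned 12-bit ADS1015. A standalone check of that arithmetic for non-negative readings (the sample register value is made up):

#include <stdio.h>

/* rounded divide for non-negative values, standing in for DIV_ROUND_CLOSEST */
static int reg_to_mv(int reg, int fullscale_mv, int is_ads1115)
{
        const int mask = is_ads1115 ? 0x7fff : 0x7ff0;

        return (reg * fullscale_mv + mask / 2) / mask;
}

int main(void)
{
        /* half of positive full scale on a 12-bit part with the +/-2048 mV PGA */
        printf("%d mV\n", reg_to_mv(0x3ff8, 2048, 0));   /* prints 1024 mV */
        return 0;
}
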
index c3c6031a728520d9e0035de8ff2e5e02d693d41f..6c64d50c9aae8ec175d9477fa98af6c851849d2f 100644 (file)
@@ -187,7 +187,7 @@ static const struct of_device_id __maybe_unused adt7475_of_match[] = {
 MODULE_DEVICE_TABLE(of, adt7475_of_match);
 
 struct adt7475_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
        struct mutex lock;
 
        unsigned long measure_updated;
@@ -212,6 +212,7 @@ struct adt7475_data {
 
        u8 vid;
        u8 vrm;
+       const struct attribute_group *groups[9];
 };
 
 static struct i2c_driver adt7475_driver;
@@ -346,8 +347,8 @@ static ssize_t voltage_store(struct device *dev,
 {
 
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        unsigned char reg;
        long val;
 
@@ -440,8 +441,8 @@ static ssize_t temp_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        unsigned char reg = 0;
        u8 out;
        int temp;
@@ -542,8 +543,7 @@ static ssize_t temp_st_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
 {
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
        long val;
 
        switch (sattr->index) {
@@ -570,8 +570,8 @@ static ssize_t temp_st_store(struct device *dev,
                             size_t count)
 {
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        unsigned char reg;
        int shift, idx;
        ulong val;
@@ -647,8 +647,8 @@ static ssize_t point2_show(struct device *dev, struct device_attribute *attr,
 static ssize_t point2_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
        int temp;
        long val;
@@ -710,8 +710,8 @@ static ssize_t tach_store(struct device *dev, struct device_attribute *attr,
 {
 
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        unsigned long val;
 
        if (kstrtoul(buf, 10, &val))
@@ -769,8 +769,8 @@ static ssize_t pwm_store(struct device *dev, struct device_attribute *attr,
 {
 
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        unsigned char reg = 0;
        long val;
 
@@ -818,8 +818,8 @@ static ssize_t stall_disable_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
 {
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
+
        u8 mask = BIT(5 + sattr->index);
 
        return sprintf(buf, "%d\n", !!(data->enh_acoustics[0] & mask));
@@ -830,8 +830,8 @@ static ssize_t stall_disable_store(struct device *dev,
                                   const char *buf, size_t count)
 {
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        long val;
        u8 mask = BIT(5 + sattr->index);
 
@@ -914,8 +914,8 @@ static ssize_t pwmchan_store(struct device *dev,
                             size_t count)
 {
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        int r;
        long val;
 
@@ -938,8 +938,8 @@ static ssize_t pwmctrl_store(struct device *dev,
                             size_t count)
 {
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        int r;
        long val;
 
@@ -982,8 +982,8 @@ static ssize_t pwmfreq_store(struct device *dev,
                             size_t count)
 {
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        int out;
        long val;
 
@@ -1022,8 +1022,8 @@ static ssize_t pwm_use_point2_pwm_at_crit_store(struct device *dev,
                                        struct device_attribute *devattr,
                                        const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        long val;
 
        if (kstrtol(buf, 10, &val))
@@ -1342,26 +1342,6 @@ static int adt7475_detect(struct i2c_client *client,
        return 0;
 }
 
-static void adt7475_remove_files(struct i2c_client *client,
-                                struct adt7475_data *data)
-{
-       sysfs_remove_group(&client->dev.kobj, &adt7475_attr_group);
-       if (data->has_fan4)
-               sysfs_remove_group(&client->dev.kobj, &fan4_attr_group);
-       if (data->has_pwm2)
-               sysfs_remove_group(&client->dev.kobj, &pwm2_attr_group);
-       if (data->has_voltage & (1 << 0))
-               sysfs_remove_group(&client->dev.kobj, &in0_attr_group);
-       if (data->has_voltage & (1 << 3))
-               sysfs_remove_group(&client->dev.kobj, &in3_attr_group);
-       if (data->has_voltage & (1 << 4))
-               sysfs_remove_group(&client->dev.kobj, &in4_attr_group);
-       if (data->has_voltage & (1 << 5))
-               sysfs_remove_group(&client->dev.kobj, &in5_attr_group);
-       if (data->has_vid)
-               sysfs_remove_group(&client->dev.kobj, &vid_attr_group);
-}
-
 static int adt7475_update_limits(struct i2c_client *client)
 {
        struct adt7475_data *data = i2c_get_clientdata(client);
@@ -1489,7 +1469,8 @@ static int adt7475_probe(struct i2c_client *client,
        };
 
        struct adt7475_data *data;
-       int i, ret = 0, revision;
+       struct device *hwmon_dev;
+       int i, ret = 0, revision, group_num = 0;
        u8 config2, config3;
 
        data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
@@ -1497,6 +1478,7 @@ static int adt7475_probe(struct i2c_client *client,
                return -ENOMEM;
 
        mutex_init(&data->lock);
+       data->client = client;
        i2c_set_clientdata(client, data);
 
        if (client->dev.of_node)
@@ -1590,52 +1572,40 @@ static int adt7475_probe(struct i2c_client *client,
                break;
        }
 
-       ret = sysfs_create_group(&client->dev.kobj, &adt7475_attr_group);
-       if (ret)
-               return ret;
+       data->groups[group_num++] = &adt7475_attr_group;
 
        /* Features that can be disabled individually */
        if (data->has_fan4) {
-               ret = sysfs_create_group(&client->dev.kobj, &fan4_attr_group);
-               if (ret)
-                       goto eremove;
+               data->groups[group_num++] = &fan4_attr_group;
        }
        if (data->has_pwm2) {
-               ret = sysfs_create_group(&client->dev.kobj, &pwm2_attr_group);
-               if (ret)
-                       goto eremove;
+               data->groups[group_num++] = &pwm2_attr_group;
        }
        if (data->has_voltage & (1 << 0)) {
-               ret = sysfs_create_group(&client->dev.kobj, &in0_attr_group);
-               if (ret)
-                       goto eremove;
+               data->groups[group_num++] = &in0_attr_group;
        }
        if (data->has_voltage & (1 << 3)) {
-               ret = sysfs_create_group(&client->dev.kobj, &in3_attr_group);
-               if (ret)
-                       goto eremove;
+               data->groups[group_num++] = &in3_attr_group;
        }
        if (data->has_voltage & (1 << 4)) {
-               ret = sysfs_create_group(&client->dev.kobj, &in4_attr_group);
-               if (ret)
-                       goto eremove;
+               data->groups[group_num++] = &in4_attr_group;
        }
        if (data->has_voltage & (1 << 5)) {
-               ret = sysfs_create_group(&client->dev.kobj, &in5_attr_group);
-               if (ret)
-                       goto eremove;
+               data->groups[group_num++] = &in5_attr_group;
        }
        if (data->has_vid) {
                data->vrm = vid_which_vrm();
-               ret = sysfs_create_group(&client->dev.kobj, &vid_attr_group);
-               if (ret)
-                       goto eremove;
+               data->groups[group_num] = &vid_attr_group;
        }
 
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               ret = PTR_ERR(data->hwmon_dev);
-               goto eremove;
+       /* register device with all the acquired attributes */
+       hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+                                                          client->name, data,
+                                                          data->groups);
+
+       if (IS_ERR(hwmon_dev)) {
+               ret = PTR_ERR(hwmon_dev);
+               return ret;
        }
 
        dev_info(&client->dev, "%s device, revision %d\n",
@@ -1657,21 +1627,7 @@ static int adt7475_probe(struct i2c_client *client,
        /* Limits and settings, should never change update more than once */
        ret = adt7475_update_limits(client);
        if (ret)
-               goto eremove;
-
-       return 0;
-
-eremove:
-       adt7475_remove_files(client, data);
-       return ret;
-}
-
-static int adt7475_remove(struct i2c_client *client)
-{
-       struct adt7475_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       adt7475_remove_files(client, data);
+               return ret;
 
        return 0;
 }
@@ -1683,7 +1639,6 @@ static struct i2c_driver adt7475_driver = {
                .of_match_table = of_match_ptr(adt7475_of_match),
        },
        .probe          = adt7475_probe,
-       .remove         = adt7475_remove,
        .id_table       = adt7475_id,
        .detect         = adt7475_detect,
        .address_list   = normal_i2c,
@@ -1757,8 +1712,8 @@ static void adt7475_read_pwm(struct i2c_client *client, int index)
 
 static int adt7475_update_measure(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        u16 ext;
        int i;
        int ret;
@@ -1854,8 +1809,7 @@ static int adt7475_update_measure(struct device *dev)
 
 static struct adt7475_data *adt7475_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct adt7475_data *data = i2c_get_clientdata(client);
+       struct adt7475_data *data = dev_get_drvdata(dev);
        int ret;
 
        mutex_lock(&data->lock);
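
The adt7475 conversion above replaces per-group sysfs_create_group()/sysfs_remove_group() bookkeeping with a fixed, NULL-terminated array of attribute groups (groups[9]: up to eight groups plus the terminator) handed to devm_hwmon_device_register_with_groups(), which is also why the remove() callback can go away. A standalone sketch of the array-building pattern only, with invented types and feature flags:

#include <stdio.h>

struct attr_group { const char *name; };

static const struct attr_group base = { "base" };
static const struct attr_group fan4 = { "fan4" };
static const struct attr_group pwm2 = { "pwm2" };

int main(void)
{
        /* 8 optional slots + terminating NULL, mirroring groups[9] above */
        const struct attr_group *groups[9] = { NULL };
        int n = 0, has_fan4 = 1, has_pwm2 = 0;

        groups[n++] = &base;
        if (has_fan4)
                groups[n++] = &fan4;
        if (has_pwm2)
                groups[n++] = &pwm2;
        /* unused slots stay NULL, so the array remains NULL-terminated */

        for (n = 0; groups[n]; n++)
                printf("registering group %s\n", groups[n]->name);
        return 0;
}
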
diff --git a/drivers/hwmon/as370-hwmon.c b/drivers/hwmon/as370-hwmon.c
new file mode 100644 (file)
index 0000000..464244b
--- /dev/null
+++ b/drivers/hwmon/as370-hwmon.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synaptics AS370 SoC Hardware Monitoring Driver
+ *
+ * Copyright (C) 2018 Synaptics Incorporated
+ * Author: Jisheng Zhang <jszhang@kernel.org>
+ */
+
+#include <linux/bitops.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#define CTRL           0x0
+#define  PD            BIT(0)
+#define  EN            BIT(1)
+#define  T_SEL         BIT(2)
+#define  V_SEL         BIT(3)
+#define  NMOS_SEL      BIT(8)
+#define  PMOS_SEL      BIT(9)
+#define STS            0x4
+#define  BN_MASK       GENMASK(11, 0)
+#define  EOC           BIT(12)
+
+struct as370_hwmon {
+       void __iomem *base;
+};
+
+static void init_pvt(struct as370_hwmon *hwmon)
+{
+       u32 val;
+       void __iomem *addr = hwmon->base + CTRL;
+
+       val = PD;
+       writel_relaxed(val, addr);
+       val |= T_SEL;
+       writel_relaxed(val, addr);
+       val |= EN;
+       writel_relaxed(val, addr);
+       val &= ~PD;
+       writel_relaxed(val, addr);
+}
+
+static int as370_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+                           u32 attr, int channel, long *temp)
+{
+       int val;
+       struct as370_hwmon *hwmon = dev_get_drvdata(dev);
+
+       switch (attr) {
+       case hwmon_temp_input:
+               val = readl_relaxed(hwmon->base + STS) & BN_MASK;
+               *temp = DIV_ROUND_CLOSEST(val * 251802, 4096) - 85525;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static umode_t
+as370_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
+                      u32 attr, int channel)
+{
+       if (type != hwmon_temp)
+               return 0;
+
+       switch (attr) {
+       case hwmon_temp_input:
+               return 0444;
+       default:
+               return 0;
+       }
+}
+
+static const u32 as370_hwmon_temp_config[] = {
+       HWMON_T_INPUT,
+       0
+};
+
+static const struct hwmon_channel_info as370_hwmon_temp = {
+       .type = hwmon_temp,
+       .config = as370_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *as370_hwmon_info[] = {
+       &as370_hwmon_temp,
+       NULL
+};
+
+static const struct hwmon_ops as370_hwmon_ops = {
+       .is_visible = as370_hwmon_is_visible,
+       .read = as370_hwmon_read,
+};
+
+static const struct hwmon_chip_info as370_chip_info = {
+       .ops = &as370_hwmon_ops,
+       .info = as370_hwmon_info,
+};
+
+static int as370_hwmon_probe(struct platform_device *pdev)
+{
+       struct device *hwmon_dev;
+       struct as370_hwmon *hwmon;
+       struct device *dev = &pdev->dev;
+
+       hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
+       if (!hwmon)
+               return -ENOMEM;
+
+       hwmon->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(hwmon->base))
+               return PTR_ERR(hwmon->base);
+
+       init_pvt(hwmon);
+
+       hwmon_dev = devm_hwmon_device_register_with_info(dev,
+                                                        "as370",
+                                                        hwmon,
+                                                        &as370_chip_info,
+                                                        NULL);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct of_device_id as370_hwmon_match[] = {
+       { .compatible = "syna,as370-hwmon" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, as370_hwmon_match);
+
+static struct platform_driver as370_hwmon_driver = {
+       .probe = as370_hwmon_probe,
+       .driver = {
+               .name = "as370-hwmon",
+               .of_match_table = as370_hwmon_match,
+       },
+};
+module_platform_driver(as370_hwmon_driver);
+
+MODULE_AUTHOR("Jisheng Zhang<jszhang@kernel.org>");
+MODULE_DESCRIPTION("Synaptics AS370 SoC hardware monitor");
+MODULE_LICENSE("GPL v2");
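
The new as370-hwmon driver above converts the 12-bit PVT code read from the STS register to millidegrees Celsius as round(code * 251802 / 4096) - 85525. A standalone check of that formula (the sample code value is made up):

#include <stdio.h>

static long pvt_code_to_millicelsius(unsigned int code)
{
        /* rounded divide, equivalent to DIV_ROUND_CLOSEST for these values */
        return (code * 251802L + 2048) / 4096 - 85525;
}

int main(void)
{
        /* a mid-scale reading of 2048 comes out near 40.4 degrees C */
        printf("%ld millidegrees C\n", pvt_code_to_millicelsius(2048));
        return 0;
}
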
index c9fa84b256783df45824abe066bdbe1cb5896422..4c609e23a4efb93d683bcefbd6e78b4ab53ea0ea 100644 (file)
@@ -706,21 +706,21 @@ static int asb100_detect_subclients(struct i2c_client *client)
                goto ERROR_SC_2;
        }
 
-       data->lm75[0] = i2c_new_dummy(adapter, sc_addr[0]);
-       if (!data->lm75[0]) {
+       data->lm75[0] = i2c_new_dummy_device(adapter, sc_addr[0]);
+       if (IS_ERR(data->lm75[0])) {
                dev_err(&client->dev,
                        "subclient %d registration at address 0x%x failed.\n",
                        1, sc_addr[0]);
-               err = -ENOMEM;
+               err = PTR_ERR(data->lm75[0]);
                goto ERROR_SC_2;
        }
 
-       data->lm75[1] = i2c_new_dummy(adapter, sc_addr[1]);
-       if (!data->lm75[1]) {
+       data->lm75[1] = i2c_new_dummy_device(adapter, sc_addr[1]);
+       if (IS_ERR(data->lm75[1])) {
                dev_err(&client->dev,
                        "subclient %d registration at address 0x%x failed.\n",
                        2, sc_addr[1]);
-               err = -ENOMEM;
+               err = PTR_ERR(data->lm75[1]);
                goto ERROR_SC_3;
        }
 
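
The asb100 hunk above tracks an I2C core API change: i2c_new_dummy() returned NULL on failure, while i2c_new_dummy_device() returns an error pointer, so the checks become IS_ERR() and the propagated error comes from PTR_ERR() instead of a hard-coded -ENOMEM. A standalone sketch of the error-pointer convention itself, with simplified stand-ins for the kernel macros:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* toy allocator: fails with a specific errno instead of returning NULL */
static void *new_dummy_device(int busy)
{
        static int dev;

        return busy ? ERR_PTR(-EBUSY) : (void *)&dev;
}

int main(void)
{
        void *client = new_dummy_device(1);

        if (IS_ERR(client))
                printf("registration failed: %ld\n", PTR_ERR(client));
        return 0;
}
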
index fe6618e49dc436cafda1e5ad12f6a8cbd89ef7c6..d855c78fb8bea77d87df0fe2a16289a039bc63ed 100644 (file)
@@ -736,7 +736,7 @@ static int __init coretemp_init(void)
 
        err = platform_driver_register(&coretemp_driver);
        if (err)
-               return err;
+               goto outzone;
 
        err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
                                coretemp_cpu_online, coretemp_cpu_offline);
@@ -747,6 +747,7 @@ static int __init coretemp_init(void)
 
 outdrv:
        platform_driver_unregister(&coretemp_driver);
+outzone:
        kfree(zone_devices);
        return err;
 }
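
The coretemp fix above is about unwind ordering in the init path: the very first failure (platform_driver_register()) previously returned without freeing zone_devices, so a new outzone label is added below outdrv and the early error path jumps to it. A toy standalone version of the stacked-label cleanup idiom (the resources and failure points are invented):

#include <stdio.h>
#include <stdlib.h>

static int register_a(void)    { return 0;  }   /* pretend this succeeds */
static int register_b(void)    { return -1; }   /* pretend this fails    */
static void unregister_a(void) { }

static int init_like_coretemp(void)
{
        void *zone_devices;
        int err;

        zone_devices = calloc(4, sizeof(void *));
        if (!zone_devices)
                return -1;

        err = register_a();
        if (err)
                goto out_zone;  /* the fix: this path must free zone_devices too */

        err = register_b();
        if (err)
                goto out_a;

        return 0;

out_a:
        unregister_a();
out_zone:
        free(zone_devices);
        return err;
}

int main(void)
{
        printf("%d\n", init_like_coretemp());
        return 0;
}
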
index f1c2d5faedf0d76d2074b305065cf2261e6ceb72..b85a125dd86f46a26ac817bd42a718c4ad97310c 100644 (file)
@@ -44,12 +44,20 @@ static ssize_t iio_hwmon_read_val(struct device *dev,
        int ret;
        struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
        struct iio_hwmon_state *state = dev_get_drvdata(dev);
+       struct iio_channel *chan = &state->channels[sattr->index];
+       enum iio_chan_type type;
+
+       ret = iio_read_channel_processed(chan, &result);
+       if (ret < 0)
+               return ret;
 
-       ret = iio_read_channel_processed(&state->channels[sattr->index],
-                                       &result);
+       ret = iio_get_channel_type(chan, &type);
        if (ret < 0)
                return ret;
 
+       if (type == IIO_POWER)
+               result *= 1000; /* mili-Watts to micro-Watts conversion */
+
        return sprintf(buf, "%d\n", result);
 }
 
@@ -59,7 +67,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
        struct iio_hwmon_state *st;
        struct sensor_device_attribute *a;
        int ret, i;
-       int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
+       int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1, power_i = 1;
        enum iio_chan_type type;
        struct iio_channel *channels;
        struct device *hwmon_dev;
@@ -114,6 +122,10 @@ static int iio_hwmon_probe(struct platform_device *pdev)
                        n = curr_i++;
                        prefix = "curr";
                        break;
+               case IIO_POWER:
+                       n = power_i++;
+                       prefix = "power";
+                       break;
                case IIO_HUMIDITYRELATIVE:
                        n = humidity_i++;
                        prefix = "humidity";
index c77e89239dcd9a32c143203030362264d86f83b7..5c1dddde193c3e1ce50c539ba91e00b885a73190 100644 (file)
@@ -349,6 +349,7 @@ static const struct pci_device_id k10temp_id_table[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
        { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        {}
 };
index 4994c90c892986d9546765607eb7726b41adfee9..f73bd4eceb28f0e46c628b1902b3b2d6d90c0195 100644 (file)
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/jiffies.h>
 #include <linux/pci.h>
 #include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
 #include <linux/err.h>
 #include <linux/mutex.h>
 #include <asm/processor.h>
 #define SEL_CORE       0x04
 
 struct k8temp_data {
-       struct device *hwmon_dev;
        struct mutex update_lock;
-       const char *name;
-       char valid;             /* zero until following fields are valid */
-       unsigned long last_updated;     /* in jiffies */
 
        /* registers values */
        u8 sensorsp;            /* sensor presence bits - SEL_CORE, SEL_PLACE */
-       u32 temp[2][2];         /* core, place */
        u8 swap_core_select;    /* meaning of SEL_CORE is inverted */
        u32 temp_offset;
 };
 
-static struct k8temp_data *k8temp_update_device(struct device *dev)
-{
-       struct k8temp_data *data = dev_get_drvdata(dev);
-       struct pci_dev *pdev = to_pci_dev(dev);
-       u8 tmp;
-
-       mutex_lock(&data->update_lock);
-
-       if (!data->valid
-           || time_after(jiffies, data->last_updated + HZ)) {
-               pci_read_config_byte(pdev, REG_TEMP, &tmp);
-               tmp &= ~(SEL_PLACE | SEL_CORE); /* Select sensor 0, core0 */
-               pci_write_config_byte(pdev, REG_TEMP, tmp);
-               pci_read_config_dword(pdev, REG_TEMP, &data->temp[0][0]);
-
-               if (data->sensorsp & SEL_PLACE) {
-                       tmp |= SEL_PLACE;       /* Select sensor 1, core0 */
-                       pci_write_config_byte(pdev, REG_TEMP, tmp);
-                       pci_read_config_dword(pdev, REG_TEMP,
-                                             &data->temp[0][1]);
-               }
-
-               if (data->sensorsp & SEL_CORE) {
-                       tmp &= ~SEL_PLACE;      /* Select sensor 0, core1 */
-                       tmp |= SEL_CORE;
-                       pci_write_config_byte(pdev, REG_TEMP, tmp);
-                       pci_read_config_dword(pdev, REG_TEMP,
-                                             &data->temp[1][0]);
-
-                       if (data->sensorsp & SEL_PLACE) {
-                               tmp |= SEL_PLACE; /* Select sensor 1, core1 */
-                               pci_write_config_byte(pdev, REG_TEMP, tmp);
-                               pci_read_config_dword(pdev, REG_TEMP,
-                                                     &data->temp[1][1]);
-                       }
-               }
-
-               data->last_updated = jiffies;
-               data->valid = 1;
-       }
-
-       mutex_unlock(&data->update_lock);
-       return data;
-}
-
-/*
- * Sysfs stuff
- */
-
-static ssize_t name_show(struct device *dev, struct device_attribute
-                        *devattr, char *buf)
-{
-       struct k8temp_data *data = dev_get_drvdata(dev);
-
-       return sprintf(buf, "%s\n", data->name);
-}
-
-
-static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
-                        char *buf)
-{
-       struct sensor_device_attribute_2 *attr =
-           to_sensor_dev_attr_2(devattr);
-       int core = attr->nr;
-       int place = attr->index;
-       int temp;
-       struct k8temp_data *data = k8temp_update_device(dev);
-
-       if (data->swap_core_select && (data->sensorsp & SEL_CORE))
-               core = core ? 0 : 1;
-
-       temp = TEMP_FROM_REG(data->temp[core][place]) + data->temp_offset;
-
-       return sprintf(buf, "%d\n", temp);
-}
-
-/* core, place */
-
-static SENSOR_DEVICE_ATTR_2_RO(temp1_input, temp, 0, 0);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_input, temp, 0, 1);
-static SENSOR_DEVICE_ATTR_2_RO(temp3_input, temp, 1, 0);
-static SENSOR_DEVICE_ATTR_2_RO(temp4_input, temp, 1, 1);
-static DEVICE_ATTR_RO(name);
-
 static const struct pci_device_id k8temp_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { 0 },
 };
-
 MODULE_DEVICE_TABLE(pci, k8temp_ids);
 
 static int is_rev_g_desktop(u8 model)
@@ -159,14 +67,76 @@ static int is_rev_g_desktop(u8 model)
        return 1;
 }
 
+static umode_t
+k8temp_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+                 u32 attr, int channel)
+{
+       const struct k8temp_data *data = drvdata;
+
+       if ((channel & 1) && !(data->sensorsp & SEL_PLACE))
+               return 0;
+
+       if ((channel & 2) && !(data->sensorsp & SEL_CORE))
+               return 0;
+
+       return 0444;
+}
+
+static int
+k8temp_read(struct device *dev, enum hwmon_sensor_types type,
+           u32 attr, int channel, long *val)
+{
+       struct k8temp_data *data = dev_get_drvdata(dev);
+       struct pci_dev *pdev = to_pci_dev(dev->parent);
+       int core, place;
+       u32 temp;
+       u8 tmp;
+
+       core = (channel >> 1) & 1;
+       place = channel & 1;
+
+       core ^= data->swap_core_select;
+
+       mutex_lock(&data->update_lock);
+       pci_read_config_byte(pdev, REG_TEMP, &tmp);
+       tmp &= ~(SEL_PLACE | SEL_CORE);
+       if (core)
+               tmp |= SEL_CORE;
+       if (place)
+               tmp |= SEL_PLACE;
+       pci_write_config_byte(pdev, REG_TEMP, tmp);
+       pci_read_config_dword(pdev, REG_TEMP, &temp);
+       mutex_unlock(&data->update_lock);
+
+       *val = TEMP_FROM_REG(temp) + data->temp_offset;
+
+       return 0;
+}
+
+static const struct hwmon_ops k8temp_ops = {
+       .is_visible = k8temp_is_visible,
+       .read = k8temp_read,
+};
+
+static const struct hwmon_channel_info *k8temp_info[] = {
+       HWMON_CHANNEL_INFO(temp,
+               HWMON_T_INPUT, HWMON_T_INPUT, HWMON_T_INPUT, HWMON_T_INPUT),
+       NULL
+};
+
+static const struct hwmon_chip_info k8temp_chip_info = {
+       .ops = &k8temp_ops,
+       .info = k8temp_info,
+};
+
 static int k8temp_probe(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
 {
-       int err;
        u8 scfg;
        u32 temp;
        u8 model, stepping;
        struct k8temp_data *data;
+       struct device *hwmon_dev;
 
        data = devm_kzalloc(&pdev->dev, sizeof(struct k8temp_data), GFP_KERNEL);
        if (!data)
@@ -231,86 +201,21 @@ static int k8temp_probe(struct pci_dev *pdev,
                        data->sensorsp &= ~SEL_CORE;
        }
 
-       data->name = "k8temp";
        mutex_init(&data->update_lock);
-       pci_set_drvdata(pdev, data);
-
-       /* Register sysfs hooks */
-       err = device_create_file(&pdev->dev,
-                          &sensor_dev_attr_temp1_input.dev_attr);
-       if (err)
-               goto exit_remove;
-
-       /* sensor can be changed and reports something */
-       if (data->sensorsp & SEL_PLACE) {
-               err = device_create_file(&pdev->dev,
-                                  &sensor_dev_attr_temp2_input.dev_attr);
-               if (err)
-                       goto exit_remove;
-       }
-
-       /* core can be changed and reports something */
-       if (data->sensorsp & SEL_CORE) {
-               err = device_create_file(&pdev->dev,
-                                  &sensor_dev_attr_temp3_input.dev_attr);
-               if (err)
-                       goto exit_remove;
-               if (data->sensorsp & SEL_PLACE) {
-                       err = device_create_file(&pdev->dev,
-                                          &sensor_dev_attr_temp4_input.
-                                          dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-       }
-
-       err = device_create_file(&pdev->dev, &dev_attr_name);
-       if (err)
-               goto exit_remove;
 
-       data->hwmon_dev = hwmon_device_register(&pdev->dev);
+       hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+                                                        "k8temp",
+                                                        data,
+                                                        &k8temp_chip_info,
+                                                        NULL);
 
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove;
-       }
-
-       return 0;
-
-exit_remove:
-       device_remove_file(&pdev->dev,
-                          &sensor_dev_attr_temp1_input.dev_attr);
-       device_remove_file(&pdev->dev,
-                          &sensor_dev_attr_temp2_input.dev_attr);
-       device_remove_file(&pdev->dev,
-                          &sensor_dev_attr_temp3_input.dev_attr);
-       device_remove_file(&pdev->dev,
-                          &sensor_dev_attr_temp4_input.dev_attr);
-       device_remove_file(&pdev->dev, &dev_attr_name);
-       return err;
-}
-
-static void k8temp_remove(struct pci_dev *pdev)
-{
-       struct k8temp_data *data = pci_get_drvdata(pdev);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       device_remove_file(&pdev->dev,
-                          &sensor_dev_attr_temp1_input.dev_attr);
-       device_remove_file(&pdev->dev,
-                          &sensor_dev_attr_temp2_input.dev_attr);
-       device_remove_file(&pdev->dev,
-                          &sensor_dev_attr_temp3_input.dev_attr);
-       device_remove_file(&pdev->dev,
-                          &sensor_dev_attr_temp4_input.dev_attr);
-       device_remove_file(&pdev->dev, &dev_attr_name);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 static struct pci_driver k8temp_driver = {
        .name = "k8temp",
        .id_table = k8temp_ids,
        .probe = k8temp_probe,
-       .remove = k8temp_remove,
 };
 
 module_pci_driver(k8temp_driver);
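
The k8temp rewrite above packs the four readings into hwmon channels 0-3, where bit 0 of the channel selects the sensor ("place") and bit 1 selects the core; k8temp_is_visible() then hides any channel whose selector bit the chip does not implement. A standalone sketch of the channel decoding and visibility logic (SEL_CORE matches the define visible above; the SEL_PLACE value here is assumed for illustration):

#include <stdio.h>

#define SEL_PLACE 0x40   /* assumed value: "second sensor selectable" flag */
#define SEL_CORE  0x04   /* matches the define shown in the diff */

static int channel_visible(unsigned int sensorsp, int channel)
{
        if ((channel & 1) && !(sensorsp & SEL_PLACE))
                return 0;
        if ((channel & 2) && !(sensorsp & SEL_CORE))
                return 0;
        return 1;
}

int main(void)
{
        int ch;

        /* a chip with two selectable cores but only one sensor per core */
        for (ch = 0; ch < 4; ch++)
                printf("temp%d_input: core %d, place %d, visible %d\n",
                       ch + 1, (ch >> 1) & 1, ch & 1,
                       channel_visible(SEL_CORE, ch));
        return 0;
}
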
index ce5ec403ec739c819ee8f896e5958d63380246ee..5e6392294c0351e529754bef61426dbdcd35876d 100644 (file)
@@ -16,9 +16,9 @@
 #include <linux/of_device.h>
 #include <linux/of.h>
 #include <linux/regmap.h>
+#include <linux/util_macros.h>
 #include "lm75.h"
 
-
 /*
  * This driver handles the LM75 and compatible digital temperature sensors.
  */
@@ -36,6 +36,7 @@ enum lm75_type {              /* keep sorted in alphabetical order */
        max6626,
        max31725,
        mcp980x,
+       pct2075,
        stds75,
        stlm75,
        tcn75,
@@ -50,6 +51,41 @@ enum lm75_type {             /* keep sorted in alphabetical order */
        tmp75c,
 };
 
+/**
+ * struct lm75_params - lm75 configuration parameters.
+ * @set_mask:          Bits to set in configuration register when configuring
+ *                     the chip.
+ * @clr_mask:          Bits to clear in configuration register when configuring
+ *                     the chip.
+ * @default_resolution:        Default number of bits to represent the temperature
+ *                     value.
+ * @resolution_limits: Limit register resolution. Optional. Should be set if
+ *                     the resolution of limit registers does not match the
+ *                     resolution of the temperature register.
+ * @resolutions:       List of resolutions associated with sample times.
+ *                     Optional. Should be set if num_sample_times is larger
+ *                     than 1, and if the resolution changes with sample times.
+ *                     If set, number of entries must match num_sample_times.
+ * @default_sample_time:Sample time to be set by default.
+ * @num_sample_times:  Number of possible sample times to be set. Optional.
+ *                     Should be set if the number of sample times is larger
+ *                     than one.
+ * @sample_times:      All the possible sample times to be set. Mandatory if
+ *                     num_sample_times is larger than 1. If set, number of
+ *                     entries must match num_sample_times.
+ */
+
+struct lm75_params {
+       u8                      set_mask;
+       u8                      clr_mask;
+       u8                      default_resolution;
+       u8                      resolution_limits;
+       const u8                *resolutions;
+       unsigned int            default_sample_time;
+       u8                      num_sample_times;
+       const unsigned int      *sample_times;
+};
+
 /* Addresses scanned */
 static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
                                        0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
@@ -59,24 +95,231 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
 #define LM75_REG_CONF          0x01
 #define LM75_REG_HYST          0x02
 #define LM75_REG_MAX           0x03
+#define PCT2075_REG_IDLE       0x04
 
 /* Each client has this additional data */
 struct lm75_data {
-       struct i2c_client       *client;
-       struct regmap           *regmap;
-       u8                      orig_conf;
-       u8                      resolution;     /* In bits, between 9 and 16 */
-       u8                      resolution_limits;
-       unsigned int            sample_time;    /* In ms */
+       struct i2c_client               *client;
+       struct regmap                   *regmap;
+       u8                              orig_conf;
+       u8                              current_conf;
+       u8                              resolution;     /* In bits, 9 to 16 */
+       unsigned int                    sample_time;    /* In ms */
+       enum lm75_type                  kind;
+       const struct lm75_params        *params;
 };
 
 /*-----------------------------------------------------------------------*/
 
+static const u8 lm75_sample_set_masks[] = { 0 << 5, 1 << 5, 2 << 5, 3 << 5 };
+
+#define LM75_SAMPLE_CLEAR_MASK (3 << 5)
+
+/* The structure below stores the configuration values of the supported devices.
+ * In case of being supported multiple configurations, the default one must
+ * always be the first element of the array
+ */
+static const struct lm75_params device_params[] = {
+       [adt75] = {
+               .clr_mask = 1 << 5,     /* not one-shot mode */
+               .default_resolution = 12,
+               .default_sample_time = MSEC_PER_SEC / 10,
+       },
+       [ds1775] = {
+               .clr_mask = 3 << 5,
+               .set_mask = 2 << 5,     /* 11-bit mode */
+               .default_resolution = 11,
+               .default_sample_time = 500,
+               .num_sample_times = 4,
+               .sample_times = (unsigned int []){ 125, 250, 500, 1000 },
+               .resolutions = (u8 []) {9, 10, 11, 12 },
+       },
+       [ds75] = {
+               .clr_mask = 3 << 5,
+               .set_mask = 2 << 5,     /* 11-bit mode */
+               .default_resolution = 11,
+               .default_sample_time = 600,
+               .num_sample_times = 4,
+               .sample_times = (unsigned int []){ 150, 300, 600, 1200 },
+               .resolutions = (u8 []) {9, 10, 11, 12 },
+       },
+       [stds75] = {
+               .clr_mask = 3 << 5,
+               .set_mask = 2 << 5,     /* 11-bit mode */
+               .default_resolution = 11,
+               .default_sample_time = 600,
+               .num_sample_times = 4,
+               .sample_times = (unsigned int []){ 150, 300, 600, 1200 },
+               .resolutions = (u8 []) {9, 10, 11, 12 },
+       },
+       [stlm75] = {
+               .default_resolution = 9,
+               .default_sample_time = MSEC_PER_SEC / 6,
+       },
+       [ds7505] = {
+               .set_mask = 3 << 5,     /* 12-bit mode*/
+               .default_resolution = 12,
+               .default_sample_time = 200,
+               .num_sample_times = 4,
+               .sample_times = (unsigned int []){ 25, 50, 100, 200 },
+               .resolutions = (u8 []) {9, 10, 11, 12 },
+       },
+       [g751] = {
+               .default_resolution = 9,
+               .default_sample_time = MSEC_PER_SEC / 10,
+       },
+       [lm75] = {
+               .default_resolution = 9,
+               .default_sample_time = MSEC_PER_SEC / 10,
+       },
+       [lm75a] = {
+               .default_resolution = 9,
+               .default_sample_time = MSEC_PER_SEC / 10,
+       },
+       [lm75b] = {
+               .default_resolution = 11,
+               .default_sample_time = MSEC_PER_SEC / 10,
+       },
+       [max6625] = {
+               .default_resolution = 9,
+               .default_sample_time = MSEC_PER_SEC / 7,
+       },
+       [max6626] = {
+               .default_resolution = 12,
+               .default_sample_time = MSEC_PER_SEC / 7,
+               .resolution_limits = 9,
+       },
+       [max31725] = {
+               .default_resolution = 16,
+               .default_sample_time = MSEC_PER_SEC / 20,
+       },
+       [tcn75] = {
+               .default_resolution = 9,
+               .default_sample_time = MSEC_PER_SEC / 18,
+       },
+       [pct2075] = {
+               .default_resolution = 11,
+               .default_sample_time = MSEC_PER_SEC / 10,
+               .num_sample_times = 31,
+               .sample_times = (unsigned int []){ 100, 200, 300, 400, 500, 600,
+               700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700,
+               1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700,
+               2800, 2900, 3000, 3100 },
+       },
+       [mcp980x] = {
+               .set_mask = 3 << 5,     /* 12-bit mode */
+               .clr_mask = 1 << 7,     /* not one-shot mode */
+               .default_resolution = 12,
+               .resolution_limits = 9,
+               .default_sample_time = 240,
+               .num_sample_times = 4,
+               .sample_times = (unsigned int []){ 30, 60, 120, 240 },
+               .resolutions = (u8 []) {9, 10, 11, 12 },
+       },
+       [tmp100] = {
+               .set_mask = 3 << 5,     /* 12-bit mode */
+               .clr_mask = 1 << 7,     /* not one-shot mode */
+               .default_resolution = 12,
+               .default_sample_time = 320,
+               .num_sample_times = 4,
+               .sample_times = (unsigned int []){ 40, 80, 160, 320 },
+               .resolutions = (u8 []) {9, 10, 11, 12 },
+       },
+       [tmp101] = {
+               .set_mask = 3 << 5,     /* 12-bit mode */
+               .clr_mask = 1 << 7,     /* not one-shot mode */
+               .default_resolution = 12,
+               .default_sample_time = 320,
+               .num_sample_times = 4,
+               .sample_times = (unsigned int []){ 40, 80, 160, 320 },
+               .resolutions = (u8 []) {9, 10, 11, 12 },
+       },
+       [tmp105] = {
+               .set_mask = 3 << 5,     /* 12-bit mode */
+               .clr_mask = 1 << 7,     /* not one-shot mode */
+               .default_resolution = 12,
+               .default_sample_time = 220,
+               .num_sample_times = 4,
+               .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+               .resolutions = (u8 []) {9, 10, 11, 12 },
+       },
+       [tmp112] = {
+               .set_mask = 3 << 5,     /* 8 samples / second */
+               .clr_mask = 1 << 7,     /* not one-shot mode */
+               .default_resolution = 12,
+               .default_sample_time = 125,
+               .num_sample_times = 4,
+               .sample_times = (unsigned int []){ 125, 250, 1000, 4000 },
+       },
+       [tmp175] = {
+               .set_mask = 3 << 5,     /* 12-bit mode */
+               .clr_mask = 1 << 7,     /* not one-shot mode */
+               .default_resolution = 12,
+               .default_sample_time = 220,
+               .num_sample_times = 4,
+               .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+               .resolutions = (u8 []) {9, 10, 11, 12 },
+       },
+       [tmp275] = {
+               .set_mask = 3 << 5,     /* 12-bit mode */
+               .clr_mask = 1 << 7,     /* not one-shot mode */
+               .default_resolution = 12,
+               .default_sample_time = 220,
+               .num_sample_times = 4,
+               .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+               .resolutions = (u8 []) {9, 10, 11, 12 },
+       },
+       [tmp75] = {
+               .set_mask = 3 << 5,     /* 12-bit mode */
+               .clr_mask = 1 << 7,     /* not one-shot mode */
+               .default_resolution = 12,
+               .default_sample_time = 220,
+               .num_sample_times = 4,
+               .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+               .resolutions = (u8 []) {9, 10, 11, 12 },
+       },
+       [tmp75b] = { /* not one-shot mode, Conversion rate 37Hz */
+               .clr_mask = 1 << 7 | 3 << 5,
+               .default_resolution = 12,
+               .default_sample_time = MSEC_PER_SEC / 37,
+               .sample_times = (unsigned int []){ MSEC_PER_SEC / 37,
+                       MSEC_PER_SEC / 18,
+                       MSEC_PER_SEC / 9, MSEC_PER_SEC / 4 },
+               .num_sample_times = 4,
+       },
+       [tmp75c] = {
+               .clr_mask = 1 << 5,     /* not one-shot mode */
+               .default_resolution = 12,
+               .default_sample_time = MSEC_PER_SEC / 12,
+       }
+};
+
 static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
 {
        return ((temp >> (16 - resolution)) * 1000) >> (resolution - 8);
 }
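[Editorial note] The conversion helper above is easier to follow with a number plugged in. A minimal userspace sketch, where reg_to_mc() mirrors the kernel helper's arithmetic and the register value 0x3280 is made up for illustration:

#include <stdio.h>
#include <stdint.h>

/*
 * Same arithmetic as lm75_reg_to_mc(): the raw 16-bit register value is
 * left-justified, so drop the unused low bits first, then scale the
 * remaining 'resolution'-bit count to millidegrees Celsius.
 */
static long reg_to_mc(int16_t temp, uint8_t resolution)
{
	return ((temp >> (16 - resolution)) * 1000L) >> (resolution - 8);
}

int main(void)
{
	/* hypothetical 9-bit LM75 reading: 101 steps of 0.5 degC = 50.5 degC */
	printf("%ld\n", reg_to_mc(0x3280, 9));	/* prints 50500 */
	return 0;
}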
 
+static int lm75_write_config(struct lm75_data *data, u8 set_mask,
+                            u8 clr_mask)
+{
+       u8 value;
+
+       clr_mask |= LM75_SHUTDOWN;
+       value = data->current_conf & ~clr_mask;
+       value |= set_mask;
+
+       if (data->current_conf != value) {
+               s32 err;
+
+               err = i2c_smbus_write_byte_data(data->client, LM75_REG_CONF,
+                                               value);
+               if (err)
+                       return err;
+               data->current_conf = value;
+       }
+       return 0;
+}
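[Editorial note] A quick worked example of the masking above, with hypothetical values: starting from a cached current_conf of 0x00, a call with set_mask = 3 << 5 and clr_mask = 1 << 7 first grows the clear mask to 0x81 once LM75_SHUTDOWN (bit 0 of the configuration register, defined earlier in the file) is OR-ed in, producing a new value of 0x60; since that differs from the cache, one SMBus write is issued and the cache is updated. A second call with the same masks computes 0x60 again and is a no-op, which is the point of tracking current_conf.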
+
 static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
                     u32 attr, int channel, long *val)
 {
@@ -120,16 +363,12 @@ static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
        return 0;
 }
 
-static int lm75_write(struct device *dev, enum hwmon_sensor_types type,
-                     u32 attr, int channel, long temp)
+static int lm75_write_temp(struct device *dev, u32 attr, long temp)
 {
        struct lm75_data *data = dev_get_drvdata(dev);
        u8 resolution;
        int reg;
 
-       if (type != hwmon_temp)
-               return -EINVAL;
-
        switch (attr) {
        case hwmon_temp_max:
                reg = LM75_REG_MAX;
@@ -145,8 +384,8 @@ static int lm75_write(struct device *dev, enum hwmon_sensor_types type,
         * Resolution of limit registers is assumed to be the same as the
         * temperature input register resolution unless given explicitly.
         */
-       if (data->resolution_limits)
-               resolution = data->resolution_limits;
+       if (data->params->resolution_limits)
+               resolution = data->params->resolution_limits;
        else
                resolution = data->resolution;
 
@@ -154,16 +393,88 @@ static int lm75_write(struct device *dev, enum hwmon_sensor_types type,
        temp = DIV_ROUND_CLOSEST(temp  << (resolution - 8),
                                 1000) << (16 - resolution);
 
-       return regmap_write(data->regmap, reg, temp);
+       return regmap_write(data->regmap, reg, (u16)temp);
+}
+
+static int lm75_update_interval(struct device *dev, long val)
+{
+       struct lm75_data *data = dev_get_drvdata(dev);
+       unsigned int reg;
+       u8 index;
+       s32 err;
+
+       index = find_closest(val, data->params->sample_times,
+                            (int)data->params->num_sample_times);
+
+       switch (data->kind) {
+       default:
+               err = lm75_write_config(data, lm75_sample_set_masks[index],
+                                       LM75_SAMPLE_CLEAR_MASK);
+               if (err)
+                       return err;
+
+               data->sample_time = data->params->sample_times[index];
+               if (data->params->resolutions)
+                       data->resolution = data->params->resolutions[index];
+               break;
+       case tmp112:
+               err = regmap_read(data->regmap, LM75_REG_CONF, &reg);
+               if (err < 0)
+                       return err;
+               reg &= ~0x00c0;
+               reg |= (3 - index) << 6;
+               err = regmap_write(data->regmap, LM75_REG_CONF, reg);
+               if (err < 0)
+                       return err;
+               data->sample_time = data->params->sample_times[index];
+               break;
+       case pct2075:
+               err = i2c_smbus_write_byte_data(data->client, PCT2075_REG_IDLE,
+                                               index + 1);
+               if (err)
+                       return err;
+               data->sample_time = data->params->sample_times[index];
+               break;
+       }
+       return 0;
+}
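[Editorial note] find_closest() used above comes from <linux/util_macros.h> and returns the index of the array element nearest to the requested value. A rough userspace approximation, assuming simple nearest-value semantics and using the tmp75 sample times from the table earlier in this patch:

#include <stdio.h>
#include <stdlib.h>

/* Nearest-element lookup, approximating the kernel's find_closest() */
static int closest_index(long val, const unsigned int *arr, int len)
{
	int i, best = 0;

	for (i = 1; i < len; i++)
		if (labs(val - (long)arr[i]) < labs(val - (long)arr[best]))
			best = i;
	return best;
}

int main(void)
{
	static const unsigned int tmp75_times[] = { 28, 55, 110, 220 };

	/* a requested interval of 100 ms maps to index 2, i.e. 110 ms */
	printf("%d\n", closest_index(100, tmp75_times, 4));
	return 0;
}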
+
+static int lm75_write_chip(struct device *dev, u32 attr, long val)
+{
+       switch (attr) {
+       case hwmon_chip_update_interval:
+               return lm75_update_interval(dev, val);
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int lm75_write(struct device *dev, enum hwmon_sensor_types type,
+                     u32 attr, int channel, long val)
+{
+       switch (type) {
+       case hwmon_chip:
+               return lm75_write_chip(dev, attr, val);
+       case hwmon_temp:
+               return lm75_write_temp(dev, attr, val);
+       default:
+               return -EINVAL;
+       }
+       return 0;
 }
 
 static umode_t lm75_is_visible(const void *data, enum hwmon_sensor_types type,
                               u32 attr, int channel)
 {
+       const struct lm75_data *config_data = data;
+
        switch (type) {
        case hwmon_chip:
                switch (attr) {
                case hwmon_chip_update_interval:
+                       if (config_data->params->num_sample_times > 1)
+                               return 0644;
                        return 0444;
                }
                break;
@@ -208,13 +519,13 @@ static bool lm75_is_writeable_reg(struct device *dev, unsigned int reg)
 
 static bool lm75_is_volatile_reg(struct device *dev, unsigned int reg)
 {
-       return reg == LM75_REG_TEMP;
+       return reg == LM75_REG_TEMP || reg == LM75_REG_CONF;
 }
 
 static const struct regmap_config lm75_regmap_config = {
        .reg_bits = 8,
        .val_bits = 16,
-       .max_register = LM75_REG_MAX,
+       .max_register = PCT2075_REG_IDLE,
        .writeable_reg = lm75_is_writeable_reg,
        .volatile_reg = lm75_is_volatile_reg,
        .val_format_endian = REGMAP_ENDIAN_BIG,
@@ -238,8 +549,6 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
        struct device *hwmon_dev;
        struct lm75_data *data;
        int status, err;
-       u8 set_mask, clr_mask;
-       int new;
        enum lm75_type kind;
 
        if (client->dev.of_node)
@@ -256,6 +565,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
                return -ENOMEM;
 
        data->client = client;
+       data->kind = kind;
 
        data->regmap = devm_regmap_init_i2c(client, &lm75_regmap_config);
        if (IS_ERR(data->regmap))
@@ -264,113 +574,30 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
        /* Set to LM75 resolution (9 bits, 1/2 degree C) and range.
         * Then tweak to be more precise when appropriate.
         */
-       set_mask = 0;
-       clr_mask = LM75_SHUTDOWN;               /* continuous conversions */
-
-       switch (kind) {
-       case adt75:
-               clr_mask |= 1 << 5;             /* not one-shot mode */
-               data->resolution = 12;
-               data->sample_time = MSEC_PER_SEC / 8;
-               break;
-       case ds1775:
-       case ds75:
-       case stds75:
-               clr_mask |= 3 << 5;
-               set_mask |= 2 << 5;             /* 11-bit mode */
-               data->resolution = 11;
-               data->sample_time = MSEC_PER_SEC;
-               break;
-       case stlm75:
-               data->resolution = 9;
-               data->sample_time = MSEC_PER_SEC / 5;
-               break;
-       case ds7505:
-               set_mask |= 3 << 5;             /* 12-bit mode */
-               data->resolution = 12;
-               data->sample_time = MSEC_PER_SEC / 4;
-               break;
-       case g751:
-       case lm75:
-       case lm75a:
-               data->resolution = 9;
-               data->sample_time = MSEC_PER_SEC / 2;
-               break;
-       case lm75b:
-               data->resolution = 11;
-               data->sample_time = MSEC_PER_SEC / 4;
-               break;
-       case max6625:
-               data->resolution = 9;
-               data->sample_time = MSEC_PER_SEC / 4;
-               break;
-       case max6626:
-               data->resolution = 12;
-               data->resolution_limits = 9;
-               data->sample_time = MSEC_PER_SEC / 4;
-               break;
-       case max31725:
-               data->resolution = 16;
-               data->sample_time = MSEC_PER_SEC / 8;
-               break;
-       case tcn75:
-               data->resolution = 9;
-               data->sample_time = MSEC_PER_SEC / 8;
-               break;
-       case mcp980x:
-               data->resolution_limits = 9;
-               /* fall through */
-       case tmp100:
-       case tmp101:
-               set_mask |= 3 << 5;             /* 12-bit mode */
-               data->resolution = 12;
-               data->sample_time = MSEC_PER_SEC;
-               clr_mask |= 1 << 7;             /* not one-shot mode */
-               break;
-       case tmp112:
-               set_mask |= 3 << 5;             /* 12-bit mode */
-               clr_mask |= 1 << 7;             /* not one-shot mode */
-               data->resolution = 12;
-               data->sample_time = MSEC_PER_SEC / 4;
-               break;
-       case tmp105:
-       case tmp175:
-       case tmp275:
-       case tmp75:
-               set_mask |= 3 << 5;             /* 12-bit mode */
-               clr_mask |= 1 << 7;             /* not one-shot mode */
-               data->resolution = 12;
-               data->sample_time = MSEC_PER_SEC / 2;
-               break;
-       case tmp75b:  /* not one-shot mode, Conversion rate 37Hz */
-               clr_mask |= 1 << 7 | 0x3 << 5;
-               data->resolution = 12;
-               data->sample_time = MSEC_PER_SEC / 37;
-               break;
-       case tmp75c:
-               clr_mask |= 1 << 5;             /* not one-shot mode */
-               data->resolution = 12;
-               data->sample_time = MSEC_PER_SEC / 4;
-               break;
-       }
 
-       /* configure as specified */
+       data->params = &device_params[data->kind];
+
+       /* Save default sample time and resolution */
+       data->sample_time = data->params->default_sample_time;
+       data->resolution = data->params->default_resolution;
+
+       /* Cache original configuration */
        status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
        if (status < 0) {
                dev_dbg(dev, "Can't read config? %d\n", status);
                return status;
        }
        data->orig_conf = status;
-       new = status & ~clr_mask;
-       new |= set_mask;
-       if (status != new)
-               i2c_smbus_write_byte_data(client, LM75_REG_CONF, new);
+       data->current_conf = status;
 
-       err = devm_add_action_or_reset(dev, lm75_remove, data);
+       err = lm75_write_config(data, data->params->set_mask,
+                               data->params->clr_mask);
        if (err)
                return err;
 
-       dev_dbg(dev, "Config %02x\n", new);
+       err = devm_add_action_or_reset(dev, lm75_remove, data);
+       if (err)
+               return err;
 
        hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
                                                         data, &lm75_chip_info,
@@ -397,6 +624,7 @@ static const struct i2c_device_id lm75_ids[] = {
        { "max31725", max31725, },
        { "max31726", max31725, },
        { "mcp980x", mcp980x, },
+       { "pct2075", pct2075, },
        { "stds75", stds75, },
        { "stlm75", stlm75, },
        { "tcn75", tcn75, },
@@ -466,6 +694,10 @@ static const struct of_device_id __maybe_unused lm75_of_match[] = {
                .compatible = "maxim,mcp980x",
                .data = (void *)mcp980x
        },
+       {
+               .compatible = "nxp,pct2075",
+               .data = (void *)pct2075
+       },
        {
                .compatible = "st,stds75",
                .data = (void *)stds75
index f9431ad43ad3b02a0b83e3e4990d56aa2f0fcdba..53ff5051774ccc5e1a3254105da6499b1b69fed4 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/i2c.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of.h>
+#include <linux/property.h>
 
 #define LTC2990_STATUS 0x00
 #define LTC2990_CONTROL        0x01
@@ -206,7 +206,6 @@ static int ltc2990_i2c_probe(struct i2c_client *i2c,
        int ret;
        struct device *hwmon_dev;
        struct ltc2990_data *data;
-       struct device_node *of_node = i2c->dev.of_node;
 
        if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
                                     I2C_FUNC_SMBUS_WORD_DATA))
@@ -218,9 +217,10 @@ static int ltc2990_i2c_probe(struct i2c_client *i2c,
 
        data->i2c = i2c;
 
-       if (of_node) {
-               ret = of_property_read_u32_array(of_node, "lltc,meas-mode",
-                                                data->mode, 2);
+       if (dev_fwnode(&i2c->dev)) {
+               ret = device_property_read_u32_array(&i2c->dev,
+                                                    "lltc,meas-mode",
+                                                    data->mode, 2);
                if (ret < 0)
                        return ret;
 
index d42bc0883a32b4ba115f113d84e6503ecd43e081..7efa6bfef06097e5d20878566a84bc9666b3f2d6 100644 (file)
@@ -20,6 +20,7 @@
  *
  * Chip        #vin    #fan    #pwm    #temp  chip IDs       man ID
  * nct6106d     9      3       3       6+3    0xc450 0xc1    0x5ca3
+ * nct6116d     9      5       5       3+3    0xd280 0xc1    0x5ca3
  * nct6775f     9      4       3       6+3    0xb470 0xc1    0x5ca3
  * nct6776f     9      5       3       6+3    0xc330 0xc1    0x5ca3
  * nct6779d    15      5       5       2+6    0xc560 0xc1    0x5ca3
 
 #define USE_ALTERNATE
 
-enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793,
-            nct6795, nct6796, nct6797, nct6798 };
+enum kinds { nct6106, nct6116, nct6775, nct6776, nct6779, nct6791, nct6792,
+            nct6793, nct6795, nct6796, nct6797, nct6798 };
 
 /* used to set data->name = nct6775_device_names[data->sio_kind] */
 static const char * const nct6775_device_names[] = {
        "nct6106",
+       "nct6116",
        "nct6775",
        "nct6776",
        "nct6779",
@@ -78,6 +80,7 @@ static const char * const nct6775_device_names[] = {
 
 static const char * const nct6775_sio_names[] __initconst = {
        "NCT6106D",
+       "NCT6116D",
        "NCT6775F",
        "NCT6776D/F",
        "NCT6779D",
@@ -115,6 +118,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
 #define SIO_REG_ADDR           0x60    /* Logical device address (2 bytes) */
 
 #define SIO_NCT6106_ID         0xc450
+#define SIO_NCT6116_ID         0xd280
 #define SIO_NCT6775_ID         0xb470
 #define SIO_NCT6776_ID         0xc330
 #define SIO_NCT6779_ID         0xc560
@@ -825,10 +829,8 @@ static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 };
 
 static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 };
 static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 };
-static const u16 NCT6106_REG_PWM[] = { 0x119, 0x129, 0x139 };
 static const u16 NCT6106_REG_PWM_READ[] = { 0x4a, 0x4b, 0x4c };
 static const u16 NCT6106_REG_FAN_MODE[] = { 0x113, 0x123, 0x133 };
-static const u16 NCT6106_REG_TEMP_SEL[] = { 0x110, 0x120, 0x130 };
 static const u16 NCT6106_REG_TEMP_SOURCE[] = {
        0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5 };
 
@@ -896,6 +898,70 @@ static const u16 NCT6106_REG_TEMP_CRIT[32] = {
        [12] = 0x205,
 };
 
+/* NCT6112D/NCT6114D/NCT6116D specific data */
+
+static const u16 NCT6116_REG_FAN[] = { 0x20, 0x22, 0x24, 0x26, 0x28 };
+static const u16 NCT6116_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4, 0xe6, 0xe8 };
+static const u16 NCT6116_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0xf6, 0xf5 };
+static const u16 NCT6116_FAN_PULSE_SHIFT[] = { 0, 2, 4, 6, 6 };
+
+static const u16 NCT6116_REG_PWM[] = { 0x119, 0x129, 0x139, 0x199, 0x1a9 };
+static const u16 NCT6116_REG_FAN_MODE[] = { 0x113, 0x123, 0x133, 0x193, 0x1a3 };
+static const u16 NCT6116_REG_TEMP_SEL[] = { 0x110, 0x120, 0x130, 0x190, 0x1a0 };
+static const u16 NCT6116_REG_TEMP_SOURCE[] = {
+       0xb0, 0xb1, 0xb2 };
+
+static const u16 NCT6116_REG_CRITICAL_TEMP[] = {
+       0x11a, 0x12a, 0x13a, 0x19a, 0x1aa };
+static const u16 NCT6116_REG_CRITICAL_TEMP_TOLERANCE[] = {
+       0x11b, 0x12b, 0x13b, 0x19b, 0x1ab };
+
+static const u16 NCT6116_REG_CRITICAL_PWM_ENABLE[] = {
+       0x11c, 0x12c, 0x13c, 0x19c, 0x1ac };
+static const u16 NCT6116_REG_CRITICAL_PWM[] = {
+       0x11d, 0x12d, 0x13d, 0x19d, 0x1ad };
+
+static const u16 NCT6116_REG_FAN_STEP_UP_TIME[] = {
+       0x114, 0x124, 0x134, 0x194, 0x1a4 };
+static const u16 NCT6116_REG_FAN_STEP_DOWN_TIME[] = {
+       0x115, 0x125, 0x135, 0x195, 0x1a5 };
+static const u16 NCT6116_REG_FAN_STOP_OUTPUT[] = {
+       0x116, 0x126, 0x136, 0x196, 0x1a6 };
+static const u16 NCT6116_REG_FAN_START_OUTPUT[] = {
+       0x117, 0x127, 0x137, 0x197, 0x1a7 };
+static const u16 NCT6116_REG_FAN_STOP_TIME[] = {
+       0x118, 0x128, 0x138, 0x198, 0x1a8 };
+static const u16 NCT6116_REG_TOLERANCE_H[] = {
+       0x112, 0x122, 0x132, 0x192, 0x1a2 };
+
+static const u16 NCT6116_REG_TARGET[] = {
+       0x111, 0x121, 0x131, 0x191, 0x1a1 };
+
+static const u16 NCT6116_REG_AUTO_TEMP[] = {
+       0x160, 0x170, 0x180, 0x1d0, 0x1e0 };
+static const u16 NCT6116_REG_AUTO_PWM[] = {
+       0x164, 0x174, 0x184, 0x1d4, 0x1e4 };
+
+static const s8 NCT6116_ALARM_BITS[] = {
+       0, 1, 2, 3, 4, 5, 7, 8,         /* in0.. in7 */
+       9, -1, -1, -1, -1, -1, -1,      /* in8..in9 */
+       -1,                             /* unused */
+       32, 33, 34, 35, 36,             /* fan1..fan5 */
+       -1, -1, -1,                     /* unused */
+       16, 17, 18, -1, -1, -1,         /* temp1..temp6 */
+       48, -1                          /* intrusion0, intrusion1 */
+};
+
+static const s8 NCT6116_BEEP_BITS[] = {
+       0, 1, 2, 3, 4, 5, 7, 8,         /* in0.. in7 */
+       9, 10, 11, 12, -1, -1, -1,      /* in8..in14 */
+       32,                             /* global beep enable */
+       24, 25, 26, 27, 28,             /* fan1..fan5 */
+       -1, -1, -1,                     /* unused */
+       16, 17, 18, -1, -1, -1,         /* temp1..temp6 */
+       34, -1                          /* intrusion0, intrusion1 */
+};
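[Editorial note] On how these tables are consumed (an assumption based on the driver's existing alarm/beep handling, which is not part of this hunk): each entry is a bit position in a status word that the update path assembles one 8-bit register at a time, and -1 marks a channel with no status bit. A sketch of the mapping:

/*
 * Sketch: a table entry such as 32 for fan1 selects the fifth 8-bit
 * ALARM/BEEP register and bit 0 within it.
 */
static void status_bit_to_reg(int entry, int *reg_idx, int *bit)
{
	*reg_idx = entry >> 3;	/* which REG_ALARM[] / REG_BEEP[] register */
	*bit = entry & 0x07;	/* bit position inside that register */
}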
+
 static enum pwm_enable reg_to_pwm_enable(int pwm, int mode)
 {
        if (mode == 0 && pwm == 255)
@@ -1294,6 +1360,11 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
                return reg == 0x20 || reg == 0x22 || reg == 0x24 ||
                  reg == 0xe0 || reg == 0xe2 || reg == 0xe4 ||
                  reg == 0x111 || reg == 0x121 || reg == 0x131;
+       case nct6116:
+               return reg == 0x20 || reg == 0x22 || reg == 0x24 ||
+                 reg == 0x26 || reg == 0x28 || reg == 0xe0 || reg == 0xe2 ||
+                 reg == 0xe4 || reg == 0xe6 || reg == 0xe8 || reg == 0x111 ||
+                 reg == 0x121 || reg == 0x131 || reg == 0x191 || reg == 0x1a1;
        case nct6775:
                return (((reg & 0xff00) == 0x100 ||
                    (reg & 0xff00) == 0x200) &&
@@ -1673,6 +1744,7 @@ static void nct6775_update_pwm_limits(struct device *dev)
                        data->auto_pwm[i][data->auto_pwm_num] = 0xff;
                        break;
                case nct6106:
+               case nct6116:
                case nct6779:
                case nct6791:
                case nct6792:
@@ -3109,6 +3181,7 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr,
                case nct6776:
                        break; /* always enabled, nothing to do */
                case nct6106:
+               case nct6116:
                case nct6779:
                case nct6791:
                case nct6792:
@@ -3535,6 +3608,23 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
 
                fan3pin = !(cr24 & 0x80);
                pwm3pin = cr24 & 0x08;
+       } else if (data->kind == nct6116) {
+               int cr1a = superio_inb(sioreg, 0x1a);
+               int cr1b = superio_inb(sioreg, 0x1b);
+               int cr24 = superio_inb(sioreg, 0x24);
+               int cr2a = superio_inb(sioreg, 0x2a);
+               int cr2b = superio_inb(sioreg, 0x2b);
+               int cr2f = superio_inb(sioreg, 0x2f);
+
+               fan3pin = !(cr2b & 0x10);
+               fan4pin = (cr2b & 0x80) ||                      // pin 1(2)
+                       (!(cr2f & 0x10) && (cr1a & 0x04));      // pin 65(66)
+               fan5pin = (cr2b & 0x80) ||                      // pin 126(127)
+                       (!(cr1b & 0x03) && (cr2a & 0x02));      // pin 94(96)
+
+               pwm3pin = fan3pin && (cr24 & 0x08);
+               pwm4pin = fan4pin;
+               pwm5pin = fan5pin;
        } else {
                /*
                 * NCT6779D, NCT6791D, NCT6792D, NCT6793D, NCT6795D, NCT6796D,
@@ -3765,7 +3855,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
                data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
                data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
-               data->REG_PWM[0] = NCT6106_REG_PWM;
+               data->REG_PWM[0] = NCT6116_REG_PWM;
                data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
                data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
                data->REG_PWM[5] = NCT6106_REG_WEIGHT_DUTY_STEP;
@@ -3784,7 +3874,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_CRITICAL_PWM = NCT6106_REG_CRITICAL_PWM;
                data->REG_TEMP_OFFSET = NCT6106_REG_TEMP_OFFSET;
                data->REG_TEMP_SOURCE = NCT6106_REG_TEMP_SOURCE;
-               data->REG_TEMP_SEL = NCT6106_REG_TEMP_SEL;
+               data->REG_TEMP_SEL = NCT6116_REG_TEMP_SEL;
                data->REG_WEIGHT_TEMP_SEL = NCT6106_REG_WEIGHT_TEMP_SEL;
                data->REG_WEIGHT_TEMP[0] = NCT6106_REG_WEIGHT_TEMP_STEP;
                data->REG_WEIGHT_TEMP[1] = NCT6106_REG_WEIGHT_TEMP_STEP_TOL;
@@ -3806,6 +3896,79 @@ static int nct6775_probe(struct platform_device *pdev)
                reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
                reg_temp_crit_h = NCT6106_REG_TEMP_CRIT_H;
 
+               break;
+       case nct6116:
+               data->in_num = 9;
+               data->pwm_num = 3;
+               data->auto_pwm_num = 4;
+               data->temp_fixed_num = 3;
+               data->num_temp_alarms = 3;
+               data->num_temp_beeps = 3;
+
+               data->fan_from_reg = fan_from_reg13;
+               data->fan_from_reg_min = fan_from_reg13;
+
+               data->temp_label = nct6776_temp_label;
+               data->temp_mask = NCT6776_TEMP_MASK;
+               data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
+
+               data->REG_VBAT = NCT6106_REG_VBAT;
+               data->REG_DIODE = NCT6106_REG_DIODE;
+               data->DIODE_MASK = NCT6106_DIODE_MASK;
+               data->REG_VIN = NCT6106_REG_IN;
+               data->REG_IN_MINMAX[0] = NCT6106_REG_IN_MIN;
+               data->REG_IN_MINMAX[1] = NCT6106_REG_IN_MAX;
+               data->REG_TARGET = NCT6116_REG_TARGET;
+               data->REG_FAN = NCT6116_REG_FAN;
+               data->REG_FAN_MODE = NCT6116_REG_FAN_MODE;
+               data->REG_FAN_MIN = NCT6116_REG_FAN_MIN;
+               data->REG_FAN_PULSES = NCT6116_REG_FAN_PULSES;
+               data->FAN_PULSE_SHIFT = NCT6116_FAN_PULSE_SHIFT;
+               data->REG_FAN_TIME[0] = NCT6116_REG_FAN_STOP_TIME;
+               data->REG_FAN_TIME[1] = NCT6116_REG_FAN_STEP_UP_TIME;
+               data->REG_FAN_TIME[2] = NCT6116_REG_FAN_STEP_DOWN_TIME;
+               data->REG_TOLERANCE_H = NCT6116_REG_TOLERANCE_H;
+               data->REG_PWM[0] = NCT6116_REG_PWM;
+               data->REG_PWM[1] = NCT6116_REG_FAN_START_OUTPUT;
+               data->REG_PWM[2] = NCT6116_REG_FAN_STOP_OUTPUT;
+               data->REG_PWM[5] = NCT6106_REG_WEIGHT_DUTY_STEP;
+               data->REG_PWM[6] = NCT6106_REG_WEIGHT_DUTY_BASE;
+               data->REG_PWM_READ = NCT6106_REG_PWM_READ;
+               data->REG_PWM_MODE = NCT6106_REG_PWM_MODE;
+               data->PWM_MODE_MASK = NCT6106_PWM_MODE_MASK;
+               data->REG_AUTO_TEMP = NCT6116_REG_AUTO_TEMP;
+               data->REG_AUTO_PWM = NCT6116_REG_AUTO_PWM;
+               data->REG_CRITICAL_TEMP = NCT6116_REG_CRITICAL_TEMP;
+               data->REG_CRITICAL_TEMP_TOLERANCE
+                 = NCT6116_REG_CRITICAL_TEMP_TOLERANCE;
+               data->REG_CRITICAL_PWM_ENABLE = NCT6116_REG_CRITICAL_PWM_ENABLE;
+               data->CRITICAL_PWM_ENABLE_MASK
+                 = NCT6106_CRITICAL_PWM_ENABLE_MASK;
+               data->REG_CRITICAL_PWM = NCT6116_REG_CRITICAL_PWM;
+               data->REG_TEMP_OFFSET = NCT6106_REG_TEMP_OFFSET;
+               data->REG_TEMP_SOURCE = NCT6116_REG_TEMP_SOURCE;
+               data->REG_TEMP_SEL = NCT6116_REG_TEMP_SEL;
+               data->REG_WEIGHT_TEMP_SEL = NCT6106_REG_WEIGHT_TEMP_SEL;
+               data->REG_WEIGHT_TEMP[0] = NCT6106_REG_WEIGHT_TEMP_STEP;
+               data->REG_WEIGHT_TEMP[1] = NCT6106_REG_WEIGHT_TEMP_STEP_TOL;
+               data->REG_WEIGHT_TEMP[2] = NCT6106_REG_WEIGHT_TEMP_BASE;
+               data->REG_ALARM = NCT6106_REG_ALARM;
+               data->ALARM_BITS = NCT6116_ALARM_BITS;
+               data->REG_BEEP = NCT6106_REG_BEEP;
+               data->BEEP_BITS = NCT6116_BEEP_BITS;
+
+               reg_temp = NCT6106_REG_TEMP;
+               reg_temp_mon = NCT6106_REG_TEMP_MON;
+               num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP);
+               num_reg_temp_mon = ARRAY_SIZE(NCT6106_REG_TEMP_MON);
+               reg_temp_over = NCT6106_REG_TEMP_OVER;
+               reg_temp_hyst = NCT6106_REG_TEMP_HYST;
+               reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+               reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
+               reg_temp_crit = NCT6106_REG_TEMP_CRIT;
+               reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
+               reg_temp_crit_h = NCT6106_REG_TEMP_CRIT_H;
+
                break;
        case nct6775:
                data->in_num = 9;
@@ -4352,6 +4515,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->have_vid = (cr2a & 0x60) == 0x40;
                break;
        case nct6106:
+       case nct6116:
        case nct6779:
        case nct6791:
        case nct6792:
@@ -4381,6 +4545,7 @@ static int nct6775_probe(struct platform_device *pdev)
                                  NCT6775_REG_CR_FAN_DEBOUNCE);
                switch (data->kind) {
                case nct6106:
+               case nct6116:
                        tmp |= 0xe0;
                        break;
                case nct6775:
@@ -4576,6 +4741,9 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
        case SIO_NCT6106_ID:
                sio_data->kind = nct6106;
                break;
+       case SIO_NCT6116_ID:
+               sio_data->kind = nct6116;
+               break;
        case SIO_NCT6775_ID:
                sio_data->kind = nct6775;
                break;
index 710c30562fc1fe62ff14bee4122c539c3cd7ad71..95b447cfa24cb82a118fa62f3bc4c99caec33153 100644 (file)
 #define DTS_T_CTRL1_REG                0x27
 #define VT_ADC_MD_REG          0x2E
 
+#define VSEN1_HV_LL_REG                0x02    /* Bank 1; 2 regs (HV/LV) per sensor */
+#define VSEN1_LV_LL_REG                0x03    /* Bank 1; 2 regs (HV/LV) per sensor */
+#define VSEN1_HV_HL_REG                0x00    /* Bank 1; 2 regs (HV/LV) per sensor */
+#define VSEN1_LV_HL_REG                0x01    /* Bank 1; 2 regs (HV/LV) per sensor */
+#define SMI_STS1_REG           0xC1    /* Bank 0; SMI Status Register */
+#define SMI_STS3_REG           0xC3    /* Bank 0; SMI Status Register */
+#define SMI_STS5_REG           0xC5    /* Bank 0; SMI Status Register */
+#define SMI_STS7_REG           0xC7    /* Bank 0; SMI Status Register */
+#define SMI_STS8_REG           0xC8    /* Bank 0; SMI Status Register */
+
 #define VSEN1_HV_REG           0x40    /* Bank 0; 2 regs (HV/LV) per sensor */
 #define TEMP_CH1_HV_REG                0x42    /* Bank 0; same as VSEN2_HV */
 #define LTD_HV_REG             0x62    /* Bank 0; 2 regs in VSEN range */
+#define LTD_HV_HL_REG          0x44    /* Bank 1; 1 reg for LTD */
+#define LTD_LV_HL_REG          0x45    /* Bank 1; 1 reg for LTD */
+#define LTD_HV_LL_REG          0x46    /* Bank 1; 1 reg for LTD */
+#define LTD_LV_LL_REG          0x47    /* Bank 1; 1 reg for LTD */
+#define TEMP_CH1_CH_REG                0x05    /* Bank 1; 1 reg for LTD */
+#define TEMP_CH1_W_REG         0x06    /* Bank 1; 1 reg for LTD */
+#define TEMP_CH1_WH_REG                0x07    /* Bank 1; 1 reg for LTD */
+#define TEMP_CH1_C_REG         0x04    /* Bank 1; 1 reg per sensor */
+#define DTS_T_CPU1_C_REG       0x90    /* Bank 1; 1 reg per sensor */
+#define DTS_T_CPU1_CH_REG      0x91    /* Bank 1; 1 reg per sensor */
+#define DTS_T_CPU1_W_REG       0x92    /* Bank 1; 1 reg per sensor */
+#define DTS_T_CPU1_WH_REG      0x93    /* Bank 1; 1 reg per sensor */
 #define FANIN1_HV_REG          0x80    /* Bank 0; 2 regs (HV/LV) per sensor */
+#define FANIN1_HV_HL_REG       0x60    /* Bank 1; 2 regs (HV/LV) per sensor */
+#define FANIN1_LV_HL_REG       0x61    /* Bank 1; 2 regs (HV/LV) per sensor */
 #define T_CPU1_HV_REG          0xA0    /* Bank 0; 2 regs (HV/LV) per sensor */
 
 #define PRTS_REG               0x03    /* Bank 2 */
@@ -58,6 +82,8 @@
 #define FANCTL1_FMR_REG                0x00    /* Bank 3; 1 reg per channel */
 #define FANCTL1_OUT_REG                0x10    /* Bank 3; 1 reg per channel */
 
+#define ENABLE_TSI     BIT(1)
+
 static const unsigned short normal_i2c[] = {
        0x2d, 0x2e, I2C_CLIENT_END
 };
@@ -72,6 +98,7 @@ struct nct7904_data {
        u8 fan_mode[FANCTL_MAX];
        u8 enable_dts;
        u8 has_dts;
+       u8 temp_mode; /* 0: TR mode, 1: TD mode */
 };
 
 /* Access functions */
@@ -170,6 +197,25 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
                        rpm = 1350000 / cnt;
                *val = rpm;
                return 0;
+       case hwmon_fan_min:
+               ret = nct7904_read_reg16(data, BANK_1,
+                                        FANIN1_HV_HL_REG + channel * 2);
+               if (ret < 0)
+                       return ret;
+               cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
+               if (cnt == 0x1fff)
+                       rpm = 0;
+               else
+                       rpm = 1350000 / cnt;
+               *val = rpm;
+               return 0;
+       case hwmon_fan_alarm:
+               ret = nct7904_read_reg(data, BANK_0,
+                                      SMI_STS5_REG + (channel >> 3));
+               if (ret < 0)
+                       return ret;
+               *val = (ret >> (channel & 0x07)) & 1;
+               return 0;
        default:
                return -EOPNOTSUPP;
        }
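[Editorial note] The fan readings above pack a 13-bit count across the HV/LV register pair: the high byte contributes bits 5..12, the low register bits 0..4, and 0x1fff means "no reading". A small sketch of the decode with a made-up readback value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t ret = 0x0502;	/* hypothetical HV=0x05 / LV=0x02 readback */
	unsigned int cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
	unsigned int rpm = (cnt == 0x1fff) ? 0 : 1350000 / cnt;

	printf("cnt=%u rpm=%u\n", cnt, rpm);	/* cnt=162 rpm=8333 */
	return 0;
}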
@@ -179,8 +225,20 @@ static umode_t nct7904_fan_is_visible(const void *_data, u32 attr, int channel)
 {
        const struct nct7904_data *data = _data;
 
-       if (attr == hwmon_fan_input && data->fanin_mask & (1 << channel))
-               return 0444;
+       switch (attr) {
+       case hwmon_fan_input:
+       case hwmon_fan_alarm:
+               if (data->fanin_mask & (1 << channel))
+                       return 0444;
+               break;
+       case hwmon_fan_min:
+               if (data->fanin_mask & (1 << channel))
+                       return 0644;
+               break;
+       default:
+               break;
+       }
+
        return 0;
 }
 
@@ -211,6 +269,37 @@ static int nct7904_read_in(struct device *dev, u32 attr, int channel,
                        volt *= 6; /* 0.006V scale */
                *val = volt;
                return 0;
+       case hwmon_in_min:
+               ret = nct7904_read_reg16(data, BANK_1,
+                                        VSEN1_HV_LL_REG + index * 4);
+               if (ret < 0)
+                       return ret;
+               volt = ((ret & 0xff00) >> 5) | (ret & 0x7);
+               if (index < 14)
+                       volt *= 2; /* 0.002V scale */
+               else
+                       volt *= 6; /* 0.006V scale */
+               *val = volt;
+               return 0;
+       case hwmon_in_max:
+               ret = nct7904_read_reg16(data, BANK_1,
+                                        VSEN1_HV_HL_REG + index * 4);
+               if (ret < 0)
+                       return ret;
+               volt = ((ret & 0xff00) >> 5) | (ret & 0x7);
+               if (index < 14)
+                       volt *= 2; /* 0.002V scale */
+               else
+                       volt *= 6; /* 0.006V scale */
+               *val = volt;
+               return 0;
+       case hwmon_in_alarm:
+               ret = nct7904_read_reg(data, BANK_0,
+                                      SMI_STS1_REG + (index >> 3));
+               if (ret < 0)
+                       return ret;
+               *val = (ret >> (index & 0x07)) & 1;
+               return 0;
        default:
                return -EOPNOTSUPP;
        }
@@ -221,9 +310,20 @@ static umode_t nct7904_in_is_visible(const void *_data, u32 attr, int channel)
        const struct nct7904_data *data = _data;
        int index = nct7904_chan_to_index[channel];
 
-       if (channel > 0 && attr == hwmon_in_input &&
-           (data->vsen_mask & BIT(index)))
-               return 0444;
+       switch (attr) {
+       case hwmon_in_input:
+       case hwmon_in_alarm:
+               if (channel > 0 && (data->vsen_mask & BIT(index)))
+                       return 0444;
+               break;
+       case hwmon_in_min:
+       case hwmon_in_max:
+               if (channel > 0 && (data->vsen_mask & BIT(index)))
+                       return 0644;
+               break;
+       default:
+               break;
+       }
 
        return 0;
 }
@@ -233,6 +333,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
 {
        struct nct7904_data *data = dev_get_drvdata(dev);
        int ret, temp;
+       unsigned int reg1, reg2, reg3;
 
        switch (attr) {
        case hwmon_temp_input:
@@ -250,16 +351,106 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
                temp = ((ret & 0xff00) >> 5) | (ret & 0x7);
                *val = sign_extend32(temp, 10) * 125;
                return 0;
+       case hwmon_temp_alarm:
+               if (channel == 4) {
+                       ret = nct7904_read_reg(data, BANK_0,
+                                              SMI_STS3_REG);
+                       if (ret < 0)
+                               return ret;
+                       *val = (ret >> 1) & 1;
+               } else if (channel < 4) {
+                       ret = nct7904_read_reg(data, BANK_0,
+                                              SMI_STS1_REG);
+                       if (ret < 0)
+                               return ret;
+                       *val = (ret >> (((channel * 2) + 1) & 0x07)) & 1;
+               } else {
+                       if ((channel - 5) < 4) {
+                               ret = nct7904_read_reg(data, BANK_0,
+                                                      SMI_STS7_REG +
+                                                      ((channel - 5) >> 3));
+                               if (ret < 0)
+                                       return ret;
+                               *val = (ret >> ((channel - 5) & 0x07)) & 1;
+                       } else {
+                               ret = nct7904_read_reg(data, BANK_0,
+                                                      SMI_STS8_REG +
+                                                      ((channel - 5) >> 3));
+                               if (ret < 0)
+                                       return ret;
+                               *val = (ret >> (((channel - 5) & 0x07) - 4))
+                                                       & 1;
+                       }
+               }
+               return 0;
+       case hwmon_temp_type:
+               if (channel < 5) {
+                       if ((data->tcpu_mask >> channel) & 0x01) {
+                               if ((data->temp_mode >> channel) & 0x01)
+                                       *val = 3; /* TD */
+                               else
+                                       *val = 4; /* TR */
+                       } else {
+                               *val = 0;
+                       }
+               } else {
+                       if ((data->has_dts >> (channel - 5)) & 0x01) {
+                               if (data->enable_dts & ENABLE_TSI)
+                                       *val = 5; /* TSI */
+                               else
+                                       *val = 6; /* PECI */
+                       } else {
+                               *val = 0;
+                       }
+               }
+               return 0;
+       case hwmon_temp_max:
+               reg1 = LTD_HV_LL_REG;
+               reg2 = TEMP_CH1_W_REG;
+               reg3 = DTS_T_CPU1_W_REG;
+               break;
+       case hwmon_temp_max_hyst:
+               reg1 = LTD_LV_LL_REG;
+               reg2 = TEMP_CH1_WH_REG;
+               reg3 = DTS_T_CPU1_WH_REG;
+               break;
+       case hwmon_temp_crit:
+               reg1 = LTD_HV_HL_REG;
+               reg2 = TEMP_CH1_C_REG;
+               reg3 = DTS_T_CPU1_C_REG;
+               break;
+       case hwmon_temp_crit_hyst:
+               reg1 = LTD_LV_HL_REG;
+               reg2 = TEMP_CH1_CH_REG;
+               reg3 = DTS_T_CPU1_CH_REG;
+               break;
        default:
                return -EOPNOTSUPP;
        }
+
+       if (channel == 4)
+               ret = nct7904_read_reg(data, BANK_1, reg1);
+       else if (channel < 5)
+               ret = nct7904_read_reg(data, BANK_1,
+                                      reg2 + channel * 8);
+       else
+               ret = nct7904_read_reg(data, BANK_1,
+                                      reg3 + (channel - 5) * 4);
+
+       if (ret < 0)
+               return ret;
+       *val = ret * 1000;
+       return 0;
 }
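[Editorial note] The limit readback at the end of nct7904_read_temp() treats the Bank 1 limit registers as whole degrees Celsius, hence the * 1000 conversion to the millidegrees hwmon expects; channel 4 (the LTD sensor) has dedicated registers, channels 0..3 step by 8 registers per channel, and the DTS/PECI channels by 4. So, for instance, a critical-limit register holding 85 is reported as 85000.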
 
 static umode_t nct7904_temp_is_visible(const void *_data, u32 attr, int channel)
 {
        const struct nct7904_data *data = _data;
 
-       if (attr == hwmon_temp_input) {
+       switch (attr) {
+       case hwmon_temp_input:
+       case hwmon_temp_alarm:
+       case hwmon_temp_type:
                if (channel < 5) {
                        if (data->tcpu_mask & BIT(channel))
                                return 0444;
@@ -267,6 +458,21 @@ static umode_t nct7904_temp_is_visible(const void *_data, u32 attr, int channel)
                        if (data->has_dts & BIT(channel - 5))
                                return 0444;
                }
+               break;
+       case hwmon_temp_max:
+       case hwmon_temp_max_hyst:
+       case hwmon_temp_crit:
+       case hwmon_temp_crit_hyst:
+               if (channel < 5) {
+                       if (data->tcpu_mask & BIT(channel))
+                               return 0644;
+               } else {
+                       if (data->has_dts & BIT(channel - 5))
+                               return 0644;
+               }
+               break;
+       default:
+               break;
        }
 
        return 0;
@@ -297,6 +503,137 @@ static int nct7904_read_pwm(struct device *dev, u32 attr, int channel,
        }
 }
 
+static int nct7904_write_temp(struct device *dev, u32 attr, int channel,
+                             long val)
+{
+       struct nct7904_data *data = dev_get_drvdata(dev);
+       int ret;
+       unsigned int reg1, reg2, reg3;
+
+       val = clamp_val(val / 1000, -128, 127);
+
+       switch (attr) {
+       case hwmon_temp_max:
+               reg1 = LTD_HV_LL_REG;
+               reg2 = TEMP_CH1_W_REG;
+               reg3 = DTS_T_CPU1_W_REG;
+               break;
+       case hwmon_temp_max_hyst:
+               reg1 = LTD_LV_LL_REG;
+               reg2 = TEMP_CH1_WH_REG;
+               reg3 = DTS_T_CPU1_WH_REG;
+               break;
+       case hwmon_temp_crit:
+               reg1 = LTD_HV_HL_REG;
+               reg2 = TEMP_CH1_C_REG;
+               reg3 = DTS_T_CPU1_C_REG;
+               break;
+       case hwmon_temp_crit_hyst:
+               reg1 = LTD_LV_HL_REG;
+               reg2 = TEMP_CH1_CH_REG;
+               reg3 = DTS_T_CPU1_CH_REG;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       if (channel == 4)
+               ret = nct7904_write_reg(data, BANK_1, reg1, val);
+       else if (channel < 5)
+               ret = nct7904_write_reg(data, BANK_1,
+                                       reg2 + channel * 8, val);
+       else
+               ret = nct7904_write_reg(data, BANK_1,
+                                       reg3 + (channel - 5) * 4, val);
+
+       return ret;
+}
+
+static int nct7904_write_fan(struct device *dev, u32 attr, int channel,
+                            long val)
+{
+       struct nct7904_data *data = dev_get_drvdata(dev);
+       int ret;
+       u8 tmp;
+
+       switch (attr) {
+       case hwmon_fan_min:
+               if (val <= 0)
+                       return -EINVAL;
+
+               val = clamp_val(DIV_ROUND_CLOSEST(1350000, val), 1, 0x1fff);
+               tmp = (val >> 5) & 0xff;
+               ret = nct7904_write_reg(data, BANK_1,
+                                       FANIN1_HV_HL_REG + channel * 2, tmp);
+               if (ret < 0)
+                       return ret;
+               tmp = val & 0x1f;
+               ret = nct7904_write_reg(data, BANK_1,
+                                       FANIN1_LV_HL_REG + channel * 2, tmp);
+               return ret;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int nct7904_write_in(struct device *dev, u32 attr, int channel,
+                           long val)
+{
+       struct nct7904_data *data = dev_get_drvdata(dev);
+       int ret, index, tmp;
+
+       index = nct7904_chan_to_index[channel];
+
+       if (index < 14)
+               val = val / 2; /* 0.002V scale */
+       else
+               val = val / 6; /* 0.006V scale */
+
+       val = clamp_val(val, 0, 0x7ff);
+
+       switch (attr) {
+       case hwmon_in_min:
+               tmp = nct7904_read_reg(data, BANK_1,
+                                      VSEN1_LV_LL_REG + index * 4);
+               if (tmp < 0)
+                       return tmp;
+               tmp &= ~0x7;
+               tmp |= val & 0x7;
+               ret = nct7904_write_reg(data, BANK_1,
+                                       VSEN1_LV_LL_REG + index * 4, tmp);
+               if (ret < 0)
+                       return ret;
+               tmp = nct7904_read_reg(data, BANK_1,
+                                      VSEN1_HV_LL_REG + index * 4);
+               if (tmp < 0)
+                       return tmp;
+               tmp = (val >> 3) & 0xff;
+               ret = nct7904_write_reg(data, BANK_1,
+                                       VSEN1_HV_LL_REG + index * 4, tmp);
+               return ret;
+       case hwmon_in_max:
+               tmp = nct7904_read_reg(data, BANK_1,
+                                      VSEN1_LV_HL_REG + index * 4);
+               if (tmp < 0)
+                       return tmp;
+               tmp &= ~0x7;
+               tmp |= val & 0x7;
+               ret = nct7904_write_reg(data, BANK_1,
+                                       VSEN1_LV_HL_REG + index * 4, tmp);
+               if (ret < 0)
+                       return ret;
+               tmp = nct7904_read_reg(data, BANK_1,
+                                      VSEN1_HV_HL_REG + index * 4);
+               if (tmp < 0)
+                       return tmp;
+               tmp = (val >> 3) & 0xff;
+               ret = nct7904_write_reg(data, BANK_1,
+                                       VSEN1_HV_HL_REG + index * 4, tmp);
+               return ret;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
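[Editorial note] The voltage-limit writes above split an 11-bit value across two registers: the low 3 bits go into the LV limit register (read-modify-write, so unrelated bits are preserved) and the upper 8 bits into the HV register. As a hypothetical example on the 2 mV scale (index < 14), a requested 1200 mV minimum becomes val = 600, so the HV register gets 600 >> 3 = 75 and the LV register's low bits get 600 & 0x7 = 0.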
+
 static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
                             long val)
 {
@@ -354,8 +691,14 @@ static int nct7904_write(struct device *dev, enum hwmon_sensor_types type,
                         u32 attr, int channel, long val)
 {
        switch (type) {
+       case hwmon_in:
+               return nct7904_write_in(dev, attr, channel, val);
+       case hwmon_fan:
+               return nct7904_write_fan(dev, attr, channel, val);
        case hwmon_pwm:
                return nct7904_write_pwm(dev, attr, channel, val);
+       case hwmon_temp:
+               return nct7904_write_temp(dev, attr, channel, val);
        default:
                return -EOPNOTSUPP;
        }
@@ -404,51 +747,91 @@ static int nct7904_detect(struct i2c_client *client,
 
 static const struct hwmon_channel_info *nct7904_info[] = {
        HWMON_CHANNEL_INFO(in,
-                          HWMON_I_INPUT, /* dummy, skipped in is_visible */
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT,
-                          HWMON_I_INPUT),
+                          /* dummy, skipped in is_visible */
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM,
+                          HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+                          HWMON_I_ALARM),
        HWMON_CHANNEL_INFO(fan,
-                          HWMON_F_INPUT,
-                          HWMON_F_INPUT,
-                          HWMON_F_INPUT,
-                          HWMON_F_INPUT,
-                          HWMON_F_INPUT,
-                          HWMON_F_INPUT,
-                          HWMON_F_INPUT,
-                          HWMON_F_INPUT),
+                          HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+                          HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+                          HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+                          HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+                          HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+                          HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+                          HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+                          HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM),
        HWMON_CHANNEL_INFO(pwm,
                           HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
                           HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
                           HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
                           HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
        HWMON_CHANNEL_INFO(temp,
-                          HWMON_T_INPUT,
-                          HWMON_T_INPUT,
-                          HWMON_T_INPUT,
-                          HWMON_T_INPUT,
-                          HWMON_T_INPUT,
-                          HWMON_T_INPUT,
-                          HWMON_T_INPUT,
-                          HWMON_T_INPUT,
-                          HWMON_T_INPUT),
+                          HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+                          HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+                          HWMON_T_CRIT_HYST,
+                          HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+                          HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+                          HWMON_T_CRIT_HYST,
+                          HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+                          HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+                          HWMON_T_CRIT_HYST,
+                          HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+                          HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+                          HWMON_T_CRIT_HYST,
+                          HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+                          HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+                          HWMON_T_CRIT_HYST,
+                          HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+                          HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+                          HWMON_T_CRIT_HYST,
+                          HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+                          HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+                          HWMON_T_CRIT_HYST,
+                          HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+                          HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+                          HWMON_T_CRIT_HYST,
+                          HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+                          HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+                          HWMON_T_CRIT_HYST),
        NULL
 };
 
@@ -530,11 +913,14 @@ static int nct7904_probe(struct i2c_client *client,
        if (ret < 0)
                return ret;
 
+       data->temp_mode = 0;
        for (i = 0; i < 4; i++) {
                val = (ret & (0x03 << i)) >> (i * 2);
                bit = (1 << i);
                if (val == 0)
                        data->tcpu_mask &= ~bit;
+               else if (val == 0x1 || val == 0x2)
+                       data->temp_mode |= bit;
        }
 
        /* PECI */
@@ -557,7 +943,7 @@ static int nct7904_probe(struct i2c_client *client,
                if (ret < 0)
                        return ret;
                data->has_dts = ret & 0xF;
-               if (data->enable_dts & 0x2) {
+               if (data->enable_dts & ENABLE_TSI) {
                        ret = nct7904_read_reg(data, BANK_0, DTS_T_CTRL1_REG);
                        if (ret < 0)
                                return ret;
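
The channel table above feeds the hwmon core's registration-with-info path. As a rough, hedged illustration of that API — not the nct7904 code itself; the demo_* names, channel set and constant readings are invented — a minimal driver wiring a HWMON_CHANNEL_INFO table into devm_hwmon_device_register_with_info() could look like:

/*
 * Hedged sketch: minimal hwmon registration built on HWMON_CHANNEL_INFO
 * and devm_hwmon_device_register_with_info(). All demo_* names and the
 * constant readings are illustrative only.
 */
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/platform_device.h>

static const struct hwmon_channel_info *demo_info[] = {
        HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_ALARM),
        HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_MIN),
        NULL
};

static umode_t demo_is_visible(const void *data, enum hwmon_sensor_types type,
                               u32 attr, int channel)
{
        return 0444;    /* everything read-only in this sketch */
}

static int demo_read(struct device *dev, enum hwmon_sensor_types type,
                     u32 attr, int channel, long *val)
{
        *val = 0;       /* a real driver would read a hardware register here */
        return 0;
}

static const struct hwmon_ops demo_ops = {
        .is_visible = demo_is_visible,
        .read = demo_read,
};

static const struct hwmon_chip_info demo_chip_info = {
        .ops = &demo_ops,
        .info = demo_info,
};

static int demo_probe(struct platform_device *pdev)
{
        struct device *hwmon;

        hwmon = devm_hwmon_device_register_with_info(&pdev->dev, "demo", NULL,
                                                     &demo_chip_info, NULL);
        return PTR_ERR_OR_ZERO(hwmon);
}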
index 09aaefa6fdb8c192e561d720dab6cc7d65a40138..11a28609da3c769343fd83c976a6ffdc13050bc5 100644 (file)
@@ -967,10 +967,8 @@ static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
                spin_lock_init(&data->fan_lock[i]);
 
                data->fan_irq[i] = platform_get_irq(pdev, i);
-               if (data->fan_irq[i] < 0) {
-                       dev_err(dev, "get IRQ fan%d failed\n", i);
+               if (data->fan_irq[i] < 0)
                        return data->fan_irq[i];
-               }
 
                sprintf(name, "NPCM7XX-FAN-MD%d", i);
                ret = devm_request_irq(dev, data->fan_irq[i], npcm7xx_fan_isr,
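
The dropped dev_err() reflects that platform_get_irq() already logs a message on failure, so callers only propagate the error code. A hedged sketch of that calling pattern, with invented demo_* names:

/*
 * Hedged sketch: platform_get_irq() prints its own error message, so the
 * caller just returns the negative value. demo_* names are illustrative.
 */
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t demo_isr(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int demo_request_fan_irq(struct platform_device *pdev, int index)
{
        int irq = platform_get_irq(pdev, index);

        if (irq < 0)
                return irq;     /* no extra dev_err() needed here */

        return devm_request_irq(&pdev->dev, irq, demo_isr, 0,
                                dev_name(&pdev->dev), NULL);
}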
index b6588483fae125a28562df82e90b2fb1446f29ea..d62d69bb7e490412b72ec7e6eb4e2ad612add115 100644 (file)
@@ -46,6 +46,15 @@ config SENSORS_IBM_CFFPS
          This driver can also be built as a module. If so, the module will
          be called ibm-cffps.
 
+config SENSORS_INSPUR_IPSPS
+       tristate "INSPUR Power System Power Supply"
+       help
+         If you say yes here you get hardware monitoring support for the INSPUR
+         Power System power supply.
+
+         This driver can also be built as a module. If so, the module will
+         be called inspur-ipsps.
+
 config SENSORS_IR35221
        tristate "Infineon IR35221"
        help
index c950ea9a5d003e0645be173a871303875cc6a47b..03bacfcfd6607dbb2365b6d1e912489f3838f92b 100644 (file)
@@ -7,6 +7,7 @@ obj-$(CONFIG_PMBUS)             += pmbus_core.o
 obj-$(CONFIG_SENSORS_PMBUS)    += pmbus.o
 obj-$(CONFIG_SENSORS_ADM1275)  += adm1275.o
 obj-$(CONFIG_SENSORS_IBM_CFFPS)        += ibm-cffps.o
+obj-$(CONFIG_SENSORS_INSPUR_IPSPS) += inspur-ipsps.o
 obj-$(CONFIG_SENSORS_IR35221)  += ir35221.o
 obj-$(CONFIG_SENSORS_IR38064)  += ir38064.o
 obj-$(CONFIG_SENSORS_IRPS5401) += irps5401.o
index ee2ee9e3ffd739ab06116b6d6ef9db1867980665..d44745e498e76b0d847caaff878118e711a571d0 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/leds.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/of_device.h>
 #include <linux/pmbus.h>
 
 #include "pmbus.h"
@@ -20,8 +21,9 @@
 #define CFFPS_PN_CMD                           0x9B
 #define CFFPS_SN_CMD                           0x9E
 #define CFFPS_CCIN_CMD                         0xBD
-#define CFFPS_FW_CMD_START                     0xFA
-#define CFFPS_FW_NUM_BYTES                     4
+#define CFFPS_FW_CMD                           0xFA
+#define CFFPS1_FW_NUM_BYTES                    4
+#define CFFPS2_FW_NUM_WORDS                    3
 #define CFFPS_SYS_CONFIG_CMD                   0xDA
 
 #define CFFPS_INPUT_HISTORY_CMD                        0xD6
@@ -52,6 +54,8 @@ enum {
        CFFPS_DEBUGFS_NUM_ENTRIES
 };
 
+enum versions { cffps1, cffps2 };
+
 struct ibm_cffps_input_history {
        struct mutex update_lock;
        unsigned long last_update;
@@ -61,6 +65,7 @@ struct ibm_cffps_input_history {
 };
 
 struct ibm_cffps {
+       enum versions version;
        struct i2c_client *client;
 
        struct ibm_cffps_input_history input_history;
@@ -132,6 +137,8 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
        struct ibm_cffps *psu = to_psu(idxp, idx);
        char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
 
+       pmbus_set_page(psu->client, 0);
+
        switch (idx) {
        case CFFPS_DEBUGFS_INPUT_HISTORY:
                return ibm_cffps_read_input_history(psu, buf, count, ppos);
@@ -152,16 +159,36 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
                rc = snprintf(data, 5, "%04X", rc);
                goto done;
        case CFFPS_DEBUGFS_FW:
-               for (i = 0; i < CFFPS_FW_NUM_BYTES; ++i) {
-                       rc = i2c_smbus_read_byte_data(psu->client,
-                                                     CFFPS_FW_CMD_START + i);
-                       if (rc < 0)
-                               return rc;
+               switch (psu->version) {
+               case cffps1:
+                       for (i = 0; i < CFFPS1_FW_NUM_BYTES; ++i) {
+                               rc = i2c_smbus_read_byte_data(psu->client,
+                                                             CFFPS_FW_CMD +
+                                                               i);
+                               if (rc < 0)
+                                       return rc;
+
+                               snprintf(&data[i * 2], 3, "%02X", rc);
+                       }
 
-                       snprintf(&data[i * 2], 3, "%02X", rc);
-               }
+                       rc = i * 2;
+                       break;
+               case cffps2:
+                       for (i = 0; i < CFFPS2_FW_NUM_WORDS; ++i) {
+                               rc = i2c_smbus_read_word_data(psu->client,
+                                                             CFFPS_FW_CMD +
+                                                               i);
+                               if (rc < 0)
+                                       return rc;
+
+                               snprintf(&data[i * 4], 5, "%04X", rc);
+                       }
 
-               rc = i * 2;
+                       rc = i * 4;
+                       break;
+               default:
+                       return -EOPNOTSUPP;
+               }
                goto done;
        default:
                return -EINVAL;
@@ -279,6 +306,8 @@ static void ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
                        psu->led_state = CFFPS_LED_ON;
        }
 
+       pmbus_set_page(psu->client, 0);
+
        rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
                                       psu->led_state);
        if (rc < 0)
@@ -299,6 +328,8 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
        if (led_cdev->brightness == LED_OFF)
                return 0;
 
+       pmbus_set_page(psu->client, 0);
+
        rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
                                       CFFPS_LED_BLINK);
        if (rc < 0)
@@ -328,15 +359,32 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
                dev_warn(dev, "failed to register led class: %d\n", rc);
 }
 
-static struct pmbus_driver_info ibm_cffps_info = {
-       .pages = 1,
-       .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
-               PMBUS_HAVE_PIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_TEMP |
-               PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT |
-               PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT |
-               PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_STATUS_FAN12,
-       .read_byte_data = ibm_cffps_read_byte_data,
-       .read_word_data = ibm_cffps_read_word_data,
+static struct pmbus_driver_info ibm_cffps_info[] = {
+       [cffps1] = {
+               .pages = 1,
+               .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+                       PMBUS_HAVE_PIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_TEMP |
+                       PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
+                       PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+                       PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP |
+                       PMBUS_HAVE_STATUS_FAN12,
+               .read_byte_data = ibm_cffps_read_byte_data,
+               .read_word_data = ibm_cffps_read_word_data,
+       },
+       [cffps2] = {
+               .pages = 2,
+               .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+                       PMBUS_HAVE_PIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_TEMP |
+                       PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
+                       PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+                       PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP |
+                       PMBUS_HAVE_STATUS_FAN12,
+               .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+                       PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
+                       PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT,
+               .read_byte_data = ibm_cffps_read_byte_data,
+               .read_word_data = ibm_cffps_read_word_data,
+       },
 };
 
 static struct pmbus_platform_data ibm_cffps_pdata = {
@@ -347,12 +395,21 @@ static int ibm_cffps_probe(struct i2c_client *client,
                           const struct i2c_device_id *id)
 {
        int i, rc;
+       enum versions vs;
        struct dentry *debugfs;
        struct dentry *ibm_cffps_dir;
        struct ibm_cffps *psu;
+       const void *md = of_device_get_match_data(&client->dev);
+
+       if (md)
+               vs = (enum versions)md;
+       else if (id)
+               vs = (enum versions)id->driver_data;
+       else
+               vs = cffps1;
 
        client->dev.platform_data = &ibm_cffps_pdata;
-       rc = pmbus_do_probe(client, id, &ibm_cffps_info);
+       rc = pmbus_do_probe(client, id, &ibm_cffps_info[vs]);
        if (rc)
                return rc;
 
@@ -364,6 +421,7 @@ static int ibm_cffps_probe(struct i2c_client *client,
        if (!psu)
                return 0;
 
+       psu->version = vs;
        psu->client = client;
        mutex_init(&psu->input_history.update_lock);
        psu->input_history.last_update = jiffies - HZ;
@@ -405,13 +463,21 @@ static int ibm_cffps_probe(struct i2c_client *client,
 }
 
 static const struct i2c_device_id ibm_cffps_id[] = {
-       { "ibm_cffps1", 1 },
+       { "ibm_cffps1", cffps1 },
+       { "ibm_cffps2", cffps2 },
        {}
 };
 MODULE_DEVICE_TABLE(i2c, ibm_cffps_id);
 
 static const struct of_device_id ibm_cffps_of_match[] = {
-       { .compatible = "ibm,cffps1" },
+       {
+               .compatible = "ibm,cffps1",
+               .data = (void *)cffps1
+       },
+       {
+               .compatible = "ibm,cffps2",
+               .data = (void *)cffps2
+       },
        {}
 };
 MODULE_DEVICE_TABLE(of, ibm_cffps_of_match);
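
The probe rework above selects a pmbus_driver_info variant from OF match data, falling back to the i2c_device_id table. A hedged sketch of the same selection pattern outside this driver (the demo_* names are invented):

/*
 * Hedged sketch: picking a chip variant from of_device_get_match_data()
 * with an i2c_device_id fallback, as the ibm-cffps probe above does.
 * The demo_* enum and names are illustrative, not from the driver.
 */
#include <linux/i2c.h>
#include <linux/of_device.h>

enum demo_versions { demo_v1, demo_v2 };

static int demo_probe(struct i2c_client *client,
                      const struct i2c_device_id *id)
{
        enum demo_versions vs;
        const void *md = of_device_get_match_data(&client->dev);

        if (md)
                vs = (enum demo_versions)(unsigned long)md;  /* of_device_id .data */
        else if (id)
                vs = (enum demo_versions)id->driver_data;    /* i2c id table fallback */
        else
                vs = demo_v1;

        /* vs now indexes a per-variant configuration table */
        return 0;
}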
diff --git a/drivers/hwmon/pmbus/inspur-ipsps.c b/drivers/hwmon/pmbus/inspur-ipsps.c
new file mode 100644 (file)
index 0000000..42e0154
--- /dev/null
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2019 Inspur Corp.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+#include <linux/hwmon-sysfs.h>
+
+#include "pmbus.h"
+
+#define IPSPS_REG_VENDOR_ID    0x99
+#define IPSPS_REG_MODEL                0x9A
+#define IPSPS_REG_FW_VERSION   0x9B
+#define IPSPS_REG_PN           0x9C
+#define IPSPS_REG_SN           0x9E
+#define IPSPS_REG_HW_VERSION   0xB0
+#define IPSPS_REG_MODE         0xFC
+
+#define MODE_ACTIVE            0x55
+#define MODE_STANDBY           0x0E
+#define MODE_REDUNDANCY                0x00
+
+#define MODE_ACTIVE_STRING             "active"
+#define MODE_STANDBY_STRING            "standby"
+#define MODE_REDUNDANCY_STRING         "redundancy"
+
+enum ipsps_index {
+       vendor,
+       model,
+       fw_version,
+       part_number,
+       serial_number,
+       hw_version,
+       mode,
+       num_regs,
+};
+
+static const u8 ipsps_regs[num_regs] = {
+       [vendor] = IPSPS_REG_VENDOR_ID,
+       [model] = IPSPS_REG_MODEL,
+       [fw_version] = IPSPS_REG_FW_VERSION,
+       [part_number] = IPSPS_REG_PN,
+       [serial_number] = IPSPS_REG_SN,
+       [hw_version] = IPSPS_REG_HW_VERSION,
+       [mode] = IPSPS_REG_MODE,
+};
+
+static ssize_t ipsps_string_show(struct device *dev,
+                                struct device_attribute *devattr,
+                                char *buf)
+{
+       u8 reg;
+       int rc;
+       char *p;
+       char data[I2C_SMBUS_BLOCK_MAX + 1];
+       struct i2c_client *client = to_i2c_client(dev->parent);
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+       reg = ipsps_regs[attr->index];
+       rc = i2c_smbus_read_block_data(client, reg, data);
+       if (rc < 0)
+               return rc;
+
+       /* filled with printable characters, ending with # */
+       p = memscan(data, '#', rc);
+       *p = '\0';
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", data);
+}
+
+static ssize_t ipsps_fw_version_show(struct device *dev,
+                                    struct device_attribute *devattr,
+                                    char *buf)
+{
+       u8 reg;
+       int rc;
+       u8 data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+       struct i2c_client *client = to_i2c_client(dev->parent);
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+       reg = ipsps_regs[attr->index];
+       rc = i2c_smbus_read_block_data(client, reg, data);
+       if (rc < 0)
+               return rc;
+
+       if (rc != 6)
+               return -EPROTO;
+
+       return snprintf(buf, PAGE_SIZE, "%u.%02u%u-%u.%02u\n",
+                       data[1], data[2]/* < 100 */, data[3]/*< 10*/,
+                       data[4], data[5]/* < 100 */);
+}
+
+static ssize_t ipsps_mode_show(struct device *dev,
+                              struct device_attribute *devattr, char *buf)
+{
+       u8 reg;
+       int rc;
+       struct i2c_client *client = to_i2c_client(dev->parent);
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+       reg = ipsps_regs[attr->index];
+       rc = i2c_smbus_read_byte_data(client, reg);
+       if (rc < 0)
+               return rc;
+
+       switch (rc) {
+       case MODE_ACTIVE:
+               return snprintf(buf, PAGE_SIZE, "[%s] %s %s\n",
+                               MODE_ACTIVE_STRING,
+                               MODE_STANDBY_STRING, MODE_REDUNDANCY_STRING);
+       case MODE_STANDBY:
+               return snprintf(buf, PAGE_SIZE, "%s [%s] %s\n",
+                               MODE_ACTIVE_STRING,
+                               MODE_STANDBY_STRING, MODE_REDUNDANCY_STRING);
+       case MODE_REDUNDANCY:
+               return snprintf(buf, PAGE_SIZE, "%s %s [%s]\n",
+                               MODE_ACTIVE_STRING,
+                               MODE_STANDBY_STRING, MODE_REDUNDANCY_STRING);
+       default:
+               return snprintf(buf, PAGE_SIZE, "unspecified\n");
+       }
+}
+
+static ssize_t ipsps_mode_store(struct device *dev,
+                               struct device_attribute *devattr,
+                               const char *buf, size_t count)
+{
+       u8 reg;
+       int rc;
+       struct i2c_client *client = to_i2c_client(dev->parent);
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+       reg = ipsps_regs[attr->index];
+       if (sysfs_streq(MODE_STANDBY_STRING, buf)) {
+               rc = i2c_smbus_write_byte_data(client, reg,
+                                              MODE_STANDBY);
+               if (rc < 0)
+                       return rc;
+               return count;
+       } else if (sysfs_streq(MODE_ACTIVE_STRING, buf)) {
+               rc = i2c_smbus_write_byte_data(client, reg,
+                                              MODE_ACTIVE);
+               if (rc < 0)
+                       return rc;
+               return count;
+       }
+
+       return -EINVAL;
+}
+
+static SENSOR_DEVICE_ATTR_RO(vendor, ipsps_string, vendor);
+static SENSOR_DEVICE_ATTR_RO(model, ipsps_string, model);
+static SENSOR_DEVICE_ATTR_RO(part_number, ipsps_string, part_number);
+static SENSOR_DEVICE_ATTR_RO(serial_number, ipsps_string, serial_number);
+static SENSOR_DEVICE_ATTR_RO(hw_version, ipsps_string, hw_version);
+static SENSOR_DEVICE_ATTR_RO(fw_version, ipsps_fw_version, fw_version);
+static SENSOR_DEVICE_ATTR_RW(mode, ipsps_mode, mode);
+
+static struct attribute *ipsps_attrs[] = {
+       &sensor_dev_attr_vendor.dev_attr.attr,
+       &sensor_dev_attr_model.dev_attr.attr,
+       &sensor_dev_attr_part_number.dev_attr.attr,
+       &sensor_dev_attr_serial_number.dev_attr.attr,
+       &sensor_dev_attr_hw_version.dev_attr.attr,
+       &sensor_dev_attr_fw_version.dev_attr.attr,
+       &sensor_dev_attr_mode.dev_attr.attr,
+       NULL,
+};
+
+ATTRIBUTE_GROUPS(ipsps);
+
+static struct pmbus_driver_info ipsps_info = {
+       .pages = 1,
+       .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+               PMBUS_HAVE_IIN | PMBUS_HAVE_POUT | PMBUS_HAVE_PIN |
+               PMBUS_HAVE_FAN12 | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+               PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT |
+               PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT |
+               PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_STATUS_FAN12,
+       .groups = ipsps_groups,
+};
+
+static struct pmbus_platform_data ipsps_pdata = {
+       .flags = PMBUS_SKIP_STATUS_CHECK,
+};
+
+static int ipsps_probe(struct i2c_client *client,
+                      const struct i2c_device_id *id)
+{
+       client->dev.platform_data = &ipsps_pdata;
+       return pmbus_do_probe(client, id, &ipsps_info);
+}
+
+static const struct i2c_device_id ipsps_id[] = {
+       { "ipsps1", 0 },
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, ipsps_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id ipsps_of_match[] = {
+       { .compatible = "inspur,ipsps1" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, ipsps_of_match);
+#endif
+
+static struct i2c_driver ipsps_driver = {
+       .driver = {
+               .name = "inspur-ipsps",
+               .of_match_table = of_match_ptr(ipsps_of_match),
+       },
+       .probe = ipsps_probe,
+       .remove = pmbus_do_remove,
+       .id_table = ipsps_id,
+};
+
+module_i2c_driver(ipsps_driver);
+
+MODULE_AUTHOR("John Wang");
+MODULE_DESCRIPTION("PMBus driver for Inspur Power System power supplies");
+MODULE_LICENSE("GPL");
index 69d9029ea410d03581515b954a74a5264f2a6f20..254b0f98c7552024dad65835e3805342b9147ba4 100644 (file)
@@ -244,8 +244,6 @@ static int max31785_write_word_data(struct i2c_client *client, int page,
 #define MAX31785_VOUT_FUNCS \
        (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT)
 
-#define MAX37185_NUM_FAN_PAGES 6
-
 static const struct pmbus_driver_info max31785_info = {
        .pages = MAX31785_NR_PAGES,
 
index c846759bc1c0866d3954f5fd9b895e315275a800..a9229c6b0e84dcc7435d18bd9e19bef9a60227ae 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/pmbus.h>
-#include <linux/gpio.h>
 #include <linux/gpio/driver.h>
 #include "pmbus.h"
 
index efe4bb1ff221c871d99c1c6697690cf870cdeea7..d3a64a35f7a9af2d11fe31de0d37b5abb7c2301a 100644 (file)
@@ -146,7 +146,7 @@ static struct platform_driver rpi_hwmon_driver = {
 };
 module_platform_driver(rpi_hwmon_driver);
 
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
 MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:raspberrypi-hwmon");
index 83fe08185ac7d9a9a1a92338277b742fa0547919..a0078ccede03f9302db615deab5e0d65df75fdcc 100644 (file)
@@ -24,19 +24,33 @@ static const unsigned char shtc1_cmd_measure_blocking_lpm[]    = { 0x64, 0x58 };
 static const unsigned char shtc1_cmd_measure_nonblocking_lpm[] = { 0x60, 0x9c };
 
 /* command for reading the ID register */
-static const unsigned char shtc1_cmd_read_id_reg[]            = { 0xef, 0xc8 };
+static const unsigned char shtc1_cmd_read_id_reg[]             = { 0xef, 0xc8 };
 
-/* constants for reading the ID register */
-#define SHTC1_ID         0x07
-#define SHTC1_ID_REG_MASK 0x1f
+/*
+ * constants for reading the ID register
+ * SHTC1: 0x0007 with mask 0x003f
+ * SHTW1: 0x0007 with mask 0x003f
+ * SHTC3: 0x0807 with mask 0x083f
+ */
+#define SHTC3_ID      0x0807
+#define SHTC3_ID_MASK 0x083f
+#define SHTC1_ID      0x0007
+#define SHTC1_ID_MASK 0x003f
 
 /* delays for non-blocking i2c commands, both in us */
 #define SHTC1_NONBLOCKING_WAIT_TIME_HPM  14400
 #define SHTC1_NONBLOCKING_WAIT_TIME_LPM   1000
+#define SHTC3_NONBLOCKING_WAIT_TIME_HPM  12100
+#define SHTC3_NONBLOCKING_WAIT_TIME_LPM    800
 
 #define SHTC1_CMD_LENGTH      2
 #define SHTC1_RESPONSE_LENGTH 6
 
+enum shtcx_chips {
+       shtc1,
+       shtc3,
+};
+
 struct shtc1_data {
        struct i2c_client *client;
        struct mutex update_lock;
@@ -47,6 +61,7 @@ struct shtc1_data {
        unsigned int nonblocking_wait_time; /* in us */
 
        struct shtc1_platform_data setup;
+       enum shtcx_chips chip;
 
        int temperature; /* 1000 * temperature in dgr C */
        int humidity; /* 1000 * relative humidity in %RH */
@@ -157,13 +172,16 @@ static void shtc1_select_command(struct shtc1_data *data)
                data->command = data->setup.blocking_io ?
                                shtc1_cmd_measure_blocking_hpm :
                                shtc1_cmd_measure_nonblocking_hpm;
-               data->nonblocking_wait_time = SHTC1_NONBLOCKING_WAIT_TIME_HPM;
-
+               data->nonblocking_wait_time = (data->chip == shtc1) ?
+                               SHTC1_NONBLOCKING_WAIT_TIME_HPM :
+                               SHTC3_NONBLOCKING_WAIT_TIME_HPM;
        } else {
                data->command = data->setup.blocking_io ?
                                shtc1_cmd_measure_blocking_lpm :
                                shtc1_cmd_measure_nonblocking_lpm;
-               data->nonblocking_wait_time = SHTC1_NONBLOCKING_WAIT_TIME_LPM;
+               data->nonblocking_wait_time = (data->chip == shtc1) ?
+                               SHTC1_NONBLOCKING_WAIT_TIME_LPM :
+                               SHTC3_NONBLOCKING_WAIT_TIME_LPM;
        }
 }
 
@@ -171,9 +189,11 @@ static int shtc1_probe(struct i2c_client *client,
                       const struct i2c_device_id *id)
 {
        int ret;
-       char id_reg[2];
+       u16 id_reg;
+       char id_reg_buf[2];
        struct shtc1_data *data;
        struct device *hwmon_dev;
+       enum shtcx_chips chip = id->driver_data;
        struct i2c_adapter *adap = client->adapter;
        struct device *dev = &client->dev;
 
@@ -187,13 +207,20 @@ static int shtc1_probe(struct i2c_client *client,
                dev_err(dev, "could not send read_id_reg command: %d\n", ret);
                return ret < 0 ? ret : -ENODEV;
        }
-       ret = i2c_master_recv(client, id_reg, sizeof(id_reg));
-       if (ret != sizeof(id_reg)) {
+       ret = i2c_master_recv(client, id_reg_buf, sizeof(id_reg_buf));
+       if (ret != sizeof(id_reg_buf)) {
                dev_err(dev, "could not read ID register: %d\n", ret);
                return -ENODEV;
        }
-       if ((id_reg[1] & SHTC1_ID_REG_MASK) != SHTC1_ID) {
-               dev_err(dev, "ID register doesn't match\n");
+
+       id_reg = be16_to_cpup((__be16 *)id_reg_buf);
+       if (chip == shtc3) {
+               if ((id_reg & SHTC3_ID_MASK) != SHTC3_ID) {
+                       dev_err(dev, "SHTC3 ID register does not match\n");
+                       return -ENODEV;
+               }
+       } else if ((id_reg & SHTC1_ID_MASK) != SHTC1_ID) {
+               dev_err(dev, "SHTC1 ID register does not match\n");
                return -ENODEV;
        }
 
@@ -204,6 +231,7 @@ static int shtc1_probe(struct i2c_client *client,
        data->setup.blocking_io = false;
        data->setup.high_precision = true;
        data->client = client;
+       data->chip = chip;
 
        if (client->dev.platform_data)
                data->setup = *(struct shtc1_platform_data *)dev->platform_data;
@@ -222,8 +250,9 @@ static int shtc1_probe(struct i2c_client *client,
 
 /* device ID table */
 static const struct i2c_device_id shtc1_id[] = {
-       { "shtc1", 0 },
-       { "shtw1", 0 },
+       { "shtc1", shtc1 },
+       { "shtw1", shtc1 },
+       { "shtc3", shtc3 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, shtc1_id);
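
The new ID-register checks key off the chip enum carried in i2c_device_id.driver_data. A condensed, hedged sketch of that variant-selection mechanism, with invented demo_* names:

/*
 * Hedged sketch: routing per-chip constants through
 * i2c_device_id.driver_data, mirroring the shtc1/shtc3 handling above.
 * demo_* names are illustrative.
 */
#include <linux/i2c.h>
#include <linux/module.h>

enum demo_chips { demo_chip_a, demo_chip_b };

static int demo_probe(struct i2c_client *client,
                      const struct i2c_device_id *id)
{
        enum demo_chips chip = id->driver_data;

        /* pick chip-specific ID values, masks or wait times based on 'chip' */
        return 0;
}

static const struct i2c_device_id demo_id[] = {
        { "demo-a", demo_chip_a },
        { "demo-b", demo_chip_b },
        { }
};
MODULE_DEVICE_TABLE(i2c, demo_id);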
index d8c91c2cb8cf5eab77498c81cd465033597bc2dd..6eff14fe395d3466ba944b1b81a37fffc0c87b19 100644 (file)
@@ -586,10 +586,10 @@ static int smm665_probe(struct i2c_client *client,
 
        data->client = client;
        data->type = id->driver_data;
-       data->cmdreg = i2c_new_dummy(adapter, (client->addr & ~SMM665_REGMASK)
+       data->cmdreg = i2c_new_dummy_device(adapter, (client->addr & ~SMM665_REGMASK)
                                     | SMM665_CMDREG_BASE);
-       if (!data->cmdreg)
-               return -ENOMEM;
+       if (IS_ERR(data->cmdreg))
+               return PTR_ERR(data->cmdreg);
 
        switch (data->type) {
        case smm465:
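
Unlike i2c_new_dummy(), i2c_new_dummy_device() reports failure as an ERR_PTR rather than NULL, which is why the checks above move from a NULL test and -ENOMEM to IS_ERR()/PTR_ERR(). A hedged sketch of the converted pattern (demo_* names invented):

/*
 * Hedged sketch: the i2c_new_dummy() -> i2c_new_dummy_device() conversion.
 * The new API reports failures as ERR_PTR values rather than NULL.
 */
#include <linux/err.h>
#include <linux/i2c.h>

static int demo_attach_subclient(struct i2c_adapter *adap, u16 addr,
                                 struct i2c_client **out)
{
        struct i2c_client *sub = i2c_new_dummy_device(adap, addr);

        if (IS_ERR(sub))
                return PTR_ERR(sub);    /* was: if (!sub) return -ENOMEM; */

        *out = sub;
        return 0;
}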
index d2c04b6a3f2b7cfd7974b8cf19ae1f21ad80ed69..015f1ea319669eff7932daf49456a6638b9a7add 100644 (file)
@@ -894,12 +894,12 @@ w83781d_detect_subclients(struct i2c_client *new_client)
        }
 
        for (i = 0; i < num_sc; i++) {
-               data->lm75[i] = i2c_new_dummy(adapter, sc_addr[i]);
-               if (!data->lm75[i]) {
+               data->lm75[i] = i2c_new_dummy_device(adapter, sc_addr[i]);
+               if (IS_ERR(data->lm75[i])) {
                        dev_err(&new_client->dev,
                                "Subclient %d registration at address 0x%x failed.\n",
                                i, sc_addr[i]);
-                       err = -ENOMEM;
+                       err = PTR_ERR(data->lm75[i]);
                        if (i == 1)
                                goto ERROR_SC_3;
                        goto ERROR_SC_2;
index 050ad42016914803181eb5e6f306185039f4eac8..aad8d4da5802be02cf981778ea68642075706f4a 100644 (file)
@@ -1260,7 +1260,7 @@ static int w83791d_detect_subclients(struct i2c_client *client)
        struct i2c_adapter *adapter = client->adapter;
        struct w83791d_data *data = i2c_get_clientdata(client);
        int address = client->addr;
-       int i, id, err;
+       int i, id;
        u8 val;
 
        id = i2c_adapter_id(adapter);
@@ -1272,8 +1272,7 @@ static int w83791d_detect_subclients(struct i2c_client *client)
                                        "invalid subclient "
                                        "address %d; must be 0x48-0x4f\n",
                                        force_subclients[i]);
-                               err = -ENODEV;
-                               goto error_sc_0;
+                               return -ENODEV;
                        }
                }
                w83791d_write(client, W83791D_REG_I2C_SUBADDR,
@@ -1283,29 +1282,22 @@ static int w83791d_detect_subclients(struct i2c_client *client)
 
        val = w83791d_read(client, W83791D_REG_I2C_SUBADDR);
        if (!(val & 0x08))
-               data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7));
+               data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
+                                                         0x48 + (val & 0x7));
        if (!(val & 0x80)) {
-               if ((data->lm75[0] != NULL) &&
+               if (!IS_ERR(data->lm75[0]) &&
                                ((val & 0x7) == ((val >> 4) & 0x7))) {
                        dev_err(&client->dev,
                                "duplicate addresses 0x%x, "
                                "use force_subclient\n",
                                data->lm75[0]->addr);
-                       err = -ENODEV;
-                       goto error_sc_1;
+                       return -ENODEV;
                }
-               data->lm75[1] = i2c_new_dummy(adapter,
-                                             0x48 + ((val >> 4) & 0x7));
+               data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
+                                                         0x48 + ((val >> 4) & 0x7));
        }
 
        return 0;
-
-/* Undo inits in case of errors */
-
-error_sc_1:
-       i2c_unregister_device(data->lm75[0]);
-error_sc_0:
-       return err;
 }
 
 
@@ -1394,7 +1386,7 @@ static int w83791d_probe(struct i2c_client *client,
        /* Register sysfs hooks */
        err = sysfs_create_group(&client->dev.kobj, &w83791d_group);
        if (err)
-               goto error3;
+               return err;
 
        /* Check if pins of fan/pwm 4-5 are in use as GPIO */
        has_fanpwm45 = w83791d_read(client, W83791D_REG_GPIO) & 0x10;
@@ -1419,9 +1411,6 @@ error5:
                sysfs_remove_group(&client->dev.kobj, &w83791d_group_fanpwm45);
 error4:
        sysfs_remove_group(&client->dev.kobj, &w83791d_group);
-error3:
-       i2c_unregister_device(data->lm75[0]);
-       i2c_unregister_device(data->lm75[1]);
        return err;
 }
 
@@ -1432,9 +1421,6 @@ static int w83791d_remove(struct i2c_client *client)
        hwmon_device_unregister(data->hwmon_dev);
        sysfs_remove_group(&client->dev.kobj, &w83791d_group);
 
-       i2c_unregister_device(data->lm75[0]);
-       i2c_unregister_device(data->lm75[1]);
-
        return 0;
 }
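
The w83791d/w83792d/w83793 conversions rely on devm_i2c_new_dummy_device(), which ties the dummy client's lifetime to the parent device; that is what lets the explicit i2c_unregister_device() calls disappear from the error and remove paths above. A hedged sketch of the device-managed pattern (demo_* names invented):

/*
 * Hedged sketch: device-managed dummy clients. The devres core unregisters
 * them automatically on probe failure or driver unbind, so no manual
 * i2c_unregister_device() is needed. demo_* names are illustrative.
 */
#include <linux/err.h>
#include <linux/i2c.h>

static int demo_detect_subclients(struct i2c_client *client, u8 val,
                                  struct i2c_client *lm75[2])
{
        struct i2c_adapter *adap = client->adapter;

        lm75[0] = devm_i2c_new_dummy_device(&client->dev, adap,
                                            0x48 + (val & 0x7));
        if (IS_ERR(lm75[0]))
                return PTR_ERR(lm75[0]);

        lm75[1] = devm_i2c_new_dummy_device(&client->dev, adap,
                                            0x48 + ((val >> 4) & 0x7));
        if (IS_ERR(lm75[1]))
                return PTR_ERR(lm75[1]);

        return 0;       /* cleanup happens automatically via devres */
}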
 
index da8a6d62aa2347c9c7fd8dac5aae66193082b3f5..7fc8a1160c8f95269a8c29e07db0ff3e92c962d9 100644 (file)
@@ -924,7 +924,7 @@ store_sf2_level(struct device *dev, struct device_attribute *attr,
 static int
 w83792d_detect_subclients(struct i2c_client *new_client)
 {
-       int i, id, err;
+       int i, id;
        int address = new_client->addr;
        u8 val;
        struct i2c_adapter *adapter = new_client->adapter;
@@ -938,8 +938,7 @@ w83792d_detect_subclients(struct i2c_client *new_client)
                                dev_err(&new_client->dev,
                                        "invalid subclient address %d; must be 0x48-0x4f\n",
                                        force_subclients[i]);
-                               err = -ENODEV;
-                               goto ERROR_SC_0;
+                               return -ENODEV;
                        }
                }
                w83792d_write_value(new_client, W83792D_REG_I2C_SUBADDR,
@@ -949,28 +948,21 @@ w83792d_detect_subclients(struct i2c_client *new_client)
 
        val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
        if (!(val & 0x08))
-               data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7));
+               data->lm75[0] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
+                                                         0x48 + (val & 0x7));
        if (!(val & 0x80)) {
-               if ((data->lm75[0] != NULL) &&
+               if (!IS_ERR(data->lm75[0]) &&
                        ((val & 0x7) == ((val >> 4) & 0x7))) {
                        dev_err(&new_client->dev,
                                "duplicate addresses 0x%x, use force_subclient\n",
                                data->lm75[0]->addr);
-                       err = -ENODEV;
-                       goto ERROR_SC_1;
+                       return -ENODEV;
                }
-               data->lm75[1] = i2c_new_dummy(adapter,
-                                             0x48 + ((val >> 4) & 0x7));
+               data->lm75[1] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
+                                                         0x48 + ((val >> 4) & 0x7));
        }
 
        return 0;
-
-/* Undo inits in case of errors */
-
-ERROR_SC_1:
-       i2c_unregister_device(data->lm75[0]);
-ERROR_SC_0:
-       return err;
 }
 
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in, NULL, 0);
@@ -1396,7 +1388,7 @@ w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id)
        /* Register sysfs hooks */
        err = sysfs_create_group(&dev->kobj, &w83792d_group);
        if (err)
-               goto exit_i2c_unregister;
+               return err;
 
        /*
         * Read GPIO enable register to check if pins for fan 4,5 are used as
@@ -1441,9 +1433,6 @@ exit_remove_files:
        sysfs_remove_group(&dev->kobj, &w83792d_group);
        for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
                sysfs_remove_group(&dev->kobj, &w83792d_group_fan[i]);
-exit_i2c_unregister:
-       i2c_unregister_device(data->lm75[0]);
-       i2c_unregister_device(data->lm75[1]);
        return err;
 }
 
@@ -1459,9 +1448,6 @@ w83792d_remove(struct i2c_client *client)
                sysfs_remove_group(&client->dev.kobj,
                                   &w83792d_group_fan[i]);
 
-       i2c_unregister_device(data->lm75[0]);
-       i2c_unregister_device(data->lm75[1]);
-
        return 0;
 }
 
index 46f5dfec8d0a29ef6ff40b151d19f4e3024320fd..9df48b70c70c7381fbcba60119182a39edb2a2cd 100644 (file)
@@ -1551,9 +1551,6 @@ static int w83793_remove(struct i2c_client *client)
        for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
                device_remove_file(dev, &w83793_temp[i].dev_attr);
 
-       i2c_unregister_device(data->lm75[0]);
-       i2c_unregister_device(data->lm75[1]);
-
        /* Decrease data reference counter */
        mutex_lock(&watchdog_data_mutex);
        kref_put(&data->kref, w83793_release_resources);
@@ -1565,7 +1562,7 @@ static int w83793_remove(struct i2c_client *client)
 static int
 w83793_detect_subclients(struct i2c_client *client)
 {
-       int i, id, err;
+       int i, id;
        int address = client->addr;
        u8 tmp;
        struct i2c_adapter *adapter = client->adapter;
@@ -1580,8 +1577,7 @@ w83793_detect_subclients(struct i2c_client *client)
                                        "invalid subclient "
                                        "address %d; must be 0x48-0x4f\n",
                                        force_subclients[i]);
-                               err = -EINVAL;
-                               goto ERROR_SC_0;
+                               return -EINVAL;
                        }
                }
                w83793_write_value(client, W83793_REG_I2C_SUBADDR,
@@ -1591,28 +1587,21 @@ w83793_detect_subclients(struct i2c_client *client)
 
        tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
        if (!(tmp & 0x08))
-               data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (tmp & 0x7));
+               data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
+                                                         0x48 + (tmp & 0x7));
        if (!(tmp & 0x80)) {
-               if ((data->lm75[0] != NULL)
+               if (!IS_ERR(data->lm75[0])
                    && ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
                        dev_err(&client->dev,
                                "duplicate addresses 0x%x, "
                                "use force_subclients\n", data->lm75[0]->addr);
-                       err = -ENODEV;
-                       goto ERROR_SC_1;
+                       return -ENODEV;
                }
-               data->lm75[1] = i2c_new_dummy(adapter,
-                                             0x48 + ((tmp >> 4) & 0x7));
+               data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
+                                                         0x48 + ((tmp >> 4) & 0x7));
        }
 
        return 0;
-
-       /* Undo inits in case of errors */
-
-ERROR_SC_1:
-       i2c_unregister_device(data->lm75[0]);
-ERROR_SC_0:
-       return err;
 }
 
 /* Return 0 if detection is successful, -ENODEV otherwise */
@@ -1945,9 +1934,6 @@ exit_remove:
 
        for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
                device_remove_file(dev, &w83793_temp[i].dev_attr);
-
-       i2c_unregister_device(data->lm75[0]);
-       i2c_unregister_device(data->lm75[1]);
 free_mem:
        kfree(data);
 exit:
index c0378c3de9a41a7ec65b994e18b907e56fc3b88e..91dfeba624855b32b3c3c43a2ad276ea87c294da 100644 (file)
@@ -164,6 +164,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6),
                .driver_data = (kernel_ulong_t)0,
        },
+       {
+               /* Lewisburg PCH */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa226),
+               .driver_data = (kernel_ulong_t)0,
+       },
        {
                /* Gemini Lake */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
@@ -199,6 +204,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Tiger Lake PCH */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        { 0 },
 };
 
index e55b902560dee28f854718b72f40ca48803de249..181e7ff1ec4fc374e31968e3a89c0fc9592a6d73 100644 (file)
@@ -1276,7 +1276,6 @@ int stm_source_register_device(struct device *parent,
 
 err:
        put_device(&src->dev);
-       kfree(src);
 
        return err;
 }
index d7fd76baec92666b0f51fcaf02af81815615593c..19ef2b0c682a9992b2ef74c669d7f45e80254eb5 100644 (file)
@@ -790,7 +790,10 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
 
 static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
 {
-       u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+       u32 val;
+
+       /* We do not support the SMBUS Quick command */
+       val = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
 
        if (adap->algo->reg_slave)
                val |= I2C_FUNC_SLAVE;
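
Masking I2C_FUNC_SMBUS_QUICK out of I2C_FUNC_SMBUS_EMUL advertises SMBus emulation without the zero-length Quick command this controller cannot generate. A hedged sketch of such a functionality callback (the demo_* name is invented):

/*
 * Hedged sketch: advertising SMBus emulation minus the Quick command for a
 * controller that cannot issue zero-length transfers.
 */
#include <linux/i2c.h>

static u32 demo_i2c_functionality(struct i2c_adapter *adap)
{
        return I2C_FUNC_I2C |
               (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
}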
index e7f9305b2dd9f661c8863839dee346aa49438c4e..f5f001738df5e2b1b8c6e9db4973a9ce8f065140 100644 (file)
@@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
 
        dev->disable_int(dev);
        dev->disable(dev);
+       synchronize_irq(dev->irq);
        dev->slave = NULL;
        pm_runtime_put(dev->dev);
 
index f2956936c3f2d3f5a28b21d79d3d56e578f90793..2e08b4722dc41bd2821a9bce4c2871d89de94755 100644 (file)
@@ -1194,19 +1194,28 @@ static acpi_status check_acpi_smo88xx_device(acpi_handle obj_handle,
        int i;
 
        status = acpi_get_object_info(obj_handle, &info);
-       if (!ACPI_SUCCESS(status) || !(info->valid & ACPI_VALID_HID))
+       if (ACPI_FAILURE(status))
                return AE_OK;
 
+       if (!(info->valid & ACPI_VALID_HID))
+               goto smo88xx_not_found;
+
        hid = info->hardware_id.string;
        if (!hid)
-               return AE_OK;
+               goto smo88xx_not_found;
 
        i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid);
        if (i < 0)
-               return AE_OK;
+               goto smo88xx_not_found;
+
+       kfree(info);
 
        *((bool *)return_value) = true;
        return AE_CTRL_TERMINATE;
+
+smo88xx_not_found:
+       kfree(info);
+       return AE_OK;
 }
 
 static bool is_dell_system_with_lis3lv02d(void)
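
acpi_get_object_info() returns a buffer the caller must kfree(), which is what the added labels above guarantee on every exit path. A hedged sketch of that cleanup shape (the callback name and the HID string are illustrative):

/*
 * Hedged sketch: acpi_get_object_info() allocates the info buffer, so every
 * path out of the walk callback frees it — the shape the fix above
 * converges on. Names and the HID string are illustrative.
 */
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/string.h>

static acpi_status demo_check_hid(acpi_handle handle, u32 nesting_level,
                                  void *context, void **return_value)
{
        struct acpi_device_info *info;
        acpi_status status;
        bool match = false;

        status = acpi_get_object_info(handle, &info);
        if (ACPI_FAILURE(status))
                return AE_OK;                   /* nothing was allocated */

        if ((info->valid & ACPI_VALID_HID) && info->hardware_id.string)
                match = !strcmp(info->hardware_id.string, "SMO8800");

        kfree(info);                            /* caller owns the buffer */

        if (!match)
                return AE_OK;

        *((bool *)context) = true;
        return AE_CTRL_TERMINATE;
}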
index 252edb433fdfb14f43a777763f3e99e5ae7014d8..29eae1bf4f861a39078091644aa329326aa1c2ed 100644 (file)
@@ -234,6 +234,10 @@ static const struct i2c_adapter_quirks mt7622_i2c_quirks = {
        .max_num_msgs = 255,
 };
 
+static const struct i2c_adapter_quirks mt8183_i2c_quirks = {
+       .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
 static const struct mtk_i2c_compatible mt2712_compat = {
        .regs = mt_i2c_regs_v1,
        .pmic_i2c = 0,
@@ -298,6 +302,7 @@ static const struct mtk_i2c_compatible mt8173_compat = {
 };
 
 static const struct mtk_i2c_compatible mt8183_compat = {
+       .quirks = &mt8183_i2c_quirks,
        .regs = mt_i2c_regs_v2,
        .pmic_i2c = 0,
        .dcm = 0,
@@ -870,7 +875,11 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
 
 static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+       if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
+               return I2C_FUNC_I2C |
+                       (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+       else
+               return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
 static const struct i2c_algorithm mtk_i2c_algorithm = {
index c46c4bddc7ca360f2241aa1d213da9d8b9dc2486..cba325eb852fed527fe4c73d72fb718113db5097 100644 (file)
@@ -91,7 +91,7 @@
 #define SB800_PIIX4_PORT_IDX_MASK      0x06
 #define SB800_PIIX4_PORT_IDX_SHIFT     1
 
-/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
 #define SB800_PIIX4_PORT_IDX_KERNCZ            0x02
 #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ       0x18
 #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ      3
@@ -358,18 +358,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
        /* Find which register is used for port selection */
        if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD ||
            PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) {
-               switch (PIIX4_dev->device) {
-               case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+               if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
+                   (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+                    PIIX4_dev->revision >= 0x1F)) {
                        piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
                        piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
                        piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
-                       break;
-               case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
-               default:
+               } else {
                        piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
                        piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
                        piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
-                       break;
                }
        } else {
                if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
index f26ed495d38420b3864c997945940941dcf5e754..9c440fa6a3dd109349a86365a678e46ffe8b89f7 100644 (file)
@@ -832,7 +832,7 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
  */
 void i2c_unregister_device(struct i2c_client *client)
 {
-       if (!client)
+       if (IS_ERR_OR_NULL(client))
                return;
 
        if (client->dev.of_node) {
index 69cc040c3a1c9fd9e7aec39486cc4edb0b0c98da..9e2e1406f85ece3c85768ed1ec5f1493120389ff 100644 (file)
@@ -200,6 +200,59 @@ struct i3c_device *dev_to_i3cdev(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dev_to_i3cdev);
 
+/**
+ * i3c_device_match_id() - Returns the i3c_device_id entry matching @i3cdev
+ * @i3cdev: I3C device
+ * @id_table: I3C device match table
+ *
+ * Return: a pointer to an i3c_device_id object or NULL if there's no match.
+ */
+const struct i3c_device_id *
+i3c_device_match_id(struct i3c_device *i3cdev,
+                   const struct i3c_device_id *id_table)
+{
+       struct i3c_device_info devinfo;
+       const struct i3c_device_id *id;
+
+       i3c_device_get_info(i3cdev, &devinfo);
+
+       /*
+        * The lower 32bits of the provisional ID is just filled with a random
+        * value, try to match using DCR info.
+        */
+       if (!I3C_PID_RND_LOWER_32BITS(devinfo.pid)) {
+               u16 manuf = I3C_PID_MANUF_ID(devinfo.pid);
+               u16 part = I3C_PID_PART_ID(devinfo.pid);
+               u16 ext_info = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+               /* First try to match by manufacturer/part ID. */
+               for (id = id_table; id->match_flags != 0; id++) {
+                       if ((id->match_flags & I3C_MATCH_MANUF_AND_PART) !=
+                           I3C_MATCH_MANUF_AND_PART)
+                               continue;
+
+                       if (manuf != id->manuf_id || part != id->part_id)
+                               continue;
+
+                       if ((id->match_flags & I3C_MATCH_EXTRA_INFO) &&
+                           ext_info != id->extra_info)
+                               continue;
+
+                       return id;
+               }
+       }
+
+       /* Fallback to DCR match. */
+       for (id = id_table; id->match_flags != 0; id++) {
+               if ((id->match_flags & I3C_MATCH_DCR) &&
+                   id->dcr == devinfo.dcr)
+                       return id;
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(i3c_device_match_id);
+
 /**
  * i3c_driver_register_with_owner() - register an I3C device driver
  *
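
With i3c_device_match_id() exported, an I3C device driver can recover its own match entry — and any per-chip data attached to it — instead of re-deriving the PID/DCR comparisons. A hedged sketch with an invented ID table; the manufacturer/part values are placeholders, and it assumes the exported declaration is visible via <linux/i3c/device.h>:

/*
 * Hedged sketch: an I3C driver probe using the newly exported
 * i3c_device_match_id() to look up its own table entry. The table
 * contents and demo_* names are illustrative.
 */
#include <linux/errno.h>
#include <linux/i3c/device.h>
#include <linux/mod_devicetable.h>

static const struct i3c_device_id demo_i3c_ids[] = {
        {
                .match_flags = I3C_MATCH_MANUF_AND_PART,
                .manuf_id = 0x123,      /* placeholder vendor */
                .part_id = 0x456,       /* placeholder part */
        },
        { /* sentinel: match_flags == 0 terminates the table */ },
};

static int demo_i3c_probe(struct i3c_device *i3cdev)
{
        const struct i3c_device_id *id =
                i3c_device_match_id(i3cdev, demo_i3c_ids);

        if (!id)
                return -ENODEV;

        /* id->data would carry per-chip configuration if the table set it */
        return 0;
}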
index d6f8b038a89678fcd40bce106e020c902aa4cbd3..5c051dba32a51fa8a00b3e7304cbc7bc9cad2aee 100644 (file)
@@ -123,7 +123,7 @@ static struct i3c_dev_desc *dev_to_i3cdesc(struct device *dev)
        if (dev->type == &i3c_device_type)
                return dev_to_i3cdev(dev)->desc;
 
-       master = container_of(dev, struct i3c_master_controller, dev);
+       master = dev_to_i3cmaster(dev);
 
        return master->this;
 }
@@ -276,51 +276,6 @@ static const struct device_type i3c_device_type = {
        .uevent = i3c_device_uevent,
 };
 
-static const struct i3c_device_id *
-i3c_device_match_id(struct i3c_device *i3cdev,
-                   const struct i3c_device_id *id_table)
-{
-       struct i3c_device_info devinfo;
-       const struct i3c_device_id *id;
-
-       i3c_device_get_info(i3cdev, &devinfo);
-
-       /*
-        * The lower 32bits of the provisional ID is just filled with a random
-        * value, try to match using DCR info.
-        */
-       if (!I3C_PID_RND_LOWER_32BITS(devinfo.pid)) {
-               u16 manuf = I3C_PID_MANUF_ID(devinfo.pid);
-               u16 part = I3C_PID_PART_ID(devinfo.pid);
-               u16 ext_info = I3C_PID_EXTRA_INFO(devinfo.pid);
-
-               /* First try to match by manufacturer/part ID. */
-               for (id = id_table; id->match_flags != 0; id++) {
-                       if ((id->match_flags & I3C_MATCH_MANUF_AND_PART) !=
-                           I3C_MATCH_MANUF_AND_PART)
-                               continue;
-
-                       if (manuf != id->manuf_id || part != id->part_id)
-                               continue;
-
-                       if ((id->match_flags & I3C_MATCH_EXTRA_INFO) &&
-                           ext_info != id->extra_info)
-                               continue;
-
-                       return id;
-               }
-       }
-
-       /* Fallback to DCR match. */
-       for (id = id_table; id->match_flags != 0; id++) {
-               if ((id->match_flags & I3C_MATCH_DCR) &&
-                   id->dcr == devinfo.dcr)
-                       return id;
-       }
-
-       return NULL;
-}
-
 static int i3c_device_match(struct device *dev, struct device_driver *drv)
 {
        struct i3c_device *i3cdev;
@@ -645,6 +600,8 @@ i3c_master_alloc_i2c_dev(struct i3c_master_controller *master,
 
        dev->common.master = master;
        dev->boardinfo = boardinfo;
+       dev->addr = boardinfo->base.addr;
+       dev->lvr = boardinfo->lvr;
 
        return dev;
 }
@@ -963,8 +920,8 @@ int i3c_master_defslvs_locked(struct i3c_master_controller *master)
 
        desc = defslvs->slaves;
        i3c_bus_for_each_i2cdev(bus, i2cdev) {
-               desc->lvr = i2cdev->boardinfo->lvr;
-               desc->static_addr = i2cdev->boardinfo->base.addr << 1;
+               desc->lvr = i2cdev->lvr;
+               desc->static_addr = i2cdev->addr << 1;
                desc++;
        }
 
@@ -1084,8 +1041,10 @@ static int i3c_master_getmwl_locked(struct i3c_master_controller *master,
        if (ret)
                goto out;
 
-       if (dest.payload.len != sizeof(*mwl))
-               return -EIO;
+       if (dest.payload.len != sizeof(*mwl)) {
+               ret = -EIO;
+               goto out;
+       }
 
        info->max_write_len = be16_to_cpu(mwl->len);
 
@@ -1631,8 +1590,8 @@ static void i3c_master_detach_free_devs(struct i3c_master_controller *master)
                                 common.node) {
                i3c_master_detach_i2c_dev(i2cdev);
                i3c_bus_set_addr_slot_status(&master->bus,
-                                       i2cdev->boardinfo->base.addr,
-                                       I3C_ADDR_SLOT_FREE);
+                                            i2cdev->addr,
+                                            I3C_ADDR_SLOT_FREE);
                i3c_master_free_i2c_dev(i2cdev);
        }
 }
@@ -2093,8 +2052,10 @@ static int of_populate_i3c_bus(struct i3c_master_controller *master)
 
        for_each_available_child_of_node(i3cbus_np, node) {
                ret = of_i3c_master_add_dev(master, node);
-               if (ret)
+               if (ret) {
+                       of_node_put(node);
                        return ret;
+               }
        }
 
        /*
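
for_each_available_child_of_node() takes a reference on each child and only drops it when the loop advances or completes, so an early return has to call of_node_put() itself — which is what the of_populate_i3c_bus() hunk above adds. A hedged sketch of the idiom (demo_* names invented):

/*
 * Hedged sketch: balancing the reference held by
 * for_each_available_child_of_node() when leaving the loop early.
 * demo_* names are illustrative.
 */
#include <linux/of.h>

static int demo_handle_child(struct device_node *np)
{
        return 0;       /* placeholder for per-child setup */
}

static int demo_walk_children(struct device_node *parent)
{
        struct device_node *child;
        int ret;

        for_each_available_child_of_node(parent, child) {
                ret = demo_handle_child(child);
                if (ret) {
                        of_node_put(child);     /* iterator won't drop it for us */
                        return ret;
                }
        }

        return 0;
}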
index 09912d75c6d5398e63ebd5148a6cc5441721d595..b0ff0e12d84ca0faa8a5a5f2ebfcc7b53cdbb1e2 100644 (file)
@@ -1033,12 +1033,12 @@ static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
                return -ENOMEM;
 
        data->index = pos;
-       master->addrs[pos] = dev->boardinfo->base.addr;
+       master->addrs[pos] = dev->addr;
        master->free_pos &= ~BIT(pos);
        i2c_dev_set_master_data(dev, data);
 
        writel(DEV_ADDR_TABLE_LEGACY_I2C_DEV |
-              DEV_ADDR_TABLE_STATIC_ADDR(dev->boardinfo->base.addr),
+              DEV_ADDR_TABLE_STATIC_ADDR(dev->addr),
               master->regs +
               DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
 
index 237f24adddc632bc3470df59572cb6a68a9198f9..10db0bf0655a9f71938f146f802857eac4c9c329 100644 (file)
@@ -903,7 +903,8 @@ static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
 static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
                                       u8 dyn_addr)
 {
-       u32 activedevs, rr;
+       unsigned long activedevs;
+       u32 rr;
        int i;
 
        if (!dyn_addr) {
@@ -913,13 +914,10 @@ static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
                return ffs(master->free_rr_slots) - 1;
        }
 
-       activedevs = readl(master->regs + DEVS_CTRL) &
-                    DEVS_CTRL_DEVS_ACTIVE_MASK;
-
-       for (i = 1; i <= master->maxdevs; i++) {
-               if (!(BIT(i) & activedevs))
-                       continue;
+       activedevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
+       activedevs &= ~BIT(0);
 
+       for_each_set_bit(i, &activedevs, master->maxdevs + 1) {
                rr = readl(master->regs + DEV_ID_RR0(i));
                if (!(rr & DEV_ID_RR0_IS_I3C) ||
                    DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
@@ -1005,9 +1003,9 @@ static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
        master->free_rr_slots &= ~BIT(slot);
        i2c_dev_set_master_data(dev, data);
 
-       writel(prepare_rr0_dev_address(dev->boardinfo->base.addr),
+       writel(prepare_rr0_dev_address(dev->addr),
               master->regs + DEV_ID_RR0(data->id));
-       writel(dev->boardinfo->lvr, master->regs + DEV_ID_RR2(data->id));
+       writel(dev->lvr, master->regs + DEV_ID_RR2(data->id));
        writel(readl(master->regs + DEVS_CTRL) |
               DEVS_CTRL_DEV_ACTIVE(data->id),
               master->regs + DEVS_CTRL);
@@ -1126,18 +1124,16 @@ static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
 static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
 {
        struct cdns_i3c_master *master = to_cdns_i3c_master(m);
-       u32 olddevs, newdevs;
+       unsigned long olddevs, newdevs;
        int ret, slot;
        u8 addrs[MAX_DEVS] = { };
        u8 last_addr = 0;
 
        olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
+       olddevs |= BIT(0);
 
        /* Prepare RR slots before launching DAA. */
-       for (slot = 1; slot <= master->maxdevs; slot++) {
-               if (olddevs & BIT(slot))
-                       continue;
-
+       for_each_clear_bit(slot, &olddevs, master->maxdevs + 1) {
                ret = i3c_master_get_free_addr(m, last_addr + 1);
                if (ret < 0)
                        return -ENOSPC;
@@ -1161,10 +1157,8 @@ static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
         * Clear all retaining registers filled during DAA. We already
         * have the addressed assigned to them in the addrs array.
         */
-       for (slot = 1; slot <= master->maxdevs; slot++) {
-               if (newdevs & BIT(slot))
-                       i3c_master_add_i3c_dev_locked(m, addrs[slot]);
-       }
+       for_each_set_bit(slot, &newdevs, master->maxdevs + 1)
+               i3c_master_add_i3c_dev_locked(m, addrs[slot]);
 
        /*
         * Clear slots that ended up not being used. Can be caused by I3C
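
Treating the active-device mask as an unsigned long lets the DAA code above use the generic bitmap iterators instead of open-coded BIT() tests. A hedged, generic sketch of the two helpers (demo_* names invented):

/*
 * Hedged sketch: walking a hardware bitmask with for_each_set_bit() and
 * for_each_clear_bit(), as the Cadence DAA rework above now does. Bit 0 is
 * reserved for the master in that driver, hence the masking.
 */
#include <linux/bitops.h>
#include <linux/printk.h>

static void demo_scan_slots(unsigned long active, unsigned int maxdevs)
{
        unsigned int slot;

        active &= ~BIT(0);                      /* slot 0: the master itself */

        for_each_set_bit(slot, &active, maxdevs + 1)
                pr_debug("slot %u already has a device\n", slot);

        for_each_clear_bit(slot, &active, maxdevs + 1)
                pr_debug("slot %u is free for DAA\n", slot);
}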
index 7e3286265a3845346d11b82d7d8c2b3fe72042fe..f0af3a42f53cf6d605b3c1ad413468bcdd9138dd 100644 (file)
@@ -958,7 +958,7 @@ config TI_ADC161S626
 
 config TI_ADS1015
        tristate "Texas Instruments ADS1015 ADC"
-       depends on I2C && !SENSORS_ADS1015
+       depends on I2C
        select REGMAP_I2C
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
index 1db5ad3d9580ee00a4c8ee65da2769146bcf731b..8c1931a57f4a598532d19e2d373e5539371f5edf 100644 (file)
@@ -1962,6 +1962,10 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
                struct sockaddr_in s_laddr, *s_raddr;
                const struct in_ifaddr *ifa;
 
+               if (!in_dev) {
+                       rv = -ENODEV;
+                       goto out;
+               }
                memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
                s_raddr = (struct sockaddr_in *)&id->remote_addr;
 
@@ -1991,22 +1995,27 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
                struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
                        *s_raddr = &to_sockaddr_in6(id->remote_addr);
 
+               if (!in6_dev) {
+                       rv = -ENODEV;
+                       goto out;
+               }
                siw_dbg(id->device,
                        "laddr %pI6:%d, raddr %pI6:%d\n",
                        &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
                        &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
 
-               read_lock_bh(&in6_dev->lock);
+               rtnl_lock();
                list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
-                       struct sockaddr_in6 bind_addr;
-
+                       if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
+                               continue;
                        if (ipv6_addr_any(&s_laddr->sin6_addr) ||
                            ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
-                               bind_addr.sin6_family = AF_INET6;
-                               bind_addr.sin6_port = s_laddr->sin6_port;
-                               bind_addr.sin6_flowinfo = 0;
-                               bind_addr.sin6_addr = ifp->addr;
-                               bind_addr.sin6_scope_id = dev->ifindex;
+                               struct sockaddr_in6 bind_addr  = {
+                                       .sin6_family = AF_INET6,
+                                       .sin6_port = s_laddr->sin6_port,
+                                       .sin6_flowinfo = 0,
+                                       .sin6_addr = ifp->addr,
+                                       .sin6_scope_id = dev->ifindex };
 
                                rv = siw_listen_address(id, backlog,
                                                (struct sockaddr *)&bind_addr,
@@ -2015,12 +2024,12 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
                                        listeners++;
                        }
                }
-               read_unlock_bh(&in6_dev->lock);
-
+               rtnl_unlock();
                in6_dev_put(in6_dev);
        } else {
-               return -EAFNOSUPPORT;
+               rv = -EAFNOSUPPORT;
        }
+out:
        if (listeners)
                rv = 0;
        else if (!rv)
index e15cdcd8cb3c8c64597707aa54398444f6a1ac4c..a4ddeade8ac4d72691d13f1a351f8b688e00509d 100644 (file)
@@ -182,6 +182,7 @@ config INTEL_IOMMU
        select IOMMU_IOVA
        select NEED_DMA_MAP_STATE
        select DMAR_TABLE
+       select SWIOTLB
        help
          DMA remapping (DMAR) devices support enables independent address
          translations for Direct Memory Access (DMA) from devices.
index f13f36ae1af652836254ba07315f99303fba6f88..4f405f926e739cdd4d00937f9b57b4d42ee06b30 100644 (file)
@@ -10,13 +10,14 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
 obj-$(CONFIG_IOMMU_IOVA) += iova.o
 obj-$(CONFIG_OF_IOMMU) += of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
-obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o
 obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
+obj-$(CONFIG_INTEL_IOMMU) += intel-trace.o
 obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel-iommu-debugfs.o
 obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
 obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
index b607a92791d3f8b48a9c1caeaea48986753ead64..1ed3b98324bac09ce8399b2ee0ad445e08802d20 100644 (file)
@@ -436,7 +436,7 @@ static int iommu_init_device(struct device *dev)
         * invalid address), we ignore the capability for the device so
         * it'll be forced to go into translation mode.
         */
-       if ((iommu_pass_through || !amd_iommu_force_isolation) &&
+       if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
            dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
                struct amd_iommu *iommu;
 
@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
        iommu_completion_wait(iommu);
 }
 
+static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
+{
+       struct iommu_cmd cmd;
+
+       build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+                             dom_id, 1);
+       iommu_queue_command(iommu, &cmd);
+
+       iommu_completion_wait(iommu);
+}
+
 static void amd_iommu_flush_all(struct amd_iommu *iommu)
 {
        struct iommu_cmd cmd;
@@ -1424,18 +1435,21 @@ static void free_pagetable(struct protection_domain *domain)
  * another level increases the size of the address space by 9 bits to a size up
  * to 64 bits.
  */
-static bool increase_address_space(struct protection_domain *domain,
+static void increase_address_space(struct protection_domain *domain,
                                   gfp_t gfp)
 {
+       unsigned long flags;
        u64 *pte;
 
-       if (domain->mode == PAGE_MODE_6_LEVEL)
+       spin_lock_irqsave(&domain->lock, flags);
+
+       if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
                /* address space already 64 bit large */
-               return false;
+               goto out;
 
        pte = (void *)get_zeroed_page(gfp);
        if (!pte)
-               return false;
+               goto out;
 
        *pte             = PM_LEVEL_PDE(domain->mode,
                                        iommu_virt_to_phys(domain->pt_root));
@@ -1443,7 +1457,10 @@ static bool increase_address_space(struct protection_domain *domain,
        domain->mode    += 1;
        domain->updated  = true;
 
-       return true;
+out:
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       return;
 }
 
 static u64 *alloc_pte(struct protection_domain *domain,
@@ -1873,6 +1890,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
 {
        u64 pte_root = 0;
        u64 flags = 0;
+       u32 old_domid;
 
        if (domain->mode != PAGE_MODE_NONE)
                pte_root = iommu_virt_to_phys(domain->pt_root);
@@ -1922,8 +1940,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
        flags &= ~DEV_DOMID_MASK;
        flags |= domain->id;
 
+       old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
        amd_iommu_dev_table[devid].data[1]  = flags;
        amd_iommu_dev_table[devid].data[0]  = pte_root;
+
+       /*
+        * A kdump kernel might be replacing a domain ID that was copied from
+        * the previous kernel--if so, it needs to flush the translation cache
+        * entries for the old domain ID that is being overwritten
+        */
+       if (old_domid) {
+               struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+               amd_iommu_flush_tlb_domid(iommu, old_domid);
+       }
 }
 
 static void clear_dte_entry(u16 devid)
@@ -2226,7 +2256,7 @@ static int amd_iommu_add_device(struct device *dev)
 
        BUG_ON(!dev_data);
 
-       if (iommu_pass_through || dev_data->iommu_v2)
+       if (dev_data->iommu_v2)
                iommu_request_dm_for_dev(dev);
 
        /* Domains are initialized for this device - have a look what we ended up with */
@@ -2547,7 +2577,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 
                        bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
                        phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
-                       ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
+                       ret = iommu_map_page(domain, bus_addr, phys_addr,
+                                            PAGE_SIZE, prot,
+                                            GFP_ATOMIC | __GFP_NOWARN);
                        if (ret)
                                goto out_unmap;
 
@@ -2805,7 +2837,7 @@ int __init amd_iommu_init_api(void)
 
 int __init amd_iommu_init_dma_ops(void)
 {
-       swiotlb        = (iommu_pass_through || sme_me_mask) ? 1 : 0;
+       swiotlb        = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
        iommu_detected = 1;
 
        if (amd_iommu_unmap_flush)
@@ -3055,7 +3087,8 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 }
 
 static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-                          size_t page_size)
+                             size_t page_size,
+                             struct iommu_iotlb_gather *gather)
 {
        struct protection_domain *domain = to_pdomain(dom);
        size_t unmap_size;
@@ -3196,9 +3229,10 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
        domain_flush_complete(dom);
 }
 
-static void amd_iommu_iotlb_range_add(struct iommu_domain *domain,
-                                     unsigned long iova, size_t size)
+static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
+                                struct iommu_iotlb_gather *gather)
 {
+       amd_iommu_flush_iotlb_all(domain);
 }
 
 const struct iommu_ops amd_iommu_ops = {
@@ -3219,8 +3253,7 @@ const struct iommu_ops amd_iommu_ops = {
        .is_attach_deferred = amd_iommu_is_attach_deferred,
        .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
        .flush_iotlb_all = amd_iommu_flush_iotlb_all,
-       .iotlb_range_add = amd_iommu_iotlb_range_add,
-       .iotlb_sync = amd_iommu_flush_iotlb_all,
+       .iotlb_sync = amd_iommu_iotlb_sync,
 };
 
 /*****************************************************************************
@@ -4313,13 +4346,62 @@ static const struct irq_domain_ops amd_ir_domain_ops = {
        .deactivate = irq_remapping_deactivate,
 };
 
+int amd_iommu_activate_guest_mode(void *data)
+{
+       struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+       struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+
+       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+           !entry || entry->lo.fields_vapic.guest_mode)
+               return 0;
+
+       entry->lo.val = 0;
+       entry->hi.val = 0;
+
+       entry->lo.fields_vapic.guest_mode  = 1;
+       entry->lo.fields_vapic.ga_log_intr = 1;
+       entry->hi.fields.ga_root_ptr       = ir_data->ga_root_ptr;
+       entry->hi.fields.vector            = ir_data->ga_vector;
+       entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;
+
+       return modify_irte_ga(ir_data->irq_2_irte.devid,
+                             ir_data->irq_2_irte.index, entry, NULL);
+}
+EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
+
+int amd_iommu_deactivate_guest_mode(void *data)
+{
+       struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+       struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+       struct irq_cfg *cfg = ir_data->cfg;
+
+       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+           !entry || !entry->lo.fields_vapic.guest_mode)
+               return 0;
+
+       entry->lo.val = 0;
+       entry->hi.val = 0;
+
+       entry->lo.fields_remap.dm          = apic->irq_dest_mode;
+       entry->lo.fields_remap.int_type    = apic->irq_delivery_mode;
+       entry->hi.fields.vector            = cfg->vector;
+       entry->lo.fields_remap.destination =
+                               APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
+       entry->hi.fields.destination =
+                               APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
+
+       return modify_irte_ga(ir_data->irq_2_irte.devid,
+                             ir_data->irq_2_irte.index, entry, NULL);
+}
+EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
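
The two exports above give interrupt-remapping users a way to flip an IRTE between remapped and guest (vAPIC) delivery. A hedged sketch of a hypothetical caller follows; the function name and parameters are invented, and the field usage simply mirrors the amd_ir_set_vcpu_affinity() changes further down.

/* Hypothetical illustration only; not an in-tree call site. */
static int example_toggle_guest_mode(struct amd_ir_data *ir_data, bool on,
                                     u64 vapic_page_pa, int vector, u32 ga_tag)
{
        if (!on)
                return amd_iommu_deactivate_guest_mode(ir_data);

        ir_data->ga_root_ptr = vapic_page_pa >> 12;
        ir_data->ga_vector   = vector;
        ir_data->ga_tag      = ga_tag;
        return amd_iommu_activate_guest_mode(ir_data);
}
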
+
 static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
 {
+       int ret;
        struct amd_iommu *iommu;
        struct amd_iommu_pi_data *pi_data = vcpu_info;
        struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
        struct amd_ir_data *ir_data = data->chip_data;
-       struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
        struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
        struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
 
@@ -4330,6 +4412,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
        if (!dev_data || !dev_data->use_vapic)
                return 0;
 
+       ir_data->cfg = irqd_cfg(data);
        pi_data->ir_data = ir_data;
 
        /* Note:
@@ -4348,37 +4431,24 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
 
        pi_data->prev_ga_tag = ir_data->cached_ga_tag;
        if (pi_data->is_guest_mode) {
-               /* Setting */
-               irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
-               irte->hi.fields.vector = vcpu_pi_info->vector;
-               irte->lo.fields_vapic.ga_log_intr = 1;
-               irte->lo.fields_vapic.guest_mode = 1;
-               irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
-
-               ir_data->cached_ga_tag = pi_data->ga_tag;
+               ir_data->ga_root_ptr = (pi_data->base >> 12);
+               ir_data->ga_vector = vcpu_pi_info->vector;
+               ir_data->ga_tag = pi_data->ga_tag;
+               ret = amd_iommu_activate_guest_mode(ir_data);
+               if (!ret)
+                       ir_data->cached_ga_tag = pi_data->ga_tag;
        } else {
-               /* Un-Setting */
-               struct irq_cfg *cfg = irqd_cfg(data);
-
-               irte->hi.val = 0;
-               irte->lo.val = 0;
-               irte->hi.fields.vector = cfg->vector;
-               irte->lo.fields_remap.guest_mode = 0;
-               irte->lo.fields_remap.destination =
-                               APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
-               irte->hi.fields.destination =
-                               APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
-               irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
-               irte->lo.fields_remap.dm = apic->irq_dest_mode;
+               ret = amd_iommu_deactivate_guest_mode(ir_data);
 
                /*
                 * This communicates the ga_tag back to the caller
                 * so that it can do all the necessary clean up.
                 */
-               ir_data->cached_ga_tag = 0;
+               if (!ret)
+                       ir_data->cached_ga_tag = 0;
        }
 
-       return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
+       return ret;
 }
 
 
diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h
new file mode 100644 (file)
index 0000000..12d540d
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef AMD_IOMMU_H
+#define AMD_IOMMU_H
+
+int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line);
+
+#ifdef CONFIG_DMI
+void amd_iommu_apply_ivrs_quirks(void);
+#else
+static inline void amd_iommu_apply_ivrs_quirks(void) { }
+#endif
+
+#endif
index 4413aa67000e576a43a7c8d0e8fd79ee2cc0574a..568c52317757ca2924598b69c7329dcfad940356 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/irq_remapping.h>
 
 #include <linux/crash_dump.h>
+#include "amd_iommu.h"
 #include "amd_iommu_proto.h"
 #include "amd_iommu_types.h"
 #include "irq_remapping.h"
@@ -1002,7 +1003,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
        set_iommu_for_device(iommu, devid);
 }
 
-static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
+int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
 {
        struct devid_map *entry;
        struct list_head *list;
@@ -1153,6 +1154,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
        if (ret)
                return ret;
 
+       amd_iommu_apply_ivrs_quirks();
+
        /*
         * First save the recommended feature enable bits from ACPI
         */
diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c
new file mode 100644 (file)
index 0000000..c235f79
--- /dev/null
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Quirks for AMD IOMMU
+ *
+ * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@canonical.com>
+ */
+
+#ifdef CONFIG_DMI
+#include <linux/dmi.h>
+
+#include "amd_iommu.h"
+
+#define IVHD_SPECIAL_IOAPIC            1
+
+struct ivrs_quirk_entry {
+       u8 id;
+       u16 devid;
+};
+
+enum {
+       DELL_INSPIRON_7375 = 0,
+       DELL_LATITUDE_5495,
+       LENOVO_IDEAPAD_330S_15ARR,
+};
+
+static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = {
+       /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */
+       [DELL_INSPIRON_7375] = {
+               { .id = 4, .devid = 0xa0 },
+               { .id = 5, .devid = 0x2 },
+               {}
+       },
+       /* ivrs_ioapic[4]=00:14.0 */
+       [DELL_LATITUDE_5495] = {
+               { .id = 4, .devid = 0xa0 },
+               {}
+       },
+       /* ivrs_ioapic[32]=00:14.0 */
+       [LENOVO_IDEAPAD_330S_15ARR] = {
+               { .id = 32, .devid = 0xa0 },
+               {}
+       },
+       {}
+};
+
+static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
+{
+       const struct ivrs_quirk_entry *i;
+
+       for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
+               add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
+
+       return 0;
+}
+
+static const struct dmi_system_id ivrs_quirks[] __initconst = {
+       {
+               .callback = ivrs_ioapic_quirk_cb,
+               .ident = "Dell Inspiron 7375",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"),
+               },
+               .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375],
+       },
+       {
+               .callback = ivrs_ioapic_quirk_cb,
+               .ident = "Dell Latitude 5495",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"),
+               },
+               .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
+       },
+       {
+               .callback = ivrs_ioapic_quirk_cb,
+               .ident = "Lenovo ideapad 330S-15ARR",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "81FB"),
+               },
+               .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR],
+       },
+       {}
+};
+
+void __init amd_iommu_apply_ivrs_quirks(void)
+{
+       dmi_check_system(ivrs_quirks);
+}
+#endif
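
For reference, extending this quirk list for another machine would look roughly like the sketch below. The vendor string, product name, IOAPIC id and devid are all invented, and a real addition would go into the existing ivrs_ioapic_quirks/ivrs_quirks arrays rather than new ones.

/* Illustrative only: identifiers and values are made up. */
static const struct ivrs_quirk_entry example_ioapic_quirks[] __initconst = {
        /* ivrs_ioapic[7]=00:14.0 */
        { .id = 7, .devid = 0xa0 },
        {}
};

static const struct dmi_system_id example_quirks[] __initconst = {
        {
                .callback = ivrs_ioapic_quirk_cb,
                .ident = "Example Vendor Example Board",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
                },
                .driver_data = (void *)example_ioapic_quirks,
        },
        {}
};
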
index 64edd5a9694cc06400f70692a654f5c006956ddd..9ac229e92b07475f30010338e427cb146821edc1 100644 (file)
@@ -873,6 +873,15 @@ struct amd_ir_data {
        struct msi_msg msi_entry;
        void *entry;    /* Pointer to union irte or struct irte_ga */
        void *ref;      /* Pointer to the actual irte */
+
+       /**
+        * Store information for activate/de-activate
+        * Guest virtual APIC mode during runtime.
+        */
+       struct irq_cfg *cfg;
+       int ga_vector;
+       int ga_root_ptr;
+       int ga_tag;
 };
 
 struct amd_irte_ops {
diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c
new file mode 100644 (file)
index 0000000..5c87a38
--- /dev/null
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Miscellaneous Arm SMMU implementation and integration quirks
+// Copyright (C) 2019 Arm Limited
+
+#define pr_fmt(fmt) "arm-smmu: " fmt
+
+#include <linux/bitfield.h>
+#include <linux/of.h>
+
+#include "arm-smmu.h"
+
+
+static int arm_smmu_gr0_ns(int offset)
+{
+       switch (offset) {
+       case ARM_SMMU_GR0_sCR0:
+       case ARM_SMMU_GR0_sACR:
+       case ARM_SMMU_GR0_sGFSR:
+       case ARM_SMMU_GR0_sGFSYNR0:
+       case ARM_SMMU_GR0_sGFSYNR1:
+       case ARM_SMMU_GR0_sGFSYNR2:
+               return offset + 0x400;
+       default:
+               return offset;
+       }
+}
+
+static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page,
+                           int offset)
+{
+       if (page == ARM_SMMU_GR0)
+               offset = arm_smmu_gr0_ns(offset);
+       return readl_relaxed(arm_smmu_page(smmu, page) + offset);
+}
+
+static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
+                             int offset, u32 val)
+{
+       if (page == ARM_SMMU_GR0)
+               offset = arm_smmu_gr0_ns(offset);
+       writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
+}
+
+/* Since we don't care for sGFAR, we can do without 64-bit accessors */
+static const struct arm_smmu_impl calxeda_impl = {
+       .read_reg = arm_smmu_read_ns,
+       .write_reg = arm_smmu_write_ns,
+};
+
+
+struct cavium_smmu {
+       struct arm_smmu_device smmu;
+       u32 id_base;
+};
+
+static int cavium_cfg_probe(struct arm_smmu_device *smmu)
+{
+       static atomic_t context_count = ATOMIC_INIT(0);
+       struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);
+       /*
+        * Cavium CN88xx erratum #27704.
+        * Ensure ASID and VMID allocation is unique across all SMMUs in
+        * the system.
+        */
+       cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
+       dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
+
+       return 0;
+}
+
+static int cavium_init_context(struct arm_smmu_domain *smmu_domain)
+{
+       struct cavium_smmu *cs = container_of(smmu_domain->smmu,
+                                             struct cavium_smmu, smmu);
+
+       if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
+               smmu_domain->cfg.vmid += cs->id_base;
+       else
+               smmu_domain->cfg.asid += cs->id_base;
+
+       return 0;
+}
+
+static const struct arm_smmu_impl cavium_impl = {
+       .cfg_probe = cavium_cfg_probe,
+       .init_context = cavium_init_context,
+};
+
+static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+       struct cavium_smmu *cs;
+
+       cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL);
+       if (!cs)
+               return ERR_PTR(-ENOMEM);
+
+       cs->smmu = *smmu;
+       cs->smmu.impl = &cavium_impl;
+
+       devm_kfree(smmu->dev, smmu);
+
+       return &cs->smmu;
+}
+
+
+#define ARM_MMU500_ACTLR_CPRE          (1 << 1)
+
+#define ARM_MMU500_ACR_CACHE_LOCK      (1 << 26)
+#define ARM_MMU500_ACR_S2CRB_TLBEN     (1 << 10)
+#define ARM_MMU500_ACR_SMTNMB_TLBEN    (1 << 8)
+
+static int arm_mmu500_reset(struct arm_smmu_device *smmu)
+{
+       u32 reg, major;
+       int i;
+       /*
+        * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
+        * writes to the context bank ACTLRs will stick. And we just hope that
+        * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
+        */
+       reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
+       major = FIELD_GET(ID7_MAJOR, reg);
+       reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
+       if (major >= 2)
+               reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+       /*
+        * Allow unmatched Stream IDs to allocate bypass
+        * TLB entries for reduced latency.
+        */
+       reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);
+
+       /*
+        * Disable MMU-500's not-particularly-beneficial next-page
+        * prefetcher for the sake of errata #841119 and #826419.
+        */
+       for (i = 0; i < smmu->num_context_banks; ++i) {
+               reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
+               reg &= ~ARM_MMU500_ACTLR_CPRE;
+               arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
+       }
+
+       return 0;
+}
+
+static const struct arm_smmu_impl arm_mmu500_impl = {
+       .reset = arm_mmu500_reset,
+};
+
+
+struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+       /*
+        * We will inevitably have to combine model-specific implementation
+        * quirks with platform-specific integration quirks, but everything
+        * we currently support happens to work out as straightforward
+        * mutually-exclusive assignments.
+        */
+       switch (smmu->model) {
+       case ARM_MMU500:
+               smmu->impl = &arm_mmu500_impl;
+               break;
+       case CAVIUM_SMMUV2:
+               return cavium_smmu_impl_init(smmu);
+       default:
+               break;
+       }
+
+       if (of_property_read_bool(smmu->dev->of_node,
+                                 "calxeda,smmu-secure-config-access"))
+               smmu->impl = &calxeda_impl;
+
+       return smmu;
+}
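
The new arm_smmu_impl hooks are the extension point for both model-specific and integration-specific behaviour. A hedged sketch of how a hypothetical platform quirk could slot in; the "example,smmu-500" compatible, the function and its reset behaviour are invented for illustration.

static int example_smmu_reset(struct arm_smmu_device *smmu)
{
        /* e.g. clear an integration-specific control bit before global reset */
        return 0;
}

static const struct arm_smmu_impl example_impl = {
        .reset = example_smmu_reset,
};

/* ...and in arm_smmu_impl_init(), something along the lines of:
 *
 *      if (of_device_is_compatible(smmu->dev->of_node, "example,smmu-500"))
 *              smmu->impl = &example_impl;
 */
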
diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h
deleted file mode 100644 (file)
index 1c278f7..0000000
+++ /dev/null
@@ -1,210 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * IOMMU API for ARM architected SMMU implementations.
- *
- * Copyright (C) 2013 ARM Limited
- *
- * Author: Will Deacon <will.deacon@arm.com>
- */
-
-#ifndef _ARM_SMMU_REGS_H
-#define _ARM_SMMU_REGS_H
-
-/* Configuration registers */
-#define ARM_SMMU_GR0_sCR0              0x0
-#define sCR0_CLIENTPD                  (1 << 0)
-#define sCR0_GFRE                      (1 << 1)
-#define sCR0_GFIE                      (1 << 2)
-#define sCR0_EXIDENABLE                        (1 << 3)
-#define sCR0_GCFGFRE                   (1 << 4)
-#define sCR0_GCFGFIE                   (1 << 5)
-#define sCR0_USFCFG                    (1 << 10)
-#define sCR0_VMIDPNE                   (1 << 11)
-#define sCR0_PTM                       (1 << 12)
-#define sCR0_FB                                (1 << 13)
-#define sCR0_VMID16EN                  (1 << 31)
-#define sCR0_BSU_SHIFT                 14
-#define sCR0_BSU_MASK                  0x3
-
-/* Auxiliary Configuration register */
-#define ARM_SMMU_GR0_sACR              0x10
-
-/* Identification registers */
-#define ARM_SMMU_GR0_ID0               0x20
-#define ARM_SMMU_GR0_ID1               0x24
-#define ARM_SMMU_GR0_ID2               0x28
-#define ARM_SMMU_GR0_ID3               0x2c
-#define ARM_SMMU_GR0_ID4               0x30
-#define ARM_SMMU_GR0_ID5               0x34
-#define ARM_SMMU_GR0_ID6               0x38
-#define ARM_SMMU_GR0_ID7               0x3c
-#define ARM_SMMU_GR0_sGFSR             0x48
-#define ARM_SMMU_GR0_sGFSYNR0          0x50
-#define ARM_SMMU_GR0_sGFSYNR1          0x54
-#define ARM_SMMU_GR0_sGFSYNR2          0x58
-
-#define ID0_S1TS                       (1 << 30)
-#define ID0_S2TS                       (1 << 29)
-#define ID0_NTS                                (1 << 28)
-#define ID0_SMS                                (1 << 27)
-#define ID0_ATOSNS                     (1 << 26)
-#define ID0_PTFS_NO_AARCH32            (1 << 25)
-#define ID0_PTFS_NO_AARCH32S           (1 << 24)
-#define ID0_CTTW                       (1 << 14)
-#define ID0_NUMIRPT_SHIFT              16
-#define ID0_NUMIRPT_MASK               0xff
-#define ID0_NUMSIDB_SHIFT              9
-#define ID0_NUMSIDB_MASK               0xf
-#define ID0_EXIDS                      (1 << 8)
-#define ID0_NUMSMRG_SHIFT              0
-#define ID0_NUMSMRG_MASK               0xff
-
-#define ID1_PAGESIZE                   (1 << 31)
-#define ID1_NUMPAGENDXB_SHIFT          28
-#define ID1_NUMPAGENDXB_MASK           7
-#define ID1_NUMS2CB_SHIFT              16
-#define ID1_NUMS2CB_MASK               0xff
-#define ID1_NUMCB_SHIFT                        0
-#define ID1_NUMCB_MASK                 0xff
-
-#define ID2_OAS_SHIFT                  4
-#define ID2_OAS_MASK                   0xf
-#define ID2_IAS_SHIFT                  0
-#define ID2_IAS_MASK                   0xf
-#define ID2_UBS_SHIFT                  8
-#define ID2_UBS_MASK                   0xf
-#define ID2_PTFS_4K                    (1 << 12)
-#define ID2_PTFS_16K                   (1 << 13)
-#define ID2_PTFS_64K                   (1 << 14)
-#define ID2_VMID16                     (1 << 15)
-
-#define ID7_MAJOR_SHIFT                        4
-#define ID7_MAJOR_MASK                 0xf
-
-/* Global TLB invalidation */
-#define ARM_SMMU_GR0_TLBIVMID          0x64
-#define ARM_SMMU_GR0_TLBIALLNSNH       0x68
-#define ARM_SMMU_GR0_TLBIALLH          0x6c
-#define ARM_SMMU_GR0_sTLBGSYNC         0x70
-#define ARM_SMMU_GR0_sTLBGSTATUS       0x74
-#define sTLBGSTATUS_GSACTIVE           (1 << 0)
-
-/* Stream mapping registers */
-#define ARM_SMMU_GR0_SMR(n)            (0x800 + ((n) << 2))
-#define SMR_VALID                      (1 << 31)
-#define SMR_MASK_SHIFT                 16
-#define SMR_ID_SHIFT                   0
-
-#define ARM_SMMU_GR0_S2CR(n)           (0xc00 + ((n) << 2))
-#define S2CR_CBNDX_SHIFT               0
-#define S2CR_CBNDX_MASK                        0xff
-#define S2CR_EXIDVALID                 (1 << 10)
-#define S2CR_TYPE_SHIFT                        16
-#define S2CR_TYPE_MASK                 0x3
-enum arm_smmu_s2cr_type {
-       S2CR_TYPE_TRANS,
-       S2CR_TYPE_BYPASS,
-       S2CR_TYPE_FAULT,
-};
-
-#define S2CR_PRIVCFG_SHIFT             24
-#define S2CR_PRIVCFG_MASK              0x3
-enum arm_smmu_s2cr_privcfg {
-       S2CR_PRIVCFG_DEFAULT,
-       S2CR_PRIVCFG_DIPAN,
-       S2CR_PRIVCFG_UNPRIV,
-       S2CR_PRIVCFG_PRIV,
-};
-
-/* Context bank attribute registers */
-#define ARM_SMMU_GR1_CBAR(n)           (0x0 + ((n) << 2))
-#define CBAR_VMID_SHIFT                        0
-#define CBAR_VMID_MASK                 0xff
-#define CBAR_S1_BPSHCFG_SHIFT          8
-#define CBAR_S1_BPSHCFG_MASK           3
-#define CBAR_S1_BPSHCFG_NSH            3
-#define CBAR_S1_MEMATTR_SHIFT          12
-#define CBAR_S1_MEMATTR_MASK           0xf
-#define CBAR_S1_MEMATTR_WB             0xf
-#define CBAR_TYPE_SHIFT                        16
-#define CBAR_TYPE_MASK                 0x3
-#define CBAR_TYPE_S2_TRANS             (0 << CBAR_TYPE_SHIFT)
-#define CBAR_TYPE_S1_TRANS_S2_BYPASS   (1 << CBAR_TYPE_SHIFT)
-#define CBAR_TYPE_S1_TRANS_S2_FAULT    (2 << CBAR_TYPE_SHIFT)
-#define CBAR_TYPE_S1_TRANS_S2_TRANS    (3 << CBAR_TYPE_SHIFT)
-#define CBAR_IRPTNDX_SHIFT             24
-#define CBAR_IRPTNDX_MASK              0xff
-
-#define ARM_SMMU_GR1_CBFRSYNRA(n)      (0x400 + ((n) << 2))
-
-#define ARM_SMMU_GR1_CBA2R(n)          (0x800 + ((n) << 2))
-#define CBA2R_RW64_32BIT               (0 << 0)
-#define CBA2R_RW64_64BIT               (1 << 0)
-#define CBA2R_VMID_SHIFT               16
-#define CBA2R_VMID_MASK                        0xffff
-
-#define ARM_SMMU_CB_SCTLR              0x0
-#define ARM_SMMU_CB_ACTLR              0x4
-#define ARM_SMMU_CB_RESUME             0x8
-#define ARM_SMMU_CB_TTBCR2             0x10
-#define ARM_SMMU_CB_TTBR0              0x20
-#define ARM_SMMU_CB_TTBR1              0x28
-#define ARM_SMMU_CB_TTBCR              0x30
-#define ARM_SMMU_CB_CONTEXTIDR         0x34
-#define ARM_SMMU_CB_S1_MAIR0           0x38
-#define ARM_SMMU_CB_S1_MAIR1           0x3c
-#define ARM_SMMU_CB_PAR                        0x50
-#define ARM_SMMU_CB_FSR                        0x58
-#define ARM_SMMU_CB_FAR                        0x60
-#define ARM_SMMU_CB_FSYNR0             0x68
-#define ARM_SMMU_CB_S1_TLBIVA          0x600
-#define ARM_SMMU_CB_S1_TLBIASID                0x610
-#define ARM_SMMU_CB_S1_TLBIVAL         0x620
-#define ARM_SMMU_CB_S2_TLBIIPAS2       0x630
-#define ARM_SMMU_CB_S2_TLBIIPAS2L      0x638
-#define ARM_SMMU_CB_TLBSYNC            0x7f0
-#define ARM_SMMU_CB_TLBSTATUS          0x7f4
-#define ARM_SMMU_CB_ATS1PR             0x800
-#define ARM_SMMU_CB_ATSR               0x8f0
-
-#define SCTLR_S1_ASIDPNE               (1 << 12)
-#define SCTLR_CFCFG                    (1 << 7)
-#define SCTLR_CFIE                     (1 << 6)
-#define SCTLR_CFRE                     (1 << 5)
-#define SCTLR_E                                (1 << 4)
-#define SCTLR_AFE                      (1 << 2)
-#define SCTLR_TRE                      (1 << 1)
-#define SCTLR_M                                (1 << 0)
-
-#define CB_PAR_F                       (1 << 0)
-
-#define ATSR_ACTIVE                    (1 << 0)
-
-#define RESUME_RETRY                   (0 << 0)
-#define RESUME_TERMINATE               (1 << 0)
-
-#define TTBCR2_SEP_SHIFT               15
-#define TTBCR2_SEP_UPSTREAM            (0x7 << TTBCR2_SEP_SHIFT)
-#define TTBCR2_AS                      (1 << 4)
-
-#define TTBRn_ASID_SHIFT               48
-
-#define FSR_MULTI                      (1 << 31)
-#define FSR_SS                         (1 << 30)
-#define FSR_UUT                                (1 << 8)
-#define FSR_ASF                                (1 << 7)
-#define FSR_TLBLKF                     (1 << 6)
-#define FSR_TLBMCF                     (1 << 5)
-#define FSR_EF                         (1 << 4)
-#define FSR_PF                         (1 << 3)
-#define FSR_AFF                                (1 << 2)
-#define FSR_TF                         (1 << 1)
-
-#define FSR_IGN                                (FSR_AFF | FSR_ASF | \
-                                        FSR_TLBMCF | FSR_TLBLKF)
-#define FSR_FAULT                      (FSR_MULTI | FSR_SS | FSR_UUT | \
-                                        FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
-
-#define FSYNR0_WNR                     (1 << 4)
-
-#endif /* _ARM_SMMU_REGS_H */
index c5c93e48b4dbdf7409ca3b8008a24a4ab3214a49..4aa414843557651673c8c1f1f971d944a37f2647 100644 (file)
 #define ARM_SMMU_MEMATTR_DEVICE_nGnRE  0x1
 #define ARM_SMMU_MEMATTR_OIWB          0xf
 
-#define Q_IDX(q, p)                    ((p) & ((1 << (q)->max_n_shift) - 1))
-#define Q_WRP(q, p)                    ((p) & (1 << (q)->max_n_shift))
-#define Q_OVERFLOW_FLAG                        (1 << 31)
-#define Q_OVF(q, p)                    ((p) & Q_OVERFLOW_FLAG)
+#define Q_IDX(llq, p)                  ((p) & ((1 << (llq)->max_n_shift) - 1))
+#define Q_WRP(llq, p)                  ((p) & (1 << (llq)->max_n_shift))
+#define Q_OVERFLOW_FLAG                        (1U << 31)
+#define Q_OVF(p)                       ((p) & Q_OVERFLOW_FLAG)
 #define Q_ENT(q, p)                    ((q)->base +                    \
-                                        Q_IDX(q, p) * (q)->ent_dwords)
+                                        Q_IDX(&((q)->llq), p) *        \
+                                        (q)->ent_dwords)
 
 #define Q_BASE_RWA                     (1UL << 62)
 #define Q_BASE_ADDR_MASK               GENMASK_ULL(51, 5)
 #define CMDQ_ERR_CERROR_ABT_IDX                2
 #define CMDQ_ERR_CERROR_ATC_INV_IDX    3
 
+#define CMDQ_PROD_OWNED_FLAG           Q_OVERFLOW_FLAG
+
+/*
+ * This is used to size the command queue and therefore must be at least
+ * BITS_PER_LONG so that the valid_map works correctly (it relies on the
+ * total number of queue entries being a multiple of BITS_PER_LONG).
+ */
+#define CMDQ_BATCH_ENTRIES             BITS_PER_LONG
+
 #define CMDQ_0_OP                      GENMASK_ULL(7, 0)
 #define CMDQ_0_SSV                     (1UL << 11)
 
 #define PRIQ_1_ADDR_MASK               GENMASK_ULL(63, 12)
 
 /* High-level queue structures */
-#define ARM_SMMU_POLL_TIMEOUT_US       100
-#define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US  1000000 /* 1s! */
-#define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT  10
+#define ARM_SMMU_POLL_TIMEOUT_US       1000000 /* 1s! */
+#define ARM_SMMU_POLL_SPIN_COUNT       10
 
 #define MSI_IOVA_BASE                  0x8000000
 #define MSI_IOVA_LENGTH                        0x100000
@@ -472,13 +481,29 @@ struct arm_smmu_cmdq_ent {
 
                #define CMDQ_OP_CMD_SYNC        0x46
                struct {
-                       u32                     msidata;
                        u64                     msiaddr;
                } sync;
        };
 };
 
+struct arm_smmu_ll_queue {
+       union {
+               u64                     val;
+               struct {
+                       u32             prod;
+                       u32             cons;
+               };
+               struct {
+                       atomic_t        prod;
+                       atomic_t        cons;
+               } atomic;
+               u8                      __pad[SMP_CACHE_BYTES];
+       } ____cacheline_aligned_in_smp;
+       u32                             max_n_shift;
+};
+
 struct arm_smmu_queue {
+       struct arm_smmu_ll_queue        llq;
        int                             irq; /* Wired interrupt */
 
        __le64                          *base;
@@ -486,17 +511,23 @@ struct arm_smmu_queue {
        u64                             q_base;
 
        size_t                          ent_dwords;
-       u32                             max_n_shift;
-       u32                             prod;
-       u32                             cons;
 
        u32 __iomem                     *prod_reg;
        u32 __iomem                     *cons_reg;
 };
 
+struct arm_smmu_queue_poll {
+       ktime_t                         timeout;
+       unsigned int                    delay;
+       unsigned int                    spin_cnt;
+       bool                            wfe;
+};
+
 struct arm_smmu_cmdq {
        struct arm_smmu_queue           q;
-       spinlock_t                      lock;
+       atomic_long_t                   *valid_map;
+       atomic_t                        owner_prod;
+       atomic_t                        lock;
 };
 
 struct arm_smmu_evtq {
@@ -576,8 +607,6 @@ struct arm_smmu_device {
 
        int                             gerr_irq;
        int                             combined_irq;
-       u32                             sync_nr;
-       u8                              prev_cmd_opcode;
 
        unsigned long                   ias; /* IPA */
        unsigned long                   oas; /* PA */
@@ -596,12 +625,6 @@ struct arm_smmu_device {
 
        struct arm_smmu_strtab_cfg      strtab_cfg;
 
-       /* Hi16xx adds an extra 32 bits of goodness to its MSI payload */
-       union {
-               u32                     sync_count;
-               u64                     padding;
-       };
-
        /* IOMMU core code handle */
        struct iommu_device             iommu;
 };
@@ -614,7 +637,7 @@ struct arm_smmu_master {
        struct list_head                domain_head;
        u32                             *sids;
        unsigned int                    num_sids;
-       bool                            ats_enabled             :1;
+       bool                            ats_enabled;
 };
 
 /* SMMU private data for an IOMMU domain */
@@ -631,6 +654,7 @@ struct arm_smmu_domain {
 
        struct io_pgtable_ops           *pgtbl_ops;
        bool                            non_strict;
+       atomic_t                        nr_ats_masters;
 
        enum arm_smmu_domain_stage      stage;
        union {
@@ -685,85 +709,97 @@ static void parse_driver_options(struct arm_smmu_device *smmu)
 }
 
 /* Low-level queue manipulation functions */
-static bool queue_full(struct arm_smmu_queue *q)
+static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n)
+{
+       u32 space, prod, cons;
+
+       prod = Q_IDX(q, q->prod);
+       cons = Q_IDX(q, q->cons);
+
+       if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons))
+               space = (1 << q->max_n_shift) - (prod - cons);
+       else
+               space = cons - prod;
+
+       return space >= n;
+}
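
Since the prod/cons words pack an index, a wrap bit and an overflow bit (see the Q_IDX/Q_WRP/Q_OVF macros above), the space calculation is easier to see with concrete numbers. A small self-contained restatement, with SHIFT, IDX and WRP as local stand-ins for max_n_shift, Q_IDX and Q_WRP and an assumed 256-entry queue:

#include <assert.h>
#include <stdint.h>

#define SHIFT   8
#define IDX(p)  ((p) & ((1u << SHIFT) - 1))
#define WRP(p)  ((p) & (1u << SHIFT))

static uint32_t space(uint32_t prod, uint32_t cons)
{
        uint32_t p = IDX(prod), c = IDX(cons);

        return WRP(prod) == WRP(cons) ? (1u << SHIFT) - (p - c) : c - p;
}

int main(void)
{
        assert(space(0x0FA, 0x005) == 11);      /* same wrap bit: 256 - (250 - 5)  */
        assert(space(0x105, 0x0FA) == 245);     /* wrap bits differ: 250 - 5       */
        return 0;
}
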
+
+static bool queue_full(struct arm_smmu_ll_queue *q)
 {
        return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
               Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
 }
 
-static bool queue_empty(struct arm_smmu_queue *q)
+static bool queue_empty(struct arm_smmu_ll_queue *q)
 {
        return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
               Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
 }
 
-static void queue_sync_cons(struct arm_smmu_queue *q)
+static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod)
 {
-       q->cons = readl_relaxed(q->cons_reg);
+       return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) &&
+               (Q_IDX(q, q->cons) > Q_IDX(q, prod))) ||
+              ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) &&
+               (Q_IDX(q, q->cons) <= Q_IDX(q, prod)));
 }
 
-static void queue_inc_cons(struct arm_smmu_queue *q)
+static void queue_sync_cons_out(struct arm_smmu_queue *q)
 {
-       u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
-
-       q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
-
        /*
         * Ensure that all CPU accesses (reads and writes) to the queue
         * are complete before we update the cons pointer.
         */
        mb();
-       writel_relaxed(q->cons, q->cons_reg);
+       writel_relaxed(q->llq.cons, q->cons_reg);
+}
+
+static void queue_inc_cons(struct arm_smmu_ll_queue *q)
+{
+       u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
+       q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
 }
 
-static int queue_sync_prod(struct arm_smmu_queue *q)
+static int queue_sync_prod_in(struct arm_smmu_queue *q)
 {
        int ret = 0;
        u32 prod = readl_relaxed(q->prod_reg);
 
-       if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
+       if (Q_OVF(prod) != Q_OVF(q->llq.prod))
                ret = -EOVERFLOW;
 
-       q->prod = prod;
+       q->llq.prod = prod;
        return ret;
 }
 
-static void queue_inc_prod(struct arm_smmu_queue *q)
+static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n)
 {
-       u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;
-
-       q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
-       writel(q->prod, q->prod_reg);
+       u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n;
+       return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
 }
 
-/*
- * Wait for the SMMU to consume items. If sync is true, wait until the queue
- * is empty. Otherwise, wait until there is at least one free slot.
- */
-static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe)
+static void queue_poll_init(struct arm_smmu_device *smmu,
+                           struct arm_smmu_queue_poll *qp)
 {
-       ktime_t timeout;
-       unsigned int delay = 1, spin_cnt = 0;
-
-       /* Wait longer if it's a CMD_SYNC */
-       timeout = ktime_add_us(ktime_get(), sync ?
-                                           ARM_SMMU_CMDQ_SYNC_TIMEOUT_US :
-                                           ARM_SMMU_POLL_TIMEOUT_US);
+       qp->delay = 1;
+       qp->spin_cnt = 0;
+       qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+       qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);
+}
 
-       while (queue_sync_cons(q), (sync ? !queue_empty(q) : queue_full(q))) {
-               if (ktime_compare(ktime_get(), timeout) > 0)
-                       return -ETIMEDOUT;
+static int queue_poll(struct arm_smmu_queue_poll *qp)
+{
+       if (ktime_compare(ktime_get(), qp->timeout) > 0)
+               return -ETIMEDOUT;
 
-               if (wfe) {
-                       wfe();
-               } else if (++spin_cnt < ARM_SMMU_CMDQ_SYNC_SPIN_COUNT) {
-                       cpu_relax();
-                       continue;
-               } else {
-                       udelay(delay);
-                       delay *= 2;
-                       spin_cnt = 0;
-               }
+       if (qp->wfe) {
+               wfe();
+       } else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) {
+               cpu_relax();
+       } else {
+               udelay(qp->delay);
+               qp->delay *= 2;
+               qp->spin_cnt = 0;
        }
 
        return 0;
@@ -777,16 +813,6 @@ static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
                *dst++ = cpu_to_le64(*src++);
 }
 
-static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
-{
-       if (queue_full(q))
-               return -ENOSPC;
-
-       queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
-       queue_inc_prod(q);
-       return 0;
-}
-
 static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
 {
        int i;
@@ -797,11 +823,12 @@ static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
 
 static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
 {
-       if (queue_empty(q))
+       if (queue_empty(&q->llq))
                return -EAGAIN;
 
-       queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
-       queue_inc_cons(q);
+       queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords);
+       queue_inc_cons(&q->llq);
+       queue_sync_cons_out(q);
        return 0;
 }
 
@@ -868,20 +895,14 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
                cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp);
                break;
        case CMDQ_OP_CMD_SYNC:
-               if (ent->sync.msiaddr)
+               if (ent->sync.msiaddr) {
                        cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ);
-               else
+                       cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
+               } else {
                        cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
+               }
                cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
                cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
-               /*
-                * Commands are written little-endian, but we want the SMMU to
-                * receive MSIData, and thus write it back to memory, in CPU
-                * byte order, so big-endian needs an extra byteswap here.
-                */
-               cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA,
-                                    cpu_to_le32(ent->sync.msidata));
-               cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
                break;
        default:
                return -ENOENT;
@@ -890,6 +911,27 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
        return 0;
 }
 
+static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
+                                        u32 prod)
+{
+       struct arm_smmu_queue *q = &smmu->cmdq.q;
+       struct arm_smmu_cmdq_ent ent = {
+               .opcode = CMDQ_OP_CMD_SYNC,
+       };
+
+       /*
+        * Beware that Hi16xx adds an extra 32 bits of goodness to its MSI
+        * payload, so the write will zero the entire command on that platform.
+        */
+       if (smmu->features & ARM_SMMU_FEAT_MSI &&
+           smmu->features & ARM_SMMU_FEAT_COHERENCY) {
+               ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
+                                  q->ent_dwords * 8;
+       }
+
+       arm_smmu_cmdq_build_cmd(cmd, &ent);
+}
+
 static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
 {
        static const char *cerror_str[] = {
@@ -948,109 +990,456 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
        queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
 }
 
-static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
+/*
+ * Command queue locking.
+ * This is a form of bastardised rwlock with the following major changes:
+ *
+ * - The only LOCK routines are exclusive_trylock() and shared_lock().
+ *   Neither have barrier semantics, and instead provide only a control
+ *   dependency.
+ *
+ * - The UNLOCK routines are supplemented with shared_tryunlock(), which
+ *   fails if the caller appears to be the last lock holder (yes, this is
+ *   racy). All successful UNLOCK routines have RELEASE semantics.
+ */
+static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq)
 {
-       struct arm_smmu_queue *q = &smmu->cmdq.q;
-       bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+       int val;
+
+       /*
+        * We can try to avoid the cmpxchg() loop by simply incrementing the
+        * lock counter. When held in exclusive state, the lock counter is set
+        * to INT_MIN so these increments won't hurt as the value will remain
+        * negative.
+        */
+       if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0)
+               return;
+
+       do {
+               val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0);
+       } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val);
+}
+
+static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq)
+{
+       (void)atomic_dec_return_release(&cmdq->lock);
+}
+
+static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq)
+{
+       if (atomic_read(&cmdq->lock) == 1)
+               return false;
+
+       arm_smmu_cmdq_shared_unlock(cmdq);
+       return true;
+}
+
+#define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)           \
+({                                                                     \
+       bool __ret;                                                     \
+       local_irq_save(flags);                                          \
+       __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN);       \
+       if (!__ret)                                                     \
+               local_irq_restore(flags);                               \
+       __ret;                                                          \
+})
+
+#define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags)         \
+({                                                                     \
+       atomic_set_release(&cmdq->lock, 0);                             \
+       local_irq_restore(flags);                                       \
+})
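
Roughly how these primitives are meant to be combined, as a hedged sketch rather than a real call site (the real sequencing lives in the command insertion path further down):

static void example_cmdq_lock_usage(struct arm_smmu_cmdq *cmdq)
{
        unsigned long flags;

        /* CPUs waiting on a CMD_SYNC hold the lock shared... */
        arm_smmu_cmdq_shared_lock(cmdq);
        /* ...poll for completion here... */
        if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
                /* Last holder out: safe to publish the observed cons. */
                WRITE_ONCE(cmdq->q.llq.cons, readl(cmdq->q.cons_reg));
                arm_smmu_cmdq_shared_unlock(cmdq);
        }

        /* A CPU that merely wants a fresh cons tries the exclusive form. */
        if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
                WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
                arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
        }
}
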
+
+
+/*
+ * Command queue insertion.
+ * This is made fiddly by our attempts to achieve some sort of scalability
+ * since there is one queue shared amongst all of the CPUs in the system.  If
+ * you like mixed-size concurrency, dependency ordering and relaxed atomics,
+ * then you'll *love* this monstrosity.
+ *
+ * The basic idea is to split the queue up into ranges of commands that are
+ * owned by a given CPU; the owner may not have written all of the commands
+ * itself, but is responsible for advancing the hardware prod pointer when
+ * the time comes. The algorithm is roughly:
+ *
+ *     1. Allocate some space in the queue. At this point we also discover
+ *        whether the head of the queue is currently owned by another CPU,
+ *        or whether we are the owner.
+ *
+ *     2. Write our commands into our allocated slots in the queue.
+ *
+ *     3. Mark our slots as valid in arm_smmu_cmdq.valid_map.
+ *
+ *     4. If we are an owner:
+ *             a. Wait for the previous owner to finish.
+ *             b. Mark the queue head as unowned, which tells us the range
+ *                that we are responsible for publishing.
+ *             c. Wait for all commands in our owned range to become valid.
+ *             d. Advance the hardware prod pointer.
+ *             e. Tell the next owner we've finished.
+ *
+ *     5. If we are inserting a CMD_SYNC (we may or may not have been an
+ *        owner), then we need to stick around until it has completed:
+ *             a. If we have MSIs, the SMMU can write back into the CMD_SYNC
+ *                to clear the first 4 bytes.
+ *             b. Otherwise, we spin waiting for the hardware cons pointer to
+ *                advance past our command.
+ *
+ * The devil is in the details, particularly the use of locking for handling
+ * SYNC completion and freeing up space in the queue before we think that it is
+ * full.
+ */
+static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq,
+                                              u32 sprod, u32 eprod, bool set)
+{
+       u32 swidx, sbidx, ewidx, ebidx;
+       struct arm_smmu_ll_queue llq = {
+               .max_n_shift    = cmdq->q.llq.max_n_shift,
+               .prod           = sprod,
+       };
+
+       ewidx = BIT_WORD(Q_IDX(&llq, eprod));
+       ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG;
+
+       while (llq.prod != eprod) {
+               unsigned long mask;
+               atomic_long_t *ptr;
+               u32 limit = BITS_PER_LONG;
+
+               swidx = BIT_WORD(Q_IDX(&llq, llq.prod));
+               sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG;
 
-       smmu->prev_cmd_opcode = FIELD_GET(CMDQ_0_OP, cmd[0]);
+               ptr = &cmdq->valid_map[swidx];
 
-       while (queue_insert_raw(q, cmd) == -ENOSPC) {
-               if (queue_poll_cons(q, false, wfe))
-                       dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+               if ((swidx == ewidx) && (sbidx < ebidx))
+                       limit = ebidx;
+
+               mask = GENMASK(limit - 1, sbidx);
+
+               /*
+                * The valid bit is the inverse of the wrap bit. This means
+                * that a zero-initialised queue is invalid and, after marking
+                * all entries as valid, they become invalid again when we
+                * wrap.
+                */
+               if (set) {
+                       atomic_long_xor(mask, ptr);
+               } else { /* Poll */
+                       unsigned long valid;
+
+                       valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask;
+                       atomic_long_cond_read_relaxed(ptr, (VAL & mask) == valid);
+               }
+
+               llq.prod = queue_inc_prod_n(&llq, limit - sbidx);
        }
 }
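
The XOR publication trick above is easiest to see on a single word. A toy model in plain user-space C (word and slot counts assumed) of how the "valid" polarity flips with the wrap bit, so producers never need more than the atomic XOR:

#include <assert.h>

int main(void)
{
        unsigned long map = 0, mask = 0xffUL;   /* slots 0-7 of one word */

        map ^= mask;                            /* publish during wrap pass 0 */
        assert((map & mask) == mask);           /* poller expects all-ones    */

        map ^= mask;                            /* publish during wrap pass 1 */
        assert((map & mask) == 0);              /* poller now expects zeroes  */
        return 0;
}
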
 
-static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
-                                   struct arm_smmu_cmdq_ent *ent)
+/* Mark all entries in the range [sprod, eprod) as valid */
+static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq,
+                                       u32 sprod, u32 eprod)
+{
+       __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true);
+}
+
+/* Wait for all entries in the range [sprod, eprod) to become valid */
+static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
+                                        u32 sprod, u32 eprod)
+{
+       __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false);
+}
+
+/* Wait for the command queue to become non-full */
+static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
+                                            struct arm_smmu_ll_queue *llq)
 {
-       u64 cmd[CMDQ_ENT_DWORDS];
        unsigned long flags;
+       struct arm_smmu_queue_poll qp;
+       struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+       int ret = 0;
 
-       if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
-               dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
-                        ent->opcode);
-               return;
+       /*
+        * Try to update our copy of cons by grabbing exclusive cmdq access. If
+        * that fails, spin until somebody else updates it for us.
+        */
+       if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
+               WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
+               arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
+               llq->val = READ_ONCE(cmdq->q.llq.val);
+               return 0;
        }
 
-       spin_lock_irqsave(&smmu->cmdq.lock, flags);
-       arm_smmu_cmdq_insert_cmd(smmu, cmd);
-       spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+       queue_poll_init(smmu, &qp);
+       do {
+               llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
+               if (!queue_full(llq))
+                       break;
+
+               ret = queue_poll(&qp);
+       } while (!ret);
+
+       return ret;
 }
 
 /*
- * The difference between val and sync_idx is bounded by the maximum size of
- * a queue at 2^20 entries, so 32 bits is plenty for wrap-safe arithmetic.
+ * Wait until the SMMU signals a CMD_SYNC completion MSI.
+ * Must be called with the cmdq lock held in some capacity.
  */
-static int __arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx)
+static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
+                                         struct arm_smmu_ll_queue *llq)
 {
-       ktime_t timeout;
-       u32 val;
+       int ret = 0;
+       struct arm_smmu_queue_poll qp;
+       struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+       u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
 
-       timeout = ktime_add_us(ktime_get(), ARM_SMMU_CMDQ_SYNC_TIMEOUT_US);
-       val = smp_cond_load_acquire(&smmu->sync_count,
-                                   (int)(VAL - sync_idx) >= 0 ||
-                                   !ktime_before(ktime_get(), timeout));
+       queue_poll_init(smmu, &qp);
 
-       return (int)(val - sync_idx) < 0 ? -ETIMEDOUT : 0;
+       /*
+        * The MSI won't generate an event, since it's being written back
+        * into the command queue.
+        */
+       qp.wfe = false;
+       smp_cond_load_relaxed(cmd, !VAL || (ret = queue_poll(&qp)));
+       llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1);
+       return ret;
 }
 
-static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
+/*
+ * Wait until the SMMU cons index passes llq->prod.
+ * Must be called with the cmdq lock held in some capacity.
+ */
+static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
+                                              struct arm_smmu_ll_queue *llq)
 {
-       u64 cmd[CMDQ_ENT_DWORDS];
-       unsigned long flags;
-       struct arm_smmu_cmdq_ent ent = {
-               .opcode = CMDQ_OP_CMD_SYNC,
-               .sync   = {
-                       .msiaddr = virt_to_phys(&smmu->sync_count),
-               },
-       };
+       struct arm_smmu_queue_poll qp;
+       struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+       u32 prod = llq->prod;
+       int ret = 0;
 
-       spin_lock_irqsave(&smmu->cmdq.lock, flags);
+       queue_poll_init(smmu, &qp);
+       llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
+       do {
+               if (queue_consumed(llq, prod))
+                       break;
 
-       /* Piggy-back on the previous command if it's a SYNC */
-       if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) {
-               ent.sync.msidata = smmu->sync_nr;
-       } else {
-               ent.sync.msidata = ++smmu->sync_nr;
-               arm_smmu_cmdq_build_cmd(cmd, &ent);
-               arm_smmu_cmdq_insert_cmd(smmu, cmd);
-       }
+               ret = queue_poll(&qp);
 
-       spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+               /*
+                * This needs to be a readl() so that our subsequent call
+                * to arm_smmu_cmdq_shared_tryunlock() can fail accurately.
+                *
+                * Specifically, we need to ensure that we observe all
+                * shared_lock()s by other CMD_SYNCs that share our owner,
+                * so that a failing call to tryunlock() means that we're
+                * the last one out and therefore we can safely advance
+                * cmdq->q.llq.cons. Roughly speaking:
+                *
+                * CPU 0                CPU1                    CPU2 (us)
+                *
+                * if (sync)
+                *      shared_lock();
+                *
+                * dma_wmb();
+                * set_valid_map();
+                *
+                *                      if (owner) {
+                *                              poll_valid_map();
+                *                              <control dependency>
+                *                              writel(prod_reg);
+                *
+                *                                              readl(cons_reg);
+                *                                              tryunlock();
+                *
+                * Requires us to see CPU 0's shared_lock() acquisition.
+                */
+               llq->cons = readl(cmdq->q.cons_reg);
+       } while (!ret);
 
-       return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
+       return ret;
 }
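
The comment above leans on the command queue's shared (reader) lock: CMD_SYNC issuers take it shared, and a tryunlock that fails tells the caller it is the last holder and may update shared state before fully releasing. A minimal sketch of such a counting lock, with illustrative names and memory orders rather than the driver's actual implementation:

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic int shared_count;        /* 0 = free, >0 = shared holders */

    static void shared_lock(void)
    {
            atomic_fetch_add_explicit(&shared_count, 1, memory_order_acquire);
    }

    static void shared_unlock(void)
    {
            atomic_fetch_sub_explicit(&shared_count, 1, memory_order_release);
    }

    /* Refuse to unlock when we are the last holder, so the caller knows it can
     * safely update shared state before calling shared_unlock() itself. */
    static bool shared_tryunlock(void)
    {
            int old = atomic_load_explicit(&shared_count, memory_order_relaxed);

            do {
                    if (old == 1)
                            return false;
            } while (!atomic_compare_exchange_weak_explicit(&shared_count, &old,
                            old - 1, memory_order_release,
                            memory_order_relaxed));
            return true;
    }
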
 
-static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
+                                        struct arm_smmu_ll_queue *llq)
 {
-       u64 cmd[CMDQ_ENT_DWORDS];
+       if (smmu->features & ARM_SMMU_FEAT_MSI &&
+           smmu->features & ARM_SMMU_FEAT_COHERENCY)
+               return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
+
+       return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
+}
+
+static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
+                                       u32 prod, int n)
+{
+       int i;
+       struct arm_smmu_ll_queue llq = {
+               .max_n_shift    = cmdq->q.llq.max_n_shift,
+               .prod           = prod,
+       };
+
+       for (i = 0; i < n; ++i) {
+               u64 *cmd = &cmds[i * CMDQ_ENT_DWORDS];
+
+               prod = queue_inc_prod_n(&llq, i);
+               queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS);
+       }
+}
+
+/*
+ * This is the actual insertion function, and provides the following
+ * ordering guarantees to callers:
+ *
+ * - There is a dma_wmb() before publishing any commands to the queue.
+ *   This can be relied upon to order prior writes to data structures
+ *   in memory (such as a CD or an STE) before the command.
+ *
+ * - On completion of a CMD_SYNC, there is a control dependency.
+ *   This can be relied upon to order subsequent writes to memory (e.g.
+ *   freeing an IOVA) after completion of the CMD_SYNC.
+ *
+ * - Command insertion is totally ordered, so if two CPUs each race to
+ *   insert their own list of commands then all of the commands from one
+ *   CPU will appear before any of the commands from the other CPU.
+ */
+static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
+                                      u64 *cmds, int n, bool sync)
+{
+       u64 cmd_sync[CMDQ_ENT_DWORDS];
+       u32 prod;
        unsigned long flags;
-       bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
-       struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC };
-       int ret;
+       bool owner;
+       struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+       struct arm_smmu_ll_queue llq = {
+               .max_n_shift = cmdq->q.llq.max_n_shift,
+       }, head = llq;
+       int ret = 0;
 
-       arm_smmu_cmdq_build_cmd(cmd, &ent);
+       /* 1. Allocate some space in the queue */
+       local_irq_save(flags);
+       llq.val = READ_ONCE(cmdq->q.llq.val);
+       do {
+               u64 old;
+
+               while (!queue_has_space(&llq, n + sync)) {
+                       local_irq_restore(flags);
+                       if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
+                               dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+                       local_irq_save(flags);
+               }
 
-       spin_lock_irqsave(&smmu->cmdq.lock, flags);
-       arm_smmu_cmdq_insert_cmd(smmu, cmd);
-       ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
-       spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+               head.cons = llq.cons;
+               head.prod = queue_inc_prod_n(&llq, n + sync) |
+                                            CMDQ_PROD_OWNED_FLAG;
 
+               old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
+               if (old == llq.val)
+                       break;
+
+               llq.val = old;
+       } while (1);
+       owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG);
+       head.prod &= ~CMDQ_PROD_OWNED_FLAG;
+       llq.prod &= ~CMDQ_PROD_OWNED_FLAG;
+
+       /*
+        * 2. Write our commands into the queue
+        * Dependency ordering from the cmpxchg() loop above.
+        */
+       arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
+       if (sync) {
+               prod = queue_inc_prod_n(&llq, n);
+               arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod);
+               queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
+
+               /*
+                * In order to determine completion of our CMD_SYNC, we must
+                * ensure that the queue can't wrap twice without us noticing.
+                * We achieve that by taking the cmdq lock as shared before
+                * marking our slot as valid.
+                */
+               arm_smmu_cmdq_shared_lock(cmdq);
+       }
+
+       /* 3. Mark our slots as valid, ensuring commands are visible first */
+       dma_wmb();
+       arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);
+
+       /* 4. If we are the owner, take control of the SMMU hardware */
+       if (owner) {
+               /* a. Wait for previous owner to finish */
+               atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);
+
+               /* b. Stop gathering work by clearing the owned flag */
+               prod = atomic_fetch_andnot_relaxed(CMDQ_PROD_OWNED_FLAG,
+                                                  &cmdq->q.llq.atomic.prod);
+               prod &= ~CMDQ_PROD_OWNED_FLAG;
+
+               /*
+                * c. Wait for any gathered work to be written to the queue.
+                * Note that we read our own entries so that we have the control
+                * dependency required by (d).
+                */
+               arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
+
+               /*
+                * d. Advance the hardware prod pointer
+                * Control dependency ordering from the entries becoming valid.
+                */
+               writel_relaxed(prod, cmdq->q.prod_reg);
+
+               /*
+                * e. Tell the next owner we're done
+                * Make sure we've updated the hardware first, so that we don't
+                * race to update prod and potentially move it backwards.
+                */
+               atomic_set_release(&cmdq->owner_prod, prod);
+       }
+
+       /* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */
+       if (sync) {
+               llq.prod = queue_inc_prod_n(&llq, n);
+               ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
+               if (ret) {
+                       dev_err_ratelimited(smmu->dev,
+                                           "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n",
+                                           llq.prod,
+                                           readl_relaxed(cmdq->q.prod_reg),
+                                           readl_relaxed(cmdq->q.cons_reg));
+               }
+
+               /*
+                * Try to unlock the cmdq lock. This will fail if we're the last
+                * reader, in which case we can safely update cmdq->q.llq.cons
+                */
+               if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
+                       WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
+                       arm_smmu_cmdq_shared_unlock(cmdq);
+               }
+       }
+
+       local_irq_restore(flags);
        return ret;
 }
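
Step 1 of the function above reserves queue space with a cmpxchg() loop over a snapshot of the prod/cons pair. A rough user-space analogue using C11 atomics, assuming free-running 32-bit counters instead of the driver's wrap-bit encoding (reserve_slots() and the packed layout are illustrative only):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Pack free-running 32-bit prod/cons counters into one 64-bit word and
     * reserve n slots by compare-and-swap. Returns false if the ring is full,
     * in which case the caller must wait for the consumer and retry.
     */
    static bool reserve_slots(_Atomic uint64_t *state, uint32_t n,
                              uint32_t nents, uint32_t *out_prod)
    {
            uint64_t old = atomic_load_explicit(state, memory_order_relaxed);

            for (;;) {
                    uint32_t prod = (uint32_t)(old >> 32);
                    uint32_t cons = (uint32_t)old;
                    uint64_t new;

                    if ((uint32_t)(prod - cons) + n > nents)
                            return false;   /* not enough space right now */

                    new = ((uint64_t)(prod + n) << 32) | cons;
                    if (atomic_compare_exchange_weak_explicit(state, &old, new,
                                    memory_order_relaxed,
                                    memory_order_relaxed)) {
                            *out_prod = prod;   /* our slots: [prod, prod + n) */
                            return true;
                    }
                    /* old was refreshed by the failed CAS; try again */
            }
    }
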
 
-static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
+                                  struct arm_smmu_cmdq_ent *ent)
 {
-       int ret;
-       bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
-                  (smmu->features & ARM_SMMU_FEAT_COHERENCY);
+       u64 cmd[CMDQ_ENT_DWORDS];
 
-       ret = msi ? __arm_smmu_cmdq_issue_sync_msi(smmu)
-                 : __arm_smmu_cmdq_issue_sync(smmu);
-       if (ret)
-               dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
-       return ret;
+       if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
+               dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
+                        ent->opcode);
+               return -EINVAL;
+       }
+
+       return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, false);
+}
+
+static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+{
+       return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true);
 }
 
 /* Context descriptor manipulation functions */
@@ -1305,6 +1694,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
        int i;
        struct arm_smmu_device *smmu = dev;
        struct arm_smmu_queue *q = &smmu->evtq.q;
+       struct arm_smmu_ll_queue *llq = &q->llq;
        u64 evt[EVTQ_ENT_DWORDS];
 
        do {
@@ -1322,12 +1712,13 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
                 * Not much we can do on overflow, so scream and pretend we're
                 * trying harder.
                 */
-               if (queue_sync_prod(q) == -EOVERFLOW)
+               if (queue_sync_prod_in(q) == -EOVERFLOW)
                        dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
-       } while (!queue_empty(q));
+       } while (!queue_empty(llq));
 
        /* Sync our overflow flag, as we believe we're up to speed */
-       q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
+       llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
+                   Q_IDX(llq, llq->cons);
        return IRQ_HANDLED;
 }
 
@@ -1373,19 +1764,21 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
 {
        struct arm_smmu_device *smmu = dev;
        struct arm_smmu_queue *q = &smmu->priq.q;
+       struct arm_smmu_ll_queue *llq = &q->llq;
        u64 evt[PRIQ_ENT_DWORDS];
 
        do {
                while (!queue_remove_raw(q, evt))
                        arm_smmu_handle_ppr(smmu, evt);
 
-               if (queue_sync_prod(q) == -EOVERFLOW)
+               if (queue_sync_prod_in(q) == -EOVERFLOW)
                        dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
-       } while (!queue_empty(q));
+       } while (!queue_empty(llq));
 
        /* Sync our overflow flag, as we believe we're up to speed */
-       q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
-       writel(q->cons, q->cons_reg);
+       llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
+                     Q_IDX(llq, llq->cons);
+       queue_sync_cons_out(q);
        return IRQ_HANDLED;
 }
 
@@ -1534,6 +1927,23 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
        if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
                return 0;
 
+       /*
+        * Ensure that we've completed prior invalidation of the main TLBs
+        * before we read 'nr_ats_masters' in case of a concurrent call to
+        * arm_smmu_enable_ats():
+        *
+        *      // unmap()                      // arm_smmu_enable_ats()
+        *      TLBI+SYNC                       atomic_inc(&nr_ats_masters);
+        *      smp_mb();                       [...]
+        *      atomic_read(&nr_ats_masters);   pci_enable_ats() // writel()
+        *
+        * Ensures that we always see the incremented 'nr_ats_masters' count if
+        * ATS was enabled at the PCI device before completion of the TLBI.
+        */
+       smp_mb();
+       if (!atomic_read(&smmu_domain->nr_ats_masters))
+               return 0;
+
        arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
 
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
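
The barrier comment above describes a store-buffering shaped race. As a hedged, simplified user-space illustration with C11 fences standing in for smp_mb() (the function and variable names are made up; this is not the driver's code), the guarantee is that the two sides cannot both miss each other's first store:

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic int nr_masters;
    static _Atomic bool tlbi_done;

    bool unmap_side(void)           /* "TLBI+SYNC; smp_mb(); read count" */
    {
            atomic_store_explicit(&tlbi_done, true, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);
            return atomic_load_explicit(&nr_masters,
                                        memory_order_relaxed) != 0;
    }

    bool enable_side(void)          /* "inc count; ...; enable ATS" */
    {
            atomic_fetch_add_explicit(&nr_masters, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);
            return atomic_load_explicit(&tlbi_done, memory_order_relaxed);
    }
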
@@ -1545,13 +1955,6 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
 }
 
 /* IO_PGTABLE API */
-static void arm_smmu_tlb_sync(void *cookie)
-{
-       struct arm_smmu_domain *smmu_domain = cookie;
-
-       arm_smmu_cmdq_issue_sync(smmu_domain->smmu);
-}
-
 static void arm_smmu_tlb_inv_context(void *cookie)
 {
        struct arm_smmu_domain *smmu_domain = cookie;
@@ -1570,25 +1973,32 @@ static void arm_smmu_tlb_inv_context(void *cookie)
        /*
         * NOTE: when io-pgtable is in non-strict mode, we may get here with
         * PTEs previously cleared by unmaps on the current CPU not yet visible
-        * to the SMMU. We are relying on the DSB implicit in queue_inc_prod()
-        * to guarantee those are observed before the TLBI. Do be careful, 007.
+        * to the SMMU. We are relying on the dma_wmb() implicit during cmd
+        * insertion to guarantee those are observed before the TLBI. Do be
+        * careful, 007.
         */
        arm_smmu_cmdq_issue_cmd(smmu, &cmd);
        arm_smmu_cmdq_issue_sync(smmu);
+       arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
 }
 
-static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-                                         size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
+                                  size_t granule, bool leaf,
+                                  struct arm_smmu_domain *smmu_domain)
 {
-       struct arm_smmu_domain *smmu_domain = cookie;
+       u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
        struct arm_smmu_device *smmu = smmu_domain->smmu;
+       unsigned long start = iova, end = iova + size;
+       int i = 0;
        struct arm_smmu_cmdq_ent cmd = {
                .tlbi = {
                        .leaf   = leaf,
-                       .addr   = iova,
                },
        };
 
+       if (!size)
+               return;
+
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                cmd.opcode      = CMDQ_OP_TLBI_NH_VA;
                cmd.tlbi.asid   = smmu_domain->s1_cfg.cd.asid;
@@ -1597,16 +2007,54 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
                cmd.tlbi.vmid   = smmu_domain->s2_cfg.vmid;
        }
 
-       do {
-               arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-               cmd.tlbi.addr += granule;
-       } while (size -= granule);
+       while (iova < end) {
+               if (i == CMDQ_BATCH_ENTRIES) {
+                       arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, false);
+                       i = 0;
+               }
+
+               cmd.tlbi.addr = iova;
+               arm_smmu_cmdq_build_cmd(&cmds[i * CMDQ_ENT_DWORDS], &cmd);
+               iova += granule;
+               i++;
+       }
+
+       arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, true);
+
+       /*
+        * Unfortunately, this can't be leaf-only since we may have
+        * zapped an entire table.
+        */
+       arm_smmu_atc_inv_domain(smmu_domain, 0, start, size);
+}
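
The loop above batches per-granule invalidation commands and submits them CMDQ_BATCH_ENTRIES at a time, with the final submission carrying the sync. A stripped-down sketch of that batching pattern, where build() and submit() are stand-ins for arm_smmu_cmdq_build_cmd() and arm_smmu_cmdq_issue_cmdlist():

    #include <stddef.h>
    #include <stdint.h>

    #define BATCH_ENTRIES   64      /* illustrative batch size */
    #define ENT_DWORDS      2       /* two 64-bit words per command */

    /* Build one invalidation command per granule and flush full batches. */
    static void inv_range(uint64_t iova, size_t size, size_t granule,
                          void (*build)(uint64_t *cmd, uint64_t iova),
                          void (*submit)(uint64_t *cmds, int n, int sync))
    {
            uint64_t cmds[BATCH_ENTRIES * ENT_DWORDS];
            uint64_t end = iova + size;
            int i = 0;

            if (!size)
                    return;

            while (iova < end) {
                    if (i == BATCH_ENTRIES) {
                            submit(cmds, i, 0);     /* full batch, no sync */
                            i = 0;
                    }
                    build(&cmds[i * ENT_DWORDS], iova);
                    iova += granule;
                    i++;
            }
            submit(cmds, i, 1);     /* final batch followed by a sync */
    }
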
+
+static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
+                                        unsigned long iova, size_t granule,
+                                        void *cookie)
+{
+       struct arm_smmu_domain *smmu_domain = cookie;
+       struct iommu_domain *domain = &smmu_domain->domain;
+
+       iommu_iotlb_gather_add_page(domain, gather, iova, granule);
 }
 
-static const struct iommu_gather_ops arm_smmu_gather_ops = {
+static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
+                                 size_t granule, void *cookie)
+{
+       arm_smmu_tlb_inv_range(iova, size, granule, false, cookie);
+}
+
+static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
+                                 size_t granule, void *cookie)
+{
+       arm_smmu_tlb_inv_range(iova, size, granule, true, cookie);
+}
+
+static const struct iommu_flush_ops arm_smmu_flush_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context,
-       .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
-       .tlb_sync       = arm_smmu_tlb_sync,
+       .tlb_flush_walk = arm_smmu_tlb_inv_walk,
+       .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
+       .tlb_add_page   = arm_smmu_tlb_inv_page_nosync,
 };
 
 /* IOMMU API */
@@ -1796,7 +2244,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
                .ias            = ias,
                .oas            = oas,
                .coherent_walk  = smmu->features & ARM_SMMU_FEAT_COHERENCY,
-               .tlb            = &arm_smmu_gather_ops,
+               .tlb            = &arm_smmu_flush_ops,
                .iommu_dev      = smmu->dev,
        };
 
@@ -1863,44 +2311,65 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
        }
 }
 
-static int arm_smmu_enable_ats(struct arm_smmu_master *master)
+#ifdef CONFIG_PCI_ATS
+static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
 {
-       int ret;
-       size_t stu;
        struct pci_dev *pdev;
        struct arm_smmu_device *smmu = master->smmu;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
 
        if (!(smmu->features & ARM_SMMU_FEAT_ATS) || !dev_is_pci(master->dev) ||
            !(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) || pci_ats_disabled())
-               return -ENXIO;
+               return false;
 
        pdev = to_pci_dev(master->dev);
-       if (pdev->untrusted)
-               return -EPERM;
+       return !pdev->untrusted && pdev->ats_cap;
+}
+#else
+static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
+{
+       return false;
+}
+#endif
+
+static void arm_smmu_enable_ats(struct arm_smmu_master *master)
+{
+       size_t stu;
+       struct pci_dev *pdev;
+       struct arm_smmu_device *smmu = master->smmu;
+       struct arm_smmu_domain *smmu_domain = master->domain;
+
+       /* Don't enable ATS at the endpoint if it's not enabled in the STE */
+       if (!master->ats_enabled)
+               return;
 
        /* Smallest Translation Unit: log2 of the smallest supported granule */
        stu = __ffs(smmu->pgsize_bitmap);
+       pdev = to_pci_dev(master->dev);
 
-       ret = pci_enable_ats(pdev, stu);
-       if (ret)
-               return ret;
-
-       master->ats_enabled = true;
-       return 0;
+       atomic_inc(&smmu_domain->nr_ats_masters);
+       arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
+       if (pci_enable_ats(pdev, stu))
+               dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
 }
 
 static void arm_smmu_disable_ats(struct arm_smmu_master *master)
 {
        struct arm_smmu_cmdq_ent cmd;
+       struct arm_smmu_domain *smmu_domain = master->domain;
 
-       if (!master->ats_enabled || !dev_is_pci(master->dev))
+       if (!master->ats_enabled)
                return;
 
+       pci_disable_ats(to_pci_dev(master->dev));
+       /*
+        * Ensure ATS is disabled at the endpoint before we issue the
+        * ATC invalidation via the SMMU.
+        */
+       wmb();
        arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
        arm_smmu_atc_inv_master(master, &cmd);
-       pci_disable_ats(to_pci_dev(master->dev));
-       master->ats_enabled = false;
+       atomic_dec(&smmu_domain->nr_ats_masters);
 }
 
 static void arm_smmu_detach_dev(struct arm_smmu_master *master)
@@ -1911,14 +2380,15 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master)
        if (!smmu_domain)
                return;
 
+       arm_smmu_disable_ats(master);
+
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
        list_del(&master->domain_head);
        spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
        master->domain = NULL;
+       master->ats_enabled = false;
        arm_smmu_install_ste_for_dev(master);
-
-       arm_smmu_disable_ats(master);
 }
 
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1958,17 +2428,20 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 
        master->domain = smmu_domain;
 
-       spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-       list_add(&master->domain_head, &smmu_domain->devices);
-       spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
-
        if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
-               arm_smmu_enable_ats(master);
+               master->ats_enabled = arm_smmu_ats_supported(master);
 
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
                arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg);
 
        arm_smmu_install_ste_for_dev(master);
+
+       spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+       list_add(&master->domain_head, &smmu_domain->devices);
+       spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+       arm_smmu_enable_ats(master);
+
 out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);
        return ret;
@@ -1985,21 +2458,16 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
        return ops->map(ops, iova, paddr, size, prot);
 }
 
-static size_t
-arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+                            size_t size, struct iommu_iotlb_gather *gather)
 {
-       int ret;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
 
        if (!ops)
                return 0;
 
-       ret = ops->unmap(ops, iova, size);
-       if (ret && arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size))
-               return 0;
-
-       return ret;
+       return ops->unmap(ops, iova, size, gather);
 }
 
 static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
@@ -2010,12 +2478,13 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
                arm_smmu_tlb_inv_context(smmu_domain);
 }
 
-static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
+                               struct iommu_iotlb_gather *gather)
 {
-       struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-       if (smmu)
-               arm_smmu_cmdq_issue_sync(smmu);
+       arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start,
+                              gather->pgsize, true, smmu_domain);
 }
 
 static phys_addr_t
@@ -2286,13 +2755,13 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
        size_t qsz;
 
        do {
-               qsz = ((1 << q->max_n_shift) * dwords) << 3;
+               qsz = ((1 << q->llq.max_n_shift) * dwords) << 3;
                q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
                                              GFP_KERNEL);
                if (q->base || qsz < PAGE_SIZE)
                        break;
 
-               q->max_n_shift--;
+               q->llq.max_n_shift--;
        } while (1);
 
        if (!q->base) {
@@ -2304,7 +2773,7 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
 
        if (!WARN_ON(q->base_dma & (qsz - 1))) {
                dev_info(smmu->dev, "allocated %u entries for %s\n",
-                        1 << q->max_n_shift, name);
+                        1 << q->llq.max_n_shift, name);
        }
 
        q->prod_reg     = arm_smmu_page1_fixup(prod_off, smmu);
@@ -2313,24 +2782,55 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
 
        q->q_base  = Q_BASE_RWA;
        q->q_base |= q->base_dma & Q_BASE_ADDR_MASK;
-       q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->max_n_shift);
+       q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift);
 
-       q->prod = q->cons = 0;
+       q->llq.prod = q->llq.cons = 0;
        return 0;
 }
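
The allocation loop above asks for the largest power-of-two queue and halves max_n_shift until dmam_alloc_coherent() succeeds or the request drops below a page. A generic sketch of that shrink-and-retry pattern (try_alloc() is a placeholder, not a real kernel helper):

    #include <stddef.h>

    /*
     * Allocate the largest power-of-two queue the allocator will give us,
     * halving the entry count until the request succeeds or falls below a page.
     */
    static void *alloc_queue(unsigned int *max_n_shift, size_t entry_bytes,
                             size_t page_size, void *(*try_alloc)(size_t))
    {
            for (;;) {
                    size_t qsz = ((size_t)1 << *max_n_shift) * entry_bytes;
                    void *base = try_alloc(qsz);

                    if (base || qsz < page_size)
                            return base;    /* success, or nothing left to shrink */

                    (*max_n_shift)--;
            }
    }
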
 
+static void arm_smmu_cmdq_free_bitmap(void *data)
+{
+       unsigned long *bitmap = data;
+       bitmap_free(bitmap);
+}
+
+static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu)
+{
+       int ret = 0;
+       struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+       unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
+       atomic_long_t *bitmap;
+
+       atomic_set(&cmdq->owner_prod, 0);
+       atomic_set(&cmdq->lock, 0);
+
+       bitmap = (atomic_long_t *)bitmap_zalloc(nents, GFP_KERNEL);
+       if (!bitmap) {
+               dev_err(smmu->dev, "failed to allocate cmdq bitmap\n");
+               ret = -ENOMEM;
+       } else {
+               cmdq->valid_map = bitmap;
+               devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap);
+       }
+
+       return ret;
+}
+
 static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
 {
        int ret;
 
        /* cmdq */
-       spin_lock_init(&smmu->cmdq.lock);
        ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
                                      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS,
                                      "cmdq");
        if (ret)
                return ret;
 
+       ret = arm_smmu_cmdq_init(smmu);
+       if (ret)
+               return ret;
+
        /* evtq */
        ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
                                      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS,
@@ -2708,8 +3208,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 
        /* Command queue */
        writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
-       writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
-       writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
+       writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
+       writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
 
        enables = CR0_CMDQEN;
        ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
@@ -2736,9 +3236,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 
        /* Event queue */
        writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
-       writel_relaxed(smmu->evtq.q.prod,
+       writel_relaxed(smmu->evtq.q.llq.prod,
                       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
-       writel_relaxed(smmu->evtq.q.cons,
+       writel_relaxed(smmu->evtq.q.llq.cons,
                       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));
 
        enables |= CR0_EVTQEN;
@@ -2753,9 +3253,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
        if (smmu->features & ARM_SMMU_FEAT_PRI) {
                writeq_relaxed(smmu->priq.q.q_base,
                               smmu->base + ARM_SMMU_PRIQ_BASE);
-               writel_relaxed(smmu->priq.q.prod,
+               writel_relaxed(smmu->priq.q.llq.prod,
                               arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
-               writel_relaxed(smmu->priq.q.cons,
+               writel_relaxed(smmu->priq.q.llq.cons,
                               arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));
 
                enables |= CR0_PRIQEN;
@@ -2909,18 +3409,24 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
        }
 
        /* Queue sizes, capped to ensure natural alignment */
-       smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
-                                        FIELD_GET(IDR1_CMDQS, reg));
-       if (!smmu->cmdq.q.max_n_shift) {
-               /* Odd alignment restrictions on the base, so ignore for now */
-               dev_err(smmu->dev, "unit-length command queue not supported\n");
+       smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
+                                            FIELD_GET(IDR1_CMDQS, reg));
+       if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
+               /*
+                * We don't support splitting up batches, so one batch of
+                * commands plus an extra sync needs to fit inside the command
+                * queue. There's also no way we can handle the weird alignment
+                * restrictions on the base pointer for a unit-length queue.
+                */
+               dev_err(smmu->dev, "command queue size <= %d entries not supported\n",
+                       CMDQ_BATCH_ENTRIES);
                return -ENXIO;
        }
 
-       smmu->evtq.q.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
-                                        FIELD_GET(IDR1_EVTQS, reg));
-       smmu->priq.q.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
-                                        FIELD_GET(IDR1_PRIQS, reg));
+       smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
+                                            FIELD_GET(IDR1_EVTQS, reg));
+       smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
+                                            FIELD_GET(IDR1_PRIQS, reg));
 
        /* SID/SSID sizes */
        smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
index 64977c131ee62aeeaea4865989ba8b378c924aeb..5b93c79371e98327ca20c9e7fb7a48db42f77a79 100644 (file)
 
 #include <linux/acpi.h>
 #include <linux/acpi_iort.h>
-#include <linux/atomic.h>
+#include <linux/bitfield.h>
 #include <linux/delay.h>
 #include <linux/dma-iommu.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
-#include <linux/io-64-nonatomic-hi-lo.h>
-#include <linux/io-pgtable.h>
-#include <linux/iommu.h>
 #include <linux/iopoll.h>
 #include <linux/init.h>
 #include <linux/moduleparam.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 
 #include <linux/amba/bus.h>
 #include <linux/fsl/mc.h>
 
-#include "arm-smmu-regs.h"
+#include "arm-smmu.h"
 
 /*
  * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
  * global register space are still, in fact, using a hypervisor to mediate it
  * by trapping and emulating register accesses. Sadly, some deployed versions
  * of said trapping code have bugs wherein they go horribly wrong for stores
  * using r31 (i.e. XZR/WZR) as the source register.
  */
 #define QCOM_DUMMY_VAL -1
 
-#define ARM_MMU500_ACTLR_CPRE          (1 << 1)
-
-#define ARM_MMU500_ACR_CACHE_LOCK      (1 << 26)
-#define ARM_MMU500_ACR_S2CRB_TLBEN     (1 << 10)
-#define ARM_MMU500_ACR_SMTNMB_TLBEN    (1 << 8)
-
 #define TLB_LOOP_TIMEOUT               1000000 /* 1s! */
 #define TLB_SPIN_COUNT                 10
 
-/* Maximum number of context banks per SMMU */
-#define ARM_SMMU_MAX_CBS               128
-
-/* SMMU global address space */
-#define ARM_SMMU_GR0(smmu)             ((smmu)->base)
-#define ARM_SMMU_GR1(smmu)             ((smmu)->base + (1 << (smmu)->pgshift))
-
-/*
- * SMMU global address space with conditional offset to access secure
- * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
- * nsGFSYNR0: 0x450)
- */
-#define ARM_SMMU_GR0_NS(smmu)                                          \
-       ((smmu)->base +                                                 \
-               ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)       \
-                       ? 0x400 : 0))
-
-/*
- * Some 64-bit registers only make sense to write atomically, but in such
- * cases all the data relevant to AArch32 formats lies within the lower word,
- * therefore this actually makes more sense than it might first appear.
- */
-#ifdef CONFIG_64BIT
-#define smmu_write_atomic_lq           writeq_relaxed
-#else
-#define smmu_write_atomic_lq           writel_relaxed
-#endif
-
-/* Translation context bank */
-#define ARM_SMMU_CB(smmu, n)   ((smmu)->cb_base + ((n) << (smmu)->pgshift))
-
 #define MSI_IOVA_BASE                  0x8000000
 #define MSI_IOVA_LENGTH                        0x100000
 
@@ -113,19 +72,6 @@ module_param(disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
        "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
 
-enum arm_smmu_arch_version {
-       ARM_SMMU_V1,
-       ARM_SMMU_V1_64K,
-       ARM_SMMU_V2,
-};
-
-enum arm_smmu_implementation {
-       GENERIC_SMMU,
-       ARM_MMU500,
-       CAVIUM_SMMUV2,
-       QCOM_SMMUV2,
-};
-
 struct arm_smmu_s2cr {
        struct iommu_group              *group;
        int                             count;
@@ -163,117 +109,8 @@ struct arm_smmu_master_cfg {
 #define for_each_cfg_sme(fw, i, idx) \
        for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
 
-struct arm_smmu_device {
-       struct device                   *dev;
-
-       void __iomem                    *base;
-       void __iomem                    *cb_base;
-       unsigned long                   pgshift;
-
-#define ARM_SMMU_FEAT_COHERENT_WALK    (1 << 0)
-#define ARM_SMMU_FEAT_STREAM_MATCH     (1 << 1)
-#define ARM_SMMU_FEAT_TRANS_S1         (1 << 2)
-#define ARM_SMMU_FEAT_TRANS_S2         (1 << 3)
-#define ARM_SMMU_FEAT_TRANS_NESTED     (1 << 4)
-#define ARM_SMMU_FEAT_TRANS_OPS                (1 << 5)
-#define ARM_SMMU_FEAT_VMID16           (1 << 6)
-#define ARM_SMMU_FEAT_FMT_AARCH64_4K   (1 << 7)
-#define ARM_SMMU_FEAT_FMT_AARCH64_16K  (1 << 8)
-#define ARM_SMMU_FEAT_FMT_AARCH64_64K  (1 << 9)
-#define ARM_SMMU_FEAT_FMT_AARCH32_L    (1 << 10)
-#define ARM_SMMU_FEAT_FMT_AARCH32_S    (1 << 11)
-#define ARM_SMMU_FEAT_EXIDS            (1 << 12)
-       u32                             features;
-
-#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
-       u32                             options;
-       enum arm_smmu_arch_version      version;
-       enum arm_smmu_implementation    model;
-
-       u32                             num_context_banks;
-       u32                             num_s2_context_banks;
-       DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
-       struct arm_smmu_cb              *cbs;
-       atomic_t                        irptndx;
-
-       u32                             num_mapping_groups;
-       u16                             streamid_mask;
-       u16                             smr_mask_mask;
-       struct arm_smmu_smr             *smrs;
-       struct arm_smmu_s2cr            *s2crs;
-       struct mutex                    stream_map_mutex;
-
-       unsigned long                   va_size;
-       unsigned long                   ipa_size;
-       unsigned long                   pa_size;
-       unsigned long                   pgsize_bitmap;
-
-       u32                             num_global_irqs;
-       u32                             num_context_irqs;
-       unsigned int                    *irqs;
-       struct clk_bulk_data            *clks;
-       int                             num_clks;
-
-       u32                             cavium_id_base; /* Specific to Cavium */
-
-       spinlock_t                      global_sync_lock;
-
-       /* IOMMU core code handle */
-       struct iommu_device             iommu;
-};
-
-enum arm_smmu_context_fmt {
-       ARM_SMMU_CTX_FMT_NONE,
-       ARM_SMMU_CTX_FMT_AARCH64,
-       ARM_SMMU_CTX_FMT_AARCH32_L,
-       ARM_SMMU_CTX_FMT_AARCH32_S,
-};
-
-struct arm_smmu_cfg {
-       u8                              cbndx;
-       u8                              irptndx;
-       union {
-               u16                     asid;
-               u16                     vmid;
-       };
-       u32                             cbar;
-       enum arm_smmu_context_fmt       fmt;
-};
-#define INVALID_IRPTNDX                        0xff
-
-enum arm_smmu_domain_stage {
-       ARM_SMMU_DOMAIN_S1 = 0,
-       ARM_SMMU_DOMAIN_S2,
-       ARM_SMMU_DOMAIN_NESTED,
-       ARM_SMMU_DOMAIN_BYPASS,
-};
-
-struct arm_smmu_domain {
-       struct arm_smmu_device          *smmu;
-       struct io_pgtable_ops           *pgtbl_ops;
-       const struct iommu_gather_ops   *tlb_ops;
-       struct arm_smmu_cfg             cfg;
-       enum arm_smmu_domain_stage      stage;
-       bool                            non_strict;
-       struct mutex                    init_mutex; /* Protects smmu pointer */
-       spinlock_t                      cb_lock; /* Serialises ATS1* ops and TLB syncs */
-       struct iommu_domain             domain;
-};
-
-struct arm_smmu_option_prop {
-       u32 opt;
-       const char *prop;
-};
-
-static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
-
 static bool using_legacy_binding, using_generic_binding;
 
-static struct arm_smmu_option_prop arm_smmu_options[] = {
-       { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
-       { 0, NULL},
-};
-
 static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
 {
        if (pm_runtime_enabled(smmu->dev))
@@ -293,20 +130,6 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
        return container_of(dom, struct arm_smmu_domain, domain);
 }
 
-static void parse_driver_options(struct arm_smmu_device *smmu)
-{
-       int i = 0;
-
-       do {
-               if (of_property_read_bool(smmu->dev->of_node,
-                                               arm_smmu_options[i].prop)) {
-                       smmu->options |= arm_smmu_options[i].opt;
-                       dev_notice(smmu->dev, "option %s\n",
-                               arm_smmu_options[i].prop);
-               }
-       } while (arm_smmu_options[++i].opt);
-}
-
 static struct device_node *dev_get_dev_node(struct device *dev)
 {
        if (dev_is_pci(dev)) {
@@ -415,15 +238,17 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
 }
 
 /* Wait for any pending TLB invalidations to complete */
-static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
-                               void __iomem *sync, void __iomem *status)
+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
+                               int sync, int status)
 {
        unsigned int spin_cnt, delay;
+       u32 reg;
 
-       writel_relaxed(QCOM_DUMMY_VAL, sync);
+       arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
        for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
                for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
-                       if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
+                       reg = arm_smmu_readl(smmu, page, status);
+                       if (!(reg & sTLBGSTATUS_GSACTIVE))
                                return;
                        cpu_relax();
                }
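
The sync wait above spins a few times on the status register before backing off with exponentially growing delays, bounded by TLB_LOOP_TIMEOUT. A generic sketch of that spin-then-backoff polling, with busy() and delay_us() as placeholders for the register read and udelay():

    #include <stdbool.h>

    #define SPIN_COUNT      10
    #define TIMEOUT_US      1000000         /* ~1s, as in TLB_LOOP_TIMEOUT */

    /* Busy-spin briefly, then back off exponentially until the timeout. */
    static bool wait_until_idle(bool (*busy)(void),
                                void (*delay_us)(unsigned int))
    {
            for (unsigned int delay = 1; delay < TIMEOUT_US; delay *= 2) {
                    for (int spin = SPIN_COUNT; spin > 0; spin--) {
                            if (!busy())
                                    return true;
                    }
                    delay_us(delay);
            }
            return false;   /* caller should report a TLB sync timeout */
    }
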
@@ -435,12 +260,11 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
 
 static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
 {
-       void __iomem *base = ARM_SMMU_GR0(smmu);
        unsigned long flags;
 
        spin_lock_irqsave(&smmu->global_sync_lock, flags);
-       __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
-                           base + ARM_SMMU_GR0_sTLBGSTATUS);
+       __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
+                           ARM_SMMU_GR0_sTLBGSTATUS);
        spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
 }
 
@@ -448,12 +272,11 @@ static void arm_smmu_tlb_sync_context(void *cookie)
 {
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
-       void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
        unsigned long flags;
 
        spin_lock_irqsave(&smmu_domain->cb_lock, flags);
-       __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
-                           base + ARM_SMMU_CB_TLBSTATUS);
+       __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
+                           ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
        spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 }
 
@@ -467,14 +290,13 @@ static void arm_smmu_tlb_sync_vmid(void *cookie)
 static void arm_smmu_tlb_inv_context_s1(void *cookie)
 {
        struct arm_smmu_domain *smmu_domain = cookie;
-       struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-       void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
-
        /*
-        * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
-        * cleared by the current CPU are visible to the SMMU before the TLBI.
+        * The TLBI write may be relaxed, so ensure that PTEs cleared by the
+        * current CPU are visible beforehand.
         */
-       writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+       wmb();
+       arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
+                         ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
        arm_smmu_tlb_sync_context(cookie);
 }
 
@@ -482,87 +304,143 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
 {
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
-       void __iomem *base = ARM_SMMU_GR0(smmu);
 
-       /* NOTE: see above */
-       writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+       /* See above */
+       wmb();
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
        arm_smmu_tlb_sync_global(smmu);
 }
 
-static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-                                         size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
+                                     size_t granule, bool leaf, void *cookie)
 {
        struct arm_smmu_domain *smmu_domain = cookie;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-       bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
-       void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
+       int reg, idx = cfg->cbndx;
 
-       if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+       if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
                wmb();
 
-       if (stage1) {
-               reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
-
-               if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
-                       iova &= ~12UL;
-                       iova |= cfg->asid;
-                       do {
-                               writel_relaxed(iova, reg);
-                               iova += granule;
-                       } while (size -= granule);
-               } else {
-                       iova >>= 12;
-                       iova |= (u64)cfg->asid << 48;
-                       do {
-                               writeq_relaxed(iova, reg);
-                               iova += granule >> 12;
-                       } while (size -= granule);
-               }
+       reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+
+       if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
+               iova = (iova >> 12) << 12;
+               iova |= cfg->asid;
+               do {
+                       arm_smmu_cb_write(smmu, idx, reg, iova);
+                       iova += granule;
+               } while (size -= granule);
        } else {
-               reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
-                             ARM_SMMU_CB_S2_TLBIIPAS2;
                iova >>= 12;
+               iova |= (u64)cfg->asid << 48;
                do {
-                       smmu_write_atomic_lq(iova, reg);
+                       arm_smmu_cb_writeq(smmu, idx, reg, iova);
                        iova += granule >> 12;
                } while (size -= granule);
        }
 }
 
+static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
+                                     size_t granule, bool leaf, void *cookie)
+{
+       struct arm_smmu_domain *smmu_domain = cookie;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       int reg, idx = smmu_domain->cfg.cbndx;
+
+       if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+               wmb();
+
+       reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
+       iova >>= 12;
+       do {
+               if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
+                       arm_smmu_cb_writeq(smmu, idx, reg, iova);
+               else
+                       arm_smmu_cb_write(smmu, idx, reg, iova);
+               iova += granule >> 12;
+       } while (size -= granule);
+}
+
 /*
  * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
  * almost negligible, but the benefit of getting the first one in as far ahead
  * of the sync as possible is significant, hence we don't just make this a
- * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
+ * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
  */
 static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
                                         size_t granule, bool leaf, void *cookie)
 {
        struct arm_smmu_domain *smmu_domain = cookie;
-       void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-       if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+       if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
                wmb();
 
-       writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
+}
+
+static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
+                                 size_t granule, void *cookie)
+{
+       struct arm_smmu_domain *smmu_domain = cookie;
+       const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+
+       ops->tlb_inv_range(iova, size, granule, false, cookie);
+       ops->tlb_sync(cookie);
+}
+
+static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
+                                 size_t granule, void *cookie)
+{
+       struct arm_smmu_domain *smmu_domain = cookie;
+       const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+
+       ops->tlb_inv_range(iova, size, granule, true, cookie);
+       ops->tlb_sync(cookie);
+}
+
+static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
+                                 unsigned long iova, size_t granule,
+                                 void *cookie)
+{
+       struct arm_smmu_domain *smmu_domain = cookie;
+       const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+
+       ops->tlb_inv_range(iova, granule, granule, true, cookie);
 }
 
-static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
-       .tlb_flush_all  = arm_smmu_tlb_inv_context_s1,
-       .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
-       .tlb_sync       = arm_smmu_tlb_sync_context,
+static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
+       .tlb = {
+               .tlb_flush_all  = arm_smmu_tlb_inv_context_s1,
+               .tlb_flush_walk = arm_smmu_tlb_inv_walk,
+               .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
+               .tlb_add_page   = arm_smmu_tlb_add_page,
+       },
+       .tlb_inv_range          = arm_smmu_tlb_inv_range_s1,
+       .tlb_sync               = arm_smmu_tlb_sync_context,
 };
 
-static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
-       .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
-       .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
-       .tlb_sync       = arm_smmu_tlb_sync_context,
+static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
+       .tlb = {
+               .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
+               .tlb_flush_walk = arm_smmu_tlb_inv_walk,
+               .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
+               .tlb_add_page   = arm_smmu_tlb_add_page,
+       },
+       .tlb_inv_range          = arm_smmu_tlb_inv_range_s2,
+       .tlb_sync               = arm_smmu_tlb_sync_context,
 };
 
-static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
-       .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
-       .tlb_add_flush  = arm_smmu_tlb_inv_vmid_nosync,
-       .tlb_sync       = arm_smmu_tlb_sync_vmid,
+static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
+       .tlb = {
+               .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
+               .tlb_flush_walk = arm_smmu_tlb_inv_walk,
+               .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
+               .tlb_add_page   = arm_smmu_tlb_add_page,
+       },
+       .tlb_inv_range          = arm_smmu_tlb_inv_vmid_nosync,
+       .tlb_sync               = arm_smmu_tlb_sync_vmid,
 };
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -571,26 +449,22 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
        unsigned long iova;
        struct iommu_domain *domain = dev;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-       struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
-       void __iomem *gr1_base = ARM_SMMU_GR1(smmu);
-       void __iomem *cb_base;
-
-       cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
-       fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+       int idx = smmu_domain->cfg.cbndx;
 
+       fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
        if (!(fsr & FSR_FAULT))
                return IRQ_NONE;
 
-       fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
-       iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
-       cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
+       fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
+       iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
+       cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
 
        dev_err_ratelimited(smmu->dev,
        "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
-                           fsr, iova, fsynr, cbfrsynra, cfg->cbndx);
+                           fsr, iova, fsynr, cbfrsynra, idx);
 
-       writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+       arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
        return IRQ_HANDLED;
 }
 
@@ -598,12 +472,11 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 {
        u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
        struct arm_smmu_device *smmu = dev;
-       void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
 
-       gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
-       gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
-       gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
-       gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+       gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
+       gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
+       gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
+       gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);
 
        if (!gfsr)
                return IRQ_NONE;
@@ -614,7 +487,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
                "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
                gfsr, gfsynr0, gfsynr1, gfsynr2);
 
-       writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
        return IRQ_HANDLED;
 }
 
@@ -627,16 +500,16 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 
        cb->cfg = cfg;
 
-       /* TTBCR */
+       /* TCR */
        if (stage1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
                        cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
                } else {
                        cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
                        cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
-                       cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
+                       cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
                        if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
-                               cb->tcr[1] |= TTBCR2_AS;
+                               cb->tcr[1] |= TCR2_AS;
                }
        } else {
                cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
@@ -649,9 +522,9 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
                        cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
                } else {
                        cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
-                       cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
+                       cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
                        cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
-                       cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
+                       cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
                }
        } else {
                cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
@@ -675,74 +548,71 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
        bool stage1;
        struct arm_smmu_cb *cb = &smmu->cbs[idx];
        struct arm_smmu_cfg *cfg = cb->cfg;
-       void __iomem *cb_base, *gr1_base;
-
-       cb_base = ARM_SMMU_CB(smmu, idx);
 
        /* Unassigned context banks only need disabling */
        if (!cfg) {
-               writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+               arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
                return;
        }
 
-       gr1_base = ARM_SMMU_GR1(smmu);
        stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
 
        /* CBA2R */
        if (smmu->version > ARM_SMMU_V1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
-                       reg = CBA2R_RW64_64BIT;
+                       reg = CBA2R_VA64;
                else
-                       reg = CBA2R_RW64_32BIT;
+                       reg = 0;
                /* 16-bit VMIDs live in CBA2R */
                if (smmu->features & ARM_SMMU_FEAT_VMID16)
-                       reg |= cfg->vmid << CBA2R_VMID_SHIFT;
+                       reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);
 
-               writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
+               arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
        }
 
        /* CBAR */
-       reg = cfg->cbar;
+       reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
        if (smmu->version < ARM_SMMU_V2)
-               reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
+               reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);
 
        /*
         * Use the weakest shareability/memory types, so they are
         * overridden by the ttbcr/pte.
         */
        if (stage1) {
-               reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
-                       (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+               reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
+                       FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
        } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
                /* 8-bit VMIDs live in CBAR */
-               reg |= cfg->vmid << CBAR_VMID_SHIFT;
+               reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
        }
-       writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));
+       arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
 
        /*
-        * TTBCR
+        * TCR
         * We must write this before the TTBRs, since it determines the
         * access behaviour of some fields (in particular, ASID[15:8]).
         */
        if (stage1 && smmu->version > ARM_SMMU_V1)
-               writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
-       writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);
+               arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
+       arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
 
        /* TTBRs */
        if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
-               writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
-               writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
-               writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
+               arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
+               arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
+               arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
        } else {
-               writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
+               arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
                if (stage1)
-                       writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
+                       arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
+                                          cb->ttbr[1]);
        }
 
        /* MAIRs (stage-1 only) */
        if (stage1) {
-               writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
-               writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
+               arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
+               arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
        }
 
        /* SCTLR */
@@ -752,7 +622,7 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                reg |= SCTLR_E;
 
-       writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
+       arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
 }
 
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
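The conversions in the hunks above follow a single mechanical pattern that repeats throughout the rest of the file: open-coded shift-and-mask arithmetic is replaced by the <linux/bitfield.h> helpers, driven by the GENMASK() field definitions added in the new arm-smmu.h later in this diff. A minimal sketch of the equivalence, assuming only the CBA2R_VMID16 definition from that header (the pack/unpack helper names are invented for illustration):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define CBA2R_VMID16	GENMASK(31, 16)		/* as defined in arm-smmu.h */

static inline u32 pack_vmid(u16 vmid)
{
	/* FIELD_PREP() shifts the value into the field: here, vmid << 16 */
	return FIELD_PREP(CBA2R_VMID16, vmid);
}

static inline u16 unpack_vmid(u32 reg)
{
	/* FIELD_GET() is the inverse: (reg >> 16) & 0xffff */
	return FIELD_GET(CBA2R_VMID16, reg);
}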
@@ -842,7 +712,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                        ias = min(ias, 32UL);
                        oas = min(oas, 32UL);
                }
-               smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
+               smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
                break;
        case ARM_SMMU_DOMAIN_NESTED:
                /*
@@ -862,9 +732,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                        oas = min(oas, 40UL);
                }
                if (smmu->version == ARM_SMMU_V2)
-                       smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
+                       smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
                else
-                       smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
+                       smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
                break;
        default:
                ret = -EINVAL;
@@ -884,23 +754,29 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        }
 
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
-               cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
+               cfg->vmid = cfg->cbndx + 1;
        else
-               cfg->asid = cfg->cbndx + smmu->cavium_id_base;
+               cfg->asid = cfg->cbndx;
+
+       smmu_domain->smmu = smmu;
+       if (smmu->impl && smmu->impl->init_context) {
+               ret = smmu->impl->init_context(smmu_domain);
+               if (ret)
+                       goto out_unlock;
+       }
 
        pgtbl_cfg = (struct io_pgtable_cfg) {
                .pgsize_bitmap  = smmu->pgsize_bitmap,
                .ias            = ias,
                .oas            = oas,
                .coherent_walk  = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
-               .tlb            = smmu_domain->tlb_ops,
+               .tlb            = &smmu_domain->flush_ops->tlb,
                .iommu_dev      = smmu->dev,
        };
 
        if (smmu_domain->non_strict)
                pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
 
-       smmu_domain->smmu = smmu;
        pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
        if (!pgtbl_ops) {
                ret = -ENOMEM;
@@ -1019,24 +895,24 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
 {
        struct arm_smmu_smr *smr = smmu->smrs + idx;
-       u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
+       u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);
 
        if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
                reg |= SMR_VALID;
-       writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
 }
 
 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
 {
        struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
-       u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
-                 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
-                 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
+       u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
+                 FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
+                 FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);
 
        if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
            smmu->smrs[idx].valid)
                reg |= S2CR_EXIDVALID;
-       writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
 }
 
 static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
@@ -1052,7 +928,6 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
  */
 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
 {
-       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        u32 smr;
 
        if (!smmu->smrs)
@@ -1063,15 +938,15 @@ static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
         * bits are set, so check each one separately. We can reject
         * masters later if they try to claim IDs outside these masks.
         */
-       smr = smmu->streamid_mask << SMR_ID_SHIFT;
-       writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-       smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
-       smmu->streamid_mask = smr >> SMR_ID_SHIFT;
+       smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
+       smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
+       smmu->streamid_mask = FIELD_GET(SMR_ID, smr);
 
-       smr = smmu->streamid_mask << SMR_MASK_SHIFT;
-       writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-       smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
-       smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+       smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
+       smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
+       smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
 }
 
 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
@@ -1140,8 +1015,8 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
        mutex_lock(&smmu->stream_map_mutex);
        /* Figure out a viable stream map entry allocation */
        for_each_cfg_sme(fwspec, i, idx) {
-               u16 sid = fwspec->ids[i];
-               u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
+               u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
+               u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
 
                if (idx != INVALID_SMENDX) {
                        ret = -EEXIST;
@@ -1301,7 +1176,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
-                            size_t size)
+                            size_t size, struct iommu_iotlb_gather *gather)
 {
        struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
        struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
@@ -1311,7 +1186,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                return 0;
 
        arm_smmu_rpm_get(smmu);
-       ret = ops->unmap(ops, iova, size);
+       ret = ops->unmap(ops, iova, size, gather);
        arm_smmu_rpm_put(smmu);
 
        return ret;
@@ -1322,21 +1197,22 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-       if (smmu_domain->tlb_ops) {
+       if (smmu_domain->flush_ops) {
                arm_smmu_rpm_get(smmu);
-               smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+               smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
                arm_smmu_rpm_put(smmu);
        }
 }
 
-static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
+                               struct iommu_iotlb_gather *gather)
 {
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-       if (smmu_domain->tlb_ops) {
+       if (smmu_domain->flush_ops) {
                arm_smmu_rpm_get(smmu);
-               smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+               smmu_domain->flush_ops->tlb_sync(smmu_domain);
                arm_smmu_rpm_put(smmu);
        }
 }
@@ -1349,28 +1225,25 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
        struct device *dev = smmu->dev;
-       void __iomem *cb_base;
+       void __iomem *reg;
        u32 tmp;
        u64 phys;
        unsigned long va, flags;
-       int ret;
+       int ret, idx = cfg->cbndx;
 
        ret = arm_smmu_rpm_get(smmu);
        if (ret < 0)
                return 0;
 
-       cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
-
        spin_lock_irqsave(&smmu_domain->cb_lock, flags);
-       /* ATS1 registers can only be written atomically */
        va = iova & ~0xfffUL;
-       if (smmu->version == ARM_SMMU_V2)
-               smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
-       else /* Register is only 32-bit in v1 */
-               writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
+       if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+               arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
+       else
+               arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
 
-       if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
-                                     !(tmp & ATSR_ACTIVE), 5, 50)) {
+       reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
+       if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
                spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
                dev_err(dev,
                        "iova to phys timed out on %pad. Falling back to software table walk.\n",
@@ -1378,7 +1251,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
                return ops->iova_to_phys(ops, iova);
        }
 
-       phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
+       phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
        spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
        if (phys & CB_PAR_F) {
                dev_err(dev, "translation fault!\n");
@@ -1466,8 +1339,8 @@ static int arm_smmu_add_device(struct device *dev)
 
        ret = -EINVAL;
        for (i = 0; i < fwspec->num_ids; i++) {
-               u16 sid = fwspec->ids[i];
-               u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
+               u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
+               u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
 
                if (sid & ~smmu->streamid_mask) {
                        dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
@@ -1648,12 +1521,12 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
        u32 mask, fwid = 0;
 
        if (args->args_count > 0)
-               fwid |= (u16)args->args[0];
+               fwid |= FIELD_PREP(SMR_ID, args->args[0]);
 
        if (args->args_count > 1)
-               fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
+               fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
        else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
-               fwid |= (u16)mask << SMR_MASK_SHIFT;
+               fwid |= FIELD_PREP(SMR_MASK, mask);
 
        return iommu_fwspec_add_ids(dev, &fwid, 1);
 }
@@ -1706,13 +1579,12 @@ static struct iommu_ops arm_smmu_ops = {
 
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 {
-       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        int i;
-       u32 reg, major;
+       u32 reg;
 
        /* clear global FSR */
-       reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
-       writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
+       reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
 
        /*
         * Reset stream mapping groups: Initial values mark all SMRn as
@@ -1721,47 +1593,17 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
        for (i = 0; i < smmu->num_mapping_groups; ++i)
                arm_smmu_write_sme(smmu, i);
 
-       if (smmu->model == ARM_MMU500) {
-               /*
-                * Before clearing ARM_MMU500_ACTLR_CPRE, need to
-                * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
-                * bit is only present in MMU-500r2 onwards.
-                */
-               reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
-               major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
-               reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
-               if (major >= 2)
-                       reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
-               /*
-                * Allow unmatched Stream IDs to allocate bypass
-                * TLB entries for reduced latency.
-                */
-               reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
-               writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
-       }
-
        /* Make sure all context banks are disabled and clear CB_FSR  */
        for (i = 0; i < smmu->num_context_banks; ++i) {
-               void __iomem *cb_base = ARM_SMMU_CB(smmu, i);
-
                arm_smmu_write_context_bank(smmu, i);
-               writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
-               /*
-                * Disable MMU-500's not-particularly-beneficial next-page
-                * prefetcher for the sake of errata #841119 and #826419.
-                */
-               if (smmu->model == ARM_MMU500) {
-                       reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
-                       reg &= ~ARM_MMU500_ACTLR_CPRE;
-                       writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
-               }
+               arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
        }
 
        /* Invalidate the TLB, just in case */
-       writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
-       writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
 
-       reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+       reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
 
        /* Enable fault reporting */
        reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
@@ -1780,7 +1622,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
        reg &= ~sCR0_FB;
 
        /* Don't upgrade barriers */
-       reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+       reg &= ~(sCR0_BSU);
 
        if (smmu->features & ARM_SMMU_FEAT_VMID16)
                reg |= sCR0_VMID16EN;
@@ -1788,9 +1630,12 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
        if (smmu->features & ARM_SMMU_FEAT_EXIDS)
                reg |= sCR0_EXIDENABLE;
 
+       if (smmu->impl && smmu->impl->reset)
+               smmu->impl->reset(smmu);
+
        /* Push the button */
        arm_smmu_tlb_sync_global(smmu);
-       writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
 }
 
 static int arm_smmu_id_size_to_bits(int size)
@@ -1814,8 +1659,7 @@ static int arm_smmu_id_size_to_bits(int size)
 
 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 {
-       unsigned long size;
-       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+       unsigned int size;
        u32 id;
        bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
        int i;
@@ -1825,7 +1669,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                        smmu->version == ARM_SMMU_V2 ? 2 : 1);
 
        /* ID0 */
-       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
+       id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
 
        /* Restrict available stages based on module parameter */
        if (force_stage == 1)
@@ -1879,12 +1723,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                smmu->features |= ARM_SMMU_FEAT_EXIDS;
                size = 1 << 16;
        } else {
-               size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+               size = 1 << FIELD_GET(ID0_NUMSIDB, id);
        }
        smmu->streamid_mask = size - 1;
        if (id & ID0_SMS) {
                smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
-               size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
+               size = FIELD_GET(ID0_NUMSMRG, id);
                if (size == 0) {
                        dev_err(smmu->dev,
                                "stream-matching supported, but no SMRs present!\n");
@@ -1898,7 +1742,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                        return -ENOMEM;
 
                dev_notice(smmu->dev,
-                          "\tstream matching with %lu register groups", size);
+                          "\tstream matching with %u register groups", size);
        }
        /* s2cr->type == 0 means translation, so initialise explicitly */
        smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
@@ -1919,49 +1763,38 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        }
 
        /* ID1 */
-       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
+       id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
        smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
 
        /* Check for size mismatch of SMMU address space from mapped region */
-       size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
-       size <<= smmu->pgshift;
-       if (smmu->cb_base != gr0_base + size)
+       size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
+       if (smmu->numpage != 2 * size << smmu->pgshift)
                dev_warn(smmu->dev,
-                       "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
-                       size * 2, (smmu->cb_base - gr0_base) * 2);
+                       "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
+                       2 * size << smmu->pgshift, smmu->numpage);
+       /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
+       smmu->numpage = size;
 
-       smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
-       smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
+       smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
+       smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
        if (smmu->num_s2_context_banks > smmu->num_context_banks) {
                dev_err(smmu->dev, "impossible number of S2 context banks!\n");
                return -ENODEV;
        }
        dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
                   smmu->num_context_banks, smmu->num_s2_context_banks);
-       /*
-        * Cavium CN88xx erratum #27704.
-        * Ensure ASID and VMID allocation is unique across all SMMUs in
-        * the system.
-        */
-       if (smmu->model == CAVIUM_SMMUV2) {
-               smmu->cavium_id_base =
-                       atomic_add_return(smmu->num_context_banks,
-                                         &cavium_smmu_context_count);
-               smmu->cavium_id_base -= smmu->num_context_banks;
-               dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
-       }
        smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
                                 sizeof(*smmu->cbs), GFP_KERNEL);
        if (!smmu->cbs)
                return -ENOMEM;
 
        /* ID2 */
-       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
-       size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
+       id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
+       size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
        smmu->ipa_size = size;
 
        /* The output mask is also applied for bypass */
-       size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
+       size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
        smmu->pa_size = size;
 
        if (id & ID2_VMID16)
@@ -1981,7 +1814,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                if (smmu->version == ARM_SMMU_V1_64K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
        } else {
-               size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
+               size = FIELD_GET(ID2_UBS, id);
                smmu->va_size = arm_smmu_id_size_to_bits(size);
                if (id & ID2_PTFS_4K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
@@ -2018,6 +1851,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
                           smmu->ipa_size, smmu->pa_size);
 
+       if (smmu->impl && smmu->impl->cfg_probe)
+               return smmu->impl->cfg_probe(smmu);
+
        return 0;
 }
 
@@ -2130,8 +1966,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
        smmu->version = data->version;
        smmu->model = data->model;
 
-       parse_driver_options(smmu);
-
        legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
        if (legacy_binding && !using_generic_binding) {
                if (!using_legacy_binding)
@@ -2194,12 +2028,20 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
        if (err)
                return err;
 
+       smmu = arm_smmu_impl_init(smmu);
+       if (IS_ERR(smmu))
+               return PTR_ERR(smmu);
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ioaddr = res->start;
        smmu->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(smmu->base))
                return PTR_ERR(smmu->base);
-       smmu->cb_base = smmu->base + resource_size(res) / 2;
+       /*
+        * The resource size should effectively match the value of SMMU_TOP;
+        * stash that temporarily until we know PAGESIZE to validate it with.
+        */
+       smmu->numpage = resource_size(res);
 
        num_irqs = 0;
        while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
@@ -2339,7 +2181,7 @@ static void arm_smmu_device_shutdown(struct platform_device *pdev)
 
        arm_smmu_rpm_get(smmu);
        /* Turn the thing off */
-       writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
        arm_smmu_rpm_put(smmu);
 
        if (pm_runtime_enabled(smmu->dev))
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
new file mode 100644
index 0000000..b19b6ca
--- /dev/null
+++ b/drivers/iommu/arm-smmu.h
@@ -0,0 +1,402 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IOMMU API for ARM architected SMMU implementations.
+ *
+ * Copyright (C) 2013 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#ifndef _ARM_SMMU_H
+#define _ARM_SMMU_H
+
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/io-pgtable.h>
+#include <linux/iommu.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/* Configuration registers */
+#define ARM_SMMU_GR0_sCR0              0x0
+#define sCR0_VMID16EN                  BIT(31)
+#define sCR0_BSU                       GENMASK(15, 14)
+#define sCR0_FB                                BIT(13)
+#define sCR0_PTM                       BIT(12)
+#define sCR0_VMIDPNE                   BIT(11)
+#define sCR0_USFCFG                    BIT(10)
+#define sCR0_GCFGFIE                   BIT(5)
+#define sCR0_GCFGFRE                   BIT(4)
+#define sCR0_EXIDENABLE                        BIT(3)
+#define sCR0_GFIE                      BIT(2)
+#define sCR0_GFRE                      BIT(1)
+#define sCR0_CLIENTPD                  BIT(0)
+
+/* Auxiliary Configuration register */
+#define ARM_SMMU_GR0_sACR              0x10
+
+/* Identification registers */
+#define ARM_SMMU_GR0_ID0               0x20
+#define ID0_S1TS                       BIT(30)
+#define ID0_S2TS                       BIT(29)
+#define ID0_NTS                                BIT(28)
+#define ID0_SMS                                BIT(27)
+#define ID0_ATOSNS                     BIT(26)
+#define ID0_PTFS_NO_AARCH32            BIT(25)
+#define ID0_PTFS_NO_AARCH32S           BIT(24)
+#define ID0_NUMIRPT                    GENMASK(23, 16)
+#define ID0_CTTW                       BIT(14)
+#define ID0_NUMSIDB                    GENMASK(12, 9)
+#define ID0_EXIDS                      BIT(8)
+#define ID0_NUMSMRG                    GENMASK(7, 0)
+
+#define ARM_SMMU_GR0_ID1               0x24
+#define ID1_PAGESIZE                   BIT(31)
+#define ID1_NUMPAGENDXB                        GENMASK(30, 28)
+#define ID1_NUMS2CB                    GENMASK(23, 16)
+#define ID1_NUMCB                      GENMASK(7, 0)
+
+#define ARM_SMMU_GR0_ID2               0x28
+#define ID2_VMID16                     BIT(15)
+#define ID2_PTFS_64K                   BIT(14)
+#define ID2_PTFS_16K                   BIT(13)
+#define ID2_PTFS_4K                    BIT(12)
+#define ID2_UBS                                GENMASK(11, 8)
+#define ID2_OAS                                GENMASK(7, 4)
+#define ID2_IAS                                GENMASK(3, 0)
+
+#define ARM_SMMU_GR0_ID3               0x2c
+#define ARM_SMMU_GR0_ID4               0x30
+#define ARM_SMMU_GR0_ID5               0x34
+#define ARM_SMMU_GR0_ID6               0x38
+
+#define ARM_SMMU_GR0_ID7               0x3c
+#define ID7_MAJOR                      GENMASK(7, 4)
+#define ID7_MINOR                      GENMASK(3, 0)
+
+#define ARM_SMMU_GR0_sGFSR             0x48
+#define ARM_SMMU_GR0_sGFSYNR0          0x50
+#define ARM_SMMU_GR0_sGFSYNR1          0x54
+#define ARM_SMMU_GR0_sGFSYNR2          0x58
+
+/* Global TLB invalidation */
+#define ARM_SMMU_GR0_TLBIVMID          0x64
+#define ARM_SMMU_GR0_TLBIALLNSNH       0x68
+#define ARM_SMMU_GR0_TLBIALLH          0x6c
+#define ARM_SMMU_GR0_sTLBGSYNC         0x70
+
+#define ARM_SMMU_GR0_sTLBGSTATUS       0x74
+#define sTLBGSTATUS_GSACTIVE           BIT(0)
+
+/* Stream mapping registers */
+#define ARM_SMMU_GR0_SMR(n)            (0x800 + ((n) << 2))
+#define SMR_VALID                      BIT(31)
+#define SMR_MASK                       GENMASK(31, 16)
+#define SMR_ID                         GENMASK(15, 0)
+
+#define ARM_SMMU_GR0_S2CR(n)           (0xc00 + ((n) << 2))
+#define S2CR_PRIVCFG                   GENMASK(25, 24)
+enum arm_smmu_s2cr_privcfg {
+       S2CR_PRIVCFG_DEFAULT,
+       S2CR_PRIVCFG_DIPAN,
+       S2CR_PRIVCFG_UNPRIV,
+       S2CR_PRIVCFG_PRIV,
+};
+#define S2CR_TYPE                      GENMASK(17, 16)
+enum arm_smmu_s2cr_type {
+       S2CR_TYPE_TRANS,
+       S2CR_TYPE_BYPASS,
+       S2CR_TYPE_FAULT,
+};
+#define S2CR_EXIDVALID                 BIT(10)
+#define S2CR_CBNDX                     GENMASK(7, 0)
+
+/* Context bank attribute registers */
+#define ARM_SMMU_GR1_CBAR(n)           (0x0 + ((n) << 2))
+#define CBAR_IRPTNDX                   GENMASK(31, 24)
+#define CBAR_TYPE                      GENMASK(17, 16)
+enum arm_smmu_cbar_type {
+       CBAR_TYPE_S2_TRANS,
+       CBAR_TYPE_S1_TRANS_S2_BYPASS,
+       CBAR_TYPE_S1_TRANS_S2_FAULT,
+       CBAR_TYPE_S1_TRANS_S2_TRANS,
+};
+#define CBAR_S1_MEMATTR                        GENMASK(15, 12)
+#define CBAR_S1_MEMATTR_WB             0xf
+#define CBAR_S1_BPSHCFG                        GENMASK(9, 8)
+#define CBAR_S1_BPSHCFG_NSH            3
+#define CBAR_VMID                      GENMASK(7, 0)
+
+#define ARM_SMMU_GR1_CBFRSYNRA(n)      (0x400 + ((n) << 2))
+
+#define ARM_SMMU_GR1_CBA2R(n)          (0x800 + ((n) << 2))
+#define CBA2R_VMID16                   GENMASK(31, 16)
+#define CBA2R_VA64                     BIT(0)
+
+#define ARM_SMMU_CB_SCTLR              0x0
+#define SCTLR_S1_ASIDPNE               BIT(12)
+#define SCTLR_CFCFG                    BIT(7)
+#define SCTLR_CFIE                     BIT(6)
+#define SCTLR_CFRE                     BIT(5)
+#define SCTLR_E                                BIT(4)
+#define SCTLR_AFE                      BIT(2)
+#define SCTLR_TRE                      BIT(1)
+#define SCTLR_M                                BIT(0)
+
+#define ARM_SMMU_CB_ACTLR              0x4
+
+#define ARM_SMMU_CB_RESUME             0x8
+#define RESUME_TERMINATE               BIT(0)
+
+#define ARM_SMMU_CB_TCR2               0x10
+#define TCR2_SEP                       GENMASK(17, 15)
+#define TCR2_SEP_UPSTREAM              0x7
+#define TCR2_AS                                BIT(4)
+
+#define ARM_SMMU_CB_TTBR0              0x20
+#define ARM_SMMU_CB_TTBR1              0x28
+#define TTBRn_ASID                     GENMASK_ULL(63, 48)
+
+#define ARM_SMMU_CB_TCR                        0x30
+#define ARM_SMMU_CB_CONTEXTIDR         0x34
+#define ARM_SMMU_CB_S1_MAIR0           0x38
+#define ARM_SMMU_CB_S1_MAIR1           0x3c
+
+#define ARM_SMMU_CB_PAR                        0x50
+#define CB_PAR_F                       BIT(0)
+
+#define ARM_SMMU_CB_FSR                        0x58
+#define FSR_MULTI                      BIT(31)
+#define FSR_SS                         BIT(30)
+#define FSR_UUT                                BIT(8)
+#define FSR_ASF                                BIT(7)
+#define FSR_TLBLKF                     BIT(6)
+#define FSR_TLBMCF                     BIT(5)
+#define FSR_EF                         BIT(4)
+#define FSR_PF                         BIT(3)
+#define FSR_AFF                                BIT(2)
+#define FSR_TF                         BIT(1)
+
+#define FSR_IGN                                (FSR_AFF | FSR_ASF | \
+                                        FSR_TLBMCF | FSR_TLBLKF)
+#define FSR_FAULT                      (FSR_MULTI | FSR_SS | FSR_UUT | \
+                                        FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
+
+#define ARM_SMMU_CB_FAR                        0x60
+
+#define ARM_SMMU_CB_FSYNR0             0x68
+#define FSYNR0_WNR                     BIT(4)
+
+#define ARM_SMMU_CB_S1_TLBIVA          0x600
+#define ARM_SMMU_CB_S1_TLBIASID                0x610
+#define ARM_SMMU_CB_S1_TLBIVAL         0x620
+#define ARM_SMMU_CB_S2_TLBIIPAS2       0x630
+#define ARM_SMMU_CB_S2_TLBIIPAS2L      0x638
+#define ARM_SMMU_CB_TLBSYNC            0x7f0
+#define ARM_SMMU_CB_TLBSTATUS          0x7f4
+#define ARM_SMMU_CB_ATS1PR             0x800
+
+#define ARM_SMMU_CB_ATSR               0x8f0
+#define ATSR_ACTIVE                    BIT(0)
+
+
+/* Maximum number of context banks per SMMU */
+#define ARM_SMMU_MAX_CBS               128
+
+
+/* Shared driver definitions */
+enum arm_smmu_arch_version {
+       ARM_SMMU_V1,
+       ARM_SMMU_V1_64K,
+       ARM_SMMU_V2,
+};
+
+enum arm_smmu_implementation {
+       GENERIC_SMMU,
+       ARM_MMU500,
+       CAVIUM_SMMUV2,
+       QCOM_SMMUV2,
+};
+
+struct arm_smmu_device {
+       struct device                   *dev;
+
+       void __iomem                    *base;
+       unsigned int                    numpage;
+       unsigned int                    pgshift;
+
+#define ARM_SMMU_FEAT_COHERENT_WALK    (1 << 0)
+#define ARM_SMMU_FEAT_STREAM_MATCH     (1 << 1)
+#define ARM_SMMU_FEAT_TRANS_S1         (1 << 2)
+#define ARM_SMMU_FEAT_TRANS_S2         (1 << 3)
+#define ARM_SMMU_FEAT_TRANS_NESTED     (1 << 4)
+#define ARM_SMMU_FEAT_TRANS_OPS                (1 << 5)
+#define ARM_SMMU_FEAT_VMID16           (1 << 6)
+#define ARM_SMMU_FEAT_FMT_AARCH64_4K   (1 << 7)
+#define ARM_SMMU_FEAT_FMT_AARCH64_16K  (1 << 8)
+#define ARM_SMMU_FEAT_FMT_AARCH64_64K  (1 << 9)
+#define ARM_SMMU_FEAT_FMT_AARCH32_L    (1 << 10)
+#define ARM_SMMU_FEAT_FMT_AARCH32_S    (1 << 11)
+#define ARM_SMMU_FEAT_EXIDS            (1 << 12)
+       u32                             features;
+
+       enum arm_smmu_arch_version      version;
+       enum arm_smmu_implementation    model;
+       const struct arm_smmu_impl      *impl;
+
+       u32                             num_context_banks;
+       u32                             num_s2_context_banks;
+       DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
+       struct arm_smmu_cb              *cbs;
+       atomic_t                        irptndx;
+
+       u32                             num_mapping_groups;
+       u16                             streamid_mask;
+       u16                             smr_mask_mask;
+       struct arm_smmu_smr             *smrs;
+       struct arm_smmu_s2cr            *s2crs;
+       struct mutex                    stream_map_mutex;
+
+       unsigned long                   va_size;
+       unsigned long                   ipa_size;
+       unsigned long                   pa_size;
+       unsigned long                   pgsize_bitmap;
+
+       u32                             num_global_irqs;
+       u32                             num_context_irqs;
+       unsigned int                    *irqs;
+       struct clk_bulk_data            *clks;
+       int                             num_clks;
+
+       spinlock_t                      global_sync_lock;
+
+       /* IOMMU core code handle */
+       struct iommu_device             iommu;
+};
+
+enum arm_smmu_context_fmt {
+       ARM_SMMU_CTX_FMT_NONE,
+       ARM_SMMU_CTX_FMT_AARCH64,
+       ARM_SMMU_CTX_FMT_AARCH32_L,
+       ARM_SMMU_CTX_FMT_AARCH32_S,
+};
+
+struct arm_smmu_cfg {
+       u8                              cbndx;
+       u8                              irptndx;
+       union {
+               u16                     asid;
+               u16                     vmid;
+       };
+       enum arm_smmu_cbar_type         cbar;
+       enum arm_smmu_context_fmt       fmt;
+};
+#define INVALID_IRPTNDX                        0xff
+
+enum arm_smmu_domain_stage {
+       ARM_SMMU_DOMAIN_S1 = 0,
+       ARM_SMMU_DOMAIN_S2,
+       ARM_SMMU_DOMAIN_NESTED,
+       ARM_SMMU_DOMAIN_BYPASS,
+};
+
+struct arm_smmu_flush_ops {
+       struct iommu_flush_ops          tlb;
+       void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
+                             bool leaf, void *cookie);
+       void (*tlb_sync)(void *cookie);
+};
+
+struct arm_smmu_domain {
+       struct arm_smmu_device          *smmu;
+       struct io_pgtable_ops           *pgtbl_ops;
+       const struct arm_smmu_flush_ops *flush_ops;
+       struct arm_smmu_cfg             cfg;
+       enum arm_smmu_domain_stage      stage;
+       bool                            non_strict;
+       struct mutex                    init_mutex; /* Protects smmu pointer */
+       spinlock_t                      cb_lock; /* Serialises ATS1* ops and TLB syncs */
+       struct iommu_domain             domain;
+};
+
+
+/* Implementation details, yay! */
+struct arm_smmu_impl {
+       u32 (*read_reg)(struct arm_smmu_device *smmu, int page, int offset);
+       void (*write_reg)(struct arm_smmu_device *smmu, int page, int offset,
+                         u32 val);
+       u64 (*read_reg64)(struct arm_smmu_device *smmu, int page, int offset);
+       void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset,
+                           u64 val);
+       int (*cfg_probe)(struct arm_smmu_device *smmu);
+       int (*reset)(struct arm_smmu_device *smmu);
+       int (*init_context)(struct arm_smmu_domain *smmu_domain);
+};
+
+static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
+{
+       return smmu->base + (n << smmu->pgshift);
+}
+
+static inline u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset)
+{
+       if (smmu->impl && unlikely(smmu->impl->read_reg))
+               return smmu->impl->read_reg(smmu, page, offset);
+       return readl_relaxed(arm_smmu_page(smmu, page) + offset);
+}
+
+static inline void arm_smmu_writel(struct arm_smmu_device *smmu, int page,
+                                  int offset, u32 val)
+{
+       if (smmu->impl && unlikely(smmu->impl->write_reg))
+               smmu->impl->write_reg(smmu, page, offset, val);
+       else
+               writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
+}
+
+static inline u64 arm_smmu_readq(struct arm_smmu_device *smmu, int page, int offset)
+{
+       if (smmu->impl && unlikely(smmu->impl->read_reg64))
+               return smmu->impl->read_reg64(smmu, page, offset);
+       return readq_relaxed(arm_smmu_page(smmu, page) + offset);
+}
+
+static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
+                                  int offset, u64 val)
+{
+       if (smmu->impl && unlikely(smmu->impl->write_reg64))
+               smmu->impl->write_reg64(smmu, page, offset, val);
+       else
+               writeq_relaxed(val, arm_smmu_page(smmu, page) + offset);
+}
+
+#define ARM_SMMU_GR0           0
+#define ARM_SMMU_GR1           1
+#define ARM_SMMU_CB(s, n)      ((s)->numpage + (n))
+
+#define arm_smmu_gr0_read(s, o)                \
+       arm_smmu_readl((s), ARM_SMMU_GR0, (o))
+#define arm_smmu_gr0_write(s, o, v)    \
+       arm_smmu_writel((s), ARM_SMMU_GR0, (o), (v))
+
+#define arm_smmu_gr1_read(s, o)                \
+       arm_smmu_readl((s), ARM_SMMU_GR1, (o))
+#define arm_smmu_gr1_write(s, o, v)    \
+       arm_smmu_writel((s), ARM_SMMU_GR1, (o), (v))
+
+#define arm_smmu_cb_read(s, n, o)      \
+       arm_smmu_readl((s), ARM_SMMU_CB((s), (n)), (o))
+#define arm_smmu_cb_write(s, n, o, v)  \
+       arm_smmu_writel((s), ARM_SMMU_CB((s), (n)), (o), (v))
+#define arm_smmu_cb_readq(s, n, o)     \
+       arm_smmu_readq((s), ARM_SMMU_CB((s), (n)), (o))
+#define arm_smmu_cb_writeq(s, n, o, v) \
+       arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
+
+struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
+
+#endif /* _ARM_SMMU_H */
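To show how the hooks and accessors above are meant to be consumed, here is a hedged sketch of a hypothetical implementation quirk; the foo_* names and the ACR bit are invented for the example, and only the types, macros and registers defined in this header are assumed:

/* Hypothetical implementation hook, not part of this patch. */
static int foo_smmu_reset(struct arm_smmu_device *smmu)
{
	u32 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);

	/* Adjust the Auxiliary Configuration Register via the GR0 accessor */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg | BIT(0));
	return 0;
}

static const struct arm_smmu_impl foo_smmu_impl = {
	.reset = foo_smmu_reset,
};

In the probe path shown earlier, arm_smmu_device_probe() calls arm_smmu_impl_init() before touching the hardware, which is where an impl such as this would be attached to the arm_smmu_device.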
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f68a62c3c32b55e1414ee5eff0cc34a452f38489..8f412af842471aef7a56603156123a2d70fd8142 100644
@@ -303,13 +303,15 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                u64 size, struct device *dev)
 {
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
-       struct iova_domain *iovad = &cookie->iovad;
        unsigned long order, base_pfn;
+       struct iova_domain *iovad;
        int attr;
 
        if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                return -EINVAL;
 
+       iovad = &cookie->iovad;
+
        /* Use the smallest supported page size for IOVA granularity */
        order = __ffs(domain->pgsize_bitmap);
        base_pfn = max_t(unsigned long, 1, base >> order);
@@ -444,13 +446,18 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, dma_addr);
+       struct iommu_iotlb_gather iotlb_gather;
+       size_t unmapped;
 
        dma_addr -= iova_off;
        size = iova_align(iovad, size + iova_off);
+       iommu_iotlb_gather_init(&iotlb_gather);
+
+       unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
+       WARN_ON(unmapped != size);
 
-       WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
        if (!cookie->fq_domain)
-               iommu_tlb_sync(domain);
+               iommu_tlb_sync(domain, &iotlb_gather);
        iommu_dma_free_iova(cookie, dma_addr, size);
 }
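The iommu_iotlb_gather introduced here batches TLB invalidations across the unmap and defers the sync until the caller is done. A minimal caller-side sketch using only the APIs visible in this hunk (the function name is invented):

#include <linux/iommu.h>

static void example_unmap_and_sync(struct iommu_domain *domain,
				   dma_addr_t iova, size_t size)
{
	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);
	/* Unmap without an implicit sync, accumulating invalidations... */
	WARN_ON(iommu_unmap_fast(domain, iova, size, &gather) != size);
	/* ...then flush them with a single TLB sync. */
	iommu_tlb_sync(domain, &gather);
}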
 
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 5d0754ed5fa0c34203b8ffd7d22403a29bccd013..eecd6a4216672e430224fb961b53e98cf37ee423 100644
@@ -1519,6 +1519,64 @@ static const char *dma_remap_fault_reasons[] =
        "PCE for translation request specifies blocking",
 };
 
+static const char * const dma_remap_sm_fault_reasons[] = {
+       "SM: Invalid Root Table Address",
+       "SM: TTM 0 for request with PASID",
+       "SM: TTM 0 for page group request",
+       "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
+       "SM: Error attempting to access Root Entry",
+       "SM: Present bit in Root Entry is clear",
+       "SM: Non-zero reserved field set in Root Entry",
+       "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
+       "SM: Error attempting to access Context Entry",
+       "SM: Present bit in Context Entry is clear",
+       "SM: Non-zero reserved field set in the Context Entry",
+       "SM: Invalid Context Entry",
+       "SM: DTE field in Context Entry is clear",
+       "SM: PASID Enable field in Context Entry is clear",
+       "SM: PASID is larger than the max in Context Entry",
+       "SM: PRE field in Context-Entry is clear",
+       "SM: RID_PASID field error in Context-Entry",
+       "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
+       "SM: Error attempting to access the PASID Directory Entry",
+       "SM: Present bit in Directory Entry is clear",
+       "SM: Non-zero reserved field set in PASID Directory Entry",
+       "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
+       "SM: Error attempting to access PASID Table Entry",
+       "SM: Present bit in PASID Table Entry is clear",
+       "SM: Non-zero reserved field set in PASID Table Entry",
+       "SM: Invalid Scalable-Mode PASID Table Entry",
+       "SM: ERE field is clear in PASID Table Entry",
+       "SM: SRE field is clear in PASID Table Entry",
+       "Unknown", "Unknown",/* 0x5E-0x5F */
+       "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
+       "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
+       "SM: Error attempting to access first-level paging entry",
+       "SM: Present bit in first-level paging entry is clear",
+       "SM: Non-zero reserved field set in first-level paging entry",
+       "SM: Error attempting to access FL-PML4 entry",
+       "SM: First-level entry address beyond MGAW in Nested translation",
+       "SM: Read permission error in FL-PML4 entry in Nested translation",
+       "SM: Read permission error in first-level paging entry in Nested translation",
+       "SM: Write permission error in first-level paging entry in Nested translation",
+       "SM: Error attempting to access second-level paging entry",
+       "SM: Read/Write permission error in second-level paging entry",
+       "SM: Non-zero reserved field set in second-level paging entry",
+       "SM: Invalid second-level page table pointer",
+       "SM: A/D bit update needed in second-level entry when set up in no snoop",
+       "Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
+       "SM: Address in first-level translation is not canonical",
+       "SM: U/S set 0 for first-level translation with user privilege",
+       "SM: No execute permission for request with PASID and ER=1",
+       "SM: Address beyond the DMA hardware max",
+       "SM: Second-level entry address beyond the max",
+       "SM: No write permission for Write/AtomicOp request",
+       "SM: No read permission for Read/AtomicOp request",
+       "SM: Invalid address-interrupt address",
+       "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
+       "SM: A/D bit update needed in first-level entry when set up in no snoop",
+};
+
 static const char *irq_remap_fault_reasons[] =
 {
        "Detected reserved fields in the decoded interrupt-remapped request",
@@ -1536,6 +1594,10 @@ static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
                                        ARRAY_SIZE(irq_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return irq_remap_fault_reasons[fault_reason - 0x20];
+       } else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
+                       ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
+               *fault_type = DMA_REMAP;
+               return dma_remap_sm_fault_reasons[fault_reason - 0x30];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
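As a quick worked example of the new scalable-mode branch above: a reported fault reason of 0x38 satisfies fault_reason >= 0x30, so the lookup becomes dma_remap_sm_fault_reasons[0x38 - 0x30], i.e. entry 8 of the table added earlier in this diff, "SM: Error attempting to access Root Entry", with the fault type reported as DMA_REMAP.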
@@ -1611,7 +1673,8 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
 }
 
 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
-               u8 fault_reason, u16 source_id, unsigned long long addr)
+               u8 fault_reason, int pasid, u16 source_id,
+               unsigned long long addr)
 {
        const char *reason;
        int fault_type;
@@ -1624,10 +1687,11 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                        PCI_FUNC(source_id & 0xFF), addr >> 48,
                        fault_reason, reason);
        else
-               pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
+               pr_err("[%s] Request device [%02x:%02x.%d] PASID %x fault addr %llx [fault reason %02d] %s\n",
                       type ? "DMA Read" : "DMA Write",
                       source_id >> 8, PCI_SLOT(source_id & 0xFF),
-                      PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
+                      PCI_FUNC(source_id & 0xFF), pasid, addr,
+                      fault_reason, reason);
        return 0;
 }
 
@@ -1659,8 +1723,9 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
                u8 fault_reason;
                u16 source_id;
                u64 guest_addr;
-               int type;
+               int type, pasid;
                u32 data;
+               bool pasid_present;
 
                /* highest 32 bits */
                data = readl(iommu->reg + reg +
@@ -1672,10 +1737,12 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
                        fault_reason = dma_frcd_fault_reason(data);
                        type = dma_frcd_type(data);
 
+                       pasid = dma_frcd_pasid_value(data);
                        data = readl(iommu->reg + reg +
                                     fault_index * PRIMARY_FAULT_REG_LEN + 8);
                        source_id = dma_frcd_source_id(data);
 
+                       pasid_present = dma_frcd_pasid_present(data);
                        guest_addr = dmar_readq(iommu->reg + reg +
                                        fault_index * PRIMARY_FAULT_REG_LEN);
                        guest_addr = dma_frcd_page_addr(guest_addr);
@@ -1688,7 +1755,9 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 
                if (!ratelimited)
+                       /* Using pasid -1 if pasid is not present */
                        dmar_fault_do_one(iommu, type, fault_reason,
+                                         pasid_present ? pasid : -1,
                                          source_id, guest_addr);
 
                fault_index++;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index b0c1e5f9daae5acc353ef91c5d0ab44f773fa318..9c94e16fb1277f793bb8d24062b09afb8ab49d74 100644
@@ -566,7 +566,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
 
 static const struct iommu_ops exynos_iommu_ops;
 
-static int __init exynos_sysmmu_probe(struct platform_device *pdev)
+static int exynos_sysmmu_probe(struct platform_device *pdev)
 {
        int irq, ret;
        struct device *dev = &pdev->dev;
@@ -583,10 +583,8 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
                return PTR_ERR(data->sfrbase);
 
        irq = platform_get_irq(pdev, 0);
-       if (irq <= 0) {
-               dev_err(dev, "Unable to find IRQ resource\n");
+       if (irq <= 0)
                return irq;
-       }
 
        ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
                                dev_name(dev), data);
@@ -1130,7 +1128,8 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain
 }
 
 static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
-                                unsigned long l_iova, size_t size)
+                                unsigned long l_iova, size_t size,
+                                struct iommu_iotlb_gather *gather)
 {
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 12d094d08c0a2e982fb712b9310ceadf7213013d..87de0b975672b0a8864277ff799b5e02e56547ea 100644
 #include <linux/dma-direct.h>
 #include <linux/crash_dump.h>
 #include <linux/numa.h>
+#include <linux/swiotlb.h>
 #include <asm/irq_remapping.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
+#include <trace/events/intel_iommu.h>
 
 #include "irq_remapping.h"
 #include "intel-pasid.h"
@@ -339,11 +341,15 @@ static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
 static void dmar_remove_one_dev_info(struct device *dev);
 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
+static void domain_context_clear(struct intel_iommu *iommu,
+                                struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu);
 static bool device_is_rmrr_locked(struct device *dev);
 static int intel_iommu_attach_device(struct iommu_domain *domain,
                                     struct device *dev);
+static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+                                           dma_addr_t iova);
 
 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
@@ -360,6 +366,7 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int iommu_identity_mapping;
+static int intel_no_bounce;
 
 #define IDENTMAP_ALL           1
 #define IDENTMAP_GFX           2
@@ -373,6 +380,9 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
+#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) &&   \
+                               to_pci_dev(d)->untrusted)
+
 /*
  * Iterate over elements in device_domain_list and call the specified
  * callback @fn against each element.
@@ -455,6 +465,9 @@ static int __init intel_iommu_setup(char *str)
                        printk(KERN_INFO
                                "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
                        intel_iommu_tboot_noforce = 1;
+               } else if (!strncmp(str, "nobounce", 8)) {
+                       pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
+                       intel_no_bounce = 1;
                }
 
                str += strcspn(str, ",");
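For reference, the new "nobounce" option is parsed as part of the existing intel_iommu= boot parameter, so (assuming the spelling matched above) passing intel_iommu=nobounce on the kernel command line is what sets intel_no_bounce and skips the bounce-buffer path for untrusted devices.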
@@ -2105,9 +2118,26 @@ out_unlock:
        return ret;
 }
 
+struct domain_context_mapping_data {
+       struct dmar_domain *domain;
+       struct intel_iommu *iommu;
+       struct pasid_table *table;
+};
+
+static int domain_context_mapping_cb(struct pci_dev *pdev,
+                                    u16 alias, void *opaque)
+{
+       struct domain_context_mapping_data *data = opaque;
+
+       return domain_context_mapping_one(data->domain, data->iommu,
+                                         data->table, PCI_BUS_NUM(alias),
+                                         alias & 0xff);
+}
+
 static int
 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 {
+       struct domain_context_mapping_data data;
        struct pasid_table *table;
        struct intel_iommu *iommu;
        u8 bus, devfn;
@@ -2117,7 +2147,17 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
                return -ENODEV;
 
        table = intel_pasid_get_table(dev);
-       return domain_context_mapping_one(domain, iommu, table, bus, devfn);
+
+       if (!dev_is_pci(dev))
+               return domain_context_mapping_one(domain, iommu, table,
+                                                 bus, devfn);
+
+       data.domain = domain;
+       data.iommu = iommu;
+       data.table = table;
+
+       return pci_for_each_dma_alias(to_pci_dev(dev),
+                                     &domain_context_mapping_cb, &data);
 }
 
 static int domain_context_mapped_cb(struct pci_dev *pdev,
@@ -3267,7 +3307,7 @@ static int __init init_dmars(void)
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
        }
 
-       if (iommu_pass_through)
+       if (iommu_default_passthrough())
                iommu_identity_mapping |= IDENTMAP_ALL;
 
 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
@@ -3505,6 +3545,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
        start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
        start_paddr += paddr & ~PAGE_MASK;
+
+       trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
+
        return start_paddr;
 
 error:
@@ -3560,10 +3603,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
        if (dev_is_pci(dev))
                pdev = to_pci_dev(dev);
 
-       dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
-
        freelist = domain_unmap(domain, start_pfn, last_pfn);
-
        if (intel_iommu_strict || (pdev && pdev->untrusted) ||
                        !has_iova_flush_queue(&domain->iovad)) {
                iommu_flush_iotlb_psi(iommu, domain, start_pfn,
@@ -3579,6 +3619,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
                 * cpu used up by the iotlb flush operation...
                 */
        }
+
+       trace_unmap_single(dev, dev_addr, size);
 }
 
 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
@@ -3669,6 +3711,8 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
        }
 
        intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
+
+       trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
 }
 
 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
@@ -3725,6 +3769,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
                return 0;
        }
 
+       trace_map_sg(dev, iova_pfn << PAGE_SHIFT,
+                    sg_phys(sglist), size << VTD_PAGE_SHIFT);
+
        return nelems;
 }
 
@@ -3740,6 +3787,252 @@ static const struct dma_map_ops intel_dma_ops = {
        .dma_supported = dma_direct_supported,
 };
 
+static void
+bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
+                  enum dma_data_direction dir, enum dma_sync_target target)
+{
+       struct dmar_domain *domain;
+       phys_addr_t tlb_addr;
+
+       domain = find_domain(dev);
+       if (WARN_ON(!domain))
+               return;
+
+       tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr);
+       if (is_swiotlb_buffer(tlb_addr))
+               swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
+}
+
+static dma_addr_t
+bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
+                 enum dma_data_direction dir, unsigned long attrs,
+                 u64 dma_mask)
+{
+       size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
+       struct dmar_domain *domain;
+       struct intel_iommu *iommu;
+       unsigned long iova_pfn;
+       unsigned long nrpages;
+       phys_addr_t tlb_addr;
+       int prot = 0;
+       int ret;
+
+       domain = find_domain(dev);
+       if (WARN_ON(dir == DMA_NONE || !domain))
+               return DMA_MAPPING_ERROR;
+
+       iommu = domain_get_iommu(domain);
+       if (WARN_ON(!iommu))
+               return DMA_MAPPING_ERROR;
+
+       nrpages = aligned_nrpages(0, size);
+       iova_pfn = intel_alloc_iova(dev, domain,
+                                   dma_to_mm_pfn(nrpages), dma_mask);
+       if (!iova_pfn)
+               return DMA_MAPPING_ERROR;
+
+       /*
+        * Check if DMAR supports zero-length reads on write only
+        * mappings..
+        */
+       if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
+                       !cap_zlr(iommu->cap))
+               prot |= DMA_PTE_READ;
+       if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+               prot |= DMA_PTE_WRITE;
+
+       /*
+        * If both the physical buffer start address and size are
+        * page aligned, we don't need to use a bounce page.
+        */
+       if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
+               tlb_addr = swiotlb_tbl_map_single(dev,
+                               __phys_to_dma(dev, io_tlb_start),
+                               paddr, size, aligned_size, dir, attrs);
+               if (tlb_addr == DMA_MAPPING_ERROR) {
+                       goto swiotlb_error;
+               } else {
+                       /* Cleanup the padding area. */
+                       void *padding_start = phys_to_virt(tlb_addr);
+                       size_t padding_size = aligned_size;
+
+                       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+                           (dir == DMA_TO_DEVICE ||
+                            dir == DMA_BIDIRECTIONAL)) {
+                               padding_start += size;
+                               padding_size -= size;
+                       }
+
+                       memset(padding_start, 0, padding_size);
+               }
+       } else {
+               tlb_addr = paddr;
+       }
+
+       ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
+                                tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
+       if (ret)
+               goto mapping_error;
+
+       trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
+
+       return (phys_addr_t)iova_pfn << PAGE_SHIFT;
+
+mapping_error:
+       if (is_swiotlb_buffer(tlb_addr))
+               swiotlb_tbl_unmap_single(dev, tlb_addr, size,
+                                        aligned_size, dir, attrs);
+swiotlb_error:
+       free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
+       dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
+               size, (unsigned long long)paddr, dir);
+
+       return DMA_MAPPING_ERROR;
+}
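
The bounce path above only routes a buffer through swiotlb when either its start address or its length is not page aligned; fully aligned buffers are mapped in place. A minimal user-space sketch of that decision (the needs_bounce() helper and the 4 KiB page size are illustrative, not part of the driver):

/* Illustrative sketch of the alignment check that decides whether a
 * buffer needs a bounce page (assumes a 4 KiB IOMMU page size). */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SIZE 4096u

static bool needs_bounce(uint64_t paddr, size_t size)
{
        /* Mirrors !IS_ALIGNED(paddr | size, VTD_PAGE_SIZE): any low bit
         * set in either the start or the length means the mapping would
         * expose unrelated data sharing the same page. */
        return ((paddr | size) & (VTD_PAGE_SIZE - 1)) != 0;
}

int main(void)
{
        printf("%d\n", needs_bounce(0x10000, 4096));   /* 0: map in place */
        printf("%d\n", needs_bounce(0x10100, 512));    /* 1: bounce       */
        return 0;
}
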
+
+static void
+bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+                   enum dma_data_direction dir, unsigned long attrs)
+{
+       size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
+       struct dmar_domain *domain;
+       phys_addr_t tlb_addr;
+
+       domain = find_domain(dev);
+       if (WARN_ON(!domain))
+               return;
+
+       tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr);
+       if (WARN_ON(!tlb_addr))
+               return;
+
+       intel_unmap(dev, dev_addr, size);
+       if (is_swiotlb_buffer(tlb_addr))
+               swiotlb_tbl_unmap_single(dev, tlb_addr, size,
+                                        aligned_size, dir, attrs);
+
+       trace_bounce_unmap_single(dev, dev_addr, size);
+}
+
+static dma_addr_t
+bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+       return bounce_map_single(dev, page_to_phys(page) + offset,
+                                size, dir, attrs, *dev->dma_mask);
+}
+
+static dma_addr_t
+bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
+                   enum dma_data_direction dir, unsigned long attrs)
+{
+       return bounce_map_single(dev, phys_addr, size,
+                                dir, attrs, *dev->dma_mask);
+}
+
+static void
+bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
+                 enum dma_data_direction dir, unsigned long attrs)
+{
+       bounce_unmap_single(dev, dev_addr, size, dir, attrs);
+}
+
+static void
+bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
+                     enum dma_data_direction dir, unsigned long attrs)
+{
+       bounce_unmap_single(dev, dev_addr, size, dir, attrs);
+}
+
+static void
+bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
+               enum dma_data_direction dir, unsigned long attrs)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nelems, i)
+               bounce_unmap_page(dev, sg->dma_address,
+                                 sg_dma_len(sg), dir, attrs);
+}
+
+static int
+bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
+             enum dma_data_direction dir, unsigned long attrs)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sglist, sg, nelems, i) {
+               sg->dma_address = bounce_map_page(dev, sg_page(sg),
+                                                 sg->offset, sg->length,
+                                                 dir, attrs);
+               if (sg->dma_address == DMA_MAPPING_ERROR)
+                       goto out_unmap;
+               sg_dma_len(sg) = sg->length;
+       }
+
+       return nelems;
+
+out_unmap:
+       bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+       return 0;
+}
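
bounce_map_sg() follows the usual map-all-or-unwind contract of dma_map_sg(): on the first failing element it unmaps whatever was already mapped (skipping the CPU sync) and returns 0. A self-contained sketch of that pattern, with map_one()/unmap_one() as placeholder callbacks:

/* Sketch of the map-everything-or-unwind pattern used by bounce_map_sg();
 * map_one()/unmap_one() are illustrative placeholders. */
#include <stdio.h>

static int map_one(int i)    { return i == 3 ? -1 : 0; }  /* fail on #3 */
static void unmap_one(int i) { printf("unmapped %d\n", i); }

static int map_all(int nelems)
{
        int i;

        for (i = 0; i < nelems; i++) {
                if (map_one(i) < 0)
                        goto out_unmap;
        }
        return nelems;          /* success: all elements mapped */

out_unmap:
        while (i--)             /* unwind only what was mapped */
                unmap_one(i);
        return 0;               /* 0 signals failure, as dma_map_sg() does */
}

int main(void) { printf("mapped %d\n", map_all(5)); return 0; }
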
+
+static void
+bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+                          size_t size, enum dma_data_direction dir)
+{
+       bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
+}
+
+static void
+bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
+                             size_t size, enum dma_data_direction dir)
+{
+       bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
+}
+
+static void
+bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
+                      int nelems, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nelems, i)
+               bounce_sync_single(dev, sg_dma_address(sg),
+                                  sg_dma_len(sg), dir, SYNC_FOR_CPU);
+}
+
+static void
+bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+                         int nelems, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sglist, sg, nelems, i)
+               bounce_sync_single(dev, sg_dma_address(sg),
+                                  sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
+}
+
+static const struct dma_map_ops bounce_dma_ops = {
+       .alloc                  = intel_alloc_coherent,
+       .free                   = intel_free_coherent,
+       .map_sg                 = bounce_map_sg,
+       .unmap_sg               = bounce_unmap_sg,
+       .map_page               = bounce_map_page,
+       .unmap_page             = bounce_unmap_page,
+       .sync_single_for_cpu    = bounce_sync_single_for_cpu,
+       .sync_single_for_device = bounce_sync_single_for_device,
+       .sync_sg_for_cpu        = bounce_sync_sg_for_cpu,
+       .sync_sg_for_device     = bounce_sync_sg_for_device,
+       .map_resource           = bounce_map_resource,
+       .unmap_resource         = bounce_unmap_resource,
+       .dma_supported          = dma_direct_supported,
+};
+
 static inline int iommu_domain_cache_init(void)
 {
        int ret = 0;
@@ -4540,22 +4833,20 @@ const struct attribute_group *intel_iommu_groups[] = {
        NULL,
 };
 
-static int __init platform_optin_force_iommu(void)
+static inline bool has_untrusted_dev(void)
 {
        struct pci_dev *pdev = NULL;
-       bool has_untrusted_dev = false;
 
-       if (!dmar_platform_optin() || no_platform_optin)
-               return 0;
+       for_each_pci_dev(pdev)
+               if (pdev->untrusted)
+                       return true;
 
-       for_each_pci_dev(pdev) {
-               if (pdev->untrusted) {
-                       has_untrusted_dev = true;
-                       break;
-               }
-       }
+       return false;
+}
 
-       if (!has_untrusted_dev)
+static int __init platform_optin_force_iommu(void)
+{
+       if (!dmar_platform_optin() || no_platform_optin || !has_untrusted_dev())
                return 0;
 
        if (no_iommu || dmar_disabled)
@@ -4569,9 +4860,6 @@ static int __init platform_optin_force_iommu(void)
                iommu_identity_mapping |= IDENTMAP_ALL;
 
        dmar_disabled = 0;
-#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
-       swiotlb = 0;
-#endif
        no_iommu = 0;
 
        return 1;
@@ -4711,7 +4999,14 @@ int __init intel_iommu_init(void)
        up_write(&dmar_global_lock);
 
 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
-       swiotlb = 0;
+       /*
+        * If the system has no untrusted device or the user has decided
+        * to disable the bounce page mechanisms, we don't need swiotlb.
+        * Mark this so that the pre-allocated bounce pages will be
+        * released later.
+        */
+       if (!has_untrusted_dev() || intel_no_bounce)
+               swiotlb = 0;
 #endif
        dma_ops = &intel_dma_ops;
 
@@ -4759,6 +5054,28 @@ out_free_dmar:
        return ret;
 }
 
+static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+{
+       struct intel_iommu *iommu = opaque;
+
+       domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+       return 0;
+}
+
+/*
+ * NB - intel-iommu lacks any sort of reference counting for the users of
+ * dependent devices.  If multiple endpoints have intersecting dependent
+ * devices, unbinding the driver from any one of them will possibly leave
+ * the others unable to operate.
+ */
+static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
+{
+       if (!iommu || !dev || !dev_is_pci(dev))
+               return;
+
+       pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
+}
+
 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 {
        struct dmar_domain *domain;
@@ -4779,7 +5096,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
                                        PASID_RID2PASID);
 
                iommu_disable_dev_iotlb(info);
-               domain_context_clear_one(iommu, info->bus, info->devfn);
+               domain_context_clear(iommu, info->dev);
                intel_pasid_free_table(info->dev);
        }
 
@@ -5153,7 +5470,8 @@ static int intel_iommu_map(struct iommu_domain *domain,
 }
 
 static size_t intel_iommu_unmap(struct iommu_domain *domain,
-                               unsigned long iova, size_t size)
+                               unsigned long iova, size_t size,
+                               struct iommu_iotlb_gather *gather)
 {
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct page *freelist = NULL;
@@ -5309,6 +5627,11 @@ static int intel_iommu_add_device(struct device *dev)
                }
        }
 
+       if (device_needs_bounce(dev)) {
+               dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
+               set_dma_ops(dev, &bounce_dma_ops);
+       }
+
        return 0;
 }
 
@@ -5326,6 +5649,9 @@ static void intel_iommu_remove_device(struct device *dev)
        iommu_group_remove_device(dev);
 
        iommu_device_unlink(&iommu->iommu, dev);
+
+       if (device_needs_bounce(dev))
+               set_dma_ops(dev, NULL);
 }
 
 static void intel_iommu_get_resv_regions(struct device *device,
@@ -5639,20 +5965,46 @@ const struct iommu_ops intel_iommu_ops = {
        .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
 };
 
-static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
+static void quirk_iommu_igfx(struct pci_dev *dev)
 {
-       /* G4x/GM45 integrated gfx dmar support is totally busted. */
        pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
        dmar_map_gfx = 0;
 }
 
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
+/* G4x/GM45 integrated gfx dmar support is totally busted. */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
+
+/* Broadwell igfx malfunctions with dmar */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
 
 static void quirk_iommu_rwbf(struct pci_dev *dev)
 {
index 780de0caafe845cb1b2c031010fcc25402decf31..9b159132405dc750111432bb5435abcae11040af 100644 (file)
@@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
 }
 
 static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
-                                      unsigned long address, unsigned long pages, int ih, int gl)
+                               unsigned long address, unsigned long pages, int ih)
 {
        struct qi_desc desc;
 
-       if (pages == -1) {
-               /* For global kernel pages we have to flush them in *all* PASIDs
-                * because that's the only option the hardware gives us. Despite
-                * the fact that they are actually only accessible through one. */
-               if (gl)
-                       desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
-                                       QI_EIOTLB_DID(sdev->did) |
-                                       QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
-                                       QI_EIOTLB_TYPE;
-               else
-                       desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
-                                       QI_EIOTLB_DID(sdev->did) |
-                                       QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
-                                       QI_EIOTLB_TYPE;
+       /*
+        * Do PASID-granularity IOTLB invalidation if the page-selective
+        * capability is not available.
+        */
+       if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
+               desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+                       QI_EIOTLB_DID(sdev->did) |
+                       QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+                       QI_EIOTLB_TYPE;
                desc.qw1 = 0;
        } else {
                int mask = ilog2(__roundup_pow_of_two(pages));
@@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
                                QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
                                QI_EIOTLB_TYPE;
                desc.qw1 = QI_EIOTLB_ADDR(address) |
-                               QI_EIOTLB_GL(gl) |
                                QI_EIOTLB_IH(ih) |
                                QI_EIOTLB_AM(mask);
        }
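
When page-selective invalidation is used, the span is encoded as an address mask, mask = ilog2(__roundup_pow_of_two(pages)), i.e. the number of low IOVA bits the hardware should ignore. A small runnable sketch of that computation (the helpers below re-implement the kernel's bit helpers purely for illustration):

/* Sketch of the address-mask (AM) computation for page-selective IOTLB
 * invalidation: round the page count up to a power of two, take log2. */
#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

static unsigned int ilog2(unsigned long n)
{
        unsigned int l = 0;

        while (n >>= 1)
                l++;
        return l;
}

int main(void)
{
        unsigned long pages;

        for (pages = 1; pages <= 9; pages++)
                printf("pages=%lu -> AM=%u (covers %lu pages)\n", pages,
                       ilog2(roundup_pow_of_two(pages)),
                       roundup_pow_of_two(pages));
        return 0;
}
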
@@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 }
 
 static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
-                                 unsigned long pages, int ih, int gl)
+                               unsigned long pages, int ih)
 {
        struct intel_svm_dev *sdev;
 
        rcu_read_lock();
        list_for_each_entry_rcu(sdev, &svm->devs, list)
-               intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
+               intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
        rcu_read_unlock();
 }
 
@@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
        struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
 
        intel_flush_svm_range(svm, start,
-                             (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
+                             (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
 }
 
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
        rcu_read_lock();
        list_for_each_entry_rcu(sdev, &svm->devs, list) {
                intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
-               intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+               intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
        }
        rcu_read_unlock();
 
@@ -425,7 +419,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
                                 * large and has to be physically contiguous. So it's
                                 * hard to be as defensive as we might like. */
                                intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
-                               intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+                               intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
                                kfree_rcu(sdev, rcu);
 
                                if (list_empty(&svm->devs)) {
diff --git a/drivers/iommu/intel-trace.c b/drivers/iommu/intel-trace.c
new file mode 100644 (file)
index 0000000..bfb6a6e
--- /dev/null
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel IOMMU trace support
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/intel_iommu.h>
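
The new file is the usual one-liner that instantiates the tracepoints: defining CREATE_TRACE_POINTS before including the trace header makes <trace/define_trace.h> emit the event bodies. The event definitions themselves live in include/trace/events/intel_iommu.h, which is not part of this hunk; the skeleton below is a hedged illustration of what such a header conventionally looks like, with an example event name and fields rather than the ones added by this series:

/* Illustrative skeleton of a trace header consumed via CREATE_TRACE_POINTS;
 * the event name and field layout are examples only. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM intel_iommu

#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_INTEL_IOMMU_H

#include <linux/tracepoint.h>

TRACE_EVENT(example_map,
        TP_PROTO(unsigned long iova, size_t size),
        TP_ARGS(iova, size),
        TP_STRUCT__entry(
                __field(unsigned long, iova)
                __field(size_t, size)
        ),
        TP_fast_assign(
                __entry->iova = iova;
                __entry->size = size;
        ),
        TP_printk("iova=0x%lx size=%zu", __entry->iova, __entry->size)
);

#endif /* _TRACE_INTEL_IOMMU_H */

/* This part must be outside the multiple-inclusion protection */
#include <trace/define_trace.h>
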
index 4786ca061e31fd6e0ad6cb3328f66e89306e064a..81e43c1df7ecb5fdd769e2dc1bc4c265f37fec1f 100644 (file)
@@ -376,13 +376,13 @@ static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
 {
        struct set_msi_sid_data *data = opaque;
 
+       if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias))
+               data->busmatch_count++;
+
        data->pdev = pdev;
        data->alias = alias;
        data->count++;
 
-       if (PCI_BUS_NUM(alias) == pdev->bus->number)
-               data->busmatch_count++;
-
        return 0;
 }
 
index 0fc8dfab2abf57d51418ba17f5d470eb6292b49a..4cb394937700ce4e0f5ff7133042f6e9f5f6a38c 100644 (file)
 #define ARM_V7S_TEX_MASK               0x7
 #define ARM_V7S_ATTR_TEX(val)          (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
 
-#define ARM_V7S_ATTR_MTK_4GB           BIT(9) /* MTK extend it for 4GB mode */
+/* MediaTek extend the two bits for PA 32bit/33bit */
+#define ARM_V7S_ATTR_MTK_PA_BIT32      BIT(9)
+#define ARM_V7S_ATTR_MTK_PA_BIT33      BIT(4)
 
 /* *well, except for TEX on level 2 large pages, of course :( */
 #define ARM_V7S_CONT_PAGE_TEX_SHIFT    6
@@ -169,18 +171,62 @@ struct arm_v7s_io_pgtable {
        spinlock_t              split_lock;
 };
 
+static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl);
+
 static dma_addr_t __arm_v7s_dma_addr(void *pages)
 {
        return (dma_addr_t)virt_to_phys(pages);
 }
 
-static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl)
+static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg)
+{
+       return IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
+               (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT);
+}
+
+static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
+                                   struct io_pgtable_cfg *cfg)
+{
+       arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
+
+       if (!arm_v7s_is_mtk_enabled(cfg))
+               return pte;
+
+       if (paddr & BIT_ULL(32))
+               pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
+       if (paddr & BIT_ULL(33))
+               pte |= ARM_V7S_ATTR_MTK_PA_BIT33;
+       return pte;
+}
+
+static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
+                                 struct io_pgtable_cfg *cfg)
 {
+       arm_v7s_iopte mask;
+       phys_addr_t paddr;
+
        if (ARM_V7S_PTE_IS_TABLE(pte, lvl))
-               pte &= ARM_V7S_TABLE_MASK;
+               mask = ARM_V7S_TABLE_MASK;
+       else if (arm_v7s_pte_is_cont(pte, lvl))
+               mask = ARM_V7S_LVL_MASK(lvl) * ARM_V7S_CONT_PAGES;
        else
-               pte &= ARM_V7S_LVL_MASK(lvl);
-       return phys_to_virt(pte);
+               mask = ARM_V7S_LVL_MASK(lvl);
+
+       paddr = pte & mask;
+       if (!arm_v7s_is_mtk_enabled(cfg))
+               return paddr;
+
+       if (pte & ARM_V7S_ATTR_MTK_PA_BIT32)
+               paddr |= BIT_ULL(32);
+       if (pte & ARM_V7S_ATTR_MTK_PA_BIT33)
+               paddr |= BIT_ULL(33);
+       return paddr;
+}
+
+static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl,
+                                 struct arm_v7s_io_pgtable *data)
+{
+       return phys_to_virt(iopte_to_paddr(pte, lvl, &data->iop.cfg));
 }
 
 static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
@@ -295,9 +341,6 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
        if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
                pte |= ARM_V7S_ATTR_NS_SECTION;
 
-       if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB)
-               pte |= ARM_V7S_ATTR_MTK_4GB;
-
        return pte;
 }
 
@@ -362,7 +405,8 @@ static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
        return false;
 }
 
-static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long,
+static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *,
+                             struct iommu_iotlb_gather *, unsigned long,
                              size_t, int, arm_v7s_iopte *);
 
 static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
@@ -383,7 +427,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
                        size_t sz = ARM_V7S_BLOCK_SIZE(lvl);
 
                        tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
-                       if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz,
+                       if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
                                                    sz, lvl, tblp) != sz))
                                return -EINVAL;
                } else if (ptep[i]) {
@@ -396,7 +440,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
        if (num_entries > 1)
                pte = arm_v7s_pte_to_cont(pte, lvl);
 
-       pte |= paddr & ARM_V7S_LVL_MASK(lvl);
+       pte |= paddr_to_iopte(paddr, lvl, cfg);
 
        __arm_v7s_set_pte(ptep, pte, num_entries, cfg);
        return 0;
@@ -462,7 +506,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
        }
 
        if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
-               cptep = iopte_deref(pte, lvl);
+               cptep = iopte_deref(pte, lvl, data);
        } else if (pte) {
                /* We require an unmap first */
                WARN_ON(!selftest_running);
@@ -484,7 +528,8 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
        if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
                return 0;
 
-       if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr)))
+       if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
+                   paddr >= (1ULL << data->iop.cfg.oas)))
                return -ERANGE;
 
        ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
@@ -493,9 +538,8 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
         * a chance for anything to kick off a table walk for the new iova.
         */
        if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
-               io_pgtable_tlb_add_flush(iop, iova, size,
-                                        ARM_V7S_BLOCK_SIZE(2), false);
-               io_pgtable_tlb_sync(iop);
+               io_pgtable_tlb_flush_walk(iop, iova, size,
+                                         ARM_V7S_BLOCK_SIZE(2));
        } else {
                wmb();
        }
@@ -512,7 +556,8 @@ static void arm_v7s_free_pgtable(struct io_pgtable *iop)
                arm_v7s_iopte pte = data->pgd[i];
 
                if (ARM_V7S_PTE_IS_TABLE(pte, 1))
-                       __arm_v7s_free_table(iopte_deref(pte, 1), 2, data);
+                       __arm_v7s_free_table(iopte_deref(pte, 1, data),
+                                            2, data);
        }
        __arm_v7s_free_table(data->pgd, 1, data);
        kmem_cache_destroy(data->l2_tables);
@@ -541,12 +586,12 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
        __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);
 
        size *= ARM_V7S_CONT_PAGES;
-       io_pgtable_tlb_add_flush(iop, iova, size, size, true);
-       io_pgtable_tlb_sync(iop);
+       io_pgtable_tlb_flush_leaf(iop, iova, size, size);
        return pte;
 }
 
 static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
+                                     struct iommu_iotlb_gather *gather,
                                      unsigned long iova, size_t size,
                                      arm_v7s_iopte blk_pte,
                                      arm_v7s_iopte *ptep)
@@ -582,16 +627,16 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
                if (!ARM_V7S_PTE_IS_TABLE(pte, 1))
                        return 0;
 
-               tablep = iopte_deref(pte, 1);
-               return __arm_v7s_unmap(data, iova, size, 2, tablep);
+               tablep = iopte_deref(pte, 1, data);
+               return __arm_v7s_unmap(data, gather, iova, size, 2, tablep);
        }
 
-       io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
-       io_pgtable_tlb_sync(&data->iop);
+       io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
        return size;
 }
 
 static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
+                             struct iommu_iotlb_gather *gather,
                              unsigned long iova, size_t size, int lvl,
                              arm_v7s_iopte *ptep)
 {
@@ -638,10 +683,9 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
                for (i = 0; i < num_entries; i++) {
                        if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
                                /* Also flush any partial walks */
-                               io_pgtable_tlb_add_flush(iop, iova, blk_size,
-                                       ARM_V7S_BLOCK_SIZE(lvl + 1), false);
-                               io_pgtable_tlb_sync(iop);
-                               ptep = iopte_deref(pte[i], lvl);
+                               io_pgtable_tlb_flush_walk(iop, iova, blk_size,
+                                               ARM_V7S_BLOCK_SIZE(lvl + 1));
+                               ptep = iopte_deref(pte[i], lvl, data);
                                __arm_v7s_free_table(ptep, lvl + 1, data);
                        } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
                                /*
@@ -651,8 +695,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
                                 */
                                smp_wmb();
                        } else {
-                               io_pgtable_tlb_add_flush(iop, iova, blk_size,
-                                                        blk_size, true);
+                               io_pgtable_tlb_add_page(iop, gather, iova, blk_size);
                        }
                        iova += blk_size;
                }
@@ -662,23 +705,24 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
                 * Insert a table at the next level to map the old region,
                 * minus the part we want to unmap
                 */
-               return arm_v7s_split_blk_unmap(data, iova, size, pte[0], ptep);
+               return arm_v7s_split_blk_unmap(data, gather, iova, size, pte[0],
+                                              ptep);
        }
 
        /* Keep on walkin' */
-       ptep = iopte_deref(pte[0], lvl);
-       return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
+       ptep = iopte_deref(pte[0], lvl, data);
+       return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep);
 }
 
 static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
-                           size_t size)
+                           size_t size, struct iommu_iotlb_gather *gather)
 {
        struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
 
        if (WARN_ON(upper_32_bits(iova)))
                return 0;
 
-       return __arm_v7s_unmap(data, iova, size, 1, data->pgd);
+       return __arm_v7s_unmap(data, gather, iova, size, 1, data->pgd);
 }
 
 static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
@@ -692,7 +736,7 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
        do {
                ptep += ARM_V7S_LVL_IDX(iova, ++lvl);
                pte = READ_ONCE(*ptep);
-               ptep = iopte_deref(pte, lvl);
+               ptep = iopte_deref(pte, lvl, data);
        } while (ARM_V7S_PTE_IS_TABLE(pte, lvl));
 
        if (!ARM_V7S_PTE_IS_VALID(pte))
@@ -701,7 +745,7 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
        mask = ARM_V7S_LVL_MASK(lvl);
        if (arm_v7s_pte_is_cont(pte, lvl))
                mask *= ARM_V7S_CONT_PAGES;
-       return (pte & mask) | (iova & ~mask);
+       return iopte_to_paddr(pte, lvl, &data->iop.cfg) | (iova & ~mask);
 }
 
 static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
@@ -709,18 +753,21 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
 {
        struct arm_v7s_io_pgtable *data;
 
-       if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS)
+       if (cfg->ias > ARM_V7S_ADDR_BITS)
+               return NULL;
+
+       if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
                return NULL;
 
        if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
                            IO_PGTABLE_QUIRK_NO_PERMS |
                            IO_PGTABLE_QUIRK_TLBI_ON_MAP |
-                           IO_PGTABLE_QUIRK_ARM_MTK_4GB |
+                           IO_PGTABLE_QUIRK_ARM_MTK_EXT |
                            IO_PGTABLE_QUIRK_NON_STRICT))
                return NULL;
 
        /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
-       if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB &&
+       if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT &&
            !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
                        return NULL;
 
@@ -806,22 +853,24 @@ static void dummy_tlb_flush_all(void *cookie)
        WARN_ON(cookie != cfg_cookie);
 }
 
-static void dummy_tlb_add_flush(unsigned long iova, size_t size,
-                               size_t granule, bool leaf, void *cookie)
+static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
+                           void *cookie)
 {
        WARN_ON(cookie != cfg_cookie);
        WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
 }
 
-static void dummy_tlb_sync(void *cookie)
+static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+                              unsigned long iova, size_t granule, void *cookie)
 {
-       WARN_ON(cookie != cfg_cookie);
+       dummy_tlb_flush(iova, granule, granule, cookie);
 }
 
-static const struct iommu_gather_ops dummy_tlb_ops = {
+static const struct iommu_flush_ops dummy_tlb_ops = {
        .tlb_flush_all  = dummy_tlb_flush_all,
-       .tlb_add_flush  = dummy_tlb_add_flush,
-       .tlb_sync       = dummy_tlb_sync,
+       .tlb_flush_walk = dummy_tlb_flush,
+       .tlb_flush_leaf = dummy_tlb_flush,
+       .tlb_add_page   = dummy_tlb_add_page,
 };
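
The old tlb_add_flush()/tlb_sync() pair is being replaced by callbacks with explicit semantics: tlb_flush_walk and tlb_flush_leaf flush a range synchronously, while tlb_add_page only queues a single leaf entry against the caller's gather structure. A hedged sketch of a driver-side ops table under the new interface (the my_* callbacks are placeholders; only the signatures mirror the dummy ops shown in this patch):

/* Sketch of a driver's iommu_flush_ops under the new API; bodies are
 * placeholders, comments note the old-API equivalent. */
#include <linux/io-pgtable.h>
#include <linux/iommu.h>

static void my_tlb_flush_all(void *cookie)
{
        /* unchanged: invalidate the whole TLB */
}

static void my_tlb_flush_walk(unsigned long iova, size_t size,
                              size_t granule, void *cookie)
{
        /* old tlb_add_flush(..., leaf=false) followed by tlb_sync() */
}

static void my_tlb_flush_leaf(unsigned long iova, size_t size,
                              size_t granule, void *cookie)
{
        /* old tlb_add_flush(..., leaf=true) followed by tlb_sync() */
}

static void my_tlb_add_page(struct iommu_iotlb_gather *gather,
                            unsigned long iova, size_t granule, void *cookie)
{
        /* queue one leaf entry; the sync happens later via the gather */
}

static const struct iommu_flush_ops my_flush_ops = {
        .tlb_flush_all  = my_tlb_flush_all,
        .tlb_flush_walk = my_tlb_flush_walk,
        .tlb_flush_leaf = my_tlb_flush_leaf,
        .tlb_add_page   = my_tlb_add_page,
};
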
 
 #define __FAIL(ops)    ({                              \
@@ -896,7 +945,7 @@ static int __init arm_v7s_do_selftests(void)
        size = 1UL << __ffs(cfg.pgsize_bitmap);
        while (i < loopnr) {
                iova_start = i * SZ_16M;
-               if (ops->unmap(ops, iova_start + size, size) != size)
+               if (ops->unmap(ops, iova_start + size, size, NULL) != size)
                        return __FAIL(ops);
 
                /* Remap of partial unmap */
@@ -914,7 +963,7 @@ static int __init arm_v7s_do_selftests(void)
        for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
                size = 1UL << i;
 
-               if (ops->unmap(ops, iova, size) != size)
+               if (ops->unmap(ops, iova, size, NULL) != size)
                        return __FAIL(ops);
 
                if (ops->iova_to_phys(ops, iova + 42))
index 161a7d56264d0c26b7297930c0d843df674dd7bc..4c91359057c53d906ee5496396e28595c2153905 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/atomic.h>
 #include <linux/bitops.h>
 #include <linux/io-pgtable.h>
-#include <linux/iommu.h>
 #include <linux/kernel.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
@@ -290,6 +289,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
 }
 
 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+                              struct iommu_iotlb_gather *gather,
                               unsigned long iova, size_t size, int lvl,
                               arm_lpae_iopte *ptep);
 
@@ -335,8 +335,10 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
                size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 
                tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
-               if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
+               if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
+                       WARN_ON(1);
                        return -EINVAL;
+               }
        }
 
        __arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
@@ -537,6 +539,7 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
 }
 
 static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
+                                      struct iommu_iotlb_gather *gather,
                                       unsigned long iova, size_t size,
                                       arm_lpae_iopte blk_pte, int lvl,
                                       arm_lpae_iopte *ptep)
@@ -582,15 +585,15 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 
                tablep = iopte_deref(pte, data);
        } else if (unmap_idx >= 0) {
-               io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
-               io_pgtable_tlb_sync(&data->iop);
+               io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
                return size;
        }
 
-       return __arm_lpae_unmap(data, iova, size, lvl, tablep);
+       return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
 }
 
 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+                              struct iommu_iotlb_gather *gather,
                               unsigned long iova, size_t size, int lvl,
                               arm_lpae_iopte *ptep)
 {
@@ -612,9 +615,8 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 
                if (!iopte_leaf(pte, lvl, iop->fmt)) {
                        /* Also flush any partial walks */
-                       io_pgtable_tlb_add_flush(iop, iova, size,
-                                               ARM_LPAE_GRANULE(data), false);
-                       io_pgtable_tlb_sync(iop);
+                       io_pgtable_tlb_flush_walk(iop, iova, size,
+                                                 ARM_LPAE_GRANULE(data));
                        ptep = iopte_deref(pte, data);
                        __arm_lpae_free_pgtable(data, lvl + 1, ptep);
                } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
@@ -625,7 +627,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
                         */
                        smp_wmb();
                } else {
-                       io_pgtable_tlb_add_flush(iop, iova, size, size, true);
+                       io_pgtable_tlb_add_page(iop, gather, iova, size);
                }
 
                return size;
@@ -634,17 +636,17 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
                 * Insert a table at the next level to map the old region,
                 * minus the part we want to unmap
                 */
-               return arm_lpae_split_blk_unmap(data, iova, size, pte,
+               return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
                                                lvl + 1, ptep);
        }
 
        /* Keep on walkin' */
        ptep = iopte_deref(pte, data);
-       return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
+       return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
 }
 
 static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
-                            size_t size)
+                            size_t size, struct iommu_iotlb_gather *gather)
 {
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        arm_lpae_iopte *ptep = data->pgd;
@@ -653,7 +655,7 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
        if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
                return 0;
 
-       return __arm_lpae_unmap(data, iova, size, lvl, ptep);
+       return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep);
 }
 
 static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
@@ -1070,22 +1072,24 @@ static void dummy_tlb_flush_all(void *cookie)
        WARN_ON(cookie != cfg_cookie);
 }
 
-static void dummy_tlb_add_flush(unsigned long iova, size_t size,
-                               size_t granule, bool leaf, void *cookie)
+static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
+                           void *cookie)
 {
        WARN_ON(cookie != cfg_cookie);
        WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
 }
 
-static void dummy_tlb_sync(void *cookie)
+static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+                              unsigned long iova, size_t granule, void *cookie)
 {
-       WARN_ON(cookie != cfg_cookie);
+       dummy_tlb_flush(iova, granule, granule, cookie);
 }
 
-static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
+static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
        .tlb_flush_all  = dummy_tlb_flush_all,
-       .tlb_add_flush  = dummy_tlb_add_flush,
-       .tlb_sync       = dummy_tlb_sync,
+       .tlb_flush_walk = dummy_tlb_flush,
+       .tlb_flush_leaf = dummy_tlb_flush,
+       .tlb_add_page   = dummy_tlb_add_page,
 };
 
 static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
@@ -1168,7 +1172,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 
                /* Partial unmap */
                size = 1UL << __ffs(cfg->pgsize_bitmap);
-               if (ops->unmap(ops, SZ_1G + size, size) != size)
+               if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
                        return __FAIL(ops, i);
 
                /* Remap of partial unmap */
@@ -1183,7 +1187,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
                for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
                        size = 1UL << j;
 
-                       if (ops->unmap(ops, iova, size) != size)
+                       if (ops->unmap(ops, iova, size, NULL) != size)
                                return __FAIL(ops, i);
 
                        if (ops->iova_to_phys(ops, iova + 42))
index 0c674d80c37fd5768fa62296f6be72f97c5dd245..d658c7c6a2ab0a6d3d6fcffdb89edf03f9b9bda2 100644 (file)
 
 static struct kset *iommu_group_kset;
 static DEFINE_IDA(iommu_group_ida);
-#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
-static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
-#else
-static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
-#endif
+
+static unsigned int iommu_def_domain_type __read_mostly;
 static bool iommu_dma_strict __read_mostly = true;
+static u32 iommu_cmd_line __read_mostly;
 
 struct iommu_group {
        struct kobject kobj;
@@ -68,6 +66,18 @@ static const char * const iommu_group_resv_type_string[] = {
        [IOMMU_RESV_SW_MSI]                     = "msi",
 };
 
+#define IOMMU_CMD_LINE_DMA_API         BIT(0)
+
+static void iommu_set_cmd_line_dma_api(void)
+{
+       iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
+}
+
+static bool iommu_cmd_line_dma_api(void)
+{
+       return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
+}
+
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)          \
 struct iommu_group_attribute iommu_group_attr_##_name =                \
        __ATTR(_name, _mode, _show, _store)
@@ -80,12 +90,55 @@ struct iommu_group_attribute iommu_group_attr_##_name =             \
 static LIST_HEAD(iommu_device_list);
 static DEFINE_SPINLOCK(iommu_device_lock);
 
+/*
+ * Use a function instead of an array here because the domain-type is a
+ * bit-field, so an array would waste memory.
+ */
+static const char *iommu_domain_type_str(unsigned int t)
+{
+       switch (t) {
+       case IOMMU_DOMAIN_BLOCKED:
+               return "Blocked";
+       case IOMMU_DOMAIN_IDENTITY:
+               return "Passthrough";
+       case IOMMU_DOMAIN_UNMANAGED:
+               return "Unmanaged";
+       case IOMMU_DOMAIN_DMA:
+               return "Translated";
+       default:
+               return "Unknown";
+       }
+}
+
+static int __init iommu_subsys_init(void)
+{
+       bool cmd_line = iommu_cmd_line_dma_api();
+
+       if (!cmd_line) {
+               if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
+                       iommu_set_default_passthrough(false);
+               else
+                       iommu_set_default_translated(false);
+
+               if (iommu_default_passthrough() && mem_encrypt_active()) {
+                       pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
+                       iommu_set_default_translated(false);
+               }
+       }
+
+       pr_info("Default domain type: %s %s\n",
+               iommu_domain_type_str(iommu_def_domain_type),
+               cmd_line ? "(set via kernel command line)" : "");
+
+       return 0;
+}
+subsys_initcall(iommu_subsys_init);
+
 int iommu_device_register(struct iommu_device *iommu)
 {
        spin_lock(&iommu_device_lock);
        list_add_tail(&iommu->list, &iommu_device_list);
        spin_unlock(&iommu_device_lock);
-
        return 0;
 }
 
@@ -165,7 +218,11 @@ static int __init iommu_set_def_domain_type(char *str)
        if (ret)
                return ret;
 
-       iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
+       if (pt)
+               iommu_set_default_passthrough(true);
+       else
+               iommu_set_default_translated(true);
+
        return 0;
 }
 early_param("iommu.passthrough", iommu_set_def_domain_type);
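
Because the value is parsed with kstrtobool(), the option is a plain boolean on the kernel command line; for example (boot-loader syntax aside, the annotations are mine):

        iommu.passthrough=1     selects identity (passthrough) default domains
        iommu.passthrough=0     selects DMA-API translated default domains
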
@@ -229,60 +286,58 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
  * @new: new region to insert
  * @regions: list of regions
  *
- * The new element is sorted by address with respect to the other
- * regions of the same type. In case it overlaps with another
- * region of the same type, regions are merged. In case it
- * overlaps with another region of different type, regions are
- * not merged.
+ * Elements are sorted by start address and overlapping segments
+ * of the same type are merged.
  */
-static int iommu_insert_resv_region(struct iommu_resv_region *new,
-                                   struct list_head *regions)
+int iommu_insert_resv_region(struct iommu_resv_region *new,
+                            struct list_head *regions)
 {
-       struct iommu_resv_region *region;
-       phys_addr_t start = new->start;
-       phys_addr_t end = new->start + new->length - 1;
-       struct list_head *pos = regions->next;
-
-       while (pos != regions) {
-               struct iommu_resv_region *entry =
-                       list_entry(pos, struct iommu_resv_region, list);
-               phys_addr_t a = entry->start;
-               phys_addr_t b = entry->start + entry->length - 1;
-               int type = entry->type;
-
-               if (end < a) {
-                       goto insert;
-               } else if (start > b) {
-                       pos = pos->next;
-               } else if ((start >= a) && (end <= b)) {
-                       if (new->type == type)
-                               return 0;
-                       else
-                               pos = pos->next;
+       struct iommu_resv_region *iter, *tmp, *nr, *top;
+       LIST_HEAD(stack);
+
+       nr = iommu_alloc_resv_region(new->start, new->length,
+                                    new->prot, new->type);
+       if (!nr)
+               return -ENOMEM;
+
+       /* First add the new element based on start address sorting */
+       list_for_each_entry(iter, regions, list) {
+               if (nr->start < iter->start ||
+                   (nr->start == iter->start && nr->type <= iter->type))
+                       break;
+       }
+       list_add_tail(&nr->list, &iter->list);
+
+       /* Merge overlapping segments of type nr->type in @regions, if any */
+       list_for_each_entry_safe(iter, tmp, regions, list) {
+               phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
+
+               /* no merge needed on elements of different types than @nr */
+               if (iter->type != nr->type) {
+                       list_move_tail(&iter->list, &stack);
+                       continue;
+               }
+
+               /* look for the last stack element of same type as @iter */
+               list_for_each_entry_reverse(top, &stack, list)
+                       if (top->type == iter->type)
+                               goto check_overlap;
+
+               list_move_tail(&iter->list, &stack);
+               continue;
+
+check_overlap:
+               top_end = top->start + top->length - 1;
+
+               if (iter->start > top_end + 1) {
+                       list_move_tail(&iter->list, &stack);
                } else {
-                       if (new->type == type) {
-                               phys_addr_t new_start = min(a, start);
-                               phys_addr_t new_end = max(b, end);
-                               int ret;
-
-                               list_del(&entry->list);
-                               entry->start = new_start;
-                               entry->length = new_end - new_start + 1;
-                               ret = iommu_insert_resv_region(entry, regions);
-                               kfree(entry);
-                               return ret;
-                       } else {
-                               pos = pos->next;
-                       }
+                       top->length = max(top_end, iter_end) - top->start + 1;
+                       list_del(&iter->list);
+                       kfree(iter);
                }
        }
-insert:
-       region = iommu_alloc_resv_region(new->start, new->length,
-                                        new->prot, new->type);
-       if (!region)
-               return -ENOMEM;
-
-       list_add_tail(&region->list, pos);
+       list_splice(&stack, regions);
        return 0;
 }
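
The rewritten helper first inserts the new region in start-address order and then makes one pass that merges overlapping or adjacent regions of the same type, leaving regions of other types untouched. A self-contained sketch of the same sort-then-merge idea on a plain array (struct and function names are illustrative):

/* Sketch of the insert-sorted + merge-same-type behaviour of the
 * reworked iommu_insert_resv_region(), on an array instead of a list. */
#include <stdio.h>
#include <stdlib.h>

struct region { unsigned long start, length; int type; };

static int cmp(const void *a, const void *b)
{
        const struct region *x = a, *y = b;

        if (x->start != y->start)
                return x->start < y->start ? -1 : 1;
        return x->type - y->type;
}

/* Merge overlapping/adjacent regions of the same type; returns the count. */
static size_t merge_regions(struct region *r, size_t n)
{
        size_t kept = 0, i;

        qsort(r, n, sizeof(*r), cmp);
        for (i = 0; i < n; i++) {
                size_t j = kept;
                int merged = 0;

                while (j-- > 0) {
                        unsigned long end = r[j].start + r[j].length - 1;
                        unsigned long iend = r[i].start + r[i].length - 1;

                        if (r[j].type != r[i].type)
                                continue;
                        if (r[i].start <= end + 1) {    /* overlap/adjacent */
                                if (iend > end)
                                        r[j].length = iend - r[j].start + 1;
                                merged = 1;
                        }
                        break;  /* only the latest kept region of this type */
                }
                if (!merged)
                        r[kept++] = r[i];
        }
        return kept;
}

int main(void)
{
        struct region r[] = {
                { 0x1000, 0x1000, 0 }, { 0x1800, 0x1000, 0 },
                { 0x1000, 0x1000, 1 }, { 0x5000, 0x1000, 0 },
        };
        size_t i, n = merge_regions(r, 4);

        for (i = 0; i < n; i++)
                printf("[%#lx, %#lx) type %d\n", r[i].start,
                       r[i].start + r[i].length, r[i].type);
        return 0;
}
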
 
@@ -1862,7 +1917,7 @@ EXPORT_SYMBOL_GPL(iommu_map);
 
 static size_t __iommu_unmap(struct iommu_domain *domain,
                            unsigned long iova, size_t size,
-                           bool sync)
+                           struct iommu_iotlb_gather *iotlb_gather)
 {
        const struct iommu_ops *ops = domain->ops;
        size_t unmapped_page, unmapped = 0;
@@ -1899,13 +1954,10 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
        while (unmapped < size) {
                size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
 
-               unmapped_page = ops->unmap(domain, iova, pgsize);
+               unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
                if (!unmapped_page)
                        break;
 
-               if (sync && ops->iotlb_range_add)
-                       ops->iotlb_range_add(domain, iova, pgsize);
-
                pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
                         iova, unmapped_page);
 
@@ -1913,9 +1965,6 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
                unmapped += unmapped_page;
        }
 
-       if (sync && ops->iotlb_sync)
-               ops->iotlb_sync(domain);
-
        trace_unmap(orig_iova, size, unmapped);
        return unmapped;
 }
@@ -1923,14 +1972,22 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
 size_t iommu_unmap(struct iommu_domain *domain,
                   unsigned long iova, size_t size)
 {
-       return __iommu_unmap(domain, iova, size, true);
+       struct iommu_iotlb_gather iotlb_gather;
+       size_t ret;
+
+       iommu_iotlb_gather_init(&iotlb_gather);
+       ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
+       iommu_tlb_sync(domain, &iotlb_gather);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
 
 size_t iommu_unmap_fast(struct iommu_domain *domain,
-                       unsigned long iova, size_t size)
+                       unsigned long iova, size_t size,
+                       struct iommu_iotlb_gather *iotlb_gather)
 {
-       return __iommu_unmap(domain, iova, size, false);
+       return __iommu_unmap(domain, iova, size, iotlb_gather);
 }
 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
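
Callers that tear down many mappings can now keep one iommu_iotlb_gather across the whole loop and pay for a single IOTLB sync at the end, which is exactly what iommu_unmap() does internally above. A hedged caller-side sketch (the range-walking helper and its fixed page-size argument are illustrative):

/* Sketch of batching unmaps against one gather and issuing a single sync,
 * mirroring the new internal behaviour of iommu_unmap(). */
#include <linux/iommu.h>

static size_t unmap_range(struct iommu_domain *domain, unsigned long iova,
                          size_t size, size_t pgsize)
{
        struct iommu_iotlb_gather gather;
        size_t unmapped = 0;

        iommu_iotlb_gather_init(&gather);
        while (unmapped < size) {
                if (iommu_unmap_fast(domain, iova + unmapped, pgsize,
                                     &gather) != pgsize)
                        break;
                unmapped += pgsize;
        }
        iommu_tlb_sync(domain, &gather);        /* one flush for the batch */
        return unmapped;
}
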
 
@@ -2143,7 +2200,6 @@ request_default_domain_for_dev(struct device *dev, unsigned long type)
 
        mutex_lock(&group->mutex);
 
-       /* Check if the default domain is already direct mapped */
        ret = 0;
        if (group->default_domain && group->default_domain->type == type)
                goto out;
@@ -2153,7 +2209,6 @@ request_default_domain_for_dev(struct device *dev, unsigned long type)
        if (iommu_group_device_count(group) != 1)
                goto out;
 
-       /* Allocate a direct mapped domain */
        ret = -ENOMEM;
        domain = __iommu_domain_alloc(dev->bus, type);
        if (!domain)
@@ -2168,7 +2223,7 @@ request_default_domain_for_dev(struct device *dev, unsigned long type)
 
        iommu_group_create_direct_mappings(group, dev);
 
-       /* Make the direct mapped domain the default for this group */
+       /* Make the domain the default for this group */
        if (group->default_domain)
                iommu_domain_free(group->default_domain);
        group->default_domain = domain;
@@ -2196,6 +2251,28 @@ int iommu_request_dma_domain_for_dev(struct device *dev)
        return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
 }
 
+void iommu_set_default_passthrough(bool cmd_line)
+{
+       if (cmd_line)
+               iommu_set_cmd_line_dma_api();
+
+       iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
+}
+
+void iommu_set_default_translated(bool cmd_line)
+{
+       if (cmd_line)
+               iommu_set_cmd_line_dma_api();
+
+       iommu_def_domain_type = IOMMU_DOMAIN_DMA;
+}
+
+bool iommu_default_passthrough(void)
+{
+       return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
+}
+EXPORT_SYMBOL_GPL(iommu_default_passthrough);
+
 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
 {
        const struct iommu_ops *ops = NULL;
index 3e1a8a6755723a927a7942a7429ab7e6c19a0027..41c605b0058f9615c2dbdd83f1de2404a9b1d255 100644 (file)
@@ -577,7 +577,9 @@ void queue_iova(struct iova_domain *iovad,
 
        spin_unlock_irqrestore(&fq->lock, flags);
 
-       if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
+       /* Avoid false sharing as much as possible. */
+       if (!atomic_read(&iovad->fq_timer_on) &&
+           !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
                mod_timer(&iovad->fq_timer,
                          jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
 }
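
The timer-arming path now reads fq_timer_on first and only attempts the atomic 0 -> 1 transition when the flag looks clear, so the common already-armed case no longer pulls the cacheline exclusive on every queued IOVA. A self-contained C11 sketch of that check-then-cmpxchg pattern:

/* Sketch of the check-then-cmpxchg pattern used to arm the flush-queue
 * timer only once while avoiding needless exclusive cacheline traffic. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int timer_on;

static bool try_arm_timer(void)
{
        int expected = 0;

        /* Cheap shared read first: if the timer is already armed,
         * skip the read-modify-write entirely. */
        if (atomic_load(&timer_on))
                return false;

        /* Slow path: attempt the 0 -> 1 transition atomically. */
        return atomic_compare_exchange_strong(&timer_on, &expected, 1);
}

int main(void)
{
        printf("%d\n", try_arm_timer());        /* 1: we armed it   */
        printf("%d\n", try_arm_timer());        /* 0: already armed */
        return 0;
}
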
index ad0098c0c87c7544f18b70c0bd9dc43913ca0478..9da8309f71708f213f91dbe99374681e0da3f652 100644 (file)
@@ -49,6 +49,7 @@ struct ipmmu_features {
        bool setup_imbuscr;
        bool twobit_imttbcr_sl0;
        bool reserved_context;
+       bool cache_snoop;
 };
 
 struct ipmmu_vmsa_device {
@@ -115,45 +116,44 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
 #define IMTTBCR                                0x0008
 #define IMTTBCR_EAE                    (1 << 31)
 #define IMTTBCR_PMB                    (1 << 30)
-#define IMTTBCR_SH1_NON_SHAREABLE      (0 << 28)
-#define IMTTBCR_SH1_OUTER_SHAREABLE    (2 << 28)
-#define IMTTBCR_SH1_INNER_SHAREABLE    (3 << 28)
-#define IMTTBCR_SH1_MASK               (3 << 28)
-#define IMTTBCR_ORGN1_NC               (0 << 26)
-#define IMTTBCR_ORGN1_WB_WA            (1 << 26)
-#define IMTTBCR_ORGN1_WT               (2 << 26)
-#define IMTTBCR_ORGN1_WB               (3 << 26)
-#define IMTTBCR_ORGN1_MASK             (3 << 26)
-#define IMTTBCR_IRGN1_NC               (0 << 24)
-#define IMTTBCR_IRGN1_WB_WA            (1 << 24)
-#define IMTTBCR_IRGN1_WT               (2 << 24)
-#define IMTTBCR_IRGN1_WB               (3 << 24)
-#define IMTTBCR_IRGN1_MASK             (3 << 24)
+#define IMTTBCR_SH1_NON_SHAREABLE      (0 << 28)       /* R-Car Gen2 only */
+#define IMTTBCR_SH1_OUTER_SHAREABLE    (2 << 28)       /* R-Car Gen2 only */
+#define IMTTBCR_SH1_INNER_SHAREABLE    (3 << 28)       /* R-Car Gen2 only */
+#define IMTTBCR_SH1_MASK               (3 << 28)       /* R-Car Gen2 only */
+#define IMTTBCR_ORGN1_NC               (0 << 26)       /* R-Car Gen2 only */
+#define IMTTBCR_ORGN1_WB_WA            (1 << 26)       /* R-Car Gen2 only */
+#define IMTTBCR_ORGN1_WT               (2 << 26)       /* R-Car Gen2 only */
+#define IMTTBCR_ORGN1_WB               (3 << 26)       /* R-Car Gen2 only */
+#define IMTTBCR_ORGN1_MASK             (3 << 26)       /* R-Car Gen2 only */
+#define IMTTBCR_IRGN1_NC               (0 << 24)       /* R-Car Gen2 only */
+#define IMTTBCR_IRGN1_WB_WA            (1 << 24)       /* R-Car Gen2 only */
+#define IMTTBCR_IRGN1_WT               (2 << 24)       /* R-Car Gen2 only */
+#define IMTTBCR_IRGN1_WB               (3 << 24)       /* R-Car Gen2 only */
+#define IMTTBCR_IRGN1_MASK             (3 << 24)       /* R-Car Gen2 only */
 #define IMTTBCR_TSZ1_MASK              (7 << 16)
 #define IMTTBCR_TSZ1_SHIFT             16
-#define IMTTBCR_SH0_NON_SHAREABLE      (0 << 12)
-#define IMTTBCR_SH0_OUTER_SHAREABLE    (2 << 12)
-#define IMTTBCR_SH0_INNER_SHAREABLE    (3 << 12)
-#define IMTTBCR_SH0_MASK               (3 << 12)
-#define IMTTBCR_ORGN0_NC               (0 << 10)
-#define IMTTBCR_ORGN0_WB_WA            (1 << 10)
-#define IMTTBCR_ORGN0_WT               (2 << 10)
-#define IMTTBCR_ORGN0_WB               (3 << 10)
-#define IMTTBCR_ORGN0_MASK             (3 << 10)
-#define IMTTBCR_IRGN0_NC               (0 << 8)
-#define IMTTBCR_IRGN0_WB_WA            (1 << 8)
-#define IMTTBCR_IRGN0_WT               (2 << 8)
-#define IMTTBCR_IRGN0_WB               (3 << 8)
-#define IMTTBCR_IRGN0_MASK             (3 << 8)
+#define IMTTBCR_SH0_NON_SHAREABLE      (0 << 12)       /* R-Car Gen2 only */
+#define IMTTBCR_SH0_OUTER_SHAREABLE    (2 << 12)       /* R-Car Gen2 only */
+#define IMTTBCR_SH0_INNER_SHAREABLE    (3 << 12)       /* R-Car Gen2 only */
+#define IMTTBCR_SH0_MASK               (3 << 12)       /* R-Car Gen2 only */
+#define IMTTBCR_ORGN0_NC               (0 << 10)       /* R-Car Gen2 only */
+#define IMTTBCR_ORGN0_WB_WA            (1 << 10)       /* R-Car Gen2 only */
+#define IMTTBCR_ORGN0_WT               (2 << 10)       /* R-Car Gen2 only */
+#define IMTTBCR_ORGN0_WB               (3 << 10)       /* R-Car Gen2 only */
+#define IMTTBCR_ORGN0_MASK             (3 << 10)       /* R-Car Gen2 only */
+#define IMTTBCR_IRGN0_NC               (0 << 8)        /* R-Car Gen2 only */
+#define IMTTBCR_IRGN0_WB_WA            (1 << 8)        /* R-Car Gen2 only */
+#define IMTTBCR_IRGN0_WT               (2 << 8)        /* R-Car Gen2 only */
+#define IMTTBCR_IRGN0_WB               (3 << 8)        /* R-Car Gen2 only */
+#define IMTTBCR_IRGN0_MASK             (3 << 8)        /* R-Car Gen2 only */
+#define IMTTBCR_SL0_TWOBIT_LVL_3       (0 << 6)        /* R-Car Gen3 only */
+#define IMTTBCR_SL0_TWOBIT_LVL_2       (1 << 6)        /* R-Car Gen3 only */
+#define IMTTBCR_SL0_TWOBIT_LVL_1       (2 << 6)        /* R-Car Gen3 only */
 #define IMTTBCR_SL0_LVL_2              (0 << 4)
 #define IMTTBCR_SL0_LVL_1              (1 << 4)
 #define IMTTBCR_TSZ0_MASK              (7 << 0)
 #define IMTTBCR_TSZ0_SHIFT             0
 
-#define IMTTBCR_SL0_TWOBIT_LVL_3       (0 << 6)
-#define IMTTBCR_SL0_TWOBIT_LVL_2       (1 << 6)
-#define IMTTBCR_SL0_TWOBIT_LVL_1       (2 << 6)
-
 #define IMBUSCR                                0x000c
 #define IMBUSCR_DVM                    (1 << 2)
 #define IMBUSCR_BUSSEL_SYS             (0 << 0)
@@ -361,16 +361,16 @@ static void ipmmu_tlb_flush_all(void *cookie)
        ipmmu_tlb_invalidate(domain);
 }
 
-static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
-                               size_t granule, bool leaf, void *cookie)
+static void ipmmu_tlb_flush(unsigned long iova, size_t size,
+                               size_t granule, void *cookie)
 {
-       /* The hardware doesn't support selective TLB flush. */
+       ipmmu_tlb_flush_all(cookie);
 }
 
-static const struct iommu_gather_ops ipmmu_gather_ops = {
+static const struct iommu_flush_ops ipmmu_flush_ops = {
        .tlb_flush_all = ipmmu_tlb_flush_all,
-       .tlb_add_flush = ipmmu_tlb_add_flush,
-       .tlb_sync = ipmmu_tlb_flush_all,
+       .tlb_flush_walk = ipmmu_tlb_flush,
+       .tlb_flush_leaf = ipmmu_tlb_flush,
 };
 
 /* -----------------------------------------------------------------------------
@@ -422,17 +422,19 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
 
        /*
         * TTBCR
-        * We use long descriptors with inner-shareable WBWA tables and allocate
-        * the whole 32-bit VA space to TTBR0.
+        * We use long descriptors and allocate the whole 32-bit VA space to
+        * TTBR0.
         */
        if (domain->mmu->features->twobit_imttbcr_sl0)
                tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
        else
                tmp = IMTTBCR_SL0_LVL_1;
 
-       ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
-                            IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
-                            IMTTBCR_IRGN0_WB_WA | tmp);
+       if (domain->mmu->features->cache_snoop)
+               tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
+                      IMTTBCR_IRGN0_WB_WA;
+
+       ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);
 
        /* MAIR0 */
        ipmmu_ctx_write_root(domain, IMMAIR0,
@@ -480,7 +482,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
        domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
        domain->cfg.ias = 32;
        domain->cfg.oas = 40;
-       domain->cfg.tlb = &ipmmu_gather_ops;
+       domain->cfg.tlb = &ipmmu_flush_ops;
        domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
        domain->io_domain.geometry.force_aperture = true;
        /*
@@ -733,14 +735,14 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
 }
 
 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
-                         size_t size)
+                         size_t size, struct iommu_iotlb_gather *gather)
 {
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
-       return domain->iop->unmap(domain->iop, iova, size);
+       return domain->iop->unmap(domain->iop, iova, size, gather);
 }
 
-static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
+static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
 {
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
@@ -748,6 +750,12 @@ static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
                ipmmu_tlb_flush_all(domain);
 }
 
+static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
+                            struct iommu_iotlb_gather *gather)
+{
+       ipmmu_flush_iotlb_all(io_domain);
+}
+
 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
                                      dma_addr_t iova)
 {
@@ -957,7 +965,7 @@ static const struct iommu_ops ipmmu_ops = {
        .detach_dev = ipmmu_detach_device,
        .map = ipmmu_map,
        .unmap = ipmmu_unmap,
-       .flush_iotlb_all = ipmmu_iotlb_sync,
+       .flush_iotlb_all = ipmmu_flush_iotlb_all,
        .iotlb_sync = ipmmu_iotlb_sync,
        .iova_to_phys = ipmmu_iova_to_phys,
        .add_device = ipmmu_add_device,
@@ -988,6 +996,7 @@ static const struct ipmmu_features ipmmu_features_default = {
        .setup_imbuscr = true,
        .twobit_imttbcr_sl0 = false,
        .reserved_context = false,
+       .cache_snoop = true,
 };
 
 static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
@@ -998,6 +1007,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
        .setup_imbuscr = false,
        .twobit_imttbcr_sl0 = true,
        .reserved_context = true,
+       .cache_snoop = false,
 };
 
 static const struct of_device_id ipmmu_of_ids[] = {
index b25e2eb9e038d9460a94b487d66c718693f3c8cc..be99d408cf35dc7b8d26612b979d4c584de64c4a 100644 (file)
@@ -168,20 +168,29 @@ fail:
        return;
 }
 
-static void __flush_iotlb_sync(void *cookie)
+static void __flush_iotlb_walk(unsigned long iova, size_t size,
+                              size_t granule, void *cookie)
 {
-       /*
-        * Nothing is needed here, the barrier to guarantee
-        * completion of the tlb sync operation is implicitly
-        * taken care when the iommu client does a writel before
-        * kick starting the other master.
-        */
+       __flush_iotlb_range(iova, size, granule, false, cookie);
+}
+
+static void __flush_iotlb_leaf(unsigned long iova, size_t size,
+                              size_t granule, void *cookie)
+{
+       __flush_iotlb_range(iova, size, granule, true, cookie);
 }
 
-static const struct iommu_gather_ops msm_iommu_gather_ops = {
+static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
+                              unsigned long iova, size_t granule, void *cookie)
+{
+       __flush_iotlb_range(iova, granule, granule, true, cookie);
+}
+
+static const struct iommu_flush_ops msm_iommu_flush_ops = {
        .tlb_flush_all = __flush_iotlb,
-       .tlb_add_flush = __flush_iotlb_range,
-       .tlb_sync = __flush_iotlb_sync,
+       .tlb_flush_walk = __flush_iotlb_walk,
+       .tlb_flush_leaf = __flush_iotlb_leaf,
+       .tlb_add_page = __flush_iotlb_page,
 };
 
 static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
@@ -345,7 +354,7 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
                .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
-               .tlb = &msm_iommu_gather_ops,
+               .tlb = &msm_iommu_flush_ops,
                .iommu_dev = priv->dev,
        };
 
@@ -509,13 +518,13 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-                             size_t len)
+                             size_t len, struct iommu_iotlb_gather *gather)
 {
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
 
        spin_lock_irqsave(&priv->pgtlock, flags);
-       len = priv->iop->unmap(priv->iop, iova, len);
+       len = priv->iop->unmap(priv->iop, iova, len, gather);
        spin_unlock_irqrestore(&priv->pgtlock, flags);
 
        return len;
@@ -691,6 +700,13 @@ static struct iommu_ops msm_iommu_ops = {
        .detach_dev = msm_iommu_detach_dev,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
+       /*
+        * Nothing is needed here, the barrier to guarantee
+        * completion of the tlb sync operation is implicitly
+        * taken care of when the iommu client does a writel before
+        * kick starting the other master.
+        */
+       .iotlb_sync = NULL,
        .iova_to_phys = msm_iommu_iova_to_phys,
        .add_device = msm_iommu_add_device,
        .remove_device = msm_iommu_remove_device,
@@ -750,7 +766,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
 
        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
-               dev_err(iommu->dev, "could not get iommu irq\n");
                ret = -ENODEV;
                goto fail;
        }
index 82e4be4dfdaf84d17cc662fb7db2708cbeaf197c..67a483c1a9357c7629d569ae1456ca8181b92c89 100644 (file)
@@ -28,6 +28,7 @@
 #include "mtk_iommu.h"
 
 #define REG_MMU_PT_BASE_ADDR                   0x000
+#define MMU_PT_ADDR_MASK                       GENMASK(31, 7)
 
 #define REG_MMU_INVALIDATE                     0x020
 #define F_ALL_INVLD                            0x2
 #define REG_MMU_DCM_DIS                                0x050
 
 #define REG_MMU_CTRL_REG                       0x110
+#define F_MMU_TF_PROT_TO_PROGRAM_ADDR          (2 << 4)
 #define F_MMU_PREFETCH_RT_REPLACE_MOD          BIT(4)
-#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \
-       ((data)->m4u_plat == M4U_MT2712 ? 4 : 5)
-/* It's named by F_MMU_TF_PROT_SEL in mt2712. */
-#define F_MMU_TF_PROTECT_SEL(prot, data) \
-       (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))
+#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173   (2 << 5)
 
 #define REG_MMU_IVRP_PADDR                     0x114
 
 #define F_INT_CLR_BIT                          BIT(12)
 
 #define REG_MMU_INT_MAIN_CONTROL               0x124
-#define F_INT_TRANSLATION_FAULT                        BIT(0)
-#define F_INT_MAIN_MULTI_HIT_FAULT             BIT(1)
-#define F_INT_INVALID_PA_FAULT                 BIT(2)
-#define F_INT_ENTRY_REPLACEMENT_FAULT          BIT(3)
-#define F_INT_TLB_MISS_FAULT                   BIT(4)
-#define F_INT_MISS_TRANSACTION_FIFO_FAULT      BIT(5)
-#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT   BIT(6)
+                                               /* mmu0 | mmu1 */
+#define F_INT_TRANSLATION_FAULT                        (BIT(0) | BIT(7))
+#define F_INT_MAIN_MULTI_HIT_FAULT             (BIT(1) | BIT(8))
+#define F_INT_INVALID_PA_FAULT                 (BIT(2) | BIT(9))
+#define F_INT_ENTRY_REPLACEMENT_FAULT          (BIT(3) | BIT(10))
+#define F_INT_TLB_MISS_FAULT                   (BIT(4) | BIT(11))
+#define F_INT_MISS_TRANSACTION_FIFO_FAULT      (BIT(5) | BIT(12))
+#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT   (BIT(6) | BIT(13))
 
 #define REG_MMU_CPE_DONE                       0x12C
 
 #define REG_MMU_FAULT_ST1                      0x134
+#define F_REG_MMU0_FAULT_MASK                  GENMASK(6, 0)
+#define F_REG_MMU1_FAULT_MASK                  GENMASK(13, 7)
 
-#define REG_MMU_FAULT_VA                       0x13c
+#define REG_MMU0_FAULT_VA                      0x13c
 #define F_MMU_FAULT_VA_WRITE_BIT               BIT(1)
 #define F_MMU_FAULT_VA_LAYER_BIT               BIT(0)
 
-#define REG_MMU_INVLD_PA                       0x140
-#define REG_MMU_INT_ID                         0x150
-#define F_MMU0_INT_ID_LARB_ID(a)               (((a) >> 7) & 0x7)
-#define F_MMU0_INT_ID_PORT_ID(a)               (((a) >> 2) & 0x1f)
+#define REG_MMU0_INVLD_PA                      0x140
+#define REG_MMU1_FAULT_VA                      0x144
+#define REG_MMU1_INVLD_PA                      0x148
+#define REG_MMU0_INT_ID                                0x150
+#define REG_MMU1_INT_ID                                0x154
+#define F_MMU_INT_ID_LARB_ID(a)                        (((a) >> 7) & 0x7)
+#define F_MMU_INT_ID_PORT_ID(a)                        (((a) >> 2) & 0x1f)
 
 #define MTK_PROTECT_PA_ALIGN                   128
 
@@ -107,6 +111,30 @@ struct mtk_iommu_domain {
 
 static const struct iommu_ops mtk_iommu_ops;
 
+/*
+ * In M4U 4GB mode, the physical address is remapped as below:
+ *
+ * CPU Physical address:
+ * ====================
+ *
+ * 0      1G       2G     3G       4G     5G
+ * |---A---|---B---|---C---|---D---|---E---|
+ * +--I/O--+------------Memory-------------+
+ *
+ * IOMMU output physical address:
+ *  =============================
+ *
+ *                                 4G      5G     6G      7G      8G
+ *                                 |---E---|---B---|---C---|---D---|
+ *                                 +------------Memory-------------+
+ *
+ * Region 'A' (I/O) can NOT be mapped by the M4U; for Regions 'B'/'C'/'D',
+ * bit 32 of the CPU physical address always needs to be set, and for Region
+ * 'E' the CPU physical address is kept as is.
+ * Additionally, the iommu consumers always use the CPU physical address.
+ */
+#define MTK_IOMMU_4GB_MODE_REMAP_BASE   0x140000000UL
+
 static LIST_HEAD(m4ulist);     /* List all the M4U HWs */
 
 #define for_each_m4u(data)     list_for_each_entry(data, &m4ulist, list)
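
Sketch of the remapping described in the comment above (illustrative helpers only, not the driver's API): mtk_iommu_map() sets bit 32 of the CPU physical address when 4GB mode is enabled, and mtk_iommu_iova_to_phys() clears it again for outputs at or above MTK_IOMMU_4GB_MODE_REMAP_BASE.

    #include <stdint.h>
    #include <stdio.h>

    #define REMAP_BASE 0x140000000ULL       /* MTK_IOMMU_4GB_MODE_REMAP_BASE */

    /* CPU physical address -> address programmed into the M4U page table. */
    static uint64_t to_m4u_pa(uint64_t cpu_pa, int enable_4gb)
    {
            return enable_4gb ? (cpu_pa | (1ULL << 32)) : cpu_pa;
    }

    /* Address read back from the page table -> CPU physical address. */
    static uint64_t to_cpu_pa(uint64_t m4u_pa, int enable_4gb)
    {
            if (enable_4gb && m4u_pa >= REMAP_BASE)
                    m4u_pa &= ~(1ULL << 32);
            return m4u_pa;
    }

    int main(void)
    {
            /* Region 'B' (1G) maps to 5G; translating 5G back yields 1G. */
            printf("0x%llx\n", (unsigned long long)to_m4u_pa(0x40000000ULL, 1));
            printf("0x%llx\n", (unsigned long long)to_cpu_pa(0x140000000ULL, 1));
            return 0;
    }
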
@@ -188,10 +216,32 @@ static void mtk_iommu_tlb_sync(void *cookie)
        }
 }
 
-static const struct iommu_gather_ops mtk_iommu_gather_ops = {
+static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+                                    size_t granule, void *cookie)
+{
+       mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie);
+       mtk_iommu_tlb_sync(cookie);
+}
+
+static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
+                                    size_t granule, void *cookie)
+{
+       mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie);
+       mtk_iommu_tlb_sync(cookie);
+}
+
+static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
+                                           unsigned long iova, size_t granule,
+                                           void *cookie)
+{
+       mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
+}
+
+static const struct iommu_flush_ops mtk_iommu_flush_ops = {
        .tlb_flush_all = mtk_iommu_tlb_flush_all,
-       .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
-       .tlb_sync = mtk_iommu_tlb_sync,
+       .tlb_flush_walk = mtk_iommu_tlb_flush_walk,
+       .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf,
+       .tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
 };
 
 static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
@@ -204,13 +254,21 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
 
        /* Read error info from registers */
        int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
-       fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
+       if (int_state & F_REG_MMU0_FAULT_MASK) {
+               regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
+               fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
+               fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
+       } else {
+               regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
+               fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
+               fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
+       }
        layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
        write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
-       fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
-       regval = readl_relaxed(data->base + REG_MMU_INT_ID);
-       fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
-       fault_port = F_MMU0_INT_ID_PORT_ID(regval);
+       fault_larb = F_MMU_INT_ID_LARB_ID(regval);
+       fault_port = F_MMU_INT_ID_PORT_ID(regval);
+
+       fault_larb = data->plat_data->larbid_remap[fault_larb];
 
        if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
                               write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
@@ -242,7 +300,7 @@ static void mtk_iommu_config(struct mtk_iommu_data *data,
        for (i = 0; i < fwspec->num_ids; ++i) {
                larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
                portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
-               larb_mmu = &data->smi_imu.larb_imu[larbid];
+               larb_mmu = &data->larb_imu[larbid];
 
                dev_dbg(dev, "%s iommu port: %d\n",
                        enable ? "enable" : "disable", portid);
@@ -263,17 +321,15 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
        dom->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_ARM_NS |
                        IO_PGTABLE_QUIRK_NO_PERMS |
-                       IO_PGTABLE_QUIRK_TLBI_ON_MAP,
+                       IO_PGTABLE_QUIRK_TLBI_ON_MAP |
+                       IO_PGTABLE_QUIRK_ARM_MTK_EXT,
                .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
                .ias = 32,
-               .oas = 32,
-               .tlb = &mtk_iommu_gather_ops,
+               .oas = 34,
+               .tlb = &mtk_iommu_flush_ops,
                .iommu_dev = data->dev,
        };
 
-       if (data->enable_4GB)
-               dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;
-
        dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
        if (!dom->iop) {
                dev_err(data->dev, "Failed to alloc io pgtable\n");
@@ -336,7 +392,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
        /* Update the pgtable base address register of the M4U HW */
        if (!data->m4u_dom) {
                data->m4u_dom = dom;
-               writel(dom->cfg.arm_v7s_cfg.ttbr[0],
+               writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
                       data->base + REG_MMU_PT_BASE_ADDR);
        }
 
@@ -359,32 +415,43 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot)
 {
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+       struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
        unsigned long flags;
        int ret;
 
+       /* The "4GB mode" M4U physically can not use the lower remap of Dram. */
+       if (data->enable_4GB)
+               paddr |= BIT_ULL(32);
+
        spin_lock_irqsave(&dom->pgtlock, flags);
-       ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32),
-                           size, prot);
+       ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
        spin_unlock_irqrestore(&dom->pgtlock, flags);
 
        return ret;
 }
 
 static size_t mtk_iommu_unmap(struct iommu_domain *domain,
-                             unsigned long iova, size_t size)
+                             unsigned long iova, size_t size,
+                             struct iommu_iotlb_gather *gather)
 {
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        size_t unmapsz;
 
        spin_lock_irqsave(&dom->pgtlock, flags);
-       unmapsz = dom->iop->unmap(dom->iop, iova, size);
+       unmapsz = dom->iop->unmap(dom->iop, iova, size, gather);
        spin_unlock_irqrestore(&dom->pgtlock, flags);
 
        return unmapsz;
 }
 
-static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
+static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+       mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+}
+
+static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
+                                struct iommu_iotlb_gather *gather)
 {
        mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
 }
@@ -401,8 +468,8 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
        pa = dom->iop->iova_to_phys(dom->iop, iova);
        spin_unlock_irqrestore(&dom->pgtlock, flags);
 
-       if (data->enable_4GB)
-               pa |= BIT_ULL(32);
+       if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
+               pa &= ~BIT_ULL(32);
 
        return pa;
 }
@@ -490,7 +557,7 @@ static const struct iommu_ops mtk_iommu_ops = {
        .detach_dev     = mtk_iommu_detach_device,
        .map            = mtk_iommu_map,
        .unmap          = mtk_iommu_unmap,
-       .flush_iotlb_all = mtk_iommu_iotlb_sync,
+       .flush_iotlb_all = mtk_iommu_flush_iotlb_all,
        .iotlb_sync     = mtk_iommu_iotlb_sync,
        .iova_to_phys   = mtk_iommu_iova_to_phys,
        .add_device     = mtk_iommu_add_device,
@@ -511,9 +578,11 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
                return ret;
        }
 
-       regval = F_MMU_TF_PROTECT_SEL(2, data);
-       if (data->m4u_plat == M4U_MT8173)
-               regval |= F_MMU_PREFETCH_RT_REPLACE_MOD;
+       if (data->plat_data->m4u_plat == M4U_MT8173)
+               regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
+                        F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
+       else
+               regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
        writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
 
        regval = F_L2_MULIT_HIT_EN |
@@ -533,14 +602,14 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
                F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
        writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
 
-       if (data->m4u_plat == M4U_MT8173)
+       if (data->plat_data->m4u_plat == M4U_MT8173)
                regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
        else
                regval = lower_32_bits(data->protect_base) |
                         upper_32_bits(data->protect_base);
        writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
 
-       if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {
+       if (data->enable_4GB && data->plat_data->has_vld_pa_rng) {
                /*
                 * If 4GB mode is enabled, the valid PA range is from
                 * 0x1_0000_0000 to 0x1_ffff_ffff. Here we record bit[32:30].
@@ -550,8 +619,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
        }
        writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
 
-       /* It's MISC control register whose default value is ok except mt8173.*/
-       if (data->m4u_plat == M4U_MT8173)
+       if (data->plat_data->reset_axi)
                writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);
 
        if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
@@ -584,7 +652,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
        if (!data)
                return -ENOMEM;
        data->dev = dev;
-       data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev);
+       data->plat_data = of_device_get_match_data(dev);
 
        /* Protect memory. HW will access here on a translation fault. */
        protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
@@ -594,6 +662,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
 
        /* Whether the current dram is over 4GB */
        data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
+       if (!data->plat_data->has_4gb_mode)
+               data->enable_4GB = false;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->base = devm_ioremap_resource(dev, res);
@@ -605,15 +675,16 @@ static int mtk_iommu_probe(struct platform_device *pdev)
        if (data->irq < 0)
                return data->irq;
 
-       data->bclk = devm_clk_get(dev, "bclk");
-       if (IS_ERR(data->bclk))
-               return PTR_ERR(data->bclk);
+       if (data->plat_data->has_bclk) {
+               data->bclk = devm_clk_get(dev, "bclk");
+               if (IS_ERR(data->bclk))
+                       return PTR_ERR(data->bclk);
+       }
 
        larb_nr = of_count_phandle_with_args(dev->of_node,
                                             "mediatek,larbs", NULL);
        if (larb_nr < 0)
                return larb_nr;
-       data->smi_imu.larb_nr = larb_nr;
 
        for (i = 0; i < larb_nr; i++) {
                struct device_node *larbnode;
@@ -638,7 +709,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
                        of_node_put(larbnode);
                        return -EPROBE_DEFER;
                }
-               data->smi_imu.larb_imu[id].dev = &plarbdev->dev;
+               data->larb_imu[id].dev = &plarbdev->dev;
 
                component_match_add_release(dev, &match, release_of,
                                            compare_of, larbnode);
@@ -699,6 +770,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
        reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
        reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
        reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
+       reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
        clk_disable_unprepare(data->bclk);
        return 0;
 }
@@ -707,6 +779,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
 {
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
+       struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
        void __iomem *base = data->base;
        int ret;
 
@@ -722,8 +795,9 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
        writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
        writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
        writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
-       if (data->m4u_dom)
-               writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
+       writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
+       if (m4u_dom)
+               writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
                       base + REG_MMU_PT_BASE_ADDR);
        return 0;
 }
@@ -732,9 +806,32 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
 };
 
+static const struct mtk_iommu_plat_data mt2712_data = {
+       .m4u_plat     = M4U_MT2712,
+       .has_4gb_mode = true,
+       .has_bclk     = true,
+       .has_vld_pa_rng   = true,
+       .larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+};
+
+static const struct mtk_iommu_plat_data mt8173_data = {
+       .m4u_plat     = M4U_MT8173,
+       .has_4gb_mode = true,
+       .has_bclk     = true,
+       .reset_axi    = true,
+       .larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
+};
+
+static const struct mtk_iommu_plat_data mt8183_data = {
+       .m4u_plat     = M4U_MT8183,
+       .reset_axi    = true,
+       .larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1},
+};
+
 static const struct of_device_id mtk_iommu_of_ids[] = {
-       { .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712},
-       { .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173},
+       { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
+       { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
+       { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
        {}
 };
 
index 59337323db58e2039b948ae5faca3440533215e1..fc0f16eabacdfa2340677a6db385ffa8fd94c5db 100644 (file)
@@ -24,12 +24,25 @@ struct mtk_iommu_suspend_reg {
        u32                             int_control0;
        u32                             int_main_control;
        u32                             ivrp_paddr;
+       u32                             vld_pa_rng;
 };
 
 enum mtk_iommu_plat {
        M4U_MT2701,
        M4U_MT2712,
        M4U_MT8173,
+       M4U_MT8183,
+};
+
+struct mtk_iommu_plat_data {
+       enum mtk_iommu_plat m4u_plat;
+       bool                has_4gb_mode;
+
+       /* HW will use the EMI clock if the "bclk" is not present. */
+       bool                has_bclk;
+       bool                has_vld_pa_rng;
+       bool                reset_axi;
+       unsigned char       larbid_remap[MTK_LARB_NR_MAX];
 };
 
 struct mtk_iommu_domain;
@@ -43,14 +56,14 @@ struct mtk_iommu_data {
        struct mtk_iommu_suspend_reg    reg;
        struct mtk_iommu_domain         *m4u_dom;
        struct iommu_group              *m4u_group;
-       struct mtk_smi_iommu            smi_imu;      /* SMI larb iommu info */
        bool                            enable_4GB;
        bool                            tlb_flush_active;
 
        struct iommu_device             iommu;
-       enum mtk_iommu_plat             m4u_plat;
+       const struct mtk_iommu_plat_data *plat_data;
 
        struct list_head                list;
+       struct mtk_smi_larb_iommu       larb_imu[MTK_LARB_NR_MAX];
 };
 
 static inline int compare_of(struct device *dev, void *data)
@@ -67,14 +80,14 @@ static inline int mtk_iommu_bind(struct device *dev)
 {
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
 
-       return component_bind_all(dev, &data->smi_imu);
+       return component_bind_all(dev, &data->larb_imu);
 }
 
 static inline void mtk_iommu_unbind(struct device *dev)
 {
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
 
-       component_unbind_all(dev, &data->smi_imu);
+       component_unbind_all(dev, &data->larb_imu);
 }
 
 #endif
index abeeac488372c8e5735f8838deb0ed7f5eef0eca..210b1c7c0bdae4202766dca020dda1159d6917da 100644 (file)
@@ -206,7 +206,7 @@ static void mtk_iommu_config(struct mtk_iommu_data *data,
        for (i = 0; i < fwspec->num_ids; ++i) {
                larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
                portid = mt2701_m4u_to_port(fwspec->ids[i]);
-               larb_mmu = &data->smi_imu.larb_imu[larbid];
+               larb_mmu = &data->larb_imu[larbid];
 
                dev_dbg(dev, "%s iommu port: %d\n",
                        enable ? "enable" : "disable", portid);
@@ -324,7 +324,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t mtk_iommu_unmap(struct iommu_domain *domain,
-                             unsigned long iova, size_t size)
+                             unsigned long iova, size_t size,
+                             struct iommu_iotlb_gather *gather)
 {
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
@@ -610,14 +611,12 @@ static int mtk_iommu_probe(struct platform_device *pdev)
                        }
                }
 
-               data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
+               data->larb_imu[larb_nr].dev = &plarbdev->dev;
                component_match_add_release(dev, &match, release_of,
                                            compare_of, larb_spec.np);
                larb_nr++;
        }
 
-       data->smi_imu.larb_nr = larb_nr;
-
        platform_set_drvdata(pdev, data);
 
        ret = mtk_iommu_hw_init(data);
index dfb961d8c21b7fd95feaa102a3235950950c0e5f..09c6e1c680db980aeb67dae51986b2a84e88d99a 100644 (file)
 
 static const struct iommu_ops omap_iommu_ops;
 
+struct orphan_dev {
+       struct device *dev;
+       struct list_head node;
+};
+
+static LIST_HEAD(orphan_dev_list);
+
+static DEFINE_SPINLOCK(orphan_lock);
+
 #define to_iommu(dev)  ((struct omap_iommu *)dev_get_drvdata(dev))
 
 /* bitmap of the page sizes currently supported */
@@ -53,6 +62,8 @@ static const struct iommu_ops omap_iommu_ops;
 static struct platform_driver omap_iommu_driver;
 static struct kmem_cache *iopte_cachep;
 
+static int _omap_iommu_add_device(struct device *dev);
+
 /**
  * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
  * @dom:       generic iommu domain handle
@@ -65,6 +76,9 @@ static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
 /**
  * omap_iommu_save_ctx - Save registers for pm off-mode support
  * @dev:       client device
+ *
+ * This should be treated as a deprecated API. It is preserved only
+ * to maintain existing functionality for the OMAP3 ISP driver.
  **/
 void omap_iommu_save_ctx(struct device *dev)
 {
@@ -92,6 +106,9 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
 /**
  * omap_iommu_restore_ctx - Restore registers for pm off-mode support
  * @dev:       client device
+ *
+ * This should be treated as a deprecated API. It is preserved only
+ * to maintain existing functionality for the OMAP3 ISP driver.
  **/
 void omap_iommu_restore_ctx(struct device *dev)
 {
@@ -186,36 +203,18 @@ static void omap2_iommu_disable(struct omap_iommu *obj)
 
 static int iommu_enable(struct omap_iommu *obj)
 {
-       int err;
-       struct platform_device *pdev = to_platform_device(obj->dev);
-       struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
-
-       if (pdata && pdata->deassert_reset) {
-               err = pdata->deassert_reset(pdev, pdata->reset_name);
-               if (err) {
-                       dev_err(obj->dev, "deassert_reset failed: %d\n", err);
-                       return err;
-               }
-       }
-
-       pm_runtime_get_sync(obj->dev);
+       int ret;
 
-       err = omap2_iommu_enable(obj);
+       ret = pm_runtime_get_sync(obj->dev);
+       if (ret < 0)
+               pm_runtime_put_noidle(obj->dev);
 
-       return err;
+       return ret < 0 ? ret : 0;
 }
 
 static void iommu_disable(struct omap_iommu *obj)
 {
-       struct platform_device *pdev = to_platform_device(obj->dev);
-       struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
-
-       omap2_iommu_disable(obj);
-
        pm_runtime_put_sync(obj->dev);
-
-       if (pdata && pdata->assert_reset)
-               pdata->assert_reset(pdev, pdata->reset_name);
 }
 
 /*
@@ -901,15 +900,219 @@ static void omap_iommu_detach(struct omap_iommu *obj)
 
        dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
                         DMA_TO_DEVICE);
-       iommu_disable(obj);
        obj->pd_dma = 0;
        obj->iopgd = NULL;
+       iommu_disable(obj);
 
        spin_unlock(&obj->iommu_lock);
 
        dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
 }
 
+static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
+{
+       struct iotlb_lock lock;
+       struct cr_regs cr;
+       struct cr_regs *tmp;
+       int i;
+
+       /* check if there are any locked tlbs to save */
+       iotlb_lock_get(obj, &lock);
+       obj->num_cr_ctx = lock.base;
+       if (!obj->num_cr_ctx)
+               return;
+
+       tmp = obj->cr_ctx;
+       for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
+               *tmp++ = cr;
+}
+
+static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
+{
+       struct iotlb_lock l;
+       struct cr_regs *tmp;
+       int i;
+
+       /* no locked tlbs to restore */
+       if (!obj->num_cr_ctx)
+               return;
+
+       l.base = 0;
+       tmp = obj->cr_ctx;
+       for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
+               l.vict = i;
+               iotlb_lock_set(obj, &l);
+               iotlb_load_cr(obj, tmp);
+       }
+       l.base = obj->num_cr_ctx;
+       l.vict = i;
+       iotlb_lock_set(obj, &l);
+}
+
+/**
+ * omap_iommu_domain_deactivate - deactivate attached iommu devices
+ * @domain: iommu domain attached to the target iommu device
+ *
+ * This API allows the client devices of IOMMU devices to suspend
+ * the IOMMUs they control at runtime, after they have idled and
+ * suspended all activity. System Suspend will leverage the PM
+ * driver late callbacks.
+ **/
+int omap_iommu_domain_deactivate(struct iommu_domain *domain)
+{
+       struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
+       struct omap_iommu_device *iommu;
+       struct omap_iommu *oiommu;
+       int i;
+
+       if (!omap_domain->dev)
+               return 0;
+
+       iommu = omap_domain->iommus;
+       iommu += (omap_domain->num_iommus - 1);
+       for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
+               oiommu = iommu->iommu_dev;
+               pm_runtime_put_sync(oiommu->dev);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);
+
+/**
+ * omap_iommu_domain_activate - activate attached iommu devices
+ * @domain: iommu domain attached to the target iommu device
+ *
+ * This API allows the client devices of IOMMU devices to resume the
+ * IOMMUs they control at runtime, before they can resume operations.
+ * System Resume will leverage the PM driver late callbacks.
+ **/
+int omap_iommu_domain_activate(struct iommu_domain *domain)
+{
+       struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
+       struct omap_iommu_device *iommu;
+       struct omap_iommu *oiommu;
+       int i;
+
+       if (!omap_domain->dev)
+               return 0;
+
+       iommu = omap_domain->iommus;
+       for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
+               oiommu = iommu->iommu_dev;
+               pm_runtime_get_sync(oiommu->dev);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);
+
+/**
+ * omap_iommu_runtime_suspend - disable an iommu device
+ * @dev:       iommu device
+ *
+ * This function performs all that is necessary to disable an
+ * IOMMU device, either during final detachment from a client
+ * device, or during system/runtime suspend of the device. This
+ * includes programming all the appropriate IOMMU registers, and
+ * managing the associated omap_hwmod's state and the device's
+ * reset line. This function also saves the context of any
+ * locked TLBs if suspending.
+ **/
+static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct iommu_platform_data *pdata = dev_get_platdata(dev);
+       struct omap_iommu *obj = to_iommu(dev);
+       int ret;
+
+       /* save the TLBs only during suspend, and not for power down */
+       if (obj->domain && obj->iopgd)
+               omap_iommu_save_tlb_entries(obj);
+
+       omap2_iommu_disable(obj);
+
+       if (pdata && pdata->device_idle)
+               pdata->device_idle(pdev);
+
+       if (pdata && pdata->assert_reset)
+               pdata->assert_reset(pdev, pdata->reset_name);
+
+       if (pdata && pdata->set_pwrdm_constraint) {
+               ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
+               if (ret) {
+                       dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
+                                ret);
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * omap_iommu_runtime_resume - enable an iommu device
+ * @dev:       iommu device
+ *
+ * This function performs all that is necessary to enable an
+ * IOMMU device, either during initial attachment to a client
+ * device, or during system/runtime resume of the device. This
+ * includes programming all the appropriate IOMMU registers, and
+ * managing the associated omap_hwmod's state and the device's
+ * reset line. The function also restores any locked TLBs if
+ * resuming after a suspend.
+ **/
+static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct iommu_platform_data *pdata = dev_get_platdata(dev);
+       struct omap_iommu *obj = to_iommu(dev);
+       int ret = 0;
+
+       if (pdata && pdata->set_pwrdm_constraint) {
+               ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
+               if (ret) {
+                       dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
+                                ret);
+               }
+       }
+
+       if (pdata && pdata->deassert_reset) {
+               ret = pdata->deassert_reset(pdev, pdata->reset_name);
+               if (ret) {
+                       dev_err(dev, "deassert_reset failed: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       if (pdata && pdata->device_enable)
+               pdata->device_enable(pdev);
+
+       /* restore the TLBs only during resume, and not for power up */
+       if (obj->domain)
+               omap_iommu_restore_tlb_entries(obj);
+
+       ret = omap2_iommu_enable(obj);
+
+       return ret;
+}
+
+/**
+ * omap_iommu_prepare - prepare() dev_pm_ops implementation
+ * @dev:       iommu device
+ *
+ * This function performs the necessary checks to determine if the IOMMU
+ * device needs suspending or not. The function checks if the runtime_pm
+ * status of the device is suspended, and returns 1 in that case. This
+ * causes the PM core to skip invoking any of the Sleep PM callbacks
+ * (suspend, suspend_late, resume, resume_early etc).
+ */
+static int omap_iommu_prepare(struct device *dev)
+{
+       if (pm_runtime_status_suspended(dev))
+               return 1;
+       return 0;
+}
+
 static bool omap_iommu_can_register(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
@@ -974,6 +1177,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
        struct omap_iommu *obj;
        struct resource *res;
        struct device_node *of = pdev->dev.of_node;
+       struct orphan_dev *orphan_dev, *tmp;
 
        if (!of) {
                pr_err("%s: only DT-based devices are supported\n", __func__);
@@ -984,6 +1188,15 @@ static int omap_iommu_probe(struct platform_device *pdev)
        if (!obj)
                return -ENOMEM;
 
+       /*
+        * self-manage the ordering dependencies between omap_device_enable/idle
+        * and omap_device_assert/deassert_hardreset API
+        */
+       if (pdev->dev.pm_domain) {
+               dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
+               pdev->dev.pm_domain = NULL;
+       }
+
        obj->name = dev_name(&pdev->dev);
        obj->nr_tlb_entries = 32;
        err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
@@ -996,6 +1209,11 @@ static int omap_iommu_probe(struct platform_device *pdev)
 
        obj->dev = &pdev->dev;
        obj->ctx = (void *)obj + sizeof(*obj);
+       obj->cr_ctx = devm_kzalloc(&pdev->dev,
+                                  sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
+                                  GFP_KERNEL);
+       if (!obj->cr_ctx)
+               return -ENOMEM;
 
        spin_lock_init(&obj->iommu_lock);
        spin_lock_init(&obj->page_table_lock);
@@ -1036,13 +1254,20 @@ static int omap_iommu_probe(struct platform_device *pdev)
                        goto out_sysfs;
        }
 
-       pm_runtime_irq_safe(obj->dev);
        pm_runtime_enable(obj->dev);
 
        omap_iommu_debugfs_add(obj);
 
        dev_info(&pdev->dev, "%s registered\n", obj->name);
 
+       list_for_each_entry_safe(orphan_dev, tmp, &orphan_dev_list, node) {
+               err = _omap_iommu_add_device(orphan_dev->dev);
+               if (!err) {
+                       list_del(&orphan_dev->node);
+                       kfree(orphan_dev);
+               }
+       }
+
        return 0;
 
 out_sysfs:
@@ -1072,6 +1297,14 @@ static int omap_iommu_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct dev_pm_ops omap_iommu_pm_ops = {
+       .prepare = omap_iommu_prepare,
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                                    pm_runtime_force_resume)
+       SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
+                          omap_iommu_runtime_resume, NULL)
+};
+
 static const struct of_device_id omap_iommu_of_match[] = {
        { .compatible = "ti,omap2-iommu" },
        { .compatible = "ti,omap4-iommu" },
@@ -1085,6 +1318,7 @@ static struct platform_driver omap_iommu_driver = {
        .remove = omap_iommu_remove,
        .driver = {
                .name   = "omap-iommu",
+               .pm     = &omap_iommu_pm_ops,
                .of_match_table = of_match_ptr(omap_iommu_of_match),
        },
 };
@@ -1149,7 +1383,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 }
 
 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-                              size_t size)
+                              size_t size, struct iommu_iotlb_gather *gather)
 {
        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
        struct device *dev = omap_domain->dev;
@@ -1423,7 +1657,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
        return ret;
 }
 
-static int omap_iommu_add_device(struct device *dev)
+static int _omap_iommu_add_device(struct device *dev)
 {
        struct omap_iommu_arch_data *arch_data, *tmp;
        struct omap_iommu *oiommu;
@@ -1432,6 +1666,8 @@ static int omap_iommu_add_device(struct device *dev)
        struct platform_device *pdev;
        int num_iommus, i;
        int ret;
+       struct orphan_dev *orphan_dev;
+       unsigned long flags;
 
        /*
         * Allocate the archdata iommu structure for DT-based devices.
@@ -1463,10 +1699,26 @@ static int omap_iommu_add_device(struct device *dev)
                }
 
                pdev = of_find_device_by_node(np);
-               if (WARN_ON(!pdev)) {
+               if (!pdev) {
                        of_node_put(np);
                        kfree(arch_data);
-                       return -EINVAL;
+                       spin_lock_irqsave(&orphan_lock, flags);
+                       list_for_each_entry(orphan_dev, &orphan_dev_list,
+                                           node) {
+                               if (orphan_dev->dev == dev)
+                                       break;
+                       }
+                       spin_unlock_irqrestore(&orphan_lock, flags);
+
+                       if (orphan_dev && orphan_dev->dev == dev)
+                               return -EPROBE_DEFER;
+
+                       orphan_dev = kzalloc(sizeof(*orphan_dev), GFP_KERNEL);
+                       orphan_dev->dev = dev;
+                       spin_lock_irqsave(&orphan_lock, flags);
+                       list_add(&orphan_dev->node, &orphan_dev_list);
+                       spin_unlock_irqrestore(&orphan_lock, flags);
+                       return -EPROBE_DEFER;
                }
 
                oiommu = platform_get_drvdata(pdev);
@@ -1477,6 +1729,7 @@ static int omap_iommu_add_device(struct device *dev)
                }
 
                tmp->iommu_dev = oiommu;
+               tmp->dev = &pdev->dev;
 
                of_node_put(np);
        }
@@ -1511,6 +1764,17 @@ static int omap_iommu_add_device(struct device *dev)
        return 0;
 }
 
+static int omap_iommu_add_device(struct device *dev)
+{
+       int ret;
+
+       ret = _omap_iommu_add_device(dev);
+       if (ret == -EPROBE_DEFER)
+               return 0;
+
+       return ret;
+}
+
 static void omap_iommu_remove_device(struct device *dev)
 {
        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
@@ -1554,7 +1818,7 @@ static const struct iommu_ops omap_iommu_ops = {
 static int __init omap_iommu_init(void)
 {
        struct kmem_cache *p;
-       const unsigned long flags = SLAB_HWCACHE_ALIGN;
+       const slab_flags_t flags = SLAB_HWCACHE_ALIGN;
        size_t align = 1 << 10; /* L2 pagetable alignment */
        struct device_node *np;
        int ret;
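
The omap-iommu changes above export omap_iommu_domain_activate() and omap_iommu_domain_deactivate() so the client driver that owns a domain (a remoteproc driver, for instance) can power its IOMMUs down once the device is idle and bring them back before restarting it; locked TLB entries are saved and restored by the new runtime-PM callbacks. A hypothetical client-side sketch (the wrapper names and the header are assumptions, not part of this commit):

    #include <linux/iommu.h>
    #include <linux/omap-iommu.h>

    /* Hypothetical wrappers a client driver might place in its own
     * suspend/resume paths; error handling kept minimal on purpose. */
    static int my_client_suspend(struct iommu_domain *domain)
    {
            /* The device has stopped issuing DMA, so the IOMMU may be
             * runtime-suspended; locked TLB entries are saved for us. */
            return omap_iommu_domain_deactivate(domain);
    }

    static int my_client_resume(struct iommu_domain *domain)
    {
            /* Power the IOMMU back up before the device resumes DMA. */
            return omap_iommu_domain_activate(domain);
    }
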
index 09968a02d291876461759be869d440e57a8696a9..18ee713ede784d16c884b9cca19ed8ce240706d4 100644 (file)
@@ -73,16 +73,22 @@ struct omap_iommu {
 
        void *ctx; /* iommu context: registers saved area */
 
+       struct cr_regs *cr_ctx;
+       u32 num_cr_ctx;
+
        int has_bus_err_back;
        u32 id;
 
        struct iommu_device iommu;
        struct iommu_group *group;
+
+       u8 pwrst;
 };
 
 /**
  * struct omap_iommu_arch_data - omap iommu private data
- * @iommu_dev: handle of the iommu device
+ * @iommu_dev: handle of the OMAP iommu device
+ * @dev: handle of the iommu device
  *
  * This is an omap iommu private data object, which binds an iommu user
  * to its iommu device. This object should be placed at the iommu user's
@@ -91,6 +97,7 @@ struct omap_iommu {
  */
 struct omap_iommu_arch_data {
        struct omap_iommu *iommu_dev;
+       struct device *dev;
 };
 
 struct cr_regs {
index 34d0b9783b3ed6f57c3fcbe9ec46d16db1e92116..c31e7bc4ccbec2f7083ba30541e2a2b70df7431a 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/atomic.h>
+#include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dma-iommu.h>
@@ -32,7 +33,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
-#include "arm-smmu-regs.h"
+#include "arm-smmu.h"
 
 #define SMMU_INTR_SEL_NS     0x2000
 
@@ -155,7 +156,7 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
                struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
                size_t s = size;
 
-               iova &= ~12UL;
+               iova = (iova >> 12) << 12;
                iova |= ctx->asid;
                do {
                        iommu_writel(ctx, reg, iova);
@@ -164,10 +165,32 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
        }
 }
 
-static const struct iommu_gather_ops qcom_gather_ops = {
+static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+                                     size_t granule, void *cookie)
+{
+       qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
+       qcom_iommu_tlb_sync(cookie);
+}
+
+static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
+                                     size_t granule, void *cookie)
+{
+       qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
+       qcom_iommu_tlb_sync(cookie);
+}
+
+static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+                                   unsigned long iova, size_t granule,
+                                   void *cookie)
+{
+       qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
+}
+
+static const struct iommu_flush_ops qcom_flush_ops = {
        .tlb_flush_all  = qcom_iommu_tlb_inv_context,
-       .tlb_add_flush  = qcom_iommu_tlb_inv_range_nosync,
-       .tlb_sync       = qcom_iommu_tlb_sync,
+       .tlb_flush_walk = qcom_iommu_tlb_flush_walk,
+       .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
+       .tlb_add_page   = qcom_iommu_tlb_add_page,
 };
 
 static irqreturn_t qcom_iommu_fault(int irq, void *dev)
@@ -215,7 +238,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
                .pgsize_bitmap  = qcom_iommu_ops.pgsize_bitmap,
                .ias            = 32,
                .oas            = 40,
-               .tlb            = &qcom_gather_ops,
+               .tlb            = &qcom_flush_ops,
                .iommu_dev      = qcom_iommu->dev,
        };
 
@@ -247,16 +270,16 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
                /* TTBRs */
                iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
                                pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] |
-                               ((u64)ctx->asid << TTBRn_ASID_SHIFT));
+                               FIELD_PREP(TTBRn_ASID, ctx->asid));
                iommu_writeq(ctx, ARM_SMMU_CB_TTBR1,
                                pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] |
-                               ((u64)ctx->asid << TTBRn_ASID_SHIFT));
+                               FIELD_PREP(TTBRn_ASID, ctx->asid));
 
-               /* TTBCR */
-               iommu_writel(ctx, ARM_SMMU_CB_TTBCR2,
+               /* TCR */
+               iommu_writel(ctx, ARM_SMMU_CB_TCR2,
                                (pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) |
-                               TTBCR2_SEP_UPSTREAM);
-               iommu_writel(ctx, ARM_SMMU_CB_TTBCR,
+                               FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM));
+               iommu_writel(ctx, ARM_SMMU_CB_TCR,
                                pgtbl_cfg.arm_lpae_s1_cfg.tcr);
 
                /* MAIRs (stage-1 only) */
@@ -417,7 +440,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-                              size_t size)
+                              size_t size, struct iommu_iotlb_gather *gather)
 {
        size_t ret;
        unsigned long flags;
@@ -434,14 +457,14 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
         */
        pm_runtime_get_sync(qcom_domain->iommu->dev);
        spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
-       ret = ops->unmap(ops, iova, size);
+       ret = ops->unmap(ops, iova, size, gather);
        spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
        pm_runtime_put_sync(qcom_domain->iommu->dev);
 
        return ret;
 }
 
-static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
+static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
@@ -454,6 +477,12 @@ static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
        pm_runtime_put_sync(qcom_domain->iommu->dev);
 }
 
+static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
+                                 struct iommu_iotlb_gather *gather)
+{
+       qcom_iommu_flush_iotlb_all(domain);
+}
+
 static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
 {
@@ -581,7 +610,7 @@ static const struct iommu_ops qcom_iommu_ops = {
        .detach_dev     = qcom_iommu_detach_dev,
        .map            = qcom_iommu_map,
        .unmap          = qcom_iommu_unmap,
-       .flush_iotlb_all = qcom_iommu_iotlb_sync,
+       .flush_iotlb_all = qcom_iommu_flush_iotlb_all,
        .iotlb_sync     = qcom_iommu_iotlb_sync,
        .iova_to_phys   = qcom_iommu_iova_to_phys,
        .add_device     = qcom_iommu_add_device,
@@ -696,10 +725,8 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev)
                return PTR_ERR(ctx->base);
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "failed to get irq\n");
+       if (irq < 0)
                return -ENODEV;
-       }
 
        /* clear IRQs before registering fault handler, just in case the
         * boot-loader left us a surprise:
@@ -775,7 +802,7 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
        struct qcom_iommu_dev *qcom_iommu;
        struct device *dev = &pdev->dev;
        struct resource *res;
-       int ret, sz, max_asid = 0;
+       int ret, max_asid = 0;
 
        /* find the max asid (which is 1:1 to ctx bank idx), so we know how
         * many child ctx devices we have:
@@ -783,9 +810,8 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
        for_each_child_of_node(dev->of_node, child)
                max_asid = max(max_asid, get_asid(child));
 
-       sz = sizeof(*qcom_iommu) + (max_asid * sizeof(qcom_iommu->ctxs[0]));
-
-       qcom_iommu = devm_kzalloc(dev, sz, GFP_KERNEL);
+       qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
+                                 GFP_KERNEL);
        if (!qcom_iommu)
                return -ENOMEM;
        qcom_iommu->num_ctxs = max_asid;
index dc26d74d79c2c7c337b717aae3145810d13ff552..26290f310f90bd8c6d05da9b255ec02d144cd56f 100644 (file)
@@ -794,7 +794,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
 }
 
 static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
-                            size_t size)
+                            size_t size, struct iommu_iotlb_gather *gather)
 {
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
index 22d4db302c1c8c2cc6b19a11a5ee8baf8c132acc..3b0b18e23187c19d7c787389fb33d73ca957a429 100644 (file)
@@ -314,7 +314,8 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
 }
 
 static size_t s390_iommu_unmap(struct iommu_domain *domain,
-                              unsigned long iova, size_t size)
+                              unsigned long iova, size_t size,
+                              struct iommu_iotlb_gather *gather)
 {
        struct s390_domain *s390_domain = to_s390_domain(domain);
        int flags = ZPCI_PTE_INVALID;
index 6d40bc1b38bf0ccc2f90a441d04729d492c4d3a5..3924f7c055440edd3ee64be0b02710102bd7efcd 100644 (file)
@@ -207,7 +207,7 @@ static inline int __gart_iommu_unmap(struct gart_device *gart,
 }
 
 static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-                              size_t bytes)
+                              size_t bytes, struct iommu_iotlb_gather *gather)
 {
        struct gart_device *gart = gart_handle;
        int err;
@@ -273,11 +273,17 @@ static int gart_iommu_of_xlate(struct device *dev,
        return 0;
 }
 
-static void gart_iommu_sync(struct iommu_domain *domain)
+static void gart_iommu_sync_map(struct iommu_domain *domain)
 {
        FLUSH_GART_REGS(gart_handle);
 }
 
+static void gart_iommu_sync(struct iommu_domain *domain,
+                           struct iommu_iotlb_gather *gather)
+{
+       gart_iommu_sync_map(domain);
+}
+
 static const struct iommu_ops gart_iommu_ops = {
        .capable        = gart_iommu_capable,
        .domain_alloc   = gart_iommu_domain_alloc,
@@ -292,7 +298,7 @@ static const struct iommu_ops gart_iommu_ops = {
        .iova_to_phys   = gart_iommu_iova_to_phys,
        .pgsize_bitmap  = GART_IOMMU_PGSIZES,
        .of_xlate       = gart_iommu_of_xlate,
-       .iotlb_sync_map = gart_iommu_sync,
+       .iotlb_sync_map = gart_iommu_sync_map,
        .iotlb_sync     = gart_iommu_sync,
 };
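
A recurring pattern across these IOMMU hunks: ->unmap() and ->iotlb_sync() now take a struct iommu_iotlb_gather *, and drivers that can only invalidate a whole domain keep a gather-less helper for ->flush_iotlb_all() and wrap it for ->iotlb_sync(), ignoring the gather (as the qcom and gart hunks above do). A minimal sketch of that shape; the "foo" names are placeholders, not any of the drivers in this diff.

#include <linux/iommu.h>

static void foo_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	/* ... issue a full TLB invalidation for this domain ... */
}

static void foo_iommu_iotlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
	/* No per-range tracking in this driver: fall back to a full flush. */
	foo_iommu_flush_iotlb_all(domain);
}

static const struct iommu_ops foo_iommu_ops = {
	.flush_iotlb_all = foo_iommu_flush_iotlb_all,
	.iotlb_sync	 = foo_iommu_iotlb_sync,
	/* .map/.unmap and the remaining callbacks omitted */
};
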
 
index c4a652b227f8d6a794fa54a85ac9edf867f3f05f..7293fc3f796d69f61f8cbe8a4ef3176d3b9ff5b0 100644 (file)
@@ -680,7 +680,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
-                              size_t size)
+                              size_t size, struct iommu_iotlb_gather *gather)
 {
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
index 80a740df0737a638e6170f35c0b8b2a52cce37a0..3ea9d7682999528d1348a6d2693f6136556eea57 100644 (file)
@@ -751,7 +751,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
-                          size_t size)
+                          size_t size, struct iommu_iotlb_gather *gather)
 {
        int ret = 0;
        size_t unmapped;
@@ -797,7 +797,8 @@ static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
        return paddr;
 }
 
-static void viommu_iotlb_sync(struct iommu_domain *domain)
+static void viommu_iotlb_sync(struct iommu_domain *domain,
+                             struct iommu_iotlb_gather *gather)
 {
        struct viommu_domain *vdomain = to_viommu_domain(domain);
 
index 3c3ad42f22bfd29da879255593fe3cb7a21e1b3e..c92b405b7646cfbceaed3b6544fb71d145c9bb42 100644 (file)
@@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
        if (!cdev->ap.applid)
                return -ENODEV;
 
+       if (count < CAPIMSG_BASELEN)
+               return -EINVAL;
+
        skb = alloc_skb(count, GFP_USER);
        if (!skb)
                return -ENOMEM;
@@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
        }
        mlen = CAPIMSG_LEN(skb->data);
        if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
-               if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
+               if (count < CAPI_DATA_B3_REQ_LEN ||
+                   (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
                        kfree_skb(skb);
                        return -EINVAL;
                }
@@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
        CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
 
        if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
+               if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
+                       kfree_skb(skb);
+                       return -EINVAL;
+               }
                mutex_lock(&cdev->lock);
                capincci_free(cdev, CAPIMSG_NCCI(skb->data));
                mutex_unlock(&cdev->lock);
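
The capi_write() hunks above all apply the same rule: check the user-supplied count against the fixed message size before trusting any field parsed out of the buffer (CAPIMSG_BASELEN before reading the header, the per-command *_LEN constants before reading command-specific fields). A generic sketch of that check; struct foo_hdr and its fields are invented for illustration only.

#include <linux/errno.h>
#include <linux/types.h>

struct foo_hdr {			/* hypothetical wire header */
	u16 total_len;
	u16 cmd;
};

static int foo_check_len(const void *buf, size_t count)
{
	const struct foo_hdr *hdr = buf;

	if (count < sizeof(*hdr))	/* too short to even parse the header */
		return -EINVAL;

	if (count != hdr->total_len)	/* length field must match the write */
		return -EINVAL;

	return 0;
}
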
index 42ab43affbffb611886768029dfcbf04efc109ee..439d7d88687340c03b8b2016f8fc8357a36582ba 100644 (file)
 #define SMI_LARB_NONSEC_CON(id)        (0x380 + ((id) * 4))
 #define F_MMU_EN               BIT(0)
 
+/* SMI COMMON */
+#define SMI_BUS_SEL                    0x220
+#define SMI_BUS_LARB_SHIFT(larbid)     ((larbid) << 1)
+/* All are MMU0 by default. Only specialize mmu1 here. */
+#define F_MMU1_LARB(larbid)            (0x1 << SMI_BUS_LARB_SHIFT(larbid))
+
+enum mtk_smi_gen {
+       MTK_SMI_GEN1,
+       MTK_SMI_GEN2
+};
+
+struct mtk_smi_common_plat {
+       enum mtk_smi_gen gen;
+       bool             has_gals;
+       u32              bus_sel; /* Balance some larbs to enter mmu0 or mmu1 */
+};
+
 struct mtk_smi_larb_gen {
-       bool need_larbid;
        int port_in_larb[MTK_LARB_NR_MAX + 1];
        void (*config_port)(struct device *);
+       unsigned int                    larb_direct_to_common_mask;
+       bool                            has_gals;
 };
 
 struct mtk_smi {
        struct device                   *dev;
        struct clk                      *clk_apb, *clk_smi;
+       struct clk                      *clk_gals0, *clk_gals1;
        struct clk                      *clk_async; /*only needed by mt2701*/
-       void __iomem                    *smi_ao_base;
+       union {
+               void __iomem            *smi_ao_base; /* only for gen1 */
+               void __iomem            *base;        /* only for gen2 */
+       };
+       const struct mtk_smi_common_plat *plat;
 };
 
 struct mtk_smi_larb { /* larb: local arbiter */
@@ -63,82 +86,56 @@ struct mtk_smi_larb { /* larb: local arbiter */
        u32                             *mmu;
 };
 
-enum mtk_smi_gen {
-       MTK_SMI_GEN1,
-       MTK_SMI_GEN2
-};
-
-static int mtk_smi_enable(const struct mtk_smi *smi)
+static int mtk_smi_clk_enable(const struct mtk_smi *smi)
 {
        int ret;
 
-       ret = pm_runtime_get_sync(smi->dev);
-       if (ret < 0)
-               return ret;
-
        ret = clk_prepare_enable(smi->clk_apb);
        if (ret)
-               goto err_put_pm;
+               return ret;
 
        ret = clk_prepare_enable(smi->clk_smi);
        if (ret)
                goto err_disable_apb;
 
+       ret = clk_prepare_enable(smi->clk_gals0);
+       if (ret)
+               goto err_disable_smi;
+
+       ret = clk_prepare_enable(smi->clk_gals1);
+       if (ret)
+               goto err_disable_gals0;
+
        return 0;
 
+err_disable_gals0:
+       clk_disable_unprepare(smi->clk_gals0);
+err_disable_smi:
+       clk_disable_unprepare(smi->clk_smi);
 err_disable_apb:
        clk_disable_unprepare(smi->clk_apb);
-err_put_pm:
-       pm_runtime_put_sync(smi->dev);
        return ret;
 }
 
-static void mtk_smi_disable(const struct mtk_smi *smi)
+static void mtk_smi_clk_disable(const struct mtk_smi *smi)
 {
+       clk_disable_unprepare(smi->clk_gals1);
+       clk_disable_unprepare(smi->clk_gals0);
        clk_disable_unprepare(smi->clk_smi);
        clk_disable_unprepare(smi->clk_apb);
-       pm_runtime_put_sync(smi->dev);
 }
 
 int mtk_smi_larb_get(struct device *larbdev)
 {
-       struct mtk_smi_larb *larb = dev_get_drvdata(larbdev);
-       const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
-       struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
-       int ret;
-
-       /* Enable the smi-common's power and clocks */
-       ret = mtk_smi_enable(common);
-       if (ret)
-               return ret;
+       int ret = pm_runtime_get_sync(larbdev);
 
-       /* Enable the larb's power and clocks */
-       ret = mtk_smi_enable(&larb->smi);
-       if (ret) {
-               mtk_smi_disable(common);
-               return ret;
-       }
-
-       /* Configure the iommu info for this larb */
-       larb_gen->config_port(larbdev);
-
-       return 0;
+       return (ret < 0) ? ret : 0;
 }
 EXPORT_SYMBOL_GPL(mtk_smi_larb_get);
 
 void mtk_smi_larb_put(struct device *larbdev)
 {
-       struct mtk_smi_larb *larb = dev_get_drvdata(larbdev);
-       struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
-
-       /*
-        * Don't de-configure the iommu info for this larb since there may be
-        * several modules in this larb.
-        * The iommu info will be reset after power off.
-        */
-
-       mtk_smi_disable(&larb->smi);
-       mtk_smi_disable(common);
+       pm_runtime_put_sync(larbdev);
 }
 EXPORT_SYMBOL_GPL(mtk_smi_larb_put);
 
@@ -146,39 +143,26 @@ static int
 mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
 {
        struct mtk_smi_larb *larb = dev_get_drvdata(dev);
-       struct mtk_smi_iommu *smi_iommu = data;
+       struct mtk_smi_larb_iommu *larb_mmu = data;
        unsigned int         i;
 
-       if (larb->larb_gen->need_larbid) {
-               larb->mmu = &smi_iommu->larb_imu[larb->larbid].mmu;
-               return 0;
-       }
-
-       /*
-        * If there is no larbid property, Loop to find the corresponding
-        * iommu information.
-        */
-       for (i = 0; i < smi_iommu->larb_nr; i++) {
-               if (dev == smi_iommu->larb_imu[i].dev) {
-                       /* The 'mmu' may be updated in iommu-attach/detach. */
-                       larb->mmu = &smi_iommu->larb_imu[i].mmu;
+       for (i = 0; i < MTK_LARB_NR_MAX; i++) {
+               if (dev == larb_mmu[i].dev) {
+                       larb->larbid = i;
+                       larb->mmu = &larb_mmu[i].mmu;
                        return 0;
                }
        }
        return -ENODEV;
 }
 
-static void mtk_smi_larb_config_port_mt2712(struct device *dev)
+static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
 {
        struct mtk_smi_larb *larb = dev_get_drvdata(dev);
        u32 reg;
        int i;
 
-       /*
-        * larb 8/9 is the bdpsys larb, the iommu_en is enabled defaultly.
-        * Don't need to set it again.
-        */
-       if (larb->larbid == 8 || larb->larbid == 9)
+       if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
                return;
 
        for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
@@ -243,7 +227,6 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
 };
 
 static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
-       .need_larbid = true,
        .port_in_larb = {
                LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
                LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
@@ -252,8 +235,15 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
 };
 
 static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = {
-       .need_larbid = true,
-       .config_port = mtk_smi_larb_config_port_mt2712,
+       .config_port                = mtk_smi_larb_config_port_gen2_general,
+       .larb_direct_to_common_mask = BIT(8) | BIT(9),      /* bdpsys */
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
+       .has_gals                   = true,
+       .config_port                = mtk_smi_larb_config_port_gen2_general,
+       .larb_direct_to_common_mask = BIT(2) | BIT(3) | BIT(7),
+                                     /* IPU0 | IPU1 | CCU */
 };
 
 static const struct of_device_id mtk_smi_larb_of_ids[] = {
@@ -269,6 +259,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
                .compatible = "mediatek,mt2712-smi-larb",
                .data = &mtk_smi_larb_mt2712
        },
+       {
+               .compatible = "mediatek,mt8183-smi-larb",
+               .data = &mtk_smi_larb_mt8183
+       },
        {}
 };
 
@@ -279,7 +273,6 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct device_node *smi_node;
        struct platform_device *smi_pdev;
-       int err;
 
        larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
        if (!larb)
@@ -298,16 +291,16 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
        larb->smi.clk_smi = devm_clk_get(dev, "smi");
        if (IS_ERR(larb->smi.clk_smi))
                return PTR_ERR(larb->smi.clk_smi);
-       larb->smi.dev = dev;
 
-       if (larb->larb_gen->need_larbid) {
-               err = of_property_read_u32(dev->of_node, "mediatek,larb-id",
-                                          &larb->larbid);
-               if (err) {
-                       dev_err(dev, "missing larbid property\n");
-                       return err;
-               }
+       if (larb->larb_gen->has_gals) {
+               /* The larbs may still not have gals even if the SoC supports them. */
+               larb->smi.clk_gals0 = devm_clk_get(dev, "gals");
+               if (PTR_ERR(larb->smi.clk_gals0) == -ENOENT)
+                       larb->smi.clk_gals0 = NULL;
+               else if (IS_ERR(larb->smi.clk_gals0))
+                       return PTR_ERR(larb->smi.clk_gals0);
        }
+       larb->smi.dev = dev;
 
        smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
        if (!smi_node)
@@ -336,27 +329,86 @@ static int mtk_smi_larb_remove(struct platform_device *pdev)
        return 0;
 }
 
+static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+{
+       struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+       const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
+       int ret;
+
+       /* Power on smi-common. */
+       ret = pm_runtime_get_sync(larb->smi_common_dev);
+       if (ret < 0) {
+               dev_err(dev, "Failed to pm get for smi-common(%d).\n", ret);
+               return ret;
+       }
+
+       ret = mtk_smi_clk_enable(&larb->smi);
+       if (ret < 0) {
+               dev_err(dev, "Failed to enable clock(%d).\n", ret);
+               pm_runtime_put_sync(larb->smi_common_dev);
+               return ret;
+       }
+
+       /* Configure the basic setting for this larb */
+       larb_gen->config_port(dev);
+
+       return 0;
+}
+
+static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
+{
+       struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+
+       mtk_smi_clk_disable(&larb->smi);
+       pm_runtime_put_sync(larb->smi_common_dev);
+       return 0;
+}
+
+static const struct dev_pm_ops smi_larb_pm_ops = {
+       SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL)
+};
+
 static struct platform_driver mtk_smi_larb_driver = {
        .probe  = mtk_smi_larb_probe,
        .remove = mtk_smi_larb_remove,
        .driver = {
                .name = "mtk-smi-larb",
                .of_match_table = mtk_smi_larb_of_ids,
+               .pm             = &smi_larb_pm_ops,
        }
 };
 
+static const struct mtk_smi_common_plat mtk_smi_common_gen1 = {
+       .gen = MTK_SMI_GEN1,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_gen2 = {
+       .gen = MTK_SMI_GEN2,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
+       .gen      = MTK_SMI_GEN2,
+       .has_gals = true,
+       .bus_sel  = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
+                   F_MMU1_LARB(7),
+};
+
 static const struct of_device_id mtk_smi_common_of_ids[] = {
        {
                .compatible = "mediatek,mt8173-smi-common",
-               .data = (void *)MTK_SMI_GEN2
+               .data = &mtk_smi_common_gen2,
        },
        {
                .compatible = "mediatek,mt2701-smi-common",
-               .data = (void *)MTK_SMI_GEN1
+               .data = &mtk_smi_common_gen1,
        },
        {
                .compatible = "mediatek,mt2712-smi-common",
-               .data = (void *)MTK_SMI_GEN2
+               .data = &mtk_smi_common_gen2,
+       },
+       {
+               .compatible = "mediatek,mt8183-smi-common",
+               .data = &mtk_smi_common_mt8183,
        },
        {}
 };
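
Worth noting in the match table above: .data now points at per-SoC mtk_smi_common_plat structures instead of an enum cast through void *, and probe fetches it with of_device_get_match_data(). Typed match data scales better once per-SoC differences grow beyond a single value, as the mt8183 has_gals/bus_sel additions show. A minimal sketch of the pattern with placeholder names:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/types.h>

struct foo_plat {			/* hypothetical per-SoC data */
	bool has_gals;
	u32  bus_sel;
};

static const struct foo_plat foo_plat_a = { .has_gals = true };

static const struct of_device_id foo_of_ids[] = {
	{ .compatible = "vendor,foo-a", .data = &foo_plat_a },
	{ /* sentinel */ }
};

static int foo_get_plat(struct device *dev, const struct foo_plat **plat)
{
	*plat = of_device_get_match_data(dev);
	return *plat ? 0 : -EINVAL;
}
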
@@ -366,13 +418,13 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct mtk_smi *common;
        struct resource *res;
-       enum mtk_smi_gen smi_gen;
        int ret;
 
        common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
        if (!common)
                return -ENOMEM;
        common->dev = dev;
+       common->plat = of_device_get_match_data(dev);
 
        common->clk_apb = devm_clk_get(dev, "apb");
        if (IS_ERR(common->clk_apb))
@@ -382,14 +434,23 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
        if (IS_ERR(common->clk_smi))
                return PTR_ERR(common->clk_smi);
 
+       if (common->plat->has_gals) {
+               common->clk_gals0 = devm_clk_get(dev, "gals0");
+               if (IS_ERR(common->clk_gals0))
+                       return PTR_ERR(common->clk_gals0);
+
+               common->clk_gals1 = devm_clk_get(dev, "gals1");
+               if (IS_ERR(common->clk_gals1))
+                       return PTR_ERR(common->clk_gals1);
+       }
+
        /*
         * for mtk smi gen 1, we need to get the ao(always on) base to config
         * m4u port, and we need to enable the aync clock for transform the smi
         * clock into emi clock domain, but for mtk smi gen2, there's no smi ao
         * base.
         */
-       smi_gen = (enum mtk_smi_gen)of_device_get_match_data(dev);
-       if (smi_gen == MTK_SMI_GEN1) {
+       if (common->plat->gen == MTK_SMI_GEN1) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                common->smi_ao_base = devm_ioremap_resource(dev, res);
                if (IS_ERR(common->smi_ao_base))
@@ -402,6 +463,11 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
                ret = clk_prepare_enable(common->clk_async);
                if (ret)
                        return ret;
+       } else {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+               common->base = devm_ioremap_resource(dev, res);
+               if (IS_ERR(common->base))
+                       return PTR_ERR(common->base);
        }
        pm_runtime_enable(dev);
        platform_set_drvdata(pdev, common);
@@ -414,12 +480,42 @@ static int mtk_smi_common_remove(struct platform_device *pdev)
        return 0;
 }
 
+static int __maybe_unused mtk_smi_common_resume(struct device *dev)
+{
+       struct mtk_smi *common = dev_get_drvdata(dev);
+       u32 bus_sel = common->plat->bus_sel;
+       int ret;
+
+       ret = mtk_smi_clk_enable(common);
+       if (ret) {
+               dev_err(common->dev, "Failed to enable clock(%d).\n", ret);
+               return ret;
+       }
+
+       if (common->plat->gen == MTK_SMI_GEN2 && bus_sel)
+               writel(bus_sel, common->base + SMI_BUS_SEL);
+       return 0;
+}
+
+static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
+{
+       struct mtk_smi *common = dev_get_drvdata(dev);
+
+       mtk_smi_clk_disable(common);
+       return 0;
+}
+
+static const struct dev_pm_ops smi_common_pm_ops = {
+       SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL)
+};
+
 static struct platform_driver mtk_smi_common_driver = {
        .probe  = mtk_smi_common_probe,
        .remove = mtk_smi_common_remove,
        .driver = {
                .name = "mtk-smi-common",
                .of_match_table = mtk_smi_common_of_ids,
+               .pm             = &smi_common_pm_ops,
        }
 };
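
The net effect of the mtk-smi rework above is that clock and power handling moves out of mtk_smi_larb_get()/put() and into runtime-PM callbacks: both the larb and the common block gain SET_RUNTIME_PM_OPS handlers, and consumers now just take and drop a runtime-PM reference on the larb device. A minimal sketch of that shape follows; the "foo" names are placeholders and error handling is trimmed.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct foo {
	struct clk *clk_apb, *clk_smi;
};

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	struct foo *foo = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(foo->clk_apb);
	if (ret)
		return ret;

	ret = clk_prepare_enable(foo->clk_smi);
	if (ret)
		clk_disable_unprepare(foo->clk_apb);
	return ret;
}

static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	struct foo *foo = dev_get_drvdata(dev);

	clk_disable_unprepare(foo->clk_smi);
	clk_disable_unprepare(foo->clk_apb);
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
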
 
index 601cefb5c9d8eb2b24e6f2c194cf446a3477b680..050478cabc95d045f3a42880353954e2a296a861 100644 (file)
@@ -729,7 +729,7 @@ static int rk808_remove(struct i2c_client *client)
        return 0;
 }
 
-static int rk8xx_suspend(struct device *dev)
+static int __maybe_unused rk8xx_suspend(struct device *dev)
 {
        struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
        int ret = 0;
@@ -749,7 +749,7 @@ static int rk8xx_suspend(struct device *dev)
        return ret;
 }
 
-static int rk8xx_resume(struct device *dev)
+static int __maybe_unused rk8xx_resume(struct device *dev)
 {
        struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
        int ret = 0;
@@ -768,7 +768,7 @@ static int rk8xx_resume(struct device *dev)
 
        return ret;
 }
-SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume);
+static SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume);
 
 static struct i2c_driver rk808_i2c_driver = {
        .driver = {
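
The rk8xx hunks above are warning fixes: SIMPLE_DEV_PM_OPS() defines a struct dev_pm_ops, so leaving it non-static trips missing-prototype/sparse warnings, and when CONFIG_PM_SLEEP is disabled the macro references no callbacks, leaving the suspend/resume functions unused unless marked __maybe_unused. A minimal sketch with placeholder names:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	return 0;	/* quiesce the device here */
}

static int __maybe_unused foo_resume(struct device *dev)
{
	return 0;	/* restore the device here */
}

/* static + __maybe_unused keep this warning-free when CONFIG_PM_SLEEP=n. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
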
index 1606658b9b7e35ce307c25d3dcf770ef3620f6d8..24245ccdba7207f9bb021708de67fd422d04cacb 100644 (file)
@@ -22,7 +22,7 @@ struct lkdtm_list {
  * recurse past the end of THREAD_SIZE by default.
  */
 #if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
-#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
+#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
 #else
 #define REC_STACK_SIZE (THREAD_SIZE / 8)
 #endif
@@ -91,7 +91,7 @@ void lkdtm_LOOP(void)
 
 void lkdtm_EXHAUST_STACK(void)
 {
-       pr_info("Calling function with %d frame size to depth %d ...\n",
+       pr_info("Calling function with %lu frame size to depth %d ...\n",
                REC_STACK_SIZE, recur_count);
        recursive_loop(recur_count);
        pr_info("FAIL: survived without exhausting stack?!\n");
index 6c0173772162741f3d4b461a1c8bc0e221e0211a..77f7dff7098d4633f4e37e691027fd3c36c1d7a6 100644 (file)
@@ -81,6 +81,8 @@
 
 #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
 
+#define MEI_DEV_ID_TGP_LP     0xA0E0  /* Tiger Lake Point LP */
+
 #define MEI_DEV_ID_MCC        0x4B70  /* Mule Creek Canyon (EHL) */
 #define MEI_DEV_ID_MCC_4      0x4B75  /* Mule Creek Canyon 4 (EHL) */
 
index 57cb68f5cc64034b7369729dc2c4d66742c92675..541538eff8b110490420099be16b6bd0b5c18601 100644 (file)
@@ -98,6 +98,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
 
+       {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
+
        {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
 
index 8840299420e0b287f3cd81733a1c13a0c7957fb0..5e6be1527571aaf0d7e7e4646768b123f9e85f57 100644 (file)
@@ -691,7 +691,6 @@ static int vmballoon_alloc_page_list(struct vmballoon *b,
                }
 
                if (page) {
-                       vmballoon_mark_page_offline(page, ctl->page_size);
                        /* Success. Add the page to the list and continue. */
                        list_add(&page->lru, &ctl->pages);
                        continue;
@@ -930,7 +929,6 @@ static void vmballoon_release_page_list(struct list_head *page_list,
 
        list_for_each_entry_safe(page, tmp, page_list, lru) {
                list_del(&page->lru);
-               vmballoon_mark_page_online(page, page_size);
                __free_pages(page, vmballoon_page_order(page_size));
        }
 
@@ -1005,6 +1003,7 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
                                        enum vmballoon_page_size_type page_size)
 {
        unsigned long flags;
+       struct page *page;
 
        if (page_size == VMW_BALLOON_4K_PAGE) {
                balloon_page_list_enqueue(&b->b_dev_info, pages);
@@ -1014,6 +1013,11 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
                 * for the balloon compaction mechanism.
                 */
                spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
+
+               list_for_each_entry(page, pages, lru) {
+                       vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
+               }
+
                list_splice_init(pages, &b->huge_pages);
                __count_vm_events(BALLOON_INFLATE, *n_pages *
                                  vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
@@ -1056,6 +1060,8 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b,
        /* 2MB pages */
        spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
        list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
+               vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
+
                list_move(&page->lru, pages);
                if (++i == n_req_pages)
                        break;
index bad89b6e08024f8b296701cd0a2aebbb79dbad0b..345addd9306defffd6e43d667672df6f22966bc1 100644 (file)
@@ -310,7 +310,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
 
        entry = container_of(resource, struct dbell_entry, resource);
        if (entry->run_delayed) {
-               schedule_work(&entry->work);
+               if (!schedule_work(&entry->work))
+                       vmci_resource_put(resource);
        } else {
                entry->notify_cb(entry->client_data);
                vmci_resource_put(resource);
@@ -361,7 +362,8 @@ static void dbell_fire_entries(u32 notify_idx)
                    atomic_read(&dbell->active) == 1) {
                        if (dbell->run_delayed) {
                                vmci_resource_get(&dbell->resource);
-                               schedule_work(&dbell->work);
+                               if (!schedule_work(&dbell->work))
+                                       vmci_resource_put(&dbell->resource);
                        } else {
                                dbell->notify_cb(dbell->client_data);
                        }
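
Both vmci_doorbell hunks above fix the same leak: a reference is taken for the work handler, but schedule_work() returns false when the work item is already queued, in which case the handler runs only once and drops only one reference, so the extra one must be released by the caller. A minimal sketch of the pattern with a made-up object:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_bell {			/* hypothetical example object */
	struct kref ref;
	struct work_struct work;
};

static void foo_bell_release(struct kref *ref)
{
	kfree(container_of(ref, struct foo_bell, ref));
}

static void foo_bell_fire(struct foo_bell *bell)
{
	kref_get(&bell->ref);		/* reference for the work handler */
	if (!schedule_work(&bell->work))
		/* Already queued: the handler will drop only one reference. */
		kref_put(&bell->ref, foo_bell_release);
}
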
index 74e4364bc9fb5df992cffe028db41e3de1072415..09113b9ad67907822ed7e53bcae652e40ec44f4c 100644 (file)
@@ -564,7 +564,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
        if (index == EXT_CSD_SANITIZE_START)
                cmd.sanitize_busy = true;
 
-       err = mmc_wait_for_cmd(host, &cmd, 0);
+       err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                goto out;
 
index d681e8aaca837f19298a7d144ea5ae02f5547ada..fe914ff5f5d660e3df1d8e4ce6c22a95d83e72c1 100644 (file)
@@ -1292,6 +1292,12 @@ int mmc_attach_sd(struct mmc_host *host)
                        goto err;
        }
 
+       /*
+        * Some SD cards claim an out-of-spec VDD voltage range. Let's treat
+        * these bits as invalid, especially bit 7.
+        */
+       ocr &= ~0x7FFF;
+
        rocr = mmc_select_voltage(host, ocr);
 
        /*
index 7e0d3a49c06d8ec443f0b8493c617411efbf7af8..bb31e13648d6577d47c5b0bd268a7cf0ccc80d5a 100644 (file)
@@ -597,7 +597,7 @@ static void bcm2835_finish_request(struct bcm2835_host *host)
        struct dma_chan *terminate_chan = NULL;
        struct mmc_request *mrq;
 
-       cancel_delayed_work_sync(&host->timeout_work);
+       cancel_delayed_work(&host->timeout_work);
 
        mrq = host->mrq;
 
index 64d3b5fb7fe563e0c958dc21e2a8090ce3e0d2d2..4a2872f49a6079445c339fa3c52f6111f1f63b16 100644 (file)
@@ -774,8 +774,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
        /* All SDHI have SDIO status bits which must be 1 */
        mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
 
-       pm_runtime_enable(&pdev->dev);
-
        ret = renesas_sdhi_clk_enable(host);
        if (ret)
                goto efree;
@@ -856,8 +854,6 @@ edisclk:
 efree:
        tmio_mmc_host_free(host);
 
-       pm_runtime_disable(&pdev->dev);
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
@@ -869,8 +865,6 @@ int renesas_sdhi_remove(struct platform_device *pdev)
        tmio_mmc_host_remove(host);
        renesas_sdhi_clk_disable(host);
 
-       pm_runtime_disable(&pdev->dev);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
index 163d1cf4367ecb87d0a113cbcee30bfe0315d7d4..44139fceac2467d08fda740ce4a69d80bd7304c5 100644 (file)
@@ -369,6 +369,7 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
        host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
        host->mmc_host_ops.hs400_enhanced_strobe =
                                sdhci_cdns_hs400_enhanced_strobe;
+       sdhci_enable_v4_mode(host);
 
        sdhci_get_of_property(pdev);
 
index d4e7e8b7be772dadfb8d07206770f98e31c0801c..e7d1920729fbc98aa3fe527e3e2fce5a28379af5 100644 (file)
@@ -357,6 +357,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
        pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
        pm_runtime_use_autosuspend(&pdev->dev);
 
+       /* HS200 is broken at this moment */
+       host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
+
        ret = sdhci_add_host(host);
        if (ret)
                goto pm_runtime_disable;
index 9dc4548271b4b25d95c82f035b98f4f62f1e3c29..19944b0049db0d4b8778c00a4b2fa93535f029a9 100644 (file)
@@ -432,7 +432,6 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
                                        mmc_hostname(host->mmc));
                                host->flags &= ~SDHCI_SIGNALING_330;
                                host->flags |= SDHCI_SIGNALING_180;
-                               host->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
                                host->mmc->caps2 |= MMC_CAP2_NO_SD;
                                host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
                                pci_write_config_dword(chip->pdev,
@@ -682,6 +681,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
 const struct sdhci_pci_fixes sdhci_o2 = {
        .probe = sdhci_pci_o2_probe,
        .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+       .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
        .probe_slot = sdhci_pci_o2_probe_slot,
 #ifdef CONFIG_PM_SLEEP
        .resume = sdhci_pci_o2_resume,
index 83a4767ca680f45f5ebe1fcddf3f2491ccb310cc..d07b9793380f06b2dac0c1225ce58e71a59f3807 100644 (file)
@@ -217,10 +217,11 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
        struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
        u32 div, val, mask;
 
-       div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
+       sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
 
-       clk |= ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
-       sdhci_enable_clk(host, clk);
+       div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
+       div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
+       sdhci_enable_clk(host, div);
 
        /* enable auto gate sdhc_enable_auto_gate */
        val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
@@ -373,6 +374,11 @@ static unsigned int sdhci_sprd_get_max_timeout_count(struct sdhci_host *host)
        return 1 << 31;
 }
 
+static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host)
+{
+       return 0;
+}
+
 static struct sdhci_ops sdhci_sprd_ops = {
        .read_l = sdhci_sprd_readl,
        .write_l = sdhci_sprd_writel,
@@ -385,6 +391,7 @@ static struct sdhci_ops sdhci_sprd_ops = {
        .set_uhs_signaling = sdhci_sprd_set_uhs_signaling,
        .hw_reset = sdhci_sprd_hw_reset,
        .get_max_timeout_count = sdhci_sprd_get_max_timeout_count,
+       .get_ro = sdhci_sprd_get_ro,
 };
 
 static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -501,9 +508,12 @@ static void sdhci_sprd_phy_param_parse(struct sdhci_sprd_host *sprd_host,
 }
 
 static const struct sdhci_pltfm_data sdhci_sprd_pdata = {
-       .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+       .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+                 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+                 SDHCI_QUIRK_MISSING_CAPS,
        .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
-                  SDHCI_QUIRK2_USE_32BIT_BLK_CNT,
+                  SDHCI_QUIRK2_USE_32BIT_BLK_CNT |
+                  SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
        .ops = &sdhci_sprd_ops,
 };
 
@@ -605,6 +615,16 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
 
        sdhci_enable_v4_mode(host);
 
+       /*
+        * Supply the existing CAPS, but clear the UHS-I modes. This
+        * will allow these modes to be specified only by device
+        * tree properties through mmc_of_parse().
+        */
+       host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+       host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+       host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
+                        SDHCI_SUPPORT_DDR50);
+
        ret = sdhci_setup_host(host);
        if (ret)
                goto pm_runtime_disable;
index f4d4761cf20ab4f700e1f4054dc8dfbf9318962c..02d8f524bb9e6fc9b181d3d9616040ce3bb94dc9 100644 (file)
@@ -258,6 +258,16 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
        }
 }
 
+static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
+{
+       /*
+        * Write-enable shall be assumed if GPIO is missing in a board's
+        * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
+        * Tegra.
+        */
+       return mmc_gpio_get_ro(host->mmc);
+}
+
 static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1224,6 +1234,7 @@ static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
 };
 
 static const struct sdhci_ops tegra_sdhci_ops = {
+       .get_ro     = tegra_sdhci_get_ro,
        .read_w     = tegra_sdhci_readw,
        .write_l    = tegra_sdhci_writel,
        .set_clock  = tegra_sdhci_set_clock,
@@ -1279,6 +1290,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
 };
 
 static const struct sdhci_ops tegra114_sdhci_ops = {
+       .get_ro     = tegra_sdhci_get_ro,
        .read_w     = tegra_sdhci_readw,
        .write_w    = tegra_sdhci_writew,
        .write_l    = tegra_sdhci_writel,
@@ -1332,6 +1344,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
 };
 
 static const struct sdhci_ops tegra210_sdhci_ops = {
+       .get_ro     = tegra_sdhci_get_ro,
        .read_w     = tegra_sdhci_readw,
        .write_w    = tegra210_sdhci_writew,
        .write_l    = tegra_sdhci_writel,
@@ -1366,6 +1379,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
 };
 
 static const struct sdhci_ops tegra186_sdhci_ops = {
+       .get_ro     = tegra_sdhci_get_ro,
        .read_w     = tegra_sdhci_readw,
        .write_l    = tegra_sdhci_writel,
        .set_clock  = tegra_sdhci_set_clock,
index 8539e10784b4096186024319c18f0df4e555e435..93e83ad25976e756bdb04408b51c506925b02534 100644 (file)
@@ -172,8 +172,6 @@ static int tmio_mmc_probe(struct platform_device *pdev)
        host->mmc->f_max = pdata->hclk;
        host->mmc->f_min = pdata->hclk / 512;
 
-       pm_runtime_enable(&pdev->dev);
-
        ret = tmio_mmc_host_probe(host);
        if (ret)
                goto host_free;
@@ -193,7 +191,6 @@ host_remove:
        tmio_mmc_host_remove(host);
 host_free:
        tmio_mmc_host_free(host);
-       pm_runtime_disable(&pdev->dev);
 cell_disable:
        if (cell->disable)
                cell->disable(pdev);
@@ -210,8 +207,6 @@ static int tmio_mmc_remove(struct platform_device *pdev)
        if (cell->disable)
                cell->disable(pdev);
 
-       pm_runtime_disable(&pdev->dev);
-
        return 0;
 }
 
index c5ba13fae39920e656de9737412804e2f5155a77..2f0b092d6dcc64d199b75ec7563600f1954502dd 100644 (file)
@@ -163,6 +163,7 @@ struct tmio_mmc_host {
        unsigned long           last_req_ts;
        struct mutex            ios_lock;       /* protect set_ios() context */
        bool                    native_hotplug;
+       bool                    runtime_synced;
        bool                    sdio_irq_enabled;
 
        /* Mandatory callback */
index 2cb3f951c3e2b793761726feb2bfa7a3ac45a980..9b6e1001e77c3acdc5c959b43bb397853f2d9eb2 100644 (file)
@@ -1153,15 +1153,6 @@ void tmio_mmc_host_free(struct tmio_mmc_host *host)
 }
 EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
 
-/**
- * tmio_mmc_host_probe() - Common probe for all implementations
- * @_host: Host to probe
- *
- * Perform tasks common to all implementations probe functions.
- *
- * The caller should have called pm_runtime_enable() prior to calling
- * the common probe function.
- */
 int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
 {
        struct platform_device *pdev = _host->pdev;
@@ -1257,19 +1248,22 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
        /* See if we also get DMA */
        tmio_mmc_request_dma(_host, pdata);
 
-       pm_runtime_set_active(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
        pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_get_sync(&pdev->dev);
 
        ret = mmc_add_host(mmc);
        if (ret)
                goto remove_host;
 
        dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
+       pm_runtime_put(&pdev->dev);
 
        return 0;
 
 remove_host:
+       pm_runtime_put_noidle(&pdev->dev);
        tmio_mmc_host_remove(_host);
        return ret;
 }
@@ -1280,12 +1274,11 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
        struct platform_device *pdev = host->pdev;
        struct mmc_host *mmc = host->mmc;
 
+       pm_runtime_get_sync(&pdev->dev);
+
        if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
                sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
 
-       if (!host->native_hotplug)
-               pm_runtime_get_sync(&pdev->dev);
-
        dev_pm_qos_hide_latency_limit(&pdev->dev);
 
        mmc_remove_host(mmc);
@@ -1294,7 +1287,10 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
        tmio_mmc_release_dma(host);
 
        pm_runtime_dont_use_autosuspend(&pdev->dev);
+       if (host->native_hotplug)
+               pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
 }
 EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
 
@@ -1337,6 +1333,11 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
 {
        struct tmio_mmc_host *host = dev_get_drvdata(dev);
 
+       if (!host->runtime_synced) {
+               host->runtime_synced = true;
+               return 0;
+       }
+
        tmio_mmc_clk_enable(host);
        tmio_mmc_hw_reset(host->mmc);
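
Taken together with the renesas_sdhi and tmio_mmc hunks earlier and the uniphier-sd hunks that follow, the change above centralizes runtime PM in the tmio core: pm_runtime_enable() moves out of the glue drivers into tmio_mmc_host_probe()/remove(), which hold a runtime-PM reference across registration and drop it once the host is up, while the new runtime_synced flag makes the very first runtime resume a no-op, presumably because probe has already brought the hardware up. A minimal sketch of the balanced probe-side ordering, with placeholder names:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_register_host(struct platform_device *pdev) { return 0; } /* stand-in */

static int foo_host_probe(struct platform_device *pdev)	/* sketch only */
{
	int ret;

	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);	/* keep powered while registering */

	ret = foo_register_host(pdev);
	if (ret)
		goto err_put;

	pm_runtime_put(&pdev->dev);		/* allow autosuspend from now on */
	return 0;

err_put:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}
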
 
index 49aad9a79c18d24aba0d45340964d24b5656aeb9..91a2be41edf6196bd9fc7f73262ab206efa46d1f 100644 (file)
@@ -631,7 +631,6 @@ static int uniphier_sd_probe(struct platform_device *pdev)
        host->clk_disable = uniphier_sd_clk_disable;
        host->set_clock = uniphier_sd_set_clock;
 
-       pm_runtime_enable(&pdev->dev);
        ret = uniphier_sd_clk_enable(host);
        if (ret)
                goto free_host;
@@ -653,7 +652,6 @@ static int uniphier_sd_probe(struct platform_device *pdev)
 
 free_host:
        tmio_mmc_host_free(host);
-       pm_runtime_disable(&pdev->dev);
 
        return ret;
 }
@@ -664,7 +662,6 @@ static int uniphier_sd_remove(struct platform_device *pdev)
 
        tmio_mmc_host_remove(host);
        uniphier_sd_clk_disable(host);
-       pm_runtime_disable(&pdev->dev);
 
        return 0;
 }
index b4e3caf7d799e62ff875a1d9c98f41b3fe0e7823..a4d8968d133ddbc9b7aff8e08a37e6e262eb561f 100644 (file)
@@ -1,5 +1,6 @@
 menuconfig MTD_HYPERBUS
        tristate "HyperBus support"
+       depends on HAS_IOMEM
        select MTD_CFI
        select MTD_MAP_BANK_WIDTH_2
        select MTD_CFI_AMDSTD
index 3811fdbda13e93c5033e0fc0c6bdbfc089bfb5f8..28c963a21dac33465f01e04515d9af3543d8e9c1 100644 (file)
@@ -478,6 +478,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
                                unsigned long *supported,
                                struct phylink_link_state *state)
 {
+       struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 
        if (!phy_interface_mode_is_rgmii(state->interface) &&
@@ -487,8 +488,10 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
            state->interface != PHY_INTERFACE_MODE_INTERNAL &&
            state->interface != PHY_INTERFACE_MODE_MOCA) {
                bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
-               dev_err(ds->dev,
-                       "Unsupported interface: %d\n", state->interface);
+               if (port != core_readl(priv, CORE_IMP0_PRT_ID))
+                       dev_err(ds->dev,
+                               "Unsupported interface: %d for port %d\n",
+                               state->interface, port);
                return;
        }
 
@@ -526,6 +529,9 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
        u32 id_mode_dis = 0, port_mode;
        u32 reg, offset;
 
+       if (port == core_readl(priv, CORE_IMP0_PRT_ID))
+               return;
+
        if (priv->type == BCM7445_DEVICE_ID)
                offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
        else
index 5a9e27b337a8296839442c21b2db005fd3179eac..098b01e4ed1a9b0c4841d5c0e7241248a75b2234 100644 (file)
@@ -81,6 +81,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
        { .compatible = "microchip,ksz9897" },
        { .compatible = "microchip,ksz9893" },
        { .compatible = "microchip,ksz9563" },
+       { .compatible = "microchip,ksz8563" },
        {},
 };
 MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
index ee7096d8af070e86543888c971fea0983e1e5433..72ec250b954088e011be15b57ebf0d66b19aa18b 100644 (file)
@@ -128,6 +128,7 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
 
 #define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign)                \
        {                                                               \
+               .name = #width,                                         \
                .val_bits = (width),                                    \
                .reg_stride = (width) / 8,                              \
                .reg_bits = (regbits) + (regalign),                     \
index b41f23679a087b2a49676ae362044c1b8085820a..7ce9c69e9c44f3d4288d04f710b96222ebf2fb77 100644 (file)
@@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void)
 
        ret = xgbe_platform_init();
        if (ret)
-               return ret;
+               goto err_platform_init;
 
        ret = xgbe_pci_init();
        if (ret)
-               return ret;
+               goto err_pci_init;
 
        return 0;
+
+err_pci_init:
+       xgbe_platform_exit();
+err_platform_init:
+       unregister_netdevice_notifier(&xgbe_netdev_notifier);
+       return ret;
 }
 
 static void __exit xgbe_mod_exit(void)
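
The xgbe_mod_init() hunk above is a straightforward error-unwind fix: the early returns leaked the already-registered netdev notifier (and, for a PCI-init failure, the platform registration), so each error path now undoes the completed steps in reverse order. A minimal sketch of that shape; the "foo" notifier and helpers are stand-ins, not the driver's real symbols.

#include <linux/init.h>
#include <linux/netdevice.h>

static struct notifier_block foo_netdev_notifier;	/* .notifier_call set by the real driver */
static int foo_platform_init(void) { return 0; }	/* stand-ins for the sub-inits */
static void foo_platform_exit(void) { }
static int foo_pci_init(void) { return 0; }

static int __init foo_mod_init(void)
{
	int ret;

	ret = register_netdevice_notifier(&foo_netdev_notifier);
	if (ret)
		return ret;

	ret = foo_platform_init();
	if (ret)
		goto err_platform_init;

	ret = foo_pci_init();
	if (ret)
		goto err_pci_init;

	return 0;

err_pci_init:
	foo_platform_exit();
err_platform_init:
	unregister_netdevice_notifier(&foo_netdev_notifier);
	return ret;
}
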
index 440690b1873409b843bfd898f57944b231f5b047..aee827f07c160cb1591e5156dd51dc81de9fe527 100644 (file)
@@ -431,7 +431,8 @@ int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
                if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
                        break;
        }
-       if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
+       if (rule && rule->type == aq_rx_filter_vlan &&
+           be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
                struct ethtool_rxnfc cmd;
 
                cmd.fs.location = rule->aq_fsp.location;
@@ -843,7 +844,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
                return err;
 
        if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
-               if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) {
+               if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
                        err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
                                !(aq_nic->packet_filter & IFF_PROMISC));
                        aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
index 100722ad5c2d8002631addb5bbc3f858b1ee9d62..b4a0fb281e69ea2631db1e6e45373e0d5a556a27 100644 (file)
@@ -61,6 +61,10 @@ static int aq_ndev_open(struct net_device *ndev)
        if (err < 0)
                goto err_exit;
 
+       err = aq_filters_vlans_update(aq_nic);
+       if (err < 0)
+               goto err_exit;
+
        err = aq_nic_start(aq_nic);
        if (err < 0)
                goto err_exit;
index e1392766e21e7980d39777fba5fbda3865d136c4..8f66e781781182e553c53ba73fde92581de21ee7 100644 (file)
@@ -393,7 +393,7 @@ int aq_nic_start(struct aq_nic_s *self)
                                                   self->aq_nic_cfg.link_irq_vec);
                        err = request_threaded_irq(irqvec, NULL,
                                                   aq_linkstate_threaded_isr,
-                                                  IRQF_SHARED,
+                                                  IRQF_SHARED | IRQF_ONESHOT,
                                                   self->ndev->name, self);
                        if (err < 0)
                                goto err_exit;
index 715685aa48c399dbf75c91833d466a8bff7983a4..28892b8acd0e1e070ed4b246548b780728c69948 100644 (file)
@@ -86,6 +86,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
                        }
                }
 
+err_exit:
                if (!was_tx_cleaned)
                        work_done = budget;
 
@@ -95,7 +96,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
                                        1U << self->aq_ring_param.vec_idx);
                }
        }
-err_exit:
+
        return work_done;
 }
 
index d3a0b614dbfa2148d4243238bf18495bf36c456f..b22196880d6d3226ce5845c02ef8d7fa601bae95 100644 (file)
@@ -1124,6 +1124,7 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
        .set_coalesce           = bcmgenet_set_coalesce,
        .get_link_ksettings     = bcmgenet_get_link_ksettings,
        .set_link_ksettings     = bcmgenet_set_link_ksettings,
+       .get_ts_info            = ethtool_op_get_ts_info,
 };
 
 /* Power down the unimac, based on mode. */
index 5ca17e62dc3e07c5fade8c4881a73d9b8b45be49..35b59b5edf0f2ec653cd0cb21361247e4194a876 100644 (file)
@@ -4154,7 +4154,7 @@ static const struct of_device_id macb_dt_ids[] = {
        { .compatible = "cdns,emac", .data = &emac_config },
        { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
        { .compatible = "cdns,zynq-gem", .data = &zynq_config },
-       { .compatible = "sifive,fu540-macb", .data = &fu540_c000_config },
+       { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, macb_dt_ids);
index 2fd2586e42bf3f6c799df24e520a1387af080292..bc594892507acb2f569975e338886288fbeff062 100644 (file)
@@ -82,7 +82,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
        n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
        if (n != 1) {
                err = -EPERM;
-               goto err_irq;
+               goto err_irq_vectors;
        }
 
        ptp_qoriq->irq = pci_irq_vector(pdev, 0);
@@ -107,6 +107,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
 err_no_clock:
        free_irq(ptp_qoriq->irq, ptp_qoriq);
 err_irq:
+       pci_free_irq_vectors(pdev);
+err_irq_vectors:
        iounmap(base);
 err_ioremap:
        kfree(ptp_qoriq);
@@ -125,6 +127,7 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
 
        enetc_phc_index = -1;
        ptp_qoriq_free(ptp_qoriq);
+       pci_free_irq_vectors(pdev);
        kfree(ptp_qoriq);
 
        pci_release_mem_regions(pdev);
index 49729875238123cdc88b20e3feabe291dd89dcfd..aca95f64bde80b50642ea2e3066111dcb6745a09 100644 (file)
@@ -50,7 +50,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
                                  u64_stats_fetch_begin(&priv->tx[ring].statss);
                                s->tx_packets += priv->tx[ring].pkt_done;
                                s->tx_bytes += priv->tx[ring].bytes_done;
-                       } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+                       } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
                                                       start));
                }
        }
index 0a7243825e7bae6d52f0b441e3174eeed61968f8..85a3b062371df881d7184e1733803e59c58259f2 100644 (file)
@@ -98,7 +98,7 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
          .reset_level = HNAE3_GLOBAL_RESET },
        { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
          .reset_level = HNAE3_GLOBAL_RESET },
-       { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
+       { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow",
          .reset_level = HNAE3_GLOBAL_RESET },
        { .int_msk = BIT(3), .msg = "tx_buf_overflow",
          .reset_level = HNAE3_GLOBAL_RESET },
index cebd20f3128d4a6c6344c94b80496875e091d8d7..5cb55ea671e3535ba1e23099f2b722a18f91b2af 100644 (file)
@@ -1983,6 +1983,13 @@ static void __ibmvnic_reset(struct work_struct *work)
 
        rwi = get_next_rwi(adapter);
        while (rwi) {
+               if (adapter->state == VNIC_REMOVING ||
+                   adapter->state == VNIC_REMOVED) {
+                       kfree(rwi);
+                       rc = EBUSY;
+                       break;
+               }
+
                if (adapter->force_reset_recovery) {
                        adapter->force_reset_recovery = false;
                        rc = do_hard_reset(adapter, rwi, reset_state);
index 7882148abb4320ccf3a979b565fa93990366f991..51c696b6bf644d2a902cd0de495bdb792c23e6e0 100644 (file)
@@ -36,6 +36,7 @@
 #include <net/vxlan.h>
 #include <net/mpls.h>
 #include <net/xdp_sock.h>
+#include <net/xfrm.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -2621,7 +2622,7 @@ adjust_by_size:
                /* 16K ints/sec to 9.2K ints/sec */
                avg_wire_size *= 15;
                avg_wire_size += 11452;
-       } else if (avg_wire_size <= 1980) {
+       } else if (avg_wire_size < 1968) {
                /* 9.2K ints/sec to 8K ints/sec */
                avg_wire_size *= 5;
                avg_wire_size += 22420;
@@ -2654,6 +2655,8 @@ adjust_by_size:
        case IXGBE_LINK_SPEED_2_5GB_FULL:
        case IXGBE_LINK_SPEED_1GB_FULL:
        case IXGBE_LINK_SPEED_10_FULL:
+               if (avg_wire_size > 8064)
+                       avg_wire_size = 8064;
                itr += DIV_ROUND_UP(avg_wire_size,
                                    IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
                       IXGBE_ITR_ADAPTIVE_MIN_INC;
@@ -8695,7 +8698,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 #endif /* IXGBE_FCOE */
 
 #ifdef CONFIG_IXGBE_IPSEC
-       if (secpath_exists(skb) &&
+       if (xfrm_offload(skb) &&
            !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
                goto out_drop;
 #endif
index 6b609553329fa98647b78f4e1e5d3e9223a8aa7a..a3b6d8c89127f1a2fcfdfb84a1824dbbedc019e6 100644 (file)
@@ -633,19 +633,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
 bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
                            struct ixgbe_ring *tx_ring, int napi_budget)
 {
+       u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
        unsigned int total_packets = 0, total_bytes = 0;
-       u32 i = tx_ring->next_to_clean, xsk_frames = 0;
-       unsigned int budget = q_vector->tx.work_limit;
        struct xdp_umem *umem = tx_ring->xsk_umem;
        union ixgbe_adv_tx_desc *tx_desc;
        struct ixgbe_tx_buffer *tx_bi;
-       bool xmit_done;
+       u32 xsk_frames = 0;
 
-       tx_bi = &tx_ring->tx_buffer_info[i];
-       tx_desc = IXGBE_TX_DESC(tx_ring, i);
-       i -= tx_ring->count;
+       tx_bi = &tx_ring->tx_buffer_info[ntc];
+       tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
 
-       do {
+       while (ntc != ntu) {
                if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
                        break;
 
@@ -661,22 +659,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 
                tx_bi++;
                tx_desc++;
-               i++;
-               if (unlikely(!i)) {
-                       i -= tx_ring->count;
+               ntc++;
+               if (unlikely(ntc == tx_ring->count)) {
+                       ntc = 0;
                        tx_bi = tx_ring->tx_buffer_info;
                        tx_desc = IXGBE_TX_DESC(tx_ring, 0);
                }
 
                /* issue prefetch for next Tx descriptor */
                prefetch(tx_desc);
+       }
 
-               /* update budget accounting */
-               budget--;
-       } while (likely(budget));
-
-       i += tx_ring->count;
-       tx_ring->next_to_clean = i;
+       tx_ring->next_to_clean = ntc;
 
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
@@ -688,8 +682,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
        if (xsk_frames)
                xsk_umem_complete_tx(umem, xsk_frames);
 
-       xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
-       return budget > 0 && xmit_done;
+       return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
 }
 
 int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
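
The ixgbe_clean_xdp_tx_irq() rewrite above drops the budget-based do/while in favour of walking from next_to_clean to next_to_use and stopping at the first descriptor the hardware has not written back. A generic sketch of that ring walk; the struct and the completion callback are invented for illustration.

#include <linux/types.h>

struct foo_ring {			/* hypothetical ring bookkeeping */
	u16 next_to_clean, next_to_use, count;
};

static u16 foo_ring_clean(struct foo_ring *ring,
			  bool (*done)(struct foo_ring *ring, u16 idx))
{
	u16 ntc = ring->next_to_clean;
	u16 cleaned = 0;

	while (ntc != ring->next_to_use) {
		if (!done(ring, ntc))	/* descriptor not completed yet */
			break;
		cleaned++;
		if (++ntc == ring->count)
			ntc = 0;	/* wrap around */
	}

	ring->next_to_clean = ntc;
	return cleaned;
}
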
index d2b41f9f87f80513a793eafe5e65899464488a9c..72872d6ca80c4992bfe2594bfdd273385a3b331c 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/atomic.h>
+#include <net/xfrm.h>
 
 #include "ixgbevf.h"
 
@@ -4161,7 +4162,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
        first->protocol = vlan_get_protocol(skb);
 
 #ifdef CONFIG_IXGBEVF_IPSEC
-       if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+       if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
                goto out_drop;
 #endif
        tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
index a01c75ede871a378ac6e32e7b939c4bdd1d112a6..e0363870f3a55c2bb784a55e9d3ef1aafcf89e4a 100644 (file)
@@ -4931,6 +4931,13 @@ static const struct dmi_system_id msi_blacklist[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "P6T"),
                },
        },
+       {
+               .ident = "ASUS P6X",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "P6X"),
+               },
+       },
        {}
 };
 
index 1f6e16d5ea6be56b5cc663ec1f480e827ece7ae9..309470ec02190471476744602f98be74ab02f57b 100644 (file)
@@ -2240,7 +2240,7 @@ static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
        for (i = 1; i <= dev->caps.num_ports; i++) {
                if (mlx4_dev_port(dev, i, &port_cap)) {
                        mlx4_err(dev,
-                                "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
+                                "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
                } else if ((dev->caps.dmfs_high_steer_mode !=
                            MLX4_STEERING_DMFS_A0_DEFAULT) &&
                           (port_cap.dmfs_optimized_state ==
index 8b93101e1a09b633bb4f84c932ee3b6207f5895d..7833ddef0427897043d7a6cbaa5fc41436ff2189 100644 (file)
@@ -109,13 +109,15 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
 
 static void tx_fill_wi(struct mlx5e_txqsq *sq,
                       u16 pi, u8 num_wqebbs,
-                      skb_frag_t *resync_dump_frag)
+                      skb_frag_t *resync_dump_frag,
+                      u32 num_bytes)
 {
        struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
 
        wi->skb              = NULL;
        wi->num_wqebbs       = num_wqebbs;
        wi->resync_dump_frag = resync_dump_frag;
+       wi->num_bytes        = num_bytes;
 }
 
 void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -143,7 +145,7 @@ post_static_params(struct mlx5e_txqsq *sq,
 
        umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
        build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
-       tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL);
+       tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
        sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
 }
 
@@ -157,7 +159,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
 
        wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
        build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
-       tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL);
+       tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
        sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
 }
 
@@ -248,43 +250,37 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
        mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
 }
 
+struct mlx5e_dump_wqe {
+       struct mlx5_wqe_ctrl_seg ctrl;
+       struct mlx5_wqe_data_seg data;
+};
+
 static int
 tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                    skb_frag_t *frag, u32 tisn, bool first)
 {
        struct mlx5_wqe_ctrl_seg *cseg;
-       struct mlx5_wqe_eth_seg  *eseg;
        struct mlx5_wqe_data_seg *dseg;
-       struct mlx5e_tx_wqe *wqe;
+       struct mlx5e_dump_wqe *wqe;
        dma_addr_t dma_addr = 0;
-       u16 ds_cnt, ds_cnt_inl;
        u8  num_wqebbs;
-       u16 pi, ihs;
+       u16 ds_cnt;
        int fsz;
-
-       ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
-       ihs    = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
-       ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
-       ds_cnt += ds_cnt_inl;
-       ds_cnt += 1; /* one frag */
+       u16 pi;
 
        wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
 
+       ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
 
        cseg = &wqe->ctrl;
-       eseg = &wqe->eth;
-       dseg =  wqe->data;
+       dseg = &wqe->data;
 
        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
        cseg->tisn             = cpu_to_be32(tisn << 8);
        cseg->fm_ce_se         = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
 
-       eseg->inline_hdr.sz = cpu_to_be16(ihs);
-       memcpy(eseg->inline_hdr.start, skb->data, ihs);
-       dseg += ds_cnt_inl;
-
        fsz = skb_frag_size(frag);
        dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                    DMA_TO_DEVICE);
@@ -296,7 +292,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        dseg->byte_count = cpu_to_be32(fsz);
        mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
 
-       tx_fill_wi(sq, pi, num_wqebbs, frag);
+       tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
        sq->pc += num_wqebbs;
 
        WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
@@ -323,7 +319,7 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 
-       tx_fill_wi(sq, pi, 1, NULL);
+       tx_fill_wi(sq, pi, 1, NULL, 0);
 
        mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
 }
index 9314777d99e3f1e542e9cb5d4aa6960ac3849a10..d685122d9ff761794a7f89d860a90e25801f29fc 100644 (file)
@@ -590,7 +590,8 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
                        data_size = crdump_size - offset;
                else
                        data_size = MLX5_CR_DUMP_CHUNK_SIZE;
-               err = devlink_fmsg_binary_put(fmsg, cr_data, data_size);
+               err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
+                                             data_size);
                if (err)
                        goto free_data;
        }
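
The devlink dump fix above advances the source pointer by offset for each chunk; before the change every chunk resent the start of cr_data. A generic sketch of the corrected chunking loop (names and chunk size are illustrative only):

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative only: generic chunked-dump loop with a caller-supplied sink. */
static int ex_dump_chunks(const char *buf, u32 size,
			  int (*put)(const void *chunk, u32 len))
{
	u32 offset, chunk;
	int err;

	for (offset = 0; offset < size; offset += chunk) {
		chunk = min_t(u32, size - offset, 256);
		/* The source must advance with the offset, not restart at buf. */
		err = put(buf + offset, chunk);
		if (err)
			return err;
	}
	return 0;
}
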
@@ -700,6 +701,16 @@ static void poll_health(struct timer_list *t)
        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
                goto out;
 
+       fatal_error = check_fatal_sensors(dev);
+
+       if (fatal_error && !health->fatal_error) {
+               mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
+               dev->priv.health.fatal_error = fatal_error;
+               print_health_info(dev);
+               mlx5_trigger_health_work(dev);
+               goto out;
+       }
+
        count = ioread32be(health->health_counter);
        if (count == health->prev)
                ++health->miss_counter;
@@ -718,15 +729,6 @@ static void poll_health(struct timer_list *t)
        if (health->synd && health->synd != prev_synd)
                queue_work(health->wq, &health->report_work);
 
-       fatal_error = check_fatal_sensors(dev);
-
-       if (fatal_error && !health->fatal_error) {
-               mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
-               dev->priv.health.fatal_error = fatal_error;
-               print_health_info(dev);
-               mlx5_trigger_health_work(dev);
-       }
-
 out:
        mod_timer(&health->timer, get_next_poll_jiffies());
 }
index 39aca1ab46878330e60a939550dc7e52089eb8b3..86fc6e6b46dd481838c8a75740917abcf6d19569 100644 (file)
@@ -317,7 +317,7 @@ static void is2_action_set(struct vcap_data *data,
                break;
        case OCELOT_ACL_ACTION_TRAP:
                VCAP_ACT_SET(PORT_MASK, 0x0);
-               VCAP_ACT_SET(MASK_MODE, 0x0);
+               VCAP_ACT_SET(MASK_MODE, 0x1);
                VCAP_ACT_SET(POLICE_ENA, 0x0);
                VCAP_ACT_SET(POLICE_IDX, 0x0);
                VCAP_ACT_SET(CPU_QU_NUM, 0x0);
index d0a01e8f000aee71413425d94efc921856a062f7..b339125b2f097fcfdafd7e45f9ee8c5eeac5190a 100644 (file)
@@ -232,9 +232,9 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
 
        laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
        if (!laddr) {
-               printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
-               dev_kfree_skb(skb);
-               return NETDEV_TX_BUSY;
+               pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
        }
 
        sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
index 4054b70d77196d2fb21d2c337487aaeb06393a50..5afcb3c4c2ef115709a266d42bdcb2b9cd997416 100644 (file)
@@ -1163,7 +1163,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
             bool clr_gpr, lmem_step step)
 {
        s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
-       bool first = true, last;
+       bool first = true, narrow_ld, last;
        bool needs_inc = false;
        swreg stack_off_reg;
        u8 prev_gpr = 255;
@@ -1209,13 +1209,22 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 
                needs_inc = true;
        }
+
+       narrow_ld = clr_gpr && size < 8;
+
        if (lm3) {
+               unsigned int nop_cnt;
+
                emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
-               /* For size < 4 one slot will be filled by zeroing of upper. */
-               wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
+               /* For size < 4 one slot will be filled by zeroing of upper,
+                * but be careful: that zeroing could be eliminated by the zext
+                * optimization.
+                */
+               nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 2 : 3;
+               wrp_nops(nfp_prog, nop_cnt);
        }
 
-       if (clr_gpr && size < 8)
+       if (narrow_ld)
                wrp_zext(nfp_prog, meta, gpr);
 
        while (size) {
index d5bbe3d6048bf3cec01fc5aadce5c75cc0d5f066..05981b54eaab7dccd70d0f90dd90508c230481ad 100644 (file)
@@ -260,9 +260,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 
        type = cmsg_hdr->type;
        switch (type) {
-       case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
-               nfp_flower_cmsg_portreify_rx(app, skb);
-               break;
        case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
                nfp_flower_cmsg_portmod_rx(app, skb);
                break;
@@ -328,8 +325,7 @@ nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
        struct nfp_flower_priv *priv = app->priv;
        struct sk_buff_head *skb_head;
 
-       if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
-           type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
+       if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
                skb_head = &priv->cmsg_skbs_high;
        else
                skb_head = &priv->cmsg_skbs_low;
@@ -368,6 +364,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
        } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
                /* Acks from the NFP that the route is added - ignore. */
                dev_consume_skb_any(skb);
+       } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
+               /* Handle REIFY acks outside wq to prevent RTNL conflict. */
+               nfp_flower_cmsg_portreify_rx(app, skb);
+               dev_consume_skb_any(skb);
        } else {
                nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
        }
index e209f150c5f25e6fbef793efe2912543e2a16490..457bdc60f3ee8027bff83198c83bf7c612d67782 100644 (file)
@@ -1409,13 +1409,21 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
        struct nfp_flower_priv *priv = app->priv;
        struct flow_block_cb *block_cb;
 
-       if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
-           !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
-             nfp_flower_internal_port_can_offload(app, netdev)))
+       if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+            !nfp_flower_internal_port_can_offload(app, netdev)) ||
+           (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+            nfp_flower_internal_port_can_offload(app, netdev)))
                return -EOPNOTSUPP;
 
        switch (f->command) {
        case FLOW_BLOCK_BIND:
+               cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+               if (cb_priv &&
+                   flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
+                                         cb_priv,
+                                         &nfp_block_cb_list))
+                       return -EBUSY;
+
                cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
                if (!cb_priv)
                        return -ENOMEM;
index a7a80f4b722a9d7dc2d5efa7d5b89fc93bedcb33..f0ee982eb1b5f27672292997477f565545729e22 100644 (file)
@@ -328,13 +328,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
 
        flow.daddr = *(__be32 *)n->primary_key;
 
-       /* Only concerned with route changes for representors. */
-       if (!nfp_netdev_is_nfp_repr(n->dev))
-               return NOTIFY_DONE;
-
        app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
        app = app_priv->app;
 
+       if (!nfp_netdev_is_nfp_repr(n->dev) &&
+           !nfp_flower_internal_port_can_offload(app, n->dev))
+               return NOTIFY_DONE;
+
        /* Only concerned with changes to routes already added to NFP. */
        if (!nfp_tun_has_route(app, flow.daddr))
                return NOTIFY_DONE;
index b327b29f5d577a3f72bbeb3b744ecda2cb27219f..a6b4bfae4684fbbfdc1d83125c11cf55d920a9b0 100644 (file)
@@ -713,6 +713,21 @@ struct nv_skb_map {
        struct nv_skb_map *next_tx_ctx;
 };
 
+struct nv_txrx_stats {
+       u64 stat_rx_packets;
+       u64 stat_rx_bytes; /* not always available in HW */
+       u64 stat_rx_missed_errors;
+       u64 stat_rx_dropped;
+       u64 stat_tx_packets; /* not always available in HW */
+       u64 stat_tx_bytes;
+       u64 stat_tx_dropped;
+};
+
+#define nv_txrx_stats_inc(member) \
+               __this_cpu_inc(np->txrx_stats->member)
+#define nv_txrx_stats_add(member, count) \
+               __this_cpu_add(np->txrx_stats->member, (count))
+
 /*
  * SMP locking:
  * All hardware access under netdev_priv(dev)->lock, except the performance
@@ -797,10 +812,7 @@ struct fe_priv {
 
        /* RX software stats */
        struct u64_stats_sync swstats_rx_syncp;
-       u64 stat_rx_packets;
-       u64 stat_rx_bytes; /* not always available in HW */
-       u64 stat_rx_missed_errors;
-       u64 stat_rx_dropped;
+       struct nv_txrx_stats __percpu *txrx_stats;
 
        /* media detection workaround.
         * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
@@ -826,9 +838,6 @@ struct fe_priv {
 
        /* TX software stats */
        struct u64_stats_sync swstats_tx_syncp;
-       u64 stat_tx_packets; /* not always available in HW */
-       u64 stat_tx_bytes;
-       u64 stat_tx_dropped;
 
        /* msi/msi-x fields */
        u32 msi_flags;
@@ -1721,6 +1730,39 @@ static void nv_update_stats(struct net_device *dev)
        }
 }
 
+static void nv_get_stats(int cpu, struct fe_priv *np,
+                        struct rtnl_link_stats64 *storage)
+{
+       struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
+       unsigned int syncp_start;
+       u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
+       u64 tx_packets, tx_bytes, tx_dropped;
+
+       do {
+               syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
+               rx_packets       = src->stat_rx_packets;
+               rx_bytes         = src->stat_rx_bytes;
+               rx_dropped       = src->stat_rx_dropped;
+               rx_missed_errors = src->stat_rx_missed_errors;
+       } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
+
+       storage->rx_packets       += rx_packets;
+       storage->rx_bytes         += rx_bytes;
+       storage->rx_dropped       += rx_dropped;
+       storage->rx_missed_errors += rx_missed_errors;
+
+       do {
+               syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
+               tx_packets  = src->stat_tx_packets;
+               tx_bytes    = src->stat_tx_bytes;
+               tx_dropped  = src->stat_tx_dropped;
+       } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+
+       storage->tx_packets += tx_packets;
+       storage->tx_bytes   += tx_bytes;
+       storage->tx_dropped += tx_dropped;
+}
+
 /*
  * nv_get_stats64: dev->ndo_get_stats64 function
  * Get latest stats value from the nic.
@@ -1733,7 +1775,7 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
        __releases(&netdev_priv(dev)->hwstats_lock)
 {
        struct fe_priv *np = netdev_priv(dev);
-       unsigned int syncp_start;
+       int cpu;
 
        /*
         * Note: because HW stats are not always available and for
@@ -1746,20 +1788,8 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
         */
 
        /* software stats */
-       do {
-               syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
-               storage->rx_packets       = np->stat_rx_packets;
-               storage->rx_bytes         = np->stat_rx_bytes;
-               storage->rx_dropped       = np->stat_rx_dropped;
-               storage->rx_missed_errors = np->stat_rx_missed_errors;
-       } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
-
-       do {
-               syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
-               storage->tx_packets = np->stat_tx_packets;
-               storage->tx_bytes   = np->stat_tx_bytes;
-               storage->tx_dropped = np->stat_tx_dropped;
-       } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+       for_each_online_cpu(cpu)
+               nv_get_stats(cpu, np, storage);
 
        /* If the nic supports hw counters then retrieve latest values */
        if (np->driver_data & DEV_HAS_STATISTICS_V123) {
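
The forcedeth rework above moves the software counters into per-cpu storage and folds them per online CPU in nv_get_stats64(). A minimal sketch of that allocate/increment/fold pattern, with illustrative names and the driver's u64_stats_sync protection omitted for brevity:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative names only; mirrors the alloc/increment/fold pattern above. */
struct ex_stats {
	u64 rx_packets;
	u64 tx_packets;
};

struct ex_priv {
	struct ex_stats __percpu *stats;
};

static int ex_alloc_stats(struct ex_priv *p)
{
	p->stats = alloc_percpu(struct ex_stats);
	return p->stats ? 0 : -ENOMEM;
}

/* Hot path: bump this CPU's copy, no shared cacheline, no lock. */
static void ex_count_rx(struct ex_priv *p)
{
	this_cpu_inc(p->stats->rx_packets);
}

/* Slow path: fold every online CPU's copy into one total. */
static u64 ex_total_rx(struct ex_priv *p)
{
	u64 sum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		sum += per_cpu_ptr(p->stats, cpu)->rx_packets;
	return sum;
}
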
@@ -1827,7 +1857,7 @@ static int nv_alloc_rx(struct net_device *dev)
                } else {
 packet_dropped:
                        u64_stats_update_begin(&np->swstats_rx_syncp);
-                       np->stat_rx_dropped++;
+                       nv_txrx_stats_inc(stat_rx_dropped);
                        u64_stats_update_end(&np->swstats_rx_syncp);
                        return 1;
                }
@@ -1869,7 +1899,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
                } else {
 packet_dropped:
                        u64_stats_update_begin(&np->swstats_rx_syncp);
-                       np->stat_rx_dropped++;
+                       nv_txrx_stats_inc(stat_rx_dropped);
                        u64_stats_update_end(&np->swstats_rx_syncp);
                        return 1;
                }
@@ -2013,7 +2043,7 @@ static void nv_drain_tx(struct net_device *dev)
                }
                if (nv_release_txskb(np, &np->tx_skb[i])) {
                        u64_stats_update_begin(&np->swstats_tx_syncp);
-                       np->stat_tx_dropped++;
+                       nv_txrx_stats_inc(stat_tx_dropped);
                        u64_stats_update_end(&np->swstats_tx_syncp);
                }
                np->tx_skb[i].dma = 0;
@@ -2227,7 +2257,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        /* on DMA mapping error - drop the packet */
                        dev_kfree_skb_any(skb);
                        u64_stats_update_begin(&np->swstats_tx_syncp);
-                       np->stat_tx_dropped++;
+                       nv_txrx_stats_inc(stat_tx_dropped);
                        u64_stats_update_end(&np->swstats_tx_syncp);
                        return NETDEV_TX_OK;
                }
@@ -2273,7 +2303,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                dev_kfree_skb_any(skb);
                                np->put_tx_ctx = start_tx_ctx;
                                u64_stats_update_begin(&np->swstats_tx_syncp);
-                               np->stat_tx_dropped++;
+                               nv_txrx_stats_inc(stat_tx_dropped);
                                u64_stats_update_end(&np->swstats_tx_syncp);
                                return NETDEV_TX_OK;
                        }
@@ -2384,7 +2414,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                        /* on DMA mapping error - drop the packet */
                        dev_kfree_skb_any(skb);
                        u64_stats_update_begin(&np->swstats_tx_syncp);
-                       np->stat_tx_dropped++;
+                       nv_txrx_stats_inc(stat_tx_dropped);
                        u64_stats_update_end(&np->swstats_tx_syncp);
                        return NETDEV_TX_OK;
                }
@@ -2431,7 +2461,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                                dev_kfree_skb_any(skb);
                                np->put_tx_ctx = start_tx_ctx;
                                u64_stats_update_begin(&np->swstats_tx_syncp);
-                               np->stat_tx_dropped++;
+                               nv_txrx_stats_inc(stat_tx_dropped);
                                u64_stats_update_end(&np->swstats_tx_syncp);
                                return NETDEV_TX_OK;
                        }
@@ -2560,9 +2590,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
                                            && !(flags & NV_TX_RETRYCOUNT_MASK))
                                                nv_legacybackoff_reseed(dev);
                                } else {
+                                       unsigned int len;
+
                                        u64_stats_update_begin(&np->swstats_tx_syncp);
-                                       np->stat_tx_packets++;
-                                       np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+                                       nv_txrx_stats_inc(stat_tx_packets);
+                                       len = np->get_tx_ctx->skb->len;
+                                       nv_txrx_stats_add(stat_tx_bytes, len);
                                        u64_stats_update_end(&np->swstats_tx_syncp);
                                }
                                bytes_compl += np->get_tx_ctx->skb->len;
@@ -2577,9 +2610,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
                                            && !(flags & NV_TX2_RETRYCOUNT_MASK))
                                                nv_legacybackoff_reseed(dev);
                                } else {
+                                       unsigned int len;
+
                                        u64_stats_update_begin(&np->swstats_tx_syncp);
-                                       np->stat_tx_packets++;
-                                       np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+                                       nv_txrx_stats_inc(stat_tx_packets);
+                                       len = np->get_tx_ctx->skb->len;
+                                       nv_txrx_stats_add(stat_tx_bytes, len);
                                        u64_stats_update_end(&np->swstats_tx_syncp);
                                }
                                bytes_compl += np->get_tx_ctx->skb->len;
@@ -2627,9 +2663,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
                                                nv_legacybackoff_reseed(dev);
                                }
                        } else {
+                               unsigned int len;
+
                                u64_stats_update_begin(&np->swstats_tx_syncp);
-                               np->stat_tx_packets++;
-                               np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+                               nv_txrx_stats_inc(stat_tx_packets);
+                               len = np->get_tx_ctx->skb->len;
+                               nv_txrx_stats_add(stat_tx_bytes, len);
                                u64_stats_update_end(&np->swstats_tx_syncp);
                        }
 
@@ -2806,6 +2845,15 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
        }
 }
 
+static void rx_missing_handler(u32 flags, struct fe_priv *np)
+{
+       if (flags & NV_RX_MISSEDFRAME) {
+               u64_stats_update_begin(&np->swstats_rx_syncp);
+               nv_txrx_stats_inc(stat_rx_missed_errors);
+               u64_stats_update_end(&np->swstats_rx_syncp);
+       }
+}
+
 static int nv_rx_process(struct net_device *dev, int limit)
 {
        struct fe_priv *np = netdev_priv(dev);
@@ -2848,11 +2896,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
                                        }
                                        /* the rest are hard errors */
                                        else {
-                                               if (flags & NV_RX_MISSEDFRAME) {
-                                                       u64_stats_update_begin(&np->swstats_rx_syncp);
-                                                       np->stat_rx_missed_errors++;
-                                                       u64_stats_update_end(&np->swstats_rx_syncp);
-                                               }
+                                               rx_missing_handler(flags, np);
                                                dev_kfree_skb(skb);
                                                goto next_pkt;
                                        }
@@ -2896,8 +2940,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
                skb->protocol = eth_type_trans(skb, dev);
                napi_gro_receive(&np->napi, skb);
                u64_stats_update_begin(&np->swstats_rx_syncp);
-               np->stat_rx_packets++;
-               np->stat_rx_bytes += len;
+               nv_txrx_stats_inc(stat_rx_packets);
+               nv_txrx_stats_add(stat_rx_bytes, len);
                u64_stats_update_end(&np->swstats_rx_syncp);
 next_pkt:
                if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
@@ -2982,8 +3026,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
                        }
                        napi_gro_receive(&np->napi, skb);
                        u64_stats_update_begin(&np->swstats_rx_syncp);
-                       np->stat_rx_packets++;
-                       np->stat_rx_bytes += len;
+                       nv_txrx_stats_inc(stat_rx_packets);
+                       nv_txrx_stats_add(stat_rx_bytes, len);
                        u64_stats_update_end(&np->swstats_rx_syncp);
                } else {
                        dev_kfree_skb(skb);
@@ -5651,6 +5695,12 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
        SET_NETDEV_DEV(dev, &pci_dev->dev);
        u64_stats_init(&np->swstats_rx_syncp);
        u64_stats_init(&np->swstats_tx_syncp);
+       np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
+       if (!np->txrx_stats) {
+               pr_err("np->txrx_stats: memory allocation error\n");
+               err = -ENOMEM;
+               goto out_alloc_percpu;
+       }
 
        timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
        timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
@@ -6060,6 +6110,8 @@ out_relreg:
 out_disable:
        pci_disable_device(pci_dev);
 out_free:
+       free_percpu(np->txrx_stats);
+out_alloc_percpu:
        free_netdev(dev);
 out:
        return err;
@@ -6105,6 +6157,9 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev)
 static void nv_remove(struct pci_dev *pci_dev)
 {
        struct net_device *dev = pci_get_drvdata(pci_dev);
+       struct fe_priv *np = netdev_priv(dev);
+
+       free_percpu(np->txrx_stats);
 
        unregister_netdev(dev);
 
index 829dd60ab9370e3a1441b2f57c23478ffe4adcec..1efff7f68ef6b62b1ed543f3ae94d9d745d1e9ec 100644 (file)
@@ -1325,7 +1325,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
                                              &drv_version);
                if (rc) {
                        DP_NOTICE(cdev, "Failed sending drv version command\n");
-                       return rc;
+                       goto err4;
                }
        }
 
@@ -1333,6 +1333,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 
        return 0;
 
+err4:
+       qed_ll2_dealloc_if(cdev);
 err3:
        qed_hw_stop(cdev);
 err2:
index e1dd6ea60d67050f2784a08d2ec2589eef040a35..bae0074ab9aadf287df7cf07b4deef285393cd9c 100644 (file)
@@ -5921,6 +5921,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
        skb = napi_alloc_skb(&tp->napi, pkt_size);
        if (skb)
                skb_copy_to_linear_data(skb, data, pkt_size);
+       dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
 
        return skb;
 }
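
The one-line rtl8169 fix above hands the receive buffer back to the device once the CPU copy is done. A sketch of the streaming-DMA ownership hand-off it completes (placeholder names; the matching sync_for_cpu is assumed to happen in the caller):

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Illustrative only: the ownership hand-off the fix above completes. */
static void ex_copy_rx(struct device *d, dma_addr_t addr,
		       const void *rx_buf, void *copy, size_t len)
{
	dma_sync_single_for_cpu(d, addr, len, DMA_FROM_DEVICE);    /* CPU may read the buffer */
	memcpy(copy, rx_buf, len);                                  /* pull the packet out     */
	dma_sync_single_for_device(d, addr, len, DMA_FROM_DEVICE); /* give it back to the NIC */
}
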
index 7a5e6c5abb57bd48407fde0438a51a4f2ecb0acd..276c7cae7ceeb02b721a6d0118a5df570fd2704e 100644 (file)
@@ -794,15 +794,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
                printk(KERN_ERR "Sgiseeq: Cannot register net device, "
                       "aborting.\n");
                err = -ENODEV;
-               goto err_out_free_page;
+               goto err_out_free_attrs;
        }
 
        printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
 
        return 0;
 
-err_out_free_page:
-       free_page((unsigned long) sp->srings);
+err_out_free_attrs:
+       dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
+                      sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
 err_out_free_dev:
        free_netdev(dev);
 
index 4644b2aeeba1cd39ea3d56b7bd3b66024115671d..e2e469c37a4d0713fcaf5e96b5bc3050503cf133 100644 (file)
@@ -1194,10 +1194,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
        int ret;
        struct device *dev = &bsp_priv->pdev->dev;
 
-       if (!ldo) {
-               dev_err(dev, "no regulator found\n");
-               return -1;
-       }
+       if (!ldo)
+               return 0;
 
        if (enable) {
                ret = regulator_enable(ldo);
index 4083019c547a4b1b04509738ac4a6cb4ee0808e5..f97a4096f8fc8e44ef2456e822952a885e48de83 100644 (file)
@@ -873,7 +873,12 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
        int ret;
        u32 reg, val;
 
-       regmap_field_read(gmac->regmap_field, &val);
+       ret = regmap_field_read(gmac->regmap_field, &val);
+       if (ret) {
+               dev_err(priv->device, "Failed to read from regmap field.\n");
+               return ret;
+       }
+
        reg = gmac->variant->default_syscon_value;
        if (reg != val)
                dev_warn(priv->device,
index 32a89744972db40c866ade7dccbd2c2232f7ec00..a46b8b2e44e14d60f1b04af3c64d3a763dba42d0 100644 (file)
@@ -2775,6 +2775,7 @@ static int cpsw_probe(struct platform_device *pdev)
        if (!cpsw)
                return -ENOMEM;
 
+       platform_set_drvdata(pdev, cpsw);
        cpsw->dev = dev;
 
        mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
@@ -2879,7 +2880,6 @@ static int cpsw_probe(struct platform_device *pdev)
                goto clean_cpts;
        }
 
-       platform_set_drvdata(pdev, cpsw);
        priv = netdev_priv(ndev);
        priv->cpsw = cpsw;
        priv->ndev = ndev;
index 331c16d30d5df02d1a00bca2c4871362e5dbfeb6..23281aeeb2226ee4dc9d7655aebc247c94c94def 100644 (file)
@@ -344,10 +344,10 @@ static void sp_bump(struct sixpack *sp, char cmd)
 
        sp->dev->stats.rx_bytes += count;
 
-       if ((skb = dev_alloc_skb(count)) == NULL)
+       if ((skb = dev_alloc_skb(count + 1)) == NULL)
                goto out_mem;
 
-       ptr = skb_put(skb, count);
+       ptr = skb_put(skb, count + 1);
        *ptr++ = cmd;   /* KISS command */
 
        memcpy(ptr, sp->cooked_buf + 1, count);
index b41696e16bdc835ff1ec8f516c8ed4c711501ad6..c20e7ef18bc955984f58dd9f8cba38e1bedcb663 100644 (file)
@@ -802,7 +802,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
                err = hwsim_subscribe_all_others(phy);
                if (err < 0) {
                        mutex_unlock(&hwsim_phys_lock);
-                       goto err_reg;
+                       goto err_subscribe;
                }
        }
        list_add_tail(&phy->list, &hwsim_phys);
@@ -812,6 +812,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
 
        return idx;
 
+err_subscribe:
+       ieee802154_unregister_hw(phy->hw);
 err_reg:
        kfree(pib);
 err_pib:
@@ -901,9 +903,9 @@ static __init int hwsim_init_module(void)
        return 0;
 
 platform_drv:
-       genl_unregister_family(&hwsim_genl_family);
-platform_dev:
        platform_device_unregister(mac802154hwsim_dev);
+platform_dev:
+       genl_unregister_family(&hwsim_genl_family);
        return rc;
 }
 
index 58bb25e4af1066ab8d4cc624884e247e37c6c5a7..7935593debb11eaeec20a931350d64dc0746b82d 100644 (file)
@@ -523,6 +523,32 @@ int genphy_c45_read_status(struct phy_device *phydev)
 }
 EXPORT_SYMBOL_GPL(genphy_c45_read_status);
 
+/**
+ * genphy_c45_config_aneg - restart auto-negotiation or forced setup
+ * @phydev: target phy_device struct
+ *
+ * Description: If auto-negotiation is enabled, we configure the
+ *   advertising, and then restart auto-negotiation.  If it is not
+ *   enabled, then we force a configuration.
+ */
+int genphy_c45_config_aneg(struct phy_device *phydev)
+{
+       bool changed = false;
+       int ret;
+
+       if (phydev->autoneg == AUTONEG_DISABLE)
+               return genphy_c45_pma_setup_forced(phydev);
+
+       ret = genphy_c45_an_config_aneg(phydev);
+       if (ret < 0)
+               return ret;
+       if (ret > 0)
+               changed = true;
+
+       return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_config_aneg);
+
 /* The gen10g_* functions are the old Clause 45 stub */
 
 int gen10g_config_aneg(struct phy_device *phydev)
index ef7aa738e0dc4537e3234340c6b2e56110c949c6..6b0f89369b460e7c01eef92bc13e24e430f22102 100644 (file)
@@ -507,7 +507,7 @@ static int phy_config_aneg(struct phy_device *phydev)
         * allowed to call genphy_config_aneg()
         */
        if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
-               return -EOPNOTSUPP;
+               return genphy_c45_config_aneg(phydev);
 
        return genphy_config_aneg(phydev);
 }
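
With genphy_c45_config_aneg() exported and used as the fallback above, a Clause 45 PHY driver with no vendor-specific tuning could also point its config_aneg hook straight at it. A hedged sketch with a hypothetical driver and placeholder PHY ID:

#include <linux/module.h>
#include <linux/phy.h>

/* Hypothetical Clause 45 PHY driver wiring in the generic helpers. */
static struct phy_driver ex_c45_driver[] = {
	{
		.phy_id		= 0x12345678,	/* placeholder OUI/model */
		.phy_id_mask	= 0xffffffff,
		.name		= "Example 10G C45 PHY",
		.features	= PHY_10GBIT_FEATURES,
		.config_aneg	= genphy_c45_config_aneg,
		.read_status	= genphy_c45_read_status,
	},
};
module_phy_driver(ex_c45_driver);

MODULE_DESCRIPTION("Example C45 PHY driver sketch");
MODULE_LICENSE("GPL");
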
index a45c5de96ab1c9688c0ed1eaa99f60fef3294230..a5a57ca94c1ac541092867b685fe7d616218f96b 100644 (file)
@@ -376,8 +376,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
  *  Local device  Link partner
  *  Pause AsymDir Pause AsymDir Result
  *    1     X       1     X     TX+RX
- *    0     1       1     1     RX
- *    1     1       0     1     TX
+ *    0     1       1     1     TX
+ *    1     1       0     1     RX
  */
 static void phylink_resolve_flow(struct phylink *pl,
                                 struct phylink_link_state *state)
@@ -398,7 +398,7 @@ static void phylink_resolve_flow(struct phylink *pl,
                        new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
                else if (pause & MLO_PAUSE_ASYM)
                        new_pause = state->pause & MLO_PAUSE_SYM ?
-                                MLO_PAUSE_RX : MLO_PAUSE_TX;
+                                MLO_PAUSE_TX : MLO_PAUSE_RX;
        } else {
                new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
        }
index db16d7a13e00c197574b1f9f70044775648bbd72..aab0be40d4430ab1c1fed92782f4a263f38020bd 100644 (file)
@@ -787,7 +787,8 @@ static void tun_detach_all(struct net_device *dev)
 }
 
 static int tun_attach(struct tun_struct *tun, struct file *file,
-                     bool skip_filter, bool napi, bool napi_frags)
+                     bool skip_filter, bool napi, bool napi_frags,
+                     bool publish_tun)
 {
        struct tun_file *tfile = file->private_data;
        struct net_device *dev = tun->dev;
@@ -870,7 +871,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
         * initialized tfile; otherwise we risk using half-initialized
         * object.
         */
-       rcu_assign_pointer(tfile->tun, tun);
+       if (publish_tun)
+               rcu_assign_pointer(tfile->tun, tun);
        rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
        tun->numqueues++;
        tun_set_real_num_queues(tun);
@@ -2730,7 +2732,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
                err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
                                 ifr->ifr_flags & IFF_NAPI,
-                                ifr->ifr_flags & IFF_NAPI_FRAGS);
+                                ifr->ifr_flags & IFF_NAPI_FRAGS, true);
                if (err < 0)
                        return err;
 
@@ -2829,13 +2831,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
                INIT_LIST_HEAD(&tun->disabled);
                err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
-                                ifr->ifr_flags & IFF_NAPI_FRAGS);
+                                ifr->ifr_flags & IFF_NAPI_FRAGS, false);
                if (err < 0)
                        goto err_free_flow;
 
                err = register_netdevice(tun->dev);
                if (err < 0)
                        goto err_detach;
+               /* free_netdev() won't check refcnt, so to avoid a race
+                * with dev_put() we need to publish tun after registration.
+                */
+               rcu_assign_pointer(tfile->tun, tun);
        }
 
        netif_carrier_on(tun->dev);
@@ -2978,7 +2984,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
                if (ret < 0)
                        goto unlock;
                ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
-                                tun->flags & IFF_NAPI_FRAGS);
+                                tun->flags & IFF_NAPI_FRAGS, true);
        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
                tun = rtnl_dereference(tfile->tun);
                if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
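
The tun change above publishes tfile->tun for a newly created device only after register_netdevice() succeeds, so RCU readers never see a device that the error path may still free. A generic sketch of that publish-after-init ordering (illustrative names; ex_finish_setup() stands in for the registration step):

#include <linux/rcupdate.h>

struct ex_obj;
struct ex_slot {
	struct ex_obj __rcu *obj;
};

int ex_finish_setup(struct ex_obj *obj);	/* hypothetical, e.g. register_netdevice() */

static int ex_install(struct ex_slot *slot, struct ex_obj *obj)
{
	int err;

	err = ex_finish_setup(obj);
	if (err)
		return err;

	/* Publish last: readers under rcu_read_lock() only ever see
	 * a fully initialized and registered object.
	 */
	rcu_assign_pointer(slot->obj, obj);
	return 0;
}
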
index 8458e88c18e9e017f579128c36a9707b1232ab97..32f53de5b1fe7abff0eb2882240bcd295977af08 100644 (file)
@@ -206,7 +206,15 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
                goto bad_desc;
        }
 skip:
-       if (rndis && header.usb_cdc_acm_descriptor &&
+       /* Communication class functions with bmCapabilities are not
+        * RNDIS.  But some Wireless class RNDIS functions use
+        * bmCapabilities for their own purpose. The failsafe is
+        * therefore applied only to Communication class RNDIS
+        * functions.  The rndis test is redundant, but a cheap
+        * optimization.
+        */
+       if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
+           header.usb_cdc_acm_descriptor &&
            header.usb_cdc_acm_descriptor->bmCapabilities) {
                dev_dbg(&intf->dev,
                        "ACM capabilities %02x, not really RNDIS?\n",
index 0cc03a9ff5457922a581cbdf5d76d34186575ae6..04137ac373b07f261c5f276b4cd4d051c80812d9 100644 (file)
@@ -799,8 +799,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
        ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
                              RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
                              value, index, tmp, size, 500);
+       if (ret < 0)
+               memset(data, 0xff, size);
+       else
+               memcpy(data, tmp, size);
 
-       memcpy(data, tmp, size);
        kfree(tmp);
 
        return ret;
@@ -4018,8 +4021,7 @@ static int rtl8152_close(struct net_device *netdev)
 #ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&tp->pm_notifier);
 #endif
-       if (!test_bit(RTL8152_UNPLUG, &tp->flags))
-               napi_disable(&tp->napi);
+       napi_disable(&tp->napi);
        clear_bit(WORK_ENABLE, &tp->flags);
        usb_kill_urb(tp->intr_urb);
        cancel_delayed_work_sync(&tp->schedule);
@@ -5350,7 +5352,6 @@ static int rtl8152_probe(struct usb_interface *intf,
        return 0;
 
 out1:
-       netif_napi_del(&tp->napi);
        usb_set_intfdata(intf, NULL);
 out:
        free_netdev(netdev);
@@ -5365,7 +5366,6 @@ static void rtl8152_disconnect(struct usb_interface *intf)
        if (tp) {
                rtl_set_unplug(tp);
 
-               netif_napi_del(&tp->napi);
                unregister_netdev(tp->netdev);
                cancel_delayed_work_sync(&tp->hw_phy_work);
                tp->rtl_ops.unload(tp);
index 4f3de0ac8b0b44a91f06f1c2dd37fde7bc5e0069..ba98e0971b842df0a6e2c690773eb3ee32986b47 100644 (file)
@@ -1331,7 +1331,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
                }
        }
 
-       if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
+       if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
                if (!try_fill_recv(vi, rq, GFP_ATOMIC))
                        schedule_delayed_work(&vi->refill, 0);
        }
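
The virtio-net change above caps the refill threshold by the NAPI budget: with a 256-entry ring and a budget of 64, the threshold drops from 128 free descriptors to 32, so the delayed refill work is scheduled earlier. A small worked sketch (numbers are examples only):

#include <linux/kernel.h>	/* min() */
#include <linux/types.h>

/* Illustrative only. */
static bool ex_should_refill(unsigned int num_free, unsigned int budget,
			     unsigned int vring_size)
{
	/* e.g. vring_size = 256, budget = 64, num_free = 40:
	 * old rule: 40 > 256 / 2           -> false, refill not scheduled
	 * new rule: 40 > min(64, 256) / 2  -> true,  refill scheduled
	 */
	return num_free > min(budget, vring_size) / 2;
}
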
index d74349628db2209e99ed5c4e24c172bf1603ed23..0e6a51525d9134d18a87a69362a9af7cdfc32790 100644 (file)
@@ -1115,7 +1115,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
     sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
     LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
 
-    lmc_trace(dev, "lmc_runnin_reset_out");
+    lmc_trace(dev, "lmc_running_reset_out");
 }
 
 
index 6642bcb27761d63e8545865c1e2c91097cf9e5c2..8efb493ceec2fc96cda43cb56cde5e6d19e91015 100644 (file)
@@ -127,6 +127,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
                        "%d\n", result);
        result = 0;
 error_cmd:
+       kfree(cmd);
        kfree_skb(ack_skb);
 error_msg_to_dev:
 error_alloc:
index 1f500cddb3a75b799645d67ed7a374e0027da11a..55b713255b8eac22e4a5e31264e088d99c52dc16 100644 (file)
@@ -556,6 +556,30 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
        .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };
 
+const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
+       .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
+       .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+       IWL_DEVICE_22500,
+       /*
+        * This device doesn't support receiving BlockAck with a large bitmap
+        * so we need to restrict the size of transmitted aggregation to the
+        * HT size; mac80211 would otherwise pick the HE max (256) by default.
+        */
+       .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
+       .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
+       .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+       IWL_DEVICE_22500,
+       /*
+        * This device doesn't support receiving BlockAck with a large bitmap
+        * so we need to restrict the size of transmitted aggregation to the
+        * HT size; mac80211 would otherwise pick the HE max (256) by default.
+        */
+       .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
 const struct iwl_cfg iwl22000_2ax_cfg_jf = {
        .name = "Intel(R) Dual Band Wireless AX 22000",
        .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
index 1c1bf1b281cd90d82adc31d09dbb2d898a220693..6c04f8223aff323166fb75c27fcfc92a94352236 100644 (file)
@@ -577,6 +577,8 @@ extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
 extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
 extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
 extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
+extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0;
+extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
 extern const struct iwl_cfg killer1650x_2ax_cfg;
 extern const struct iwl_cfg killer1650w_2ax_cfg;
 extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
index cb22d447fcb8a9d68aa566a1933202dddc1bca9d..fe776e35b9d06bf152ce12eec6a664eee2317e62 100644 (file)
@@ -554,7 +554,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
                cpu_to_le32(vif->bss_conf.use_short_slot ?
                            MAC_FLG_SHORT_SLOT : 0);
 
-       cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+       cmd->filter_flags = 0;
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
@@ -623,6 +623,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
        /* We need the dtim_period to set the MAC as associated */
        if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
            !force_assoc_off) {
+               struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+               u8 ap_sta_id = mvmvif->ap_sta_id;
                u32 dtim_offs;
 
                /*
@@ -658,6 +660,29 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
                               dtim_offs);
 
                ctxt_sta->is_assoc = cpu_to_le32(1);
+
+               /*
+                * allow multicast data frames only as long as the station is
+                * authorized, i.e., GTK keys are already installed (if needed)
+                */
+               if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+                       struct ieee80211_sta *sta;
+
+                       rcu_read_lock();
+
+                       sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
+                       if (!IS_ERR_OR_NULL(sta)) {
+                               struct iwl_mvm_sta *mvmsta =
+                                       iwl_mvm_sta_from_mac80211(sta);
+
+                               if (mvmsta->sta_state ==
+                                   IEEE80211_STA_AUTHORIZED)
+                                       cmd.filter_flags |=
+                                               cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+                       }
+
+                       rcu_read_unlock();
+               }
        } else {
                ctxt_sta->is_assoc = cpu_to_le32(0);
 
@@ -703,7 +728,8 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
                                       MAC_FILTER_IN_CONTROL_AND_MGMT |
                                       MAC_FILTER_IN_BEACON |
                                       MAC_FILTER_IN_PROBE_REQUEST |
-                                      MAC_FILTER_IN_CRC32);
+                                      MAC_FILTER_IN_CRC32 |
+                                      MAC_FILTER_ACCEPT_GRP);
        ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
 
        /* Allocate sniffer station */
@@ -727,7 +753,8 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
        iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
 
        cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
-                                      MAC_FILTER_IN_PROBE_REQUEST);
+                                      MAC_FILTER_IN_PROBE_REQUEST |
+                                      MAC_FILTER_ACCEPT_GRP);
 
        /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
        cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
index 1c904b5226aa42d875ba503b725b33fec85e0ffd..a7bc00d1296fec755e8415dd7f226207ff2408a2 100644 (file)
@@ -3327,10 +3327,20 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                /* enable beacon filtering */
                WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
 
+               /*
+                * Now that the station is authorized, i.e., keys were already
+                * installed, need to indicate to the FW that
+                * multicast data frames can be forwarded to the driver
+                */
+               iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
                iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
                                     true);
        } else if (old_state == IEEE80211_STA_AUTHORIZED &&
                   new_state == IEEE80211_STA_ASSOC) {
+               /* Multicast data frames are no longer allowed */
+               iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
                /* disable beacon filtering */
                ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
                WARN_ON(ret &&
index de711c1160d316320bc8d92ec3f537e390bbd517..3b12e7ad35e1dd06b36c6a5ff9c8b20fba5bfbc8 100644 (file)
@@ -1062,7 +1062,28 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
                else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
                        iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+               else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+                       iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
+               else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+                       iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
        }
+
+       /* same thing for QuZ... */
+       if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
+               if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
+                       iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
+               else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
+                       iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
+               else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
+                       iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
+               else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
+                       iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+               else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
+                       iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
+               else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
+                       iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
+       }
+
 #endif
 
        pci_set_drvdata(pdev, iwl_trans);
index f5df5b370d78b585dd277b61b86c00498b8e5f74..db62c831460350fd01d9bbb4d8ff0e3752d75d7c 100644 (file)
@@ -3602,11 +3602,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
-                  ((trans->cfg != &iwl_ax200_cfg_cc &&
-                    trans->cfg != &killer1650x_2ax_cfg &&
-                    trans->cfg != &killer1650w_2ax_cfg &&
-                    trans->cfg != &iwl_ax201_cfg_quz_hr) ||
-                   trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
+                  trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
                u32 hw_status;
 
                hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
index 38d11033898716b3e9c1c5fae581c692d4ae44fe..9ef6b8fe03c1b5cc7bad24564c47f6d07bf367e0 100644 (file)
@@ -99,10 +99,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
        u16 len = byte_cnt;
        __le16 bc_ent;
 
-       if (trans_pcie->bc_table_dword)
-               len = DIV_ROUND_UP(len, 4);
-
-       if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
+       if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
                return;
 
        filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
@@ -117,11 +114,20 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
         */
        num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
 
-       bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
-       if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+       if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+               /* Starting from 22560, the HW expects bytes */
+               WARN_ON(trans_pcie->bc_table_dword);
+               WARN_ON(len > 0x3FFF);
+               bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
                scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
-       else
+       } else {
+               /* Until 22560, the HW expects DW */
+               WARN_ON(!trans_pcie->bc_table_dword);
+               len = DIV_ROUND_UP(len, 4);
+               WARN_ON(len > 0xFFF);
+               bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
                scd_bc_tbl->tfd_offset[idx] = bc_ent;
+       }
 }
 
 /*
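
The rewritten byte-count update above makes the per-generation packing explicit: family 22560 and later takes the length in bytes in the low 14 bits, earlier devices take dwords in the low 12 bits, with the fetch-chunk count in the top bits. A worked sketch with example values (not driver code):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */
#include <linux/types.h>
#include <asm/byteorder.h>	/* cpu_to_le16()  */

/* Example values only: a 1500-byte frame whose TFD is about 80 bytes. */
static void ex_pack_bc_entries(__le16 *gen3_ent, __le16 *gen2_ent)
{
	u16 len = 1500;					/* frame length in bytes      */
	u8 num_fetch_chunks = DIV_ROUND_UP(80, 64) - 1;	/* extra 64-byte fetch chunks */

	/* family >= 22560: 14-bit byte count, chunk count in bits 15:14 */
	*gen3_ent = cpu_to_le16(len | (num_fetch_chunks << 14));

	/* older families: 12-bit dword count, chunk count in bits 15:12 */
	*gen2_ent = cpu_to_le16(DIV_ROUND_UP(len, 4) | (num_fetch_chunks << 12));
}
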
index 653d347a9a19d3e77eb3fbd1ad5d7da3278d6d4a..580387f9f12a9af3b2f64e5ea760ea1607dafe29 100644 (file)
@@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
                }
 
                vs_ie = (struct ieee_types_header *)vendor_ie;
+               if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
+                       IEEE_MAX_IE_SIZE)
+                       return -EINVAL;
                memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
                       vs_ie, vs_ie->len + 2);
                le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
index 18f7d9bf30b28edc19acef376675b15d726bcc00..0939a8c8f3ab5cf74a55ae08c3feadc1c486f3ca 100644 (file)
@@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
 
        rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
        if (rate_ie) {
+               if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
+                       return;
                memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
                rate_len = rate_ie->len;
        }
@@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
        rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
                                           params->beacon.tail,
                                           params->beacon.tail_len);
-       if (rate_ie)
+       if (rate_ie) {
+               if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
+                       return;
                memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
+       }
 
        return;
 }
@@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
                                            params->beacon.tail_len);
        if (vendor_ie) {
                wmm_ie = vendor_ie;
+               if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
+                       return;
                memcpy(&bss_cfg->wmm_info, wmm_ie +
                       sizeof(struct ieee_types_header), *(wmm_ie + 1));
                priv->wmm_enabled = 1;
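
All three mwifiex hunks above apply the same rule: validate the IE's claimed length against the destination buffer before memcpy(). A generic sketch of that check (hypothetical helper, not driver code):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: copy a raw IE (ID, length, payload) if it fits. */
static int ex_copy_ie(u8 *dst, size_t dst_size, const u8 *ie)
{
	u8 ie_len = ie[1];			/* length byte follows the element ID */

	if ((size_t)ie_len + 2 > dst_size)	/* ID + length + payload must fit */
		return -EINVAL;

	memcpy(dst, ie, ie_len + 2);
	return ie_len + 2;
}
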
index 40c0d536e20d04468569bdaa4575c74cb0aee40c..9d4426f6905f29591b9f66a613a3830acf88ccf1 100644 (file)
@@ -59,6 +59,11 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
                dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
        }
 
+       if (is_mt7630(dev)) {
+               dev->mt76.cap.has_5ghz = false;
+               dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
+       }
+
        if (!mt76x02_field_valid(nic_conf1 & 0xff))
                nic_conf1 &= 0xff00;
 
index 4585e1b756c2096d93321e9d3aa8da1f2ff25d49..6117e6ca08cb7cf6cedb6b46b9a426f1bd0d0b10 100644 (file)
@@ -62,6 +62,19 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
        mt76x0e_stop_hw(dev);
 }
 
+static int
+mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+               struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+               struct ieee80211_key_conf *key)
+{
+       struct mt76x02_dev *dev = hw->priv;
+
+       if (is_mt7630(dev))
+               return -EOPNOTSUPP;
+
+       return mt76x02_set_key(hw, cmd, vif, sta, key);
+}
+
 static void
 mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
              u32 queues, bool drop)
@@ -78,7 +91,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
        .configure_filter = mt76x02_configure_filter,
        .bss_info_changed = mt76x02_bss_info_changed,
        .sta_state = mt76_sta_state,
-       .set_key = mt76x02_set_key,
+       .set_key = mt76x0e_set_key,
        .conf_tx = mt76x02_conf_tx,
        .sw_scan_start = mt76x02_sw_scan,
        .sw_scan_complete = mt76x02_sw_scan_complete,
index 627ed1fc7b1584414b97f9eef9a5d81543dc9d74..645f4d15fb619635d3f2bdc067eea3e308c889d8 100644 (file)
@@ -136,11 +136,11 @@ static const struct ieee80211_ops mt76x0u_ops = {
        .release_buffered_frames = mt76_release_buffered_frames,
 };
 
-static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
 {
        int err;
 
-       mt76x0_chip_onoff(dev, true, true);
+       mt76x0_chip_onoff(dev, true, reset);
 
        if (!mt76x02_wait_for_mac(&dev->mt76))
                return -ETIMEDOUT;
@@ -173,7 +173,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
        if (err < 0)
                goto out_err;
 
-       err = mt76x0u_init_hardware(dev);
+       err = mt76x0u_init_hardware(dev, true);
        if (err < 0)
                goto out_err;
 
@@ -309,7 +309,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
        if (ret < 0)
                goto err;
 
-       ret = mt76x0u_init_hardware(dev);
+       ret = mt76x0u_init_hardware(dev, false);
        if (ret)
                goto err;
 
index c9b957ac5733262d9faa5d58261bcfa4d77fe243..f1cdcd61c54a5eeec18fc349e63a5cd832cb17fa 100644 (file)
@@ -1654,13 +1654,18 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,
 
        offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
 
-       rt2800_register_multiread(rt2x00dev, offset,
-                                 &iveiv_entry, sizeof(iveiv_entry));
-       if ((crypto->cipher == CIPHER_TKIP) ||
-           (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
-           (crypto->cipher == CIPHER_AES))
-               iveiv_entry.iv[3] |= 0x20;
-       iveiv_entry.iv[3] |= key->keyidx << 6;
+       if (crypto->cmd == SET_KEY) {
+               rt2800_register_multiread(rt2x00dev, offset,
+                                         &iveiv_entry, sizeof(iveiv_entry));
+               if ((crypto->cipher == CIPHER_TKIP) ||
+                   (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
+                   (crypto->cipher == CIPHER_AES))
+                       iveiv_entry.iv[3] |= 0x20;
+               iveiv_entry.iv[3] |= key->keyidx << 6;
+       } else {
+               memset(&iveiv_entry, 0, sizeof(iveiv_entry));
+       }
+
        rt2800_register_multiwrite(rt2x00dev, offset,
                                   &iveiv_entry, sizeof(iveiv_entry));
 }
@@ -4237,24 +4242,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        switch (rt2x00dev->default_ant.rx_chain_num) {
        case 3:
                /* Turn on tertiary LNAs */
-               rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN,
-                                  rf->channel > 14);
-               rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN,
-                                  rf->channel <= 14);
+               rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
+               rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
                /* fall-through */
        case 2:
                /* Turn on secondary LNAs */
-               rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN,
-                                  rf->channel > 14);
-               rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN,
-                                  rf->channel <= 14);
+               rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
+               rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
                /* fall-through */
        case 1:
                /* Turn on primary LNAs */
-               rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN,
-                                  rf->channel > 14);
-               rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN,
-                                  rf->channel <= 14);
+               rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
+               rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
                break;
        }
 
@@ -6094,6 +6093,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
                rt2800_delete_wcid_attr(rt2x00dev, i);
        }
 
+       /*
+        * Clear encryption initialization vectors on start, but keep them
+        * for watchdog reset. Otherwise we will have wrong IVs and not be
+        * able to keep connections after reset.
+        */
+       if (!test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags))
+               for (i = 0; i < 256; i++)
+                       rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
+
        /*
         * Clear all beacons
         */
index 7e43690a861c41654de8863c77111c99fb475fb7..2b216edd0c7d2cbecb10ca1ec46834910538cbed 100644 (file)
@@ -658,6 +658,7 @@ enum rt2x00_state_flags {
        DEVICE_STATE_ENABLED_RADIO,
        DEVICE_STATE_SCANNING,
        DEVICE_STATE_FLUSHING,
+       DEVICE_STATE_RESET,
 
        /*
         * Driver configuration
index 35414f97a978dfc9250faf8eedc6ddaaec97b56b..9d158237ac6746db6f8808584ee140313915cac2 100644 (file)
@@ -1256,13 +1256,14 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
 
 int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
 {
-       int retval;
+       int retval = 0;
 
        if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
                /*
                 * This is special case for ieee80211_restart_hw(), otherwise
                 * mac80211 never call start() two times in row without stop();
                 */
+               set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
                rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
                rt2x00lib_stop(rt2x00dev);
        }
@@ -1273,14 +1274,14 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
         */
        retval = rt2x00lib_load_firmware(rt2x00dev);
        if (retval)
-               return retval;
+               goto out;
 
        /*
         * Initialize the device.
         */
        retval = rt2x00lib_initialize(rt2x00dev);
        if (retval)
-               return retval;
+               goto out;
 
        rt2x00dev->intf_ap_count = 0;
        rt2x00dev->intf_sta_count = 0;
@@ -1289,11 +1290,13 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
        /* Enable the radio */
        retval = rt2x00lib_enable_radio(rt2x00dev);
        if (retval)
-               return retval;
+               goto out;
 
        set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
 
-       return 0;
+out:
+       clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
+       return retval;
 }
 
 void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
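The three rt2x00dev.c hunks above bracket the restart path with a DEVICE_STATE_RESET flag: it is set only when mac80211 restarts an already-started device, and cleared on every exit path of rt2x00lib_start(), so rt2800_init_registers() can tell a watchdog reset (keep the IV/EIV table) from a cold start (wipe it). A minimal userspace sketch of that bracket pattern follows; the structure, function names and the init_hw() stub are illustrative, not the driver's code.

#include <stdbool.h>

/* Toy model of the reset-flag bracket: the flag is set only for the
 * restart path and is cleared on every exit, success or failure, so
 * later init code can test it. */
struct dev_state {
	bool started;
	bool in_reset;
};

static int init_hw(struct dev_state *s)
{
	/* Stand-in for firmware load + register init; a real driver would
	 * keep IV/EIV entries while s->in_reset is true and wipe them
	 * otherwise. */
	(void)s;
	return 0;
}

static int dev_start(struct dev_state *s)
{
	int ret = 0;

	if (s->started) {
		s->in_reset = true;	/* restart requested by the stack */
		/* stop + pre-reset hook would run here */
	}

	ret = init_hw(s);
	if (ret)
		goto out;

	s->started = true;
out:
	s->in_reset = false;		/* never leak the flag past start() */
	return ret;
}

int main(void)
{
	struct dev_state s = { 0 };

	dev_start(&s);			/* cold start */
	dev_start(&s);			/* restart: in_reset is briefly set */
	return 0;
}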
index f5048d4b8cb6d3d32ba0f44f271023cc15544601..760eaffeebd64a100392f5d97da43ae303428520 100644 (file)
@@ -645,7 +645,6 @@ fail_rx:
        kfree(rsi_dev->tx_buffer);
 
 fail_eps:
-       kfree(rsi_dev);
 
        return status;
 }
index e42850095892d41fbbf43dc797d28efd5a3d3537..7eda62a9e0dfa3d95616c85e3339570725b01cd8 100644 (file)
@@ -316,7 +316,7 @@ static int st95hf_echo_command(struct st95hf_context *st95context)
                                          &echo_response);
        if (result) {
                dev_err(&st95context->spicontext.spidev->dev,
-                       "err: echo response receieve error = 0x%x\n", result);
+                       "err: echo response receive error = 0x%x\n", result);
                return result;
        }
 
index 3e7b11cf1aaec67f69ed4faf7d3a7b3c48215d5e..cb98b8fe786e2232bbdb1d0a371ce5df0e2e13a3 100644 (file)
@@ -655,6 +655,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        resource_size_t start, size;
        struct nd_region *nd_region;
        unsigned long npfns, align;
+       u32 end_trunc;
        struct nd_pfn_sb *pfn_sb;
        phys_addr_t offset;
        const char *sig;
@@ -696,6 +697,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        size = resource_size(&nsio->res);
        npfns = PHYS_PFN(size - SZ_8K);
        align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
+       end_trunc = start + size - ALIGN_DOWN(start + size, align);
        if (nd_pfn->mode == PFN_MODE_PMEM) {
                /*
                 * The altmap should be padded out to the block size used
@@ -714,7 +716,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
                return -ENXIO;
        }
 
-       npfns = PHYS_PFN(size - offset);
+       npfns = PHYS_PFN(size - offset - end_trunc);
        pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
        pfn_sb->dataoff = cpu_to_le64(offset);
        pfn_sb->npfns = cpu_to_le64(npfns);
@@ -723,6 +725,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
        pfn_sb->version_major = cpu_to_le16(1);
        pfn_sb->version_minor = cpu_to_le16(3);
+       pfn_sb->end_trunc = cpu_to_le32(end_trunc);
        pfn_sb->align = cpu_to_le32(nd_pfn->align);
        checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
        pfn_sb->checksum = cpu_to_le64(checksum);
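The end_trunc computation above trims the bytes that lie past the last alignment boundary of the namespace so they are not counted in npfns. A standalone sketch of the arithmetic, with made-up start/size/align values and a simplified, power-of-two-only ALIGN_DOWN:

#include <stdint.h>
#include <stdio.h>

/* Simplified ALIGN_DOWN for power-of-two alignments only. */
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t start = 0x240000000ULL;	/* example namespace start */
	uint64_t size  = 0x7e5f0000ULL;		/* example namespace size  */
	uint64_t align = 2ULL << 20;		/* example 2 MiB alignment */

	/* Bytes past the last aligned boundary; these are not handed to
	 * the page allocator, so npfns must not count them. */
	uint64_t end_trunc = start + size - ALIGN_DOWN(start + size, align);

	printf("end_trunc = %llu bytes\n", (unsigned long long)end_trunc);
	return 0;
}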
index 9cdf14b9aaabe06a4a0dde1029e8d28425287564..223d617ecfe17458207dfc6dfb3c5b9c134d937f 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/debugfs.h>
 #include <linux/serial_core.h>
 #include <linux/sysfs.h>
+#include <linux/random.h>
 
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #include <asm/page.h>
@@ -1044,6 +1045,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 {
        int l;
        const char *p;
+       const void *rng_seed;
 
        pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
 
@@ -1078,6 +1080,18 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 
        pr_debug("Command line is: %s\n", (char*)data);
 
+       rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
+       if (rng_seed && l > 0) {
+               add_bootloader_randomness(rng_seed, l);
+
+               /* try to clear seed so it won't be found. */
+               fdt_nop_property(initial_boot_params, node, "rng-seed");
+
+               /* update CRC check value */
+               of_fdt_crc32 = crc32_be(~0, initial_boot_params,
+                               fdt_totalsize(initial_boot_params));
+       }
+
        /* break now */
        return 1;
 }
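The rng-seed handling above does three things in order: feed the seed to the entropy pool, scrub the property from the flattened device tree so it cannot be read back, and recompute the stored FDT checksum so later verification still passes. A toy userspace model of that order of operations; the buffer layout and the checksum routine are stand-ins, not libfdt or crc32_be.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Trivial stand-in checksum, only to show where recomputation happens. */
static uint32_t toy_checksum(const uint8_t *blob, size_t len)
{
	uint32_t sum = 0;

	while (len--)
		sum += *blob++;
	return sum;
}

int main(void)
{
	uint8_t blob[32] = { 0 };
	uint8_t seed[8]  = { 1, 2, 3, 4, 5, 6, 7, 8 };	/* pretend "rng-seed" */

	memcpy(blob + 8, seed, sizeof(seed));		/* seed lives in the blob */

	/* 1. consume the entropy (stand-in for add_bootloader_randomness) */
	uint32_t pool = toy_checksum(seed, sizeof(seed));

	/* 2. wipe the property so the seed cannot be found later */
	memset(blob + 8, 0, sizeof(seed));

	/* 3. refresh the checksum over the modified blob */
	uint32_t crc = toy_checksum(blob, sizeof(blob));

	printf("pool=%u crc=%u\n", pool, crc);
	return 0;
}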
index da71c741cb46c51e85303a2cbe72e0309047697c..abcf54f7d19c94cc7031e1561b91c6eb42d91cb7 100644 (file)
@@ -113,8 +113,6 @@ struct smmu_pmu {
        u64 counter_mask;
        u32 options;
        bool global_filter;
-       u32 global_filter_span;
-       u32 global_filter_sid;
 };
 
 #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
@@ -260,6 +258,19 @@ static void smmu_pmu_set_event_filter(struct perf_event *event,
        smmu_pmu_set_smr(smmu_pmu, idx, sid);
 }
 
+static bool smmu_pmu_check_global_filter(struct perf_event *curr,
+                                        struct perf_event *new)
+{
+       if (get_filter_enable(new) != get_filter_enable(curr))
+               return false;
+
+       if (!get_filter_enable(new))
+               return true;
+
+       return get_filter_span(new) == get_filter_span(curr) &&
+              get_filter_stream_id(new) == get_filter_stream_id(curr);
+}
+
 static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
                                       struct perf_event *event, int idx)
 {
@@ -279,17 +290,14 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
        }
 
        /* Requested settings same as current global settings*/
-       if (span == smmu_pmu->global_filter_span &&
-           sid == smmu_pmu->global_filter_sid)
+       idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
+       if (idx == num_ctrs ||
+           smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
+               smmu_pmu_set_event_filter(event, 0, span, sid);
                return 0;
+       }
 
-       if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs))
-               return -EAGAIN;
-
-       smmu_pmu_set_event_filter(event, 0, span, sid);
-       smmu_pmu->global_filter_span = span;
-       smmu_pmu->global_filter_sid = sid;
-       return 0;
+       return -EAGAIN;
 }
 
 static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
@@ -312,6 +320,19 @@ static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
        return idx;
 }
 
+static bool smmu_pmu_events_compatible(struct perf_event *curr,
+                                      struct perf_event *new)
+{
+       if (new->pmu != curr->pmu)
+               return false;
+
+       if (to_smmu_pmu(new->pmu)->global_filter &&
+           !smmu_pmu_check_global_filter(curr, new))
+               return false;
+
+       return true;
+}
+
 /*
  * Implementation of abstract pmu functionality required by
  * the core perf events code.
@@ -323,6 +344,7 @@ static int smmu_pmu_event_init(struct perf_event *event)
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
        struct device *dev = smmu_pmu->dev;
        struct perf_event *sibling;
+       int group_num_events = 1;
        u16 event_id;
 
        if (event->attr.type != event->pmu->type)
@@ -347,18 +369,23 @@ static int smmu_pmu_event_init(struct perf_event *event)
        }
 
        /* Don't allow groups with mixed PMUs, except for s/w events */
-       if (event->group_leader->pmu != event->pmu &&
-           !is_software_event(event->group_leader)) {
-               dev_dbg(dev, "Can't create mixed PMU group\n");
-               return -EINVAL;
+       if (!is_software_event(event->group_leader)) {
+               if (!smmu_pmu_events_compatible(event->group_leader, event))
+                       return -EINVAL;
+
+               if (++group_num_events > smmu_pmu->num_counters)
+                       return -EINVAL;
        }
 
        for_each_sibling_event(sibling, event->group_leader) {
-               if (sibling->pmu != event->pmu &&
-                   !is_software_event(sibling)) {
-                       dev_dbg(dev, "Can't create mixed PMU group\n");
+               if (is_software_event(sibling))
+                       continue;
+
+               if (!smmu_pmu_events_compatible(sibling, event))
+                       return -EINVAL;
+
+               if (++group_num_events > smmu_pmu->num_counters)
                        return -EINVAL;
-               }
        }
 
        hwc->idx = -1;
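The per-counter bookkeeping above replaces the cached global_filter_span/sid pair: a new event is accepted if no counter is in use, or if its filter settings match those of an already-programmed event. A standalone restatement of that predicate; the struct and field names are illustrative, not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ev_filter {
	bool     enable;
	uint32_t span;
	uint32_t stream_id;
};

/* Two events may share a globally filtered PMU only if both have
 * filtering disabled, or both ask for the same span and stream ID. */
static bool filters_compatible(const struct ev_filter *cur,
			       const struct ev_filter *cand)
{
	if (cand->enable != cur->enable)
		return false;
	if (!cand->enable)
		return true;
	return cand->span == cur->span && cand->stream_id == cur->stream_id;
}

int main(void)
{
	struct ev_filter a = { true, 1, 0x10 };
	struct ev_filter b = { true, 1, 0x10 };
	struct ev_filter c = { true, 0, 0x20 };

	printf("a/b compatible: %d\n", filters_compatible(&a, &b));	/* 1 */
	printf("a/c compatible: %d\n", filters_compatible(&a, &c));	/* 0 */
	return 0;
}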
index 63fe21600072476af2188c6bec9adb2e77a59cdc..ce7345745b42c655dc2a3c9c831b47bd896590e1 100644 (file)
@@ -35,6 +35,8 @@
 #define EVENT_CYCLES_COUNTER   0
 #define NUM_COUNTERS           4
 
+#define AXI_MASKING_REVERT     0xffff0000      /* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */
+
 #define to_ddr_pmu(p)          container_of(p, struct ddr_pmu, pmu)
 
 #define DDR_PERF_DEV_NAME      "imx8_ddr"
 
 static DEFINE_IDA(ddr_ida);
 
+/* DDR Perf hardware feature */
+#define DDR_CAP_AXI_ID_FILTER          0x1     /* support AXI ID filter */
+
+struct fsl_ddr_devtype_data {
+       unsigned int quirks;    /* quirks needed for different DDR Perf core */
+};
+
+static const struct fsl_ddr_devtype_data imx8_devtype_data;
+
+static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
+       .quirks = DDR_CAP_AXI_ID_FILTER,
+};
+
 static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
-       { .compatible = "fsl,imx8-ddr-pmu",},
-       { .compatible = "fsl,imx8m-ddr-pmu",},
+       { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
+       { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
 
 struct ddr_pmu {
        struct pmu pmu;
@@ -57,6 +73,7 @@ struct ddr_pmu {
        struct perf_event *events[NUM_COUNTERS];
        int active_events;
        enum cpuhp_state cpuhp_state;
+       const struct fsl_ddr_devtype_data *devtype_data;
        int irq;
        int id;
 };
@@ -128,6 +145,8 @@ static struct attribute *ddr_perf_events_attrs[] = {
        IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
        IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
        IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
+       IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
+       IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
        NULL,
 };
 
@@ -137,9 +156,13 @@ static struct attribute_group ddr_perf_events_attr_group = {
 };
 
 PMU_FORMAT_ATTR(event, "config:0-7");
+PMU_FORMAT_ATTR(axi_id, "config1:0-15");
+PMU_FORMAT_ATTR(axi_mask, "config1:16-31");
 
 static struct attribute *ddr_perf_format_attrs[] = {
        &format_attr_event.attr,
+       &format_attr_axi_id.attr,
+       &format_attr_axi_mask.attr,
        NULL,
 };
 
@@ -189,6 +212,26 @@ static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
        return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
 }
 
+static bool ddr_perf_is_filtered(struct perf_event *event)
+{
+       return event->attr.config == 0x41 || event->attr.config == 0x42;
+}
+
+static u32 ddr_perf_filter_val(struct perf_event *event)
+{
+       return event->attr.config1;
+}
+
+static bool ddr_perf_filters_compatible(struct perf_event *a,
+                                       struct perf_event *b)
+{
+       if (!ddr_perf_is_filtered(a))
+               return true;
+       if (!ddr_perf_is_filtered(b))
+               return true;
+       return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
+}
+
 static int ddr_perf_event_init(struct perf_event *event)
 {
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
@@ -215,6 +258,15 @@ static int ddr_perf_event_init(struct perf_event *event)
                        !is_software_event(event->group_leader))
                return -EINVAL;
 
+       if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
+               if (!ddr_perf_filters_compatible(event, event->group_leader))
+                       return -EINVAL;
+               for_each_sibling_event(sibling, event->group_leader) {
+                       if (!ddr_perf_filters_compatible(event, sibling))
+                               return -EINVAL;
+               }
+       }
+
        for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu &&
                                !is_software_event(sibling))
@@ -287,6 +339,23 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
        struct hw_perf_event *hwc = &event->hw;
        int counter;
        int cfg = event->attr.config;
+       int cfg1 = event->attr.config1;
+
+       if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
+               int i;
+
+               for (i = 1; i < NUM_COUNTERS; i++) {
+                       if (pmu->events[i] &&
+                           !ddr_perf_filters_compatible(event, pmu->events[i]))
+                               return -EINVAL;
+               }
+
+               if (ddr_perf_is_filtered(event)) {
+                       /* revert axi id masking(axi_mask) value */
+                       cfg1 ^= AXI_MASKING_REVERT;
+                       writel(cfg1, pmu->base + COUNTER_DPCR1);
+               }
+       }
 
        counter = ddr_perf_alloc_counter(pmu, cfg);
        if (counter < 0) {
@@ -472,6 +541,8 @@ static int ddr_perf_probe(struct platform_device *pdev)
        if (!name)
                return -ENOMEM;
 
+       pmu->devtype_data = of_device_get_match_data(&pdev->dev);
+
        pmu->cpu = raw_smp_processor_id();
        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                      DDR_CPUHP_CB_NAME,
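With the AXI ID filter quirk above, config1 carries axi_id in bits 0..15 and axi_mask in bits 16..31, and the driver XORs the value with AXI_MASKING_REVERT to flip the mask half before writing COUNTER_DPCR1. A small standalone illustration of that bit manipulation; the id and mask values below are examples, not taken from real hardware.

#include <stdint.h>
#include <stdio.h>

#define AXI_MASKING_REVERT 0xffff0000u	/* flip only the mask bits */

int main(void)
{
	uint32_t axi_id   = 0x0012;		/* perf ...,axi_id=0x12   */
	uint32_t axi_mask = 0x00ff;		/* perf ...,axi_mask=0xff */
	uint32_t cfg1  = (axi_mask << 16) | axi_id;

	/* value the driver would program into COUNTER_DPCR1 */
	uint32_t dpcr1 = cfg1 ^ AXI_MASKING_REVERT;

	printf("cfg1=0x%08x dpcr1=0x%08x\n", cfg1, dpcr1);
	return 0;
}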
index 6ad0823bcf23781b68000637d3dd956756c3e4cf..e42d4464c2cf7216191f1082d35abc8f3a060294 100644 (file)
@@ -217,10 +217,8 @@ static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
 
        /* Read and init IRQ */
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(&pdev->dev, "DDRC PMU get irq fail; irq:%d\n", irq);
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
                               IRQF_NOBALANCING | IRQF_NO_THREAD,
index 4f2917f3e25e7e2f1736afa05f832bda861fb8be..f28063873e1117e57bc7f4c1212a32125df430fd 100644 (file)
@@ -207,10 +207,8 @@ static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
 
        /* Read and init IRQ */
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(&pdev->dev, "HHA PMU get irq fail; irq:%d\n", irq);
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
                              IRQF_NOBALANCING | IRQF_NO_THREAD,
index 9153e093f9df136bca9841a89b5503b88f937f76..078b8dc5725034d00f600cdc71fa956c2bb77460 100644 (file)
@@ -206,10 +206,8 @@ static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu,
 
        /* Read and init IRQ */
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(&pdev->dev, "L3C PMU get irq fail; irq:%d\n", irq);
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr,
                               IRQF_NOBALANCING | IRQF_NO_THREAD,
index d06182fe14b85cee047ba36d52840094f59d52dc..21d6991dbe0ba8efa054499c9695a916752336b1 100644 (file)
@@ -909,12 +909,8 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
        cluster->cluster_id = fw_cluster_id;
 
        irq = platform_get_irq(sdev, 0);
-       if (irq < 0) {
-               dev_err(&pdev->dev,
-                       "Failed to get valid irq for cluster %ld\n",
-                       fw_cluster_id);
+       if (irq < 0)
                return irq;
-       }
        irq_set_status_flags(irq, IRQ_NOAUTOEN);
        cluster->irq = irq;
 
index 3259e2ebeb392a0aef655f8243cae57d5225b396..7e328d6385c37be0344c8e9d4ca9e3d914458bec 100644 (file)
@@ -1901,10 +1901,8 @@ static int xgene_pmu_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(&pdev->dev, "No IRQ resource\n");
+       if (irq < 0)
                return -EINVAL;
-       }
 
        rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
                                IRQF_NOBALANCING | IRQF_NO_THREAD,
index ba6438ac4d72a0623da3abd65a03f57d326224b8..ff84d1afd229d34f50be3564277222221342582a 100644 (file)
@@ -2552,7 +2552,7 @@ static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx,
                        if (IS_ERR(map))
                                return map;
                } else
-                       map = ERR_PTR(-ENODEV);
+                       return ERR_PTR(-ENODEV);
 
                ctx->maps[ASPEED_IP_LPC] = map;
                dev_dbg(ctx->dev, "Acquired LPC regmap");
@@ -2562,6 +2562,33 @@ static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx,
        return ERR_PTR(-EINVAL);
 }
 
+static int aspeed_g5_sig_expr_eval(struct aspeed_pinmux_data *ctx,
+                                  const struct aspeed_sig_expr *expr,
+                                  bool enabled)
+{
+       int ret;
+       int i;
+
+       for (i = 0; i < expr->ndescs; i++) {
+               const struct aspeed_sig_desc *desc = &expr->descs[i];
+               struct regmap *map;
+
+               map = aspeed_g5_acquire_regmap(ctx, desc->ip);
+               if (IS_ERR(map)) {
+                       dev_err(ctx->dev,
+                               "Failed to acquire regmap for IP block %d\n",
+                               desc->ip);
+                       return PTR_ERR(map);
+               }
+
+               ret = aspeed_sig_desc_eval(desc, enabled, ctx->maps[desc->ip]);
+               if (ret <= 0)
+                       return ret;
+       }
+
+       return 1;
+}
+
 /**
  * Configure a pin's signal by applying an expression's descriptor state for
  * all descriptors in the expression.
@@ -2647,6 +2674,7 @@ static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx,
 }
 
 static const struct aspeed_pinmux_ops aspeed_g5_ops = {
+       .eval = aspeed_g5_sig_expr_eval,
        .set = aspeed_g5_sig_expr_set,
 };
 
index 839c01b7953f2b845f1e0db64f4269feb0bb2131..57305ca838a7c800fb93cf5377bf1ecb5a9bc562 100644 (file)
@@ -78,11 +78,14 @@ int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
  * neither the enabled nor disabled state. Thus we must explicitly test for
  * either condition as required.
  */
-int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx,
+int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx,
                         const struct aspeed_sig_expr *expr, bool enabled)
 {
-       int i;
        int ret;
+       int i;
+
+       if (ctx->ops->eval)
+               return ctx->ops->eval(ctx, expr, enabled);
 
        for (i = 0; i < expr->ndescs; i++) {
                const struct aspeed_sig_desc *desc = &expr->descs[i];
index 52d299b59ce2fe20bebe72a491f66ce59a0e567d..db3457c86f48c5f0314ce917b6c6136560975e86 100644 (file)
@@ -702,6 +702,8 @@ struct aspeed_pin_function {
 struct aspeed_pinmux_data;
 
 struct aspeed_pinmux_ops {
+       int (*eval)(struct aspeed_pinmux_data *ctx,
+                   const struct aspeed_sig_expr *expr, bool enabled);
        int (*set)(struct aspeed_pinmux_data *ctx,
                   const struct aspeed_sig_expr *expr, bool enabled);
 };
@@ -722,9 +724,8 @@ struct aspeed_pinmux_data {
 int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc, bool enabled,
                         struct regmap *map);
 
-int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx,
-                        const struct aspeed_sig_expr *expr,
-                        bool enabled);
+int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx,
+                        const struct aspeed_sig_expr *expr, bool enabled);
 
 static inline int aspeed_sig_expr_set(struct aspeed_pinmux_data *ctx,
                                      const struct aspeed_sig_expr *expr,
index e5a112a8e0671551c978e668d912471723f05af1..297b7b5fcb28df6776f919dfcde750deda59da5a 100644 (file)
@@ -1455,6 +1455,20 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
        chip->irq_eoi(data);
 }
 
+static void byt_init_irq_valid_mask(struct gpio_chip *chip,
+                                   unsigned long *valid_mask,
+                                   unsigned int ngpios)
+{
+       /*
+        * FIXME: currently the valid_mask is filled in as part of
+        * initializing the irq_chip below in byt_gpio_irq_init_hw().
+        * when converting this driver to the new way of passing the
+        * gpio_irq_chip along when adding the gpio_chip, move the
+        * mask initialization into this callback instead. Right now
+        * this callback is here to make sure the mask gets allocated.
+        */
+}
+
 static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
 {
        struct gpio_chip *gc = &vg->chip;
@@ -1525,7 +1539,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
        gc->can_sleep   = false;
        gc->parent      = &vg->pdev->dev;
        gc->ngpio       = vg->soc_data->npins;
-       gc->irq.need_valid_mask = true;
+       gc->irq.init_valid_mask = byt_init_irq_valid_mask;
 
 #ifdef CONFIG_PM_SLEEP
        vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
index 03ec7a5d9d0bd8014382cad9f316071828bc7f63..ab681d1a3a7440b22c5ceccae8c2e6174f55f0f1 100644 (file)
@@ -1543,6 +1543,30 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
        {}
 };
 
+static void chv_init_irq_valid_mask(struct gpio_chip *chip,
+                                   unsigned long *valid_mask,
+                                   unsigned int ngpios)
+{
+       struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
+       const struct chv_community *community = pctrl->community;
+       int i;
+
+       /* Do not add GPIOs that can only generate GPEs to the IRQ domain */
+       for (i = 0; i < community->npins; i++) {
+               const struct pinctrl_pin_desc *desc;
+               u32 intsel;
+
+               desc = &community->pins[i];
+
+               intsel = readl(chv_padreg(pctrl, desc->number, CHV_PADCTRL0));
+               intsel &= CHV_PADCTRL0_INTSEL_MASK;
+               intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
+
+               if (intsel >= community->nirqs)
+                       clear_bit(i, valid_mask);
+       }
+}
+
 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
 {
        const struct chv_gpio_pinrange *range;
@@ -1557,7 +1581,8 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
        chip->label = dev_name(pctrl->dev);
        chip->parent = pctrl->dev;
        chip->base = -1;
-       chip->irq.need_valid_mask = need_valid_mask;
+       if (need_valid_mask)
+               chip->irq.init_valid_mask = chv_init_irq_valid_mask;
 
        ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
        if (ret) {
@@ -1576,21 +1601,6 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                }
        }
 
-       /* Do not add GPIOs that can only generate GPEs to the IRQ domain */
-       for (i = 0; i < community->npins; i++) {
-               const struct pinctrl_pin_desc *desc;
-               u32 intsel;
-
-               desc = &community->pins[i];
-
-               intsel = readl(chv_padreg(pctrl, desc->number, CHV_PADCTRL0));
-               intsel &= CHV_PADCTRL0_INTSEL_MASK;
-               intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
-
-               if (need_valid_mask && intsel >= community->nirqs)
-                       clear_bit(i, chip->irq.valid_mask);
-       }
-
        /*
         * The same set of machines in chv_no_valid_mask[] have incorrectly
         * configured GPIOs that generate spurious interrupts so we use
index d3332da356372f9c666c5e3fd50609beb2fc5a0c..dd5aa9a2dfe545bb5c1315ac0bfbb3daf67fd6bf 100644 (file)
@@ -585,12 +585,24 @@ static int stmfx_pinctrl_gpio_function_enable(struct stmfx_pinctrl *pctl)
        return stmfx_function_enable(pctl->stmfx, func);
 }
 
+static int stmfx_pinctrl_gpio_init_valid_mask(struct gpio_chip *gc,
+                                             unsigned long *valid_mask,
+                                             unsigned int ngpios)
+{
+       struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
+       u32 n;
+
+       for_each_clear_bit(n, &pctl->gpio_valid_mask, ngpios)
+               clear_bit(n, valid_mask);
+
+       return 0;
+}
+
 static int stmfx_pinctrl_probe(struct platform_device *pdev)
 {
        struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
        struct device_node *np = pdev->dev.of_node;
        struct stmfx_pinctrl *pctl;
-       u32 n;
        int irq, ret;
 
        pctl = devm_kzalloc(stmfx->dev, sizeof(*pctl), GFP_KERNEL);
@@ -650,7 +662,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
        pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
        pctl->gpio_chip.can_sleep = true;
        pctl->gpio_chip.of_node = np;
-       pctl->gpio_chip.need_valid_mask = true;
+       pctl->gpio_chip.init_valid_mask = stmfx_pinctrl_gpio_init_valid_mask;
 
        ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl);
        if (ret) {
@@ -668,8 +680,6 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
        pctl->irq_chip.irq_set_type = stmfx_pinctrl_irq_set_type;
        pctl->irq_chip.irq_bus_lock = stmfx_pinctrl_irq_bus_lock;
        pctl->irq_chip.irq_bus_sync_unlock = stmfx_pinctrl_irq_bus_sync_unlock;
-       for_each_clear_bit(n, &pctl->gpio_valid_mask, pctl->gpio_chip.ngpio)
-               clear_bit(n, pctl->gpio_chip.valid_mask);
 
        ret = gpiochip_irqchip_add_nested(&pctl->gpio_chip, &pctl->irq_chip,
                                          0, handle_bad_irq, IRQ_TYPE_NONE);
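Several of the GPIO/pinctrl hunks in this series make the same conversion: instead of setting need_valid_mask and poking the valid_mask bitmap after registration, the driver supplies an init_valid_mask() callback that gpiolib invokes with the freshly allocated bitmap. A sketch of the gpio_chip-level callback shape used by stmfx and msm follows; the body, which simply marks odd lines invalid, is purely illustrative, and it assumes ngpios fits in a single long.

#include <stdio.h>

struct gpio_chip;	/* opaque in this sketch */

static int demo_init_valid_mask(struct gpio_chip *gc,
				unsigned long *valid_mask,
				unsigned int ngpios)
{
	unsigned int n;

	(void)gc;

	/* Illustrative policy only: real drivers derive this from
	 * hardware, firmware or driver data. */
	for (n = 1; n < ngpios; n += 2)
		*valid_mask &= ~(1UL << n);

	return 0;
}

int main(void)
{
	unsigned long mask = ~0UL;	/* gpiolib hands in an all-set bitmap */

	demo_init_valid_mask(NULL, &mask, 16);
	printf("valid_mask = 0x%lx\n", mask);
	return 0;
}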
index 8e14a5f2e97000ee873ba8778ec7d3c363228cdb..fa2c878214010c9480775c270bbc9516e5b64ad7 100644 (file)
@@ -138,6 +138,7 @@ config PINCTRL_QCOM_SPMI_PMIC
        select PINMUX
        select PINCONF
        select GENERIC_PINCONF
+       select GPIOLIB_IRQCHIP
        select IRQ_DOMAIN_HIERARCHY
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
index 7f35c196bb3e866500282d9aadb56ee7c48796ff..b8a1c43222f805ca12fbd94abc52672723421e63 100644 (file)
@@ -593,24 +593,25 @@ static void msm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 #define msm_gpio_dbg_show NULL
 #endif
 
-static int msm_gpio_init_valid_mask(struct gpio_chip *chip)
+static int msm_gpio_init_valid_mask(struct gpio_chip *gc,
+                                   unsigned long *valid_mask,
+                                   unsigned int ngpios)
 {
-       struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
+       struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
        int ret;
        unsigned int len, i;
-       unsigned int max_gpios = pctrl->soc->ngpios;
        const int *reserved = pctrl->soc->reserved_gpios;
        u16 *tmp;
 
        /* Driver provided reserved list overrides DT and ACPI */
        if (reserved) {
-               bitmap_fill(chip->valid_mask, max_gpios);
+               bitmap_fill(valid_mask, ngpios);
                for (i = 0; reserved[i] >= 0; i++) {
-                       if (i >= max_gpios || reserved[i] >= max_gpios) {
+                       if (i >= ngpios || reserved[i] >= ngpios) {
                                dev_err(pctrl->dev, "invalid list of reserved GPIOs\n");
                                return -EINVAL;
                        }
-                       clear_bit(reserved[i], chip->valid_mask);
+                       clear_bit(reserved[i], valid_mask);
                }
 
                return 0;
@@ -622,7 +623,7 @@ static int msm_gpio_init_valid_mask(struct gpio_chip *chip)
        if (ret < 0)
                return 0;
 
-       if (ret > max_gpios)
+       if (ret > ngpios)
                return -EINVAL;
 
        tmp = kmalloc_array(len, sizeof(*tmp), GFP_KERNEL);
@@ -635,9 +636,9 @@ static int msm_gpio_init_valid_mask(struct gpio_chip *chip)
                goto out;
        }
 
-       bitmap_zero(chip->valid_mask, max_gpios);
+       bitmap_zero(valid_mask, ngpios);
        for (i = 0; i < len; i++)
-               set_bit(tmp[i], chip->valid_mask);
+               set_bit(tmp[i], valid_mask);
 
 out:
        kfree(tmp);
@@ -653,7 +654,6 @@ static const struct gpio_chip msm_gpio_template = {
        .request          = gpiochip_generic_request,
        .free             = gpiochip_generic_free,
        .dbg_show         = msm_gpio_dbg_show,
-       .init_valid_mask  = msm_gpio_init_valid_mask,
 };
 
 /* For dual-edge interrupts in software, since some hardware has no
@@ -1015,7 +1015,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
        chip->parent = pctrl->dev;
        chip->owner = THIS_MODULE;
        chip->of_node = pctrl->dev->of_node;
-       chip->need_valid_mask = msm_gpio_needs_valid_mask(pctrl);
+       if (msm_gpio_needs_valid_mask(pctrl))
+               chip->init_valid_mask = msm_gpio_init_valid_mask;
 
        pctrl->irq_chip.name = "msmgpio";
        pctrl->irq_chip.irq_enable = msm_gpio_irq_enable;
index f39da87ea1850aa840827148ebe8e4147da81e19..442db15e0729285427b3cebf148d9f4004f17c57 100644 (file)
@@ -170,8 +170,6 @@ struct pmic_gpio_state {
        struct regmap   *map;
        struct pinctrl_dev *ctrl;
        struct gpio_chip chip;
-       struct fwnode_handle *fwnode;
-       struct irq_domain *domain;
 };
 
 static const struct pinconf_generic_params pmic_gpio_bindings[] = {
@@ -751,23 +749,6 @@ static int pmic_gpio_of_xlate(struct gpio_chip *chip,
        return gpio_desc->args[0] - PMIC_GPIO_PHYSICAL_OFFSET;
 }
 
-static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
-{
-       struct pmic_gpio_state *state = gpiochip_get_data(chip);
-       struct irq_fwspec fwspec;
-
-       fwspec.fwnode = state->fwnode;
-       fwspec.param_count = 2;
-       fwspec.param[0] = pin + PMIC_GPIO_PHYSICAL_OFFSET;
-       /*
-        * Set the type to a safe value temporarily. This will be overwritten
-        * later with the proper value by irq_set_type.
-        */
-       fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
-
-       return irq_create_fwspec_mapping(&fwspec);
-}
-
 static void pmic_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 {
        struct pmic_gpio_state *state = gpiochip_get_data(chip);
@@ -787,7 +768,6 @@ static const struct gpio_chip pmic_gpio_gpio_template = {
        .request                = gpiochip_generic_request,
        .free                   = gpiochip_generic_free,
        .of_xlate               = pmic_gpio_of_xlate,
-       .to_irq                 = pmic_gpio_to_irq,
        .dbg_show               = pmic_gpio_dbg_show,
 };
 
@@ -964,46 +944,24 @@ static int pmic_gpio_domain_translate(struct irq_domain *domain,
        return 0;
 }
 
-static int pmic_gpio_domain_alloc(struct irq_domain *domain, unsigned int virq,
-                                 unsigned int nr_irqs, void *data)
+static unsigned int pmic_gpio_child_offset_to_irq(struct gpio_chip *chip,
+                                                 unsigned int offset)
 {
-       struct pmic_gpio_state *state = container_of(domain->host_data,
-                                                    struct pmic_gpio_state,
-                                                    chip);
-       struct irq_fwspec *fwspec = data;
-       struct irq_fwspec parent_fwspec;
-       irq_hw_number_t hwirq;
-       unsigned int type;
-       int ret, i;
-
-       ret = pmic_gpio_domain_translate(domain, fwspec, &hwirq, &type);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < nr_irqs; i++)
-               irq_domain_set_info(domain, virq + i, hwirq + i,
-                                   &pmic_gpio_irq_chip, state,
-                                   handle_level_irq, NULL, NULL);
+       return offset + PMIC_GPIO_PHYSICAL_OFFSET;
+}
 
-       parent_fwspec.fwnode = domain->parent->fwnode;
-       parent_fwspec.param_count = 4;
-       parent_fwspec.param[0] = 0;
-       parent_fwspec.param[1] = hwirq + 0xc0;
-       parent_fwspec.param[2] = 0;
-       parent_fwspec.param[3] = fwspec->param[1];
+static int pmic_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
+                                          unsigned int child_hwirq,
+                                          unsigned int child_type,
+                                          unsigned int *parent_hwirq,
+                                          unsigned int *parent_type)
+{
+       *parent_hwirq = child_hwirq + 0xc0;
+       *parent_type = child_type;
 
-       return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
-                                           &parent_fwspec);
+       return 0;
 }
 
-static const struct irq_domain_ops pmic_gpio_domain_ops = {
-       .activate = gpiochip_irq_domain_activate,
-       .alloc = pmic_gpio_domain_alloc,
-       .deactivate = gpiochip_irq_domain_deactivate,
-       .free = irq_domain_free_irqs_common,
-       .translate = pmic_gpio_domain_translate,
-};
-
 static int pmic_gpio_probe(struct platform_device *pdev)
 {
        struct irq_domain *parent_domain;
@@ -1013,6 +971,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
        struct pinctrl_desc *pctrldesc;
        struct pmic_gpio_pad *pad, *pads;
        struct pmic_gpio_state *state;
+       struct gpio_irq_chip *girq;
        int ret, npins, i;
        u32 reg;
 
@@ -1092,19 +1051,21 @@ static int pmic_gpio_probe(struct platform_device *pdev)
        if (!parent_domain)
                return -ENXIO;
 
-       state->fwnode = of_node_to_fwnode(state->dev->of_node);
-       state->domain = irq_domain_create_hierarchy(parent_domain, 0,
-                                                   state->chip.ngpio,
-                                                   state->fwnode,
-                                                   &pmic_gpio_domain_ops,
-                                                   &state->chip);
-       if (!state->domain)
-               return -ENODEV;
+       girq = &state->chip.irq;
+       girq->chip = &pmic_gpio_irq_chip;
+       girq->default_type = IRQ_TYPE_NONE;
+       girq->handler = handle_level_irq;
+       girq->fwnode = of_node_to_fwnode(state->dev->of_node);
+       girq->parent_domain = parent_domain;
+       girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;
+       girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_fourcell;
+       girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq;
+       girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate;
 
        ret = gpiochip_add_data(&state->chip, state);
        if (ret) {
                dev_err(state->dev, "can't add gpio chip\n");
-               goto err_chip_add_data;
+               return ret;
        }
 
        /*
@@ -1130,8 +1091,6 @@ static int pmic_gpio_probe(struct platform_device *pdev)
 
 err_range:
        gpiochip_remove(&state->chip);
-err_chip_add_data:
-       irq_domain_remove(state->domain);
        return ret;
 }
 
@@ -1140,7 +1099,6 @@ static int pmic_gpio_remove(struct platform_device *pdev)
        struct pmic_gpio_state *state = platform_get_drvdata(pdev);
 
        gpiochip_remove(&state->chip);
-       irq_domain_remove(state->domain);
        return 0;
 }
 
index 006a8ff640573fa56e42a9c421cfd8f6664f5cd4..714306bc3f79f32f3d82628ccaa2311ce3623ab3 100644 (file)
@@ -706,7 +706,7 @@ static int cros_ec_spi_devm_high_pri_alloc(struct device *dev,
                                           struct cros_ec_spi *ec_spi)
 {
        struct sched_param sched_priority = {
-               .sched_priority = MAX_RT_PRIO - 1,
+               .sched_priority = MAX_RT_PRIO / 2,
        };
        int err;
 
index d9542c661ddc38e30e9c57f1f75e02685a2a1835..f57bbbeb24952add23b251bf6d2f437997f96e92 100644 (file)
@@ -152,6 +152,13 @@ static const struct x86_cpu_id int0002_cpu_ids[] = {
        {}
 };
 
+static void int0002_init_irq_valid_mask(struct gpio_chip *chip,
+                                       unsigned long *valid_mask,
+                                       unsigned int ngpios)
+{
+       bitmap_clear(valid_mask, 0, GPE0A_PME_B0_VIRT_GPIO_PIN);
+}
+
 static int int0002_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -184,7 +191,7 @@ static int int0002_probe(struct platform_device *pdev)
        chip->direction_output = int0002_gpio_direction_output;
        chip->base = -1;
        chip->ngpio = GPE0A_PME_B0_VIRT_GPIO_PIN + 1;
-       chip->irq.need_valid_mask = true;
+       chip->irq.init_valid_mask = int0002_init_irq_valid_mask;
 
        ret = devm_gpiochip_add_data(&pdev->dev, chip, NULL);
        if (ret) {
@@ -192,8 +199,6 @@ static int int0002_probe(struct platform_device *pdev)
                return ret;
        }
 
-       bitmap_clear(chip->irq.valid_mask, 0, GPE0A_PME_B0_VIRT_GPIO_PIN);
-
        /*
         * We manually request the irq here instead of passing a flow-handler
         * to gpiochip_set_chained_irqchip, because the irq is shared.
index ef6777e14d3df080b5d5afa14d0169e38e8ad621..6f0404f501071b37200119fe7d4f90d3c73cf49a 100644 (file)
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_RAS)      += ras.o debugfs.o
+obj-$(CONFIG_RAS)      += ras.o
+obj-$(CONFIG_DEBUG_FS) += debugfs.o
 obj-$(CONFIG_RAS_CEC)  += cec.o
index 5d545806d930372ce02c5637d230b77e03dd2f5a..c09cf55e2d2040031b0688d827d806f57db9df13 100644 (file)
@@ -4,6 +4,7 @@
  */
 #include <linux/mm.h>
 #include <linux/gfp.h>
+#include <linux/ras.h>
 #include <linux/kernel.h>
 #include <linux/workqueue.h>
 
index 9c1b717efad861d332460d32663d82bae3894d3a..0d4f985afbf37f92e9f0f54531f2dd58248ee877 100644 (file)
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 #include <linux/debugfs.h>
+#include <linux/ras.h>
+#include "debugfs.h"
 
 struct dentry *ras_debugfs_dir;
 
index b57093d7c01f660ff65da897f64133944f5058e1..3ee63531f6d5c36d36ffd24b2e7477cf1290e439 100644 (file)
@@ -83,6 +83,7 @@ config REGULATOR_88PM8607
 config REGULATOR_ACT8865
        tristate "Active-semi act8865 voltage regulator"
        depends on I2C
+       depends on POWER_SUPPLY
        select REGMAP_I2C
        help
          This driver controls a active-semi act8865 voltage output
@@ -618,6 +619,15 @@ config REGULATOR_MT6323
          This driver supports the control of different power rails of device
          through regulator interface.
 
+config REGULATOR_MT6358
+       tristate "MediaTek MT6358 PMIC"
+       depends on MFD_MT6397 && BROKEN
+       help
+         Say y here to select this option to enable the power regulator of
+         MediaTek MT6358 PMIC.
+         This driver supports the control of different power rails of device
+         through regulator interface.
+
 config REGULATOR_MT6380
        tristate "MediaTek MT6380 PMIC"
        depends on MTK_PMIC_WRAP
@@ -906,6 +916,13 @@ config REGULATOR_SY8106A
        help
          This driver supports SY8106A single output regulator.
 
+config REGULATOR_SY8824X
+       tristate "Silergy SY8824C/SY8824E regulator"
+       depends on I2C && (OF || COMPILE_TEST)
+       select REGMAP_I2C
+       help
+         This driver supports SY8824C single output regulator.
+
 config REGULATOR_TPS51632
        tristate "TI TPS51632 Power Regulator"
        depends on I2C
index eef73b5a35a4dd1e467a26e6f07e4f7e8b8f6635..2210ba56f9bd115e6229e02861d7009358d977dd 100644 (file)
@@ -79,6 +79,7 @@ obj-$(CONFIG_REGULATOR_MC13XXX_CORE) +=  mc13xxx-regulator-core.o
 obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o
 obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
 obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
+obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
 obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o
 obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
@@ -111,6 +112,7 @@ obj-$(CONFIG_REGULATOR_STM32_PWR) += stm32-pwr.o
 obj-$(CONFIG_REGULATOR_STPMIC1) += stpmic1_regulator.o
 obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
 obj-$(CONFIG_REGULATOR_SY8106A) += sy8106a-regulator.o
+obj-$(CONFIG_REGULATOR_SY8824X) += sy8824x.o
 obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
 obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
 obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o
index cf72d7c6b8c9fb75eb06cdf0575cf85945a939c5..0fa97f934df49b0f3998fd7ee51287085ae9f2f5 100644 (file)
 #include <linux/regulator/act8865.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/power_supply.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/regmap.h>
+#include <dt-bindings/regulator/active-semi,8865-regulator.h>
 
 /*
  * ACT8600 Global Register Map.
  */
 #define        ACT8865_SYS_MODE        0x00
 #define        ACT8865_SYS_CTRL        0x01
+#define        ACT8865_SYS_UNLK_REGS   0x0b
 #define        ACT8865_DCDC1_VSET1     0x20
 #define        ACT8865_DCDC1_VSET2     0x21
 #define        ACT8865_DCDC1_CTRL      0x22
+#define        ACT8865_DCDC1_SUS       0x24
 #define        ACT8865_DCDC2_VSET1     0x30
 #define        ACT8865_DCDC2_VSET2     0x31
 #define        ACT8865_DCDC2_CTRL      0x32
+#define        ACT8865_DCDC2_SUS       0x34
 #define        ACT8865_DCDC3_VSET1     0x40
 #define        ACT8865_DCDC3_VSET2     0x41
 #define        ACT8865_DCDC3_CTRL      0x42
+#define        ACT8865_DCDC3_SUS       0x44
 #define        ACT8865_LDO1_VSET       0x50
 #define        ACT8865_LDO1_CTRL       0x51
+#define        ACT8865_LDO1_SUS        0x52
 #define        ACT8865_LDO2_VSET       0x54
 #define        ACT8865_LDO2_CTRL       0x55
+#define        ACT8865_LDO2_SUS        0x56
 #define        ACT8865_LDO3_VSET       0x60
 #define        ACT8865_LDO3_CTRL       0x61
+#define        ACT8865_LDO3_SUS        0x62
 #define        ACT8865_LDO4_VSET       0x64
 #define        ACT8865_LDO4_CTRL       0x65
+#define        ACT8865_LDO4_SUS        0x66
 #define        ACT8865_MSTROFF         0x20
 
 /*
  * Field Definitions.
  */
 #define        ACT8865_ENA             0x80    /* ON - [7] */
+#define        ACT8865_DIS             0x40    /* DIS - [6] */
+
 #define        ACT8865_VSEL_MASK       0x3F    /* VSET - [5:0] */
 
 
 #define ACT8600_LDO10_ENA              0x40    /* ON - [6] */
 #define ACT8600_SUDCDC_VSEL_MASK       0xFF    /* SUDCDC VSET - [7:0] */
 
+#define ACT8600_APCH_CHG_ACIN          BIT(7)
+#define ACT8600_APCH_CHG_USB           BIT(6)
+#define ACT8600_APCH_CSTATE0           BIT(5)
+#define ACT8600_APCH_CSTATE1           BIT(4)
+
 /*
  * ACT8865 voltage number
  */
@@ -217,6 +234,171 @@ static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = {
        REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0),
 };
 
+static int act8865_set_suspend_state(struct regulator_dev *rdev, bool enable)
+{
+       struct regmap *regmap = rdev->regmap;
+       int id = rdev->desc->id, reg, val;
+
+       switch (id) {
+       case ACT8865_ID_DCDC1:
+               reg = ACT8865_DCDC1_SUS;
+               val = 0xa8;
+               break;
+       case ACT8865_ID_DCDC2:
+               reg = ACT8865_DCDC2_SUS;
+               val = 0xa8;
+               break;
+       case ACT8865_ID_DCDC3:
+               reg = ACT8865_DCDC3_SUS;
+               val = 0xa8;
+               break;
+       case ACT8865_ID_LDO1:
+               reg = ACT8865_LDO1_SUS;
+               val = 0xe8;
+               break;
+       case ACT8865_ID_LDO2:
+               reg = ACT8865_LDO2_SUS;
+               val = 0xe8;
+               break;
+       case ACT8865_ID_LDO3:
+               reg = ACT8865_LDO3_SUS;
+               val = 0xe8;
+               break;
+       case ACT8865_ID_LDO4:
+               reg = ACT8865_LDO4_SUS;
+               val = 0xe8;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (enable)
+               val |= BIT(4);
+
+       /*
+        * Ask the PMIC to enable/disable this output when entering hibernate
+        * mode.
+        */
+       return regmap_write(regmap, reg, val);
+}
+
+static int act8865_set_suspend_enable(struct regulator_dev *rdev)
+{
+       return act8865_set_suspend_state(rdev, true);
+}
+
+static int act8865_set_suspend_disable(struct regulator_dev *rdev)
+{
+       return act8865_set_suspend_state(rdev, false);
+}
+
+static unsigned int act8865_of_map_mode(unsigned int mode)
+{
+       switch (mode) {
+       case ACT8865_REGULATOR_MODE_FIXED:
+               return REGULATOR_MODE_FAST;
+       case ACT8865_REGULATOR_MODE_NORMAL:
+               return REGULATOR_MODE_NORMAL;
+       case ACT8865_REGULATOR_MODE_LOWPOWER:
+               return REGULATOR_MODE_STANDBY;
+       default:
+               return REGULATOR_MODE_INVALID;
+       }
+}
+
+static int act8865_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+       struct regmap *regmap = rdev->regmap;
+       int id = rdev_get_id(rdev);
+       int reg, val = 0;
+
+       switch (id) {
+       case ACT8865_ID_DCDC1:
+               reg = ACT8865_DCDC1_CTRL;
+               break;
+       case ACT8865_ID_DCDC2:
+               reg = ACT8865_DCDC2_CTRL;
+               break;
+       case ACT8865_ID_DCDC3:
+               reg = ACT8865_DCDC3_CTRL;
+               break;
+       case ACT8865_ID_LDO1:
+               reg = ACT8865_LDO1_CTRL;
+               break;
+       case ACT8865_ID_LDO2:
+               reg = ACT8865_LDO2_CTRL;
+               break;
+       case ACT8865_ID_LDO3:
+               reg = ACT8865_LDO3_CTRL;
+               break;
+       case ACT8865_ID_LDO4:
+               reg = ACT8865_LDO4_CTRL;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       switch (mode) {
+       case REGULATOR_MODE_FAST:
+       case REGULATOR_MODE_NORMAL:
+               if (id <= ACT8865_ID_DCDC3)
+                       val = BIT(5);
+               break;
+       case REGULATOR_MODE_STANDBY:
+               if (id > ACT8865_ID_DCDC3)
+                       val = BIT(5);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return regmap_update_bits(regmap, reg, BIT(5), val);
+}
+
+static unsigned int act8865_get_mode(struct regulator_dev *rdev)
+{
+       struct regmap *regmap = rdev->regmap;
+       int id = rdev_get_id(rdev);
+       int reg, ret, val = 0;
+
+       switch (id) {
+       case ACT8865_ID_DCDC1:
+               reg = ACT8865_DCDC1_CTRL;
+               break;
+       case ACT8865_ID_DCDC2:
+               reg = ACT8865_DCDC2_CTRL;
+               break;
+       case ACT8865_ID_DCDC3:
+               reg = ACT8865_DCDC3_CTRL;
+               break;
+       case ACT8865_ID_LDO1:
+               reg = ACT8865_LDO1_CTRL;
+               break;
+       case ACT8865_ID_LDO2:
+               reg = ACT8865_LDO2_CTRL;
+               break;
+       case ACT8865_ID_LDO3:
+               reg = ACT8865_LDO3_CTRL;
+               break;
+       case ACT8865_ID_LDO4:
+               reg = ACT8865_LDO4_CTRL;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ret = regmap_read(regmap, reg, &val);
+       if (ret)
+               return ret;
+
+       if (id <= ACT8865_ID_DCDC3 && (val & BIT(5)))
+               return REGULATOR_MODE_FAST;
+       else if (id > ACT8865_ID_DCDC3 && !(val & BIT(5)))
+               return REGULATOR_MODE_NORMAL;
+       else
+               return REGULATOR_MODE_STANDBY;
+}
+
 static const struct regulator_ops act8865_ops = {
        .list_voltage           = regulator_list_voltage_linear_range,
        .map_voltage            = regulator_map_voltage_linear_range,
@@ -224,24 +406,44 @@ static const struct regulator_ops act8865_ops = {
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
+       .set_mode               = act8865_set_mode,
+       .get_mode               = act8865_get_mode,
        .is_enabled             = regulator_is_enabled_regmap,
+       .set_suspend_enable     = act8865_set_suspend_enable,
+       .set_suspend_disable    = act8865_set_suspend_disable,
 };
 
 static const struct regulator_ops act8865_ldo_ops = {
+       .list_voltage           = regulator_list_voltage_linear_range,
+       .map_voltage            = regulator_map_voltage_linear_range,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
+       .set_mode               = act8865_set_mode,
+       .get_mode               = act8865_get_mode,
+       .is_enabled             = regulator_is_enabled_regmap,
+       .set_suspend_enable     = act8865_set_suspend_enable,
+       .set_suspend_disable    = act8865_set_suspend_disable,
+       .set_pull_down          = regulator_set_pull_down_regmap,
+};
+
+static const struct regulator_ops act8865_fixed_ldo_ops = {
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
        .is_enabled             = regulator_is_enabled_regmap,
 };
 
-#define ACT88xx_REG(_name, _family, _id, _vsel_reg, _supply)           \
+#define ACT88xx_REG_(_name, _family, _id, _vsel_reg, _supply, _ops)    \
        [_family##_ID_##_id] = {                                        \
                .name                   = _name,                        \
                .of_match               = of_match_ptr(_name),          \
+               .of_map_mode            = act8865_of_map_mode,          \
                .regulators_node        = of_match_ptr("regulators"),   \
                .supply_name            = _supply,                      \
                .id                     = _family##_ID_##_id,           \
                .type                   = REGULATOR_VOLTAGE,            \
-               .ops                    = &act8865_ops,                 \
+               .ops                    = _ops,                         \
                .n_voltages             = ACT8865_VOLTAGE_NUM,          \
                .linear_ranges          = act8865_voltage_ranges,       \
                .n_linear_ranges        = ARRAY_SIZE(act8865_voltage_ranges), \
@@ -249,9 +451,17 @@ static const struct regulator_ops act8865_ldo_ops = {
                .vsel_mask              = ACT8865_VSEL_MASK,            \
                .enable_reg             = _family##_##_id##_CTRL,       \
                .enable_mask            = ACT8865_ENA,                  \
+               .pull_down_reg          = _family##_##_id##_CTRL,       \
+               .pull_down_mask         = ACT8865_DIS,                  \
                .owner                  = THIS_MODULE,                  \
        }
 
+#define ACT88xx_REG(_name, _family, _id, _vsel_reg, _supply) \
+       ACT88xx_REG_(_name, _family, _id, _vsel_reg, _supply, &act8865_ops)
+
+#define ACT88xx_LDO(_name, _family, _id, _vsel_reg, _supply) \
+       ACT88xx_REG_(_name, _family, _id, _vsel_reg, _supply, &act8865_ldo_ops)
+
 static const struct regulator_desc act8600_regulators[] = {
        ACT88xx_REG("DCDC1", ACT8600, DCDC1, VSET, "vp1"),
        ACT88xx_REG("DCDC2", ACT8600, DCDC2, VSET, "vp2"),
@@ -281,7 +491,7 @@ static const struct regulator_desc act8600_regulators[] = {
                .of_match = of_match_ptr("LDO_REG9"),
                .regulators_node = of_match_ptr("regulators"),
                .id = ACT8600_ID_LDO9,
-               .ops = &act8865_ldo_ops,
+               .ops = &act8865_fixed_ldo_ops,
                .type = REGULATOR_VOLTAGE,
                .n_voltages = 1,
                .fixed_uV = 3300000,
@@ -294,7 +504,7 @@ static const struct regulator_desc act8600_regulators[] = {
                .of_match = of_match_ptr("LDO_REG10"),
                .regulators_node = of_match_ptr("regulators"),
                .id = ACT8600_ID_LDO10,
-               .ops = &act8865_ldo_ops,
+               .ops = &act8865_fixed_ldo_ops,
                .type = REGULATOR_VOLTAGE,
                .n_voltages = 1,
                .fixed_uV = 1200000,
@@ -323,20 +533,20 @@ static const struct regulator_desc act8865_regulators[] = {
        ACT88xx_REG("DCDC_REG1", ACT8865, DCDC1, VSET1, "vp1"),
        ACT88xx_REG("DCDC_REG2", ACT8865, DCDC2, VSET1, "vp2"),
        ACT88xx_REG("DCDC_REG3", ACT8865, DCDC3, VSET1, "vp3"),
-       ACT88xx_REG("LDO_REG1", ACT8865, LDO1, VSET, "inl45"),
-       ACT88xx_REG("LDO_REG2", ACT8865, LDO2, VSET, "inl45"),
-       ACT88xx_REG("LDO_REG3", ACT8865, LDO3, VSET, "inl67"),
-       ACT88xx_REG("LDO_REG4", ACT8865, LDO4, VSET, "inl67"),
+       ACT88xx_LDO("LDO_REG1", ACT8865, LDO1, VSET, "inl45"),
+       ACT88xx_LDO("LDO_REG2", ACT8865, LDO2, VSET, "inl45"),
+       ACT88xx_LDO("LDO_REG3", ACT8865, LDO3, VSET, "inl67"),
+       ACT88xx_LDO("LDO_REG4", ACT8865, LDO4, VSET, "inl67"),
 };
 
 static const struct regulator_desc act8865_alt_regulators[] = {
        ACT88xx_REG("DCDC_REG1", ACT8865, DCDC1, VSET2, "vp1"),
        ACT88xx_REG("DCDC_REG2", ACT8865, DCDC2, VSET2, "vp2"),
        ACT88xx_REG("DCDC_REG3", ACT8865, DCDC3, VSET2, "vp3"),
-       ACT88xx_REG("LDO_REG1", ACT8865, LDO1, VSET, "inl45"),
-       ACT88xx_REG("LDO_REG2", ACT8865, LDO2, VSET, "inl45"),
-       ACT88xx_REG("LDO_REG3", ACT8865, LDO3, VSET, "inl67"),
-       ACT88xx_REG("LDO_REG4", ACT8865, LDO4, VSET, "inl67"),
+       ACT88xx_LDO("LDO_REG1", ACT8865, LDO1, VSET, "inl45"),
+       ACT88xx_LDO("LDO_REG2", ACT8865, LDO2, VSET, "inl45"),
+       ACT88xx_LDO("LDO_REG3", ACT8865, LDO3, VSET, "inl67"),
+       ACT88xx_LDO("LDO_REG4", ACT8865, LDO4, VSET, "inl67"),
 };
 
 #ifdef CONFIG_OF
@@ -372,6 +582,75 @@ static void act8865_power_off(void)
        while (1);
 }
 
+static int act8600_charger_get_status(struct regmap *map)
+{
+       unsigned int val;
+       int ret;
+       u8 state0, state1;
+
+       ret = regmap_read(map, ACT8600_APCH_STAT, &val);
+       if (ret < 0)
+               return ret;
+
+       state0 = val & ACT8600_APCH_CSTATE0;
+       state1 = val & ACT8600_APCH_CSTATE1;
+
+       if (state0 && !state1)
+               return POWER_SUPPLY_STATUS_CHARGING;
+       if (!state0 && state1)
+               return POWER_SUPPLY_STATUS_NOT_CHARGING;
+       if (!state0 && !state1)
+               return POWER_SUPPLY_STATUS_DISCHARGING;
+
+       return POWER_SUPPLY_STATUS_UNKNOWN;
+}
+
+static int act8600_charger_get_property(struct power_supply *psy,
+               enum power_supply_property psp, union power_supply_propval *val)
+{
+       struct regmap *map = power_supply_get_drvdata(psy);
+       int ret;
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_STATUS:
+               ret = act8600_charger_get_status(map);
+               if (ret < 0)
+                       return ret;
+
+               val->intval = ret;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static enum power_supply_property act8600_charger_properties[] = {
+       POWER_SUPPLY_PROP_STATUS,
+};
+
+static const struct power_supply_desc act8600_charger_desc = {
+       .name = "act8600-charger",
+       .type = POWER_SUPPLY_TYPE_BATTERY,
+       .properties = act8600_charger_properties,
+       .num_properties = ARRAY_SIZE(act8600_charger_properties),
+       .get_property = act8600_charger_get_property,
+};
+
+static int act8600_charger_probe(struct device *dev, struct regmap *regmap)
+{
+       struct power_supply *charger;
+       struct power_supply_config cfg = {
+               .drv_data = regmap,
+               .of_node = dev->of_node,
+       };
+
+       charger = devm_power_supply_register(dev, &act8600_charger_desc, &cfg);
+
+       return PTR_ERR_OR_ZERO(charger);
+}
+
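
As a usage illustration (a minimal, hypothetical sketch: only the "act8600-charger" name comes from act8600_charger_desc above, every other name is made up), another piece of kernel code could read the charger state back through the standard power-supply API:

#include <linux/power_supply.h>

/* Hypothetical consumer: read back the charging state exposed above. */
static int example_read_act8600_status(void)
{
        struct power_supply *psy;
        union power_supply_propval val;
        int ret;

        psy = power_supply_get_by_name("act8600-charger");
        if (!psy)
                return -ENODEV;         /* charger not registered (yet) */

        ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_STATUS, &val);
        power_supply_put(psy);          /* drop the reference taken by _get_by_name() */
        if (ret)
                return ret;

        return val.intval;              /* a POWER_SUPPLY_STATUS_* value */
}
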
 static int act8865_pmic_probe(struct i2c_client *client,
                              const struct i2c_device_id *i2c_id)
 {
@@ -483,9 +762,20 @@ static int act8865_pmic_probe(struct i2c_client *client,
                }
        }
 
+       if (type == ACT8600) {
+               ret = act8600_charger_probe(dev, act8865->regmap);
+               if (ret < 0) {
+                       if (ret != -EPROBE_DEFER)
+                               dev_err(dev, "Failed to probe charger\n");
+                       return ret;
+               }
+       }
+
        i2c_set_clientdata(client, act8865);
 
-       return 0;
+       /* Unlock expert registers for ACT8865. */
+       return type != ACT8865 ? 0 : regmap_write(act8865->regmap,
+                                                 ACT8865_SYS_UNLK_REGS, 0xef);
 }
 
 static const struct i2c_device_id act8865_ids[] = {
index 584284938ac9b23584e25bfa67a8456311093371..d2f804dbc785da98d871984c1e3c7e392426b46b 100644 (file)
@@ -169,16 +169,16 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode)
                reg = ACT8945A_DCDC3_CTRL;
                break;
        case ACT8945A_ID_LDO1:
-               reg = ACT8945A_LDO1_SUS;
+               reg = ACT8945A_LDO1_CTRL;
                break;
        case ACT8945A_ID_LDO2:
-               reg = ACT8945A_LDO2_SUS;
+               reg = ACT8945A_LDO2_CTRL;
                break;
        case ACT8945A_ID_LDO3:
-               reg = ACT8945A_LDO3_SUS;
+               reg = ACT8945A_LDO3_CTRL;
                break;
        case ACT8945A_ID_LDO4:
-               reg = ACT8945A_LDO4_SUS;
+               reg = ACT8945A_LDO4_CTRL;
                break;
        default:
                return -EINVAL;
index e0c0cf462004958635420a95d11ca00eb2b9d472..afe94470b67f7588a28930731d3a4ab6f4d2a331 100644 (file)
@@ -381,12 +381,16 @@ static struct device_node *of_get_child_regulator(struct device_node *parent,
                if (!regnode) {
                        regnode = of_get_child_regulator(child, prop_name);
                        if (regnode)
-                               return regnode;
+                               goto err_node_put;
                } else {
-                       return regnode;
+                       goto err_node_put;
                }
        }
        return NULL;
+
+err_node_put:
+       of_node_put(child);
+       return regnode;
 }
 
 /**
@@ -564,13 +568,15 @@ static ssize_t regulator_uV_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        struct regulator_dev *rdev = dev_get_drvdata(dev);
-       ssize_t ret;
+       int uV;
 
        regulator_lock(rdev);
-       ret = sprintf(buf, "%d\n", regulator_get_voltage_rdev(rdev));
+       uV = regulator_get_voltage_rdev(rdev);
        regulator_unlock(rdev);
 
-       return ret;
+       if (uV < 0)
+               return uV;
+       return sprintf(buf, "%d\n", uV);
 }
 static DEVICE_ATTR(microvolts, 0444, regulator_uV_show, NULL);
 
@@ -5640,7 +5646,7 @@ static int __init regulator_init(void)
 /* init early to allow our consumers to complete system booting */
 core_initcall(regulator_init);
 
-static int __init regulator_late_cleanup(struct device *dev, void *data)
+static int regulator_late_cleanup(struct device *dev, void *data)
 {
        struct regulator_dev *rdev = dev_to_rdev(dev);
        const struct regulator_ops *ops = rdev->desc->ops;
@@ -5689,17 +5695,8 @@ unlock:
        return 0;
 }
 
-static int __init regulator_init_complete(void)
+static void regulator_init_complete_work_function(struct work_struct *work)
 {
-       /*
-        * Since DT doesn't provide an idiomatic mechanism for
-        * enabling full constraints and since it's much more natural
-        * with DT to provide them just assume that a DT enabled
-        * system has full constraints.
-        */
-       if (of_have_populated_dt())
-               has_full_constraints = true;
-
        /*
         * Regulators may have failed to resolve their input supplies
         * when they were registered, either because the input supply was
@@ -5717,6 +5714,35 @@ static int __init regulator_init_complete(void)
         */
        class_for_each_device(&regulator_class, NULL, NULL,
                              regulator_late_cleanup);
+}
+
+static DECLARE_DELAYED_WORK(regulator_init_complete_work,
+                           regulator_init_complete_work_function);
+
+static int __init regulator_init_complete(void)
+{
+       /*
+        * Since DT doesn't provide an idiomatic mechanism for
+        * enabling full constraints and since it's much more natural
+        * with DT to provide them just assume that a DT enabled
+        * system has full constraints.
+        */
+       if (of_have_populated_dt())
+               has_full_constraints = true;
+
+       /*
+        * We punt completion for an arbitrary amount of time since
+        * systems like distros will load many drivers from userspace
+        * so consumers might not always be ready yet.  This is
+        * particularly an issue with laptops, where it might bounce
+        * the display off and then on.  Ideally we'd get a notification
+        * from userspace when this happens, but we don't, so we just wait
+        * a bit and hope we waited long enough.  It'd be better if
+        * we'd only do this on systems that need it, and a kernel
+        * command line option might be useful.
+        */
+       schedule_delayed_work(&regulator_init_complete_work,
+                             msecs_to_jiffies(30000));
 
        return 0;
 }
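
The mechanics behind the hunk above are the standard delayed-work pattern; a stripped-down, hypothetical sketch of that pattern for reference (all foo_* names are illustrative, and the 30-second delay simply mirrors the value used above):

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void foo_late_work_fn(struct work_struct *work)
{
        /* Runs in process context roughly 30 s after being scheduled. */
}

static DECLARE_DELAYED_WORK(foo_late_work, foo_late_work_fn);

static int __init foo_init(void)
{
        schedule_delayed_work(&foo_late_work, msecs_to_jiffies(30000));
        return 0;
}
late_initcall(foo_init);
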
index 2ffc64622451e0cdacae463f62a7dd27b5207276..56f3f72d77075e01e61beb9592acbbc221750fd7 100644 (file)
@@ -1032,10 +1032,8 @@ static int da9062_regulator_probe(struct platform_device *pdev)
 
        /* LDOs overcurrent event support */
        irq = platform_get_irq_byname(pdev, "LDO_LIM");
-       if (irq < 0) {
-               dev_err(&pdev->dev, "Failed to get IRQ.\n");
+       if (irq < 0)
                return irq;
-       }
        regulators->irq_ldo_lim = irq;
 
        ret = devm_request_threaded_irq(&pdev->dev, irq,
index 02f816318fba9762862af0536b401a3818118ba5..28b1b20f45bd1a1fbcedc92143ea6650fd510830 100644 (file)
@@ -863,10 +863,8 @@ static int da9063_regulator_probe(struct platform_device *pdev)
 
        /* LDOs overcurrent event support */
        irq = platform_get_irq_byname(pdev, "LDO_LIM");
-       if (irq < 0) {
-               dev_err(&pdev->dev, "Failed to get IRQ.\n");
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_threaded_irq(&pdev->dev, irq,
                                NULL, da9063_ldo_lim_event,
index 0309823d2c726035b0598b09b3add990c35a8363..bf80748f1ccc2a80ab014ae84bde52696e117a41 100644 (file)
@@ -285,7 +285,7 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
                pdata->reg_node[n] = da9211_matches[i].of_node;
                pdata->gpiod_ren[n] = devm_gpiod_get_from_of_node(dev,
                                  da9211_matches[i].of_node,
-                                 "enable",
+                                 "enable-gpios",
                                  0,
                                  GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
                                  "da9211-enable");
index 999547dde99d2726e01b17ff7252393686326abe..d90a6fd8cbc7468f67324322443254388eff2d17 100644 (file)
 #include <linux/gpio/consumer.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/regulator/machine.h>
+#include <linux/clk.h>
+
 
 struct fixed_voltage_data {
        struct regulator_desc desc;
        struct regulator_dev *dev;
+
+       struct clk *enable_clock;
+       unsigned int clk_enable_counter;
 };
 
+struct fixed_dev_type {
+       bool has_enable_clock;
+};
+
+static const struct fixed_dev_type fixed_voltage_data = {
+       .has_enable_clock = false,
+};
+
+static const struct fixed_dev_type fixed_clkenable_data = {
+       .has_enable_clock = true,
+};
+
+static int reg_clock_enable(struct regulator_dev *rdev)
+{
+       struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
+       int ret = 0;
+
+       ret = clk_prepare_enable(priv->enable_clock);
+       if (ret)
+               return ret;
+
+       priv->clk_enable_counter++;
+
+       return ret;
+}
+
+static int reg_clock_disable(struct regulator_dev *rdev)
+{
+       struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
+
+       clk_disable_unprepare(priv->enable_clock);
+       priv->clk_enable_counter--;
+
+       return 0;
+}
+
+static int reg_clock_is_enabled(struct regulator_dev *rdev)
+{
+       struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
+
+       return priv->clk_enable_counter > 0;
+}
+
 
 /**
  * of_get_fixed_voltage_config - extract fixed_voltage_config structure info
@@ -84,10 +133,19 @@ of_get_fixed_voltage_config(struct device *dev,
 static struct regulator_ops fixed_voltage_ops = {
 };
 
+static struct regulator_ops fixed_voltage_clkenabled_ops = {
+       .enable = reg_clock_enable,
+       .disable = reg_clock_disable,
+       .is_enabled = reg_clock_is_enabled,
+};
+
 static int reg_fixed_voltage_probe(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
        struct fixed_voltage_config *config;
        struct fixed_voltage_data *drvdata;
+       const struct fixed_dev_type *drvtype =
+               of_match_device(dev->driver->of_match_table, dev)->data;
        struct regulator_config cfg = { };
        enum gpiod_flags gflags;
        int ret;
@@ -118,7 +176,18 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
        }
        drvdata->desc.type = REGULATOR_VOLTAGE;
        drvdata->desc.owner = THIS_MODULE;
-       drvdata->desc.ops = &fixed_voltage_ops;
+
+       if (drvtype->has_enable_clock) {
+               drvdata->desc.ops = &fixed_voltage_clkenabled_ops;
+
+               drvdata->enable_clock = devm_clk_get(dev, NULL);
+               if (IS_ERR(drvdata->enable_clock)) {
+                       dev_err(dev, "Can't get enable-clock from devicetree\n");
+                       return -ENOENT;
+               }
+       } else {
+               drvdata->desc.ops = &fixed_voltage_ops;
+       }
 
        drvdata->desc.enable_time = config->startup_delay;
 
@@ -191,8 +260,16 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
 
 #if defined(CONFIG_OF)
 static const struct of_device_id fixed_of_match[] = {
-       { .compatible = "regulator-fixed", },
-       {},
+       {
+               .compatible = "regulator-fixed",
+               .data = &fixed_voltage_data,
+       },
+       {
+               .compatible = "regulator-fixed-clock",
+               .data = &fixed_clkenable_data,
+       },
+       {
+       },
 };
 MODULE_DEVICE_TABLE(of, fixed_of_match);
 #endif
index 4986cc5064a1fbed266ada36b54303814d628693..ca3dc3f3bb292c679e8521d12b082b5e82447349 100644 (file)
@@ -860,3 +860,24 @@ int regulator_get_current_limit_regmap(struct regulator_dev *rdev)
        return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(regulator_get_current_limit_regmap);
+
+/**
+ * regulator_bulk_set_supply_names - initialize the 'supply' fields in an array
+ *                                   of regulator_bulk_data structs
+ *
+ * @consumers: array of regulator_bulk_data entries to initialize
+ * @supply_names: array of supply name strings
+ * @num_supplies: number of supply names to initialize
+ *
+ * Note: the 'consumers' array must have at least 'num_supplies' entries.
+ */
+void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
+                                    const char *const *supply_names,
+                                    unsigned int num_supplies)
+{
+       unsigned int i;
+
+       for (i = 0; i < num_supplies; i++)
+               consumers[i].supply = supply_names[i];
+}
+EXPORT_SYMBOL_GPL(regulator_bulk_set_supply_names);
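
As a usage illustration (a minimal sketch, not taken from this series: the foo_* names are hypothetical, the caller-provided 'supplies' array is assumed to hold at least ARRAY_SIZE(foo_supply_names) entries, and the helper's declaration is assumed to live in <linux/regulator/consumer.h>), the new helper pairs naturally with regulator_bulk_get():

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static const char * const foo_supply_names[] = { "vdda", "vddio" };

static int foo_get_supplies(struct device *dev,
                            struct regulator_bulk_data *supplies)
{
        int ret;

        /* Point each bulk entry at its name before the bulk lookup. */
        regulator_bulk_set_supply_names(supplies, foo_supply_names,
                                        ARRAY_SIZE(foo_supply_names));

        ret = regulator_bulk_get(dev, ARRAY_SIZE(foo_supply_names), supplies);
        if (ret)
                dev_err(dev, "failed to get supplies: %d\n", ret);

        return ret;
}
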
index 5647e2f97ff8d5fca78b9ef610055bf61bb96b7e..4b9f618b07e971682a5704c83f78e5632952b6f8 100644 (file)
 
 /* LM3632 */
 #define LM3632_BOOST_VSEL_MAX          0x26
-#define LM3632_LDO_VSEL_MAX            0x29
+#define LM3632_LDO_VSEL_MAX            0x28
 #define LM3632_VBOOST_MIN              4500000
 #define LM3632_VLDO_MIN                        4000000
 
 /* LM36274 */
 #define LM36274_BOOST_VSEL_MAX         0x3f
-#define LM36274_LDO_VSEL_MAX           0x34
+#define LM36274_LDO_VSEL_MAX           0x32
 #define LM36274_VOLTAGE_MIN            4000000
 
 /* Common */
@@ -226,7 +226,7 @@ static const struct regulator_desc lm363x_regulator_desc[] = {
                .of_match       = "vboost",
                .id             = LM36274_BOOST,
                .ops            = &lm363x_boost_voltage_table_ops,
-               .n_voltages     = LM36274_BOOST_VSEL_MAX,
+               .n_voltages     = LM36274_BOOST_VSEL_MAX + 1,
                .min_uV         = LM36274_VOLTAGE_MIN,
                .uV_step        = LM363X_STEP_50mV,
                .type           = REGULATOR_VOLTAGE,
@@ -239,7 +239,7 @@ static const struct regulator_desc lm363x_regulator_desc[] = {
                .of_match       = "vpos",
                .id             = LM36274_LDO_POS,
                .ops            = &lm363x_regulator_voltage_table_ops,
-               .n_voltages     = LM36274_LDO_VSEL_MAX,
+               .n_voltages     = LM36274_LDO_VSEL_MAX + 1,
                .min_uV         = LM36274_VOLTAGE_MIN,
                .uV_step        = LM363X_STEP_50mV,
                .type           = REGULATOR_VOLTAGE,
@@ -254,7 +254,7 @@ static const struct regulator_desc lm363x_regulator_desc[] = {
                .of_match       = "vneg",
                .id             = LM36274_LDO_NEG,
                .ops            = &lm363x_regulator_voltage_table_ops,
-               .n_voltages     = LM36274_LDO_VSEL_MAX,
+               .n_voltages     = LM36274_LDO_VSEL_MAX + 1,
                .min_uV         = LM36274_VOLTAGE_MIN,
                .uV_step        = LM363X_STEP_50mV,
                .type           = REGULATOR_VOLTAGE,
index 0c440c5e28327c80b596f0996c3ef6b122021a6d..4ae12ac1f4c64452368034cd672b7f7eeea9172b 100644 (file)
@@ -65,7 +65,6 @@ static int lp87565_buck_set_ramp_delay(struct regulator_dev *rdev,
                                       int ramp_delay)
 {
        int id = rdev_get_id(rdev);
-       struct lp87565 *lp87565 = rdev_get_drvdata(rdev);
        unsigned int reg;
        int ret;
 
@@ -86,11 +85,11 @@ static int lp87565_buck_set_ramp_delay(struct regulator_dev *rdev,
        else
                reg = 0;
 
-       ret = regmap_update_bits(lp87565->regmap, regulators[id].ctrl2_reg,
+       ret = regmap_update_bits(rdev->regmap, regulators[id].ctrl2_reg,
                                 LP87565_BUCK_CTRL_2_SLEW_RATE,
                                 reg << __ffs(LP87565_BUCK_CTRL_2_SLEW_RATE));
        if (ret) {
-               dev_err(lp87565->dev, "SLEW RATE write failed: %d\n", ret);
+               dev_err(&rdev->dev, "SLEW RATE write failed: %d\n", ret);
                return ret;
        }
 
index 1b00f363899660d08926ac8ec02a83d7617283af..00e9bb92c326b223a769328713e533010f87bb3b 100644 (file)
@@ -464,7 +464,7 @@ static int lp8788_config_ldo_enable_mode(struct platform_device *pdev,
 {
        struct lp8788 *lp = ldo->lp;
        enum lp8788_ext_ldo_en_id enable_id;
-       u8 en_mask[] = {
+       static const u8 en_mask[] = {
                [EN_ALDO1]   = LP8788_EN_SEL_ALDO1_M,
                [EN_ALDO234] = LP8788_EN_SEL_ALDO234_M,
                [EN_ALDO5]   = LP8788_EN_SEL_ALDO5_M,
index 8020eb57374a1b32d3a1c322483c5a0bb308c514..c8e579e993160d9ac8424ee1f63a11461d3e1732 100644 (file)
@@ -257,7 +257,7 @@ static int max77686_of_parse_cb(struct device_node *np,
        case MAX77686_BUCK9:
        case MAX77686_LDO20 ... MAX77686_LDO22:
                config->ena_gpiod = gpiod_get_from_of_node(np,
-                               "maxim,ena",
+                               "maxim,ena-gpios",
                                0,
                                GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
                                "max77686-regulator");
index 4bca54446287363464c92af84c2347865ebd6271..347043a5a9a7df999722fd73870cda070123abbd 100644 (file)
@@ -485,7 +485,6 @@ static int max8660_probe(struct i2c_client *client,
                rdev = devm_regulator_register(&client->dev,
                                                  &max8660_reg[id], &config);
                if (IS_ERR(rdev)) {
-                       ret = PTR_ERR(rdev);
                        dev_err(&client->dev, "failed to register %s\n",
                                max8660_reg[id].name);
                        return PTR_ERR(rdev);
diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
new file mode 100644 (file)
index 0000000..ba42682
--- /dev/null
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <linux/mfd/mt6358/registers.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6358-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+#define MT6358_BUCK_MODE_AUTO  0
+#define MT6358_BUCK_MODE_FORCE_PWM     1
+
+/*
+ * MT6358 regulators' information
+ *
+ * @desc: standard fields of regulator description.
+ * @status_reg: register used to read back the regulator's enable status.
+ * @qi: mask in @status_reg used to query the regulator's enable status.
+ */
+struct mt6358_regulator_info {
+       struct regulator_desc desc;
+       u32 status_reg;
+       u32 qi;
+       const u32 *index_table;
+       unsigned int n_table;
+       u32 vsel_shift;
+       u32 da_vsel_reg;
+       u32 da_vsel_mask;
+       u32 da_vsel_shift;
+       u32 modeset_reg;
+       u32 modeset_mask;
+       u32 modeset_shift;
+};
+
+#define MT6358_BUCK(match, vreg, min, max, step,               \
+       volt_ranges, vosel_mask, _da_vsel_reg, _da_vsel_mask,   \
+       _da_vsel_shift, _modeset_reg, _modeset_shift)           \
+[MT6358_ID_##vreg] = { \
+       .desc = {       \
+               .name = #vreg,  \
+               .of_match = of_match_ptr(match),        \
+               .ops = &mt6358_volt_range_ops,  \
+               .type = REGULATOR_VOLTAGE,      \
+               .id = MT6358_ID_##vreg,         \
+               .owner = THIS_MODULE,           \
+               .n_voltages = ((max) - (min)) / (step) + 1,     \
+               .linear_ranges = volt_ranges,           \
+               .n_linear_ranges = ARRAY_SIZE(volt_ranges),     \
+               .vsel_reg = MT6358_BUCK_##vreg##_ELR0,  \
+               .vsel_mask = vosel_mask,        \
+               .enable_reg = MT6358_BUCK_##vreg##_CON0,        \
+               .enable_mask = BIT(0),  \
+               .of_map_mode = mt6358_map_mode, \
+       },      \
+       .status_reg = MT6358_BUCK_##vreg##_DBG1,        \
+       .qi = BIT(0),   \
+       .da_vsel_reg = _da_vsel_reg,    \
+       .da_vsel_mask = _da_vsel_mask,  \
+       .da_vsel_shift = _da_vsel_shift,        \
+       .modeset_reg = _modeset_reg,    \
+       .modeset_mask = BIT(_modeset_shift),    \
+       .modeset_shift = _modeset_shift \
+}
+
+#define MT6358_LDO(match, vreg, ldo_volt_table,        \
+       ldo_index_table, enreg, enbit, vosel,   \
+       vosel_mask, vosel_shift)        \
+[MT6358_ID_##vreg] = { \
+       .desc = {       \
+               .name = #vreg,  \
+               .of_match = of_match_ptr(match),        \
+               .ops = &mt6358_volt_table_ops,  \
+               .type = REGULATOR_VOLTAGE,      \
+               .id = MT6358_ID_##vreg, \
+               .owner = THIS_MODULE,   \
+               .n_voltages = ARRAY_SIZE(ldo_volt_table),       \
+               .volt_table = ldo_volt_table,   \
+               .vsel_reg = vosel,      \
+               .vsel_mask = vosel_mask,        \
+               .enable_reg = enreg,    \
+               .enable_mask = BIT(enbit),      \
+       },      \
+       .status_reg = MT6358_LDO_##vreg##_CON1, \
+       .qi = BIT(15),  \
+       .index_table = ldo_index_table, \
+       .n_table = ARRAY_SIZE(ldo_index_table), \
+       .vsel_shift = vosel_shift,      \
+}
+
+#define MT6358_LDO1(match, vreg, min, max, step,       \
+       volt_ranges, _da_vsel_reg, _da_vsel_mask,       \
+       _da_vsel_shift, vosel, vosel_mask)      \
+[MT6358_ID_##vreg] = { \
+       .desc = {       \
+               .name = #vreg,  \
+               .of_match = of_match_ptr(match),        \
+               .ops = &mt6358_volt_range_ops,  \
+               .type = REGULATOR_VOLTAGE,      \
+               .id = MT6358_ID_##vreg, \
+               .owner = THIS_MODULE,   \
+               .n_voltages = ((max) - (min)) / (step) + 1,     \
+               .linear_ranges = volt_ranges,   \
+               .n_linear_ranges = ARRAY_SIZE(volt_ranges),     \
+               .vsel_reg = vosel,      \
+               .vsel_mask = vosel_mask,        \
+               .enable_reg = MT6358_LDO_##vreg##_CON0, \
+               .enable_mask = BIT(0),  \
+       },      \
+       .da_vsel_reg = _da_vsel_reg,    \
+       .da_vsel_mask = _da_vsel_mask,  \
+       .da_vsel_shift = _da_vsel_shift,        \
+       .status_reg = MT6358_LDO_##vreg##_DBG1, \
+       .qi = BIT(0),   \
+}
+
+#define MT6358_REG_FIXED(match, vreg,  \
+       enreg, enbit, volt)     \
+[MT6358_ID_##vreg] = { \
+       .desc = {       \
+               .name = #vreg,  \
+               .of_match = of_match_ptr(match),        \
+               .ops = &mt6358_volt_fixed_ops,  \
+               .type = REGULATOR_VOLTAGE,      \
+               .id = MT6358_ID_##vreg, \
+               .owner = THIS_MODULE,   \
+               .n_voltages = 1,        \
+               .enable_reg = enreg,    \
+               .enable_mask = BIT(enbit),      \
+               .min_uV = volt, \
+       },      \
+       .status_reg = MT6358_LDO_##vreg##_CON1, \
+       .qi = BIT(15),                                                  \
+}
+
+static const struct regulator_linear_range buck_volt_range1[] = {
+       REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 6250),
+};
+
+static const struct regulator_linear_range buck_volt_range2[] = {
+       REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 12500),
+};
+
+static const struct regulator_linear_range buck_volt_range3[] = {
+       REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
+};
+
+static const struct regulator_linear_range buck_volt_range4[] = {
+       REGULATOR_LINEAR_RANGE(1000000, 0, 0x7f, 12500),
+};
+
+static const u32 vdram2_voltages[] = {
+       600000, 1800000,
+};
+
+static const u32 vsim_voltages[] = {
+       1700000, 1800000, 2700000, 3000000, 3100000,
+};
+
+static const u32 vibr_voltages[] = {
+       1200000, 1300000, 1500000, 1800000,
+       2000000, 2800000, 3000000, 3300000,
+};
+
+static const u32 vusb_voltages[] = {
+       3000000, 3100000,
+};
+
+static const u32 vcamd_voltages[] = {
+       900000, 1000000, 1100000, 1200000,
+       1300000, 1500000, 1800000,
+};
+
+static const u32 vefuse_voltages[] = {
+       1700000, 1800000, 1900000,
+};
+
+static const u32 vmch_vemc_voltages[] = {
+       2900000, 3000000, 3300000,
+};
+
+static const u32 vcama_voltages[] = {
+       1800000, 2500000, 2700000,
+       2800000, 2900000, 3000000,
+};
+
+static const u32 vcn33_bt_wifi_voltages[] = {
+       3300000, 3400000, 3500000,
+};
+
+static const u32 vmc_voltages[] = {
+       1800000, 2900000, 3000000, 3300000,
+};
+
+static const u32 vldo28_voltages[] = {
+       2800000, 3000000,
+};
+
+static const u32 vdram2_idx[] = {
+       0, 12,
+};
+
+static const u32 vsim_idx[] = {
+       3, 4, 8, 11, 12,
+};
+
+static const u32 vibr_idx[] = {
+       0, 1, 2, 4, 5, 9, 11, 13,
+};
+
+static const u32 vusb_idx[] = {
+       3, 4,
+};
+
+static const u32 vcamd_idx[] = {
+       3, 4, 5, 6, 7, 9, 12,
+};
+
+static const u32 vefuse_idx[] = {
+       11, 12, 13,
+};
+
+static const u32 vmch_vemc_idx[] = {
+       2, 3, 5,
+};
+
+static const u32 vcama_idx[] = {
+       0, 7, 9, 10, 11, 12,
+};
+
+static const u32 vcn33_bt_wifi_idx[] = {
+       1, 2, 3,
+};
+
+static const u32 vmc_idx[] = {
+       4, 10, 11, 13,
+};
+
+static const u32 vldo28_idx[] = {
+       1, 3,
+};
+
+static unsigned int mt6358_map_mode(unsigned int mode)
+{
+       return mode == MT6358_BUCK_MODE_AUTO ?
+               REGULATOR_MODE_NORMAL : REGULATOR_MODE_FAST;
+}
+
+static int mt6358_set_voltage_sel(struct regulator_dev *rdev,
+                                 unsigned int selector)
+{
+       int idx, ret;
+       const u32 *pvol;
+       struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+
+       pvol = info->index_table;
+
+       idx = pvol[selector];
+       ret = regmap_update_bits(rdev->regmap, info->desc.vsel_reg,
+                                info->desc.vsel_mask,
+                                idx << info->vsel_shift);
+
+       return ret;
+}
+
+static int mt6358_get_voltage_sel(struct regulator_dev *rdev)
+{
+       int idx, ret;
+       u32 selector;
+       struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+       const u32 *pvol;
+
+       ret = regmap_read(rdev->regmap, info->desc.vsel_reg, &selector);
+       if (ret != 0) {
+               dev_info(&rdev->dev,
+                        "Failed to get mt6358 %s vsel reg: %d\n",
+                        info->desc.name, ret);
+               return ret;
+       }
+
+       selector = (selector & info->desc.vsel_mask) >> info->vsel_shift;
+       pvol = info->index_table;
+       for (idx = 0; idx < info->desc.n_voltages; idx++) {
+               if (pvol[idx] == selector)
+                       return idx;
+       }
+
+       return -EINVAL;
+}
+
+static int mt6358_get_buck_voltage_sel(struct regulator_dev *rdev)
+{
+       int ret, regval;
+       struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+
+       ret = regmap_read(rdev->regmap, info->da_vsel_reg, &regval);
+       if (ret != 0) {
+               dev_err(&rdev->dev,
+                       "Failed to get mt6358 Buck %s vsel reg: %d\n",
+                       info->desc.name, ret);
+               return ret;
+       }
+
+       ret = (regval >> info->da_vsel_shift) & info->da_vsel_mask;
+
+       return ret;
+}
+
+static int mt6358_get_status(struct regulator_dev *rdev)
+{
+       int ret;
+       u32 regval;
+       struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+
+       ret = regmap_read(rdev->regmap, info->status_reg, &regval);
+       if (ret != 0) {
+               dev_info(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+               return ret;
+       }
+
+       return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
+}
+
+static int mt6358_regulator_set_mode(struct regulator_dev *rdev,
+                                    unsigned int mode)
+{
+       struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+       int val;
+
+       switch (mode) {
+       case REGULATOR_MODE_FAST:
+               val = MT6358_BUCK_MODE_FORCE_PWM;
+               break;
+       case REGULATOR_MODE_NORMAL:
+               val = MT6358_BUCK_MODE_AUTO;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       dev_dbg(&rdev->dev, "mt6358 buck set_mode %#x, %#x, %#x, %#x\n",
+               info->modeset_reg, info->modeset_mask,
+               info->modeset_shift, val);
+
+       val <<= info->modeset_shift;
+
+       return regmap_update_bits(rdev->regmap, info->modeset_reg,
+                                 info->modeset_mask, val);
+}
+
+static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev)
+{
+       struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+       int ret, regval;
+
+       ret = regmap_read(rdev->regmap, info->modeset_reg, &regval);
+       if (ret != 0) {
+               dev_err(&rdev->dev,
+                       "Failed to get mt6358 buck mode: %d\n", ret);
+               return ret;
+       }
+
+       switch ((regval & info->modeset_mask) >> info->modeset_shift) {
+       case MT6358_BUCK_MODE_AUTO:
+               return REGULATOR_MODE_NORMAL;
+       case MT6358_BUCK_MODE_FORCE_PWM:
+               return REGULATOR_MODE_FAST;
+       default:
+               return -EINVAL;
+       }
+}
+
+static const struct regulator_ops mt6358_volt_range_ops = {
+       .list_voltage = regulator_list_voltage_linear_range,
+       .map_voltage = regulator_map_voltage_linear_range,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = mt6358_get_buck_voltage_sel,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .get_status = mt6358_get_status,
+       .set_mode = mt6358_regulator_set_mode,
+       .get_mode = mt6358_regulator_get_mode,
+};
+
+static const struct regulator_ops mt6358_volt_table_ops = {
+       .list_voltage = regulator_list_voltage_table,
+       .map_voltage = regulator_map_voltage_iterate,
+       .set_voltage_sel = mt6358_set_voltage_sel,
+       .get_voltage_sel = mt6358_get_voltage_sel,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .get_status = mt6358_get_status,
+};
+
+static const struct regulator_ops mt6358_volt_fixed_ops = {
+       .list_voltage = regulator_list_voltage_linear,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .get_status = mt6358_get_status,
+};
+
+/* The array is indexed by id (MT6358_ID_XXX) */
+static struct mt6358_regulator_info mt6358_regulators[] = {
+       MT6358_BUCK("buck_vdram1", VDRAM1, 500000, 2087500, 12500,
+                   buck_volt_range2, 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f,
+                   0, MT6358_VDRAM1_ANA_CON0, 8),
+       MT6358_BUCK("buck_vcore", VCORE, 500000, 1293750, 6250,
+                   buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f,
+                   0, MT6358_VCORE_VGPU_ANA_CON0, 1),
+       MT6358_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
+                   buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f, 0,
+                   MT6358_VPA_ANA_CON0, 3),
+       MT6358_BUCK("buck_vproc11", VPROC11, 500000, 1293750, 6250,
+                   buck_volt_range1, 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f,
+                   0, MT6358_VPROC_ANA_CON0, 1),
+       MT6358_BUCK("buck_vproc12", VPROC12, 500000, 1293750, 6250,
+                   buck_volt_range1, 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f,
+                   0, MT6358_VPROC_ANA_CON0, 2),
+       MT6358_BUCK("buck_vgpu", VGPU, 500000, 1293750, 6250,
+                   buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f, 0,
+                   MT6358_VCORE_VGPU_ANA_CON0, 2),
+       MT6358_BUCK("buck_vs2", VS2, 500000, 2087500, 12500,
+                   buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, 0,
+                   MT6358_VS2_ANA_CON0, 8),
+       MT6358_BUCK("buck_vmodem", VMODEM, 500000, 1293750, 6250,
+                   buck_volt_range1, 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f,
+                   0, MT6358_VMODEM_ANA_CON0, 8),
+       MT6358_BUCK("buck_vs1", VS1, 1000000, 2587500, 12500,
+                   buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, 0,
+                   MT6358_VS1_ANA_CON0, 8),
+       MT6358_REG_FIXED("ldo_vrf12", VRF12,
+                        MT6358_LDO_VRF12_CON0, 0, 1200000),
+       MT6358_REG_FIXED("ldo_vio18", VIO18,
+                        MT6358_LDO_VIO18_CON0, 0, 1800000),
+       MT6358_REG_FIXED("ldo_vcamio", VCAMIO,
+                        MT6358_LDO_VCAMIO_CON0, 0, 1800000),
+       MT6358_REG_FIXED("ldo_vcn18", VCN18, MT6358_LDO_VCN18_CON0, 0, 1800000),
+       MT6358_REG_FIXED("ldo_vfe28", VFE28, MT6358_LDO_VFE28_CON0, 0, 2800000),
+       MT6358_REG_FIXED("ldo_vcn28", VCN28, MT6358_LDO_VCN28_CON0, 0, 2800000),
+       MT6358_REG_FIXED("ldo_vxo22", VXO22, MT6358_LDO_VXO22_CON0, 0, 2200000),
+       MT6358_REG_FIXED("ldo_vaux18", VAUX18,
+                        MT6358_LDO_VAUX18_CON0, 0, 1800000),
+       MT6358_REG_FIXED("ldo_vbif28", VBIF28,
+                        MT6358_LDO_VBIF28_CON0, 0, 2800000),
+       MT6358_REG_FIXED("ldo_vio28", VIO28, MT6358_LDO_VIO28_CON0, 0, 2800000),
+       MT6358_REG_FIXED("ldo_va12", VA12, MT6358_LDO_VA12_CON0, 0, 1200000),
+       MT6358_REG_FIXED("ldo_vrf18", VRF18, MT6358_LDO_VRF18_CON0, 0, 1800000),
+       MT6358_REG_FIXED("ldo_vaud28", VAUD28,
+                        MT6358_LDO_VAUD28_CON0, 0, 2800000),
+       MT6358_LDO("ldo_vdram2", VDRAM2, vdram2_voltages, vdram2_idx,
+                  MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0x10, 0),
+       MT6358_LDO("ldo_vsim1", VSIM1, vsim_voltages, vsim_idx,
+                  MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00, 8),
+       MT6358_LDO("ldo_vibr", VIBR, vibr_voltages, vibr_idx,
+                  MT6358_LDO_VIBR_CON0, 0, MT6358_VIBR_ANA_CON0, 0xf00, 8),
+       MT6358_LDO("ldo_vusb", VUSB, vusb_voltages, vusb_idx,
+                  MT6358_LDO_VUSB_CON0_0, 0, MT6358_VUSB_ANA_CON0, 0x700, 8),
+       MT6358_LDO("ldo_vcamd", VCAMD, vcamd_voltages, vcamd_idx,
+                  MT6358_LDO_VCAMD_CON0, 0, MT6358_VCAMD_ANA_CON0, 0xf00, 8),
+       MT6358_LDO("ldo_vefuse", VEFUSE, vefuse_voltages, vefuse_idx,
+                  MT6358_LDO_VEFUSE_CON0, 0, MT6358_VEFUSE_ANA_CON0, 0xf00, 8),
+       MT6358_LDO("ldo_vmch", VMCH, vmch_vemc_voltages, vmch_vemc_idx,
+                  MT6358_LDO_VMCH_CON0, 0, MT6358_VMCH_ANA_CON0, 0x700, 8),
+       MT6358_LDO("ldo_vcama1", VCAMA1, vcama_voltages, vcama_idx,
+                  MT6358_LDO_VCAMA1_CON0, 0, MT6358_VCAMA1_ANA_CON0, 0xf00, 8),
+       MT6358_LDO("ldo_vemc", VEMC, vmch_vemc_voltages, vmch_vemc_idx,
+                  MT6358_LDO_VEMC_CON0, 0, MT6358_VEMC_ANA_CON0, 0x700, 8),
+       MT6358_LDO("ldo_vcn33_bt", VCN33_BT, vcn33_bt_wifi_voltages,
+                  vcn33_bt_wifi_idx, MT6358_LDO_VCN33_CON0_0,
+                  0, MT6358_VCN33_ANA_CON0, 0x300, 8),
+       MT6358_LDO("ldo_vcn33_wifi", VCN33_WIFI, vcn33_bt_wifi_voltages,
+                  vcn33_bt_wifi_idx, MT6358_LDO_VCN33_CON0_1,
+                  0, MT6358_VCN33_ANA_CON0, 0x300, 8),
+       MT6358_LDO("ldo_vcama2", VCAMA2, vcama_voltages, vcama_idx,
+                  MT6358_LDO_VCAMA2_CON0, 0, MT6358_VCAMA2_ANA_CON0, 0xf00, 8),
+       MT6358_LDO("ldo_vmc", VMC, vmc_voltages, vmc_idx,
+                  MT6358_LDO_VMC_CON0, 0, MT6358_VMC_ANA_CON0, 0xf00, 8),
+       MT6358_LDO("ldo_vldo28", VLDO28, vldo28_voltages, vldo28_idx,
+                  MT6358_LDO_VLDO28_CON0_0, 0,
+                  MT6358_VLDO28_ANA_CON0, 0x300, 8),
+       MT6358_LDO("ldo_vsim2", VSIM2, vsim_voltages, vsim_idx,
+                  MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00, 8),
+       MT6358_LDO1("ldo_vsram_proc11", VSRAM_PROC11, 500000, 1293750, 6250,
+                   buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f, 8,
+                   MT6358_LDO_VSRAM_CON0, 0x7f),
+       MT6358_LDO1("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750, 6250,
+                   buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f, 8,
+                   MT6358_LDO_VSRAM_CON2, 0x7f),
+       MT6358_LDO1("ldo_vsram_gpu", VSRAM_GPU, 500000, 1293750, 6250,
+                   buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f, 8,
+                   MT6358_LDO_VSRAM_CON3, 0x7f),
+       MT6358_LDO1("ldo_vsram_proc12", VSRAM_PROC12, 500000, 1293750, 6250,
+                   buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f, 8,
+                   MT6358_LDO_VSRAM_CON1, 0x7f),
+};
+
+static int mt6358_regulator_probe(struct platform_device *pdev)
+{
+       struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
+       struct regulator_config config = {};
+       struct regulator_dev *rdev;
+       int i;
+
+       for (i = 0; i < MT6358_MAX_REGULATOR; i++) {
+               config.dev = &pdev->dev;
+               config.driver_data = &mt6358_regulators[i];
+               config.regmap = mt6397->regmap;
+
+               rdev = devm_regulator_register(&pdev->dev,
+                                              &mt6358_regulators[i].desc,
+                                              &config);
+               if (IS_ERR(rdev)) {
+                       dev_err(&pdev->dev, "failed to register %s\n",
+                               mt6358_regulators[i].desc.name);
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static const struct platform_device_id mt6358_platform_ids[] = {
+       {"mt6358-regulator", 0},
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6358_platform_ids);
+
+static struct platform_driver mt6358_regulator_driver = {
+       .driver = {
+               .name = "mt6358-regulator",
+       },
+       .probe = mt6358_regulator_probe,
+       .id_table = mt6358_platform_ids,
+};
+
+module_platform_driver(mt6358_regulator_driver);
+
+MODULE_AUTHOR("Hsin-Hsiung Wang <hsin-hsiung.wang@mediatek.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6358 PMIC");
+MODULE_LICENSE("GPL");
index b2c2d01d1637060192f0d1554f201d276ef1b901..db6c085da65ecf742daeca83824072ab70a3834b 100644 (file)
@@ -50,6 +50,20 @@ enum rpmh_regulator_type {
 #define PMIC4_BOB_MODE_AUTO                    2
 #define PMIC4_BOB_MODE_PWM                     3
 
+#define PMIC5_LDO_MODE_RETENTION               3
+#define PMIC5_LDO_MODE_LPM                     4
+#define PMIC5_LDO_MODE_HPM                     7
+
+#define PMIC5_SMPS_MODE_RETENTION              3
+#define PMIC5_SMPS_MODE_PFM                    4
+#define PMIC5_SMPS_MODE_AUTO                   6
+#define PMIC5_SMPS_MODE_PWM                    7
+
+#define PMIC5_BOB_MODE_PASS                    2
+#define PMIC5_BOB_MODE_PFM                     4
+#define PMIC5_BOB_MODE_AUTO                    6
+#define PMIC5_BOB_MODE_PWM                     7
+
 /**
  * struct rpmh_vreg_hw_data - RPMh regulator hardware configurations
  * @regulator_type:            RPMh accelerator type used to manage this
@@ -488,6 +502,14 @@ static const int pmic_mode_map_pmic4_ldo[REGULATOR_MODE_STANDBY + 1] = {
        [REGULATOR_MODE_FAST]    = -EINVAL,
 };
 
+static const int pmic_mode_map_pmic5_ldo[REGULATOR_MODE_STANDBY + 1] = {
+       [REGULATOR_MODE_INVALID] = -EINVAL,
+       [REGULATOR_MODE_STANDBY] = PMIC5_LDO_MODE_RETENTION,
+       [REGULATOR_MODE_IDLE]    = PMIC5_LDO_MODE_LPM,
+       [REGULATOR_MODE_NORMAL]  = PMIC5_LDO_MODE_HPM,
+       [REGULATOR_MODE_FAST]    = -EINVAL,
+};
+
 static unsigned int rpmh_regulator_pmic4_ldo_of_map_mode(unsigned int rpmh_mode)
 {
        unsigned int mode;
@@ -518,6 +540,14 @@ static const int pmic_mode_map_pmic4_smps[REGULATOR_MODE_STANDBY + 1] = {
        [REGULATOR_MODE_FAST]    = PMIC4_SMPS_MODE_PWM,
 };
 
+static const int pmic_mode_map_pmic5_smps[REGULATOR_MODE_STANDBY + 1] = {
+       [REGULATOR_MODE_INVALID] = -EINVAL,
+       [REGULATOR_MODE_STANDBY] = PMIC5_SMPS_MODE_RETENTION,
+       [REGULATOR_MODE_IDLE]    = PMIC5_SMPS_MODE_PFM,
+       [REGULATOR_MODE_NORMAL]  = PMIC5_SMPS_MODE_AUTO,
+       [REGULATOR_MODE_FAST]    = PMIC5_SMPS_MODE_PWM,
+};
+
 static unsigned int
 rpmh_regulator_pmic4_smps_of_map_mode(unsigned int rpmh_mode)
 {
@@ -552,6 +582,14 @@ static const int pmic_mode_map_pmic4_bob[REGULATOR_MODE_STANDBY + 1] = {
        [REGULATOR_MODE_FAST]    = PMIC4_BOB_MODE_PWM,
 };
 
+static const int pmic_mode_map_pmic5_bob[REGULATOR_MODE_STANDBY + 1] = {
+       [REGULATOR_MODE_INVALID] = -EINVAL,
+       [REGULATOR_MODE_STANDBY] = -EINVAL,
+       [REGULATOR_MODE_IDLE]    = PMIC5_BOB_MODE_PFM,
+       [REGULATOR_MODE_NORMAL]  = PMIC5_BOB_MODE_AUTO,
+       [REGULATOR_MODE_FAST]    = PMIC5_BOB_MODE_PWM,
+};
+
 static unsigned int rpmh_regulator_pmic4_bob_of_map_mode(unsigned int rpmh_mode)
 {
        unsigned int mode;
@@ -637,6 +675,72 @@ static const struct rpmh_vreg_hw_data pmic4_lvs = {
        /* LVS hardware does not support voltage or mode configuration. */
 };
 
+static const struct rpmh_vreg_hw_data pmic5_pldo = {
+       .regulator_type = VRM,
+       .ops = &rpmh_regulator_vrm_drms_ops,
+       .voltage_range = REGULATOR_LINEAR_RANGE(1504000, 0, 255, 8000),
+       .n_voltages = 256,
+       .hpm_min_load_uA = 10000,
+       .pmic_mode_map = pmic_mode_map_pmic5_ldo,
+       .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_pldo_lv = {
+       .regulator_type = VRM,
+       .ops = &rpmh_regulator_vrm_drms_ops,
+       .voltage_range = REGULATOR_LINEAR_RANGE(1504000, 0, 62, 8000),
+       .n_voltages = 63,
+       .hpm_min_load_uA = 10000,
+       .pmic_mode_map = pmic_mode_map_pmic5_ldo,
+       .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_nldo = {
+       .regulator_type = VRM,
+       .ops = &rpmh_regulator_vrm_drms_ops,
+       .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 123, 8000),
+       .n_voltages = 124,
+       .hpm_min_load_uA = 30000,
+       .pmic_mode_map = pmic_mode_map_pmic5_ldo,
+       .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_hfsmps510 = {
+       .regulator_type = VRM,
+       .ops = &rpmh_regulator_vrm_ops,
+       .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+       .n_voltages = 216,
+       .pmic_mode_map = pmic_mode_map_pmic5_smps,
+       .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
+       .regulator_type = VRM,
+       .ops = &rpmh_regulator_vrm_ops,
+       .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000),
+       .n_voltages = 264,
+       .pmic_mode_map = pmic_mode_map_pmic5_smps,
+       .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
+       .regulator_type = VRM,
+       .ops = &rpmh_regulator_vrm_ops,
+       .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
+       .n_voltages = 5,
+       .pmic_mode_map = pmic_mode_map_pmic5_smps,
+       .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_bob = {
+       .regulator_type = VRM,
+       .ops = &rpmh_regulator_vrm_bypass_ops,
+       .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 135, 32000),
+       .n_voltages = 136,
+       .pmic_mode_map = pmic_mode_map_pmic5_bob,
+       .of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
+};
+
 #define RPMH_VREG(_name, _resource_name, _hw_data, _supply_name) \
 { \
        .name           = _name, \
@@ -705,6 +809,75 @@ static const struct rpmh_vreg_init_data pm8005_vreg_data[] = {
        {},
 };
 
+static const struct rpmh_vreg_init_data pm8150_vreg_data[] = {
+       RPMH_VREG("smps1",  "smp%s1",  &pmic5_ftsmps510, "vdd-s1"),
+       RPMH_VREG("smps2",  "smp%s2",  &pmic5_ftsmps510, "vdd-s2"),
+       RPMH_VREG("smps3",  "smp%s3",  &pmic5_ftsmps510, "vdd-s3"),
+       RPMH_VREG("smps4",  "smp%s4",  &pmic5_hfsmps510,   "vdd-s4"),
+       RPMH_VREG("smps5",  "smp%s5",  &pmic5_hfsmps510,   "vdd-s5"),
+       RPMH_VREG("smps6",  "smp%s6",  &pmic5_ftsmps510, "vdd-s6"),
+       RPMH_VREG("smps7",  "smp%s7",  &pmic5_ftsmps510, "vdd-s7"),
+       RPMH_VREG("smps8",  "smp%s8",  &pmic5_ftsmps510, "vdd-s8"),
+       RPMH_VREG("smps9",  "smp%s9",  &pmic5_ftsmps510, "vdd-s9"),
+       RPMH_VREG("smps10", "smp%s10", &pmic5_ftsmps510, "vdd-s10"),
+       RPMH_VREG("ldo1",   "ldo%s1",  &pmic5_nldo,      "vdd-l1-l8-l11"),
+       RPMH_VREG("ldo2",   "ldo%s2",  &pmic5_pldo,      "vdd-l2-l10"),
+       RPMH_VREG("ldo3",   "ldo%s3",  &pmic5_nldo,      "vdd-l3-l4-l5-l18"),
+       RPMH_VREG("ldo4",   "ldo%s4",  &pmic5_nldo,      "vdd-l3-l4-l5-l18"),
+       RPMH_VREG("ldo5",   "ldo%s5",  &pmic5_nldo,      "vdd-l3-l4-l5-l18"),
+       RPMH_VREG("ldo6",   "ldo%s6",  &pmic5_nldo,      "vdd-l6-l9"),
+       RPMH_VREG("ldo7",   "ldo%s7",  &pmic5_pldo,      "vdd-l7-l12-l14-l15"),
+       RPMH_VREG("ldo8",   "ldo%s8",  &pmic5_nldo,      "vdd-l1-l8-l11"),
+       RPMH_VREG("ldo9",   "ldo%s9",  &pmic5_nldo,      "vdd-l6-l9"),
+       RPMH_VREG("ldo10",  "ldo%s10", &pmic5_pldo,      "vdd-l2-l10"),
+       RPMH_VREG("ldo11",  "ldo%s11", &pmic5_nldo,      "vdd-l1-l8-l11"),
+       RPMH_VREG("ldo12",  "ldo%s12", &pmic5_pldo_lv,   "vdd-l7-l12-l14-l15"),
+       RPMH_VREG("ldo13",  "ldo%s13", &pmic5_pldo,      "vdd-l13-l6-l17"),
+       RPMH_VREG("ldo14",  "ldo%s14", &pmic5_pldo_lv,   "vdd-l7-l12-l14-l15"),
+       RPMH_VREG("ldo15",  "ldo%s15", &pmic5_pldo_lv,   "vdd-l7-l12-l14-l15"),
+       RPMH_VREG("ldo16",  "ldo%s16", &pmic5_pldo,      "vdd-l13-l6-l17"),
+       RPMH_VREG("ldo17",  "ldo%s17", &pmic5_pldo,      "vdd-l13-l6-l17"),
+       RPMH_VREG("ldo18",  "ldo%s18", &pmic5_nldo,      "vdd-l3-l4-l5-l18"),
+       {},
+};
+
+static const struct rpmh_vreg_init_data pm8150l_vreg_data[] = {
+       RPMH_VREG("smps1",  "smp%s1",  &pmic5_ftsmps510, "vdd-s1"),
+       RPMH_VREG("smps2",  "smp%s2",  &pmic5_ftsmps510, "vdd-s2"),
+       RPMH_VREG("smps3",  "smp%s3",  &pmic5_ftsmps510, "vdd-s3"),
+       RPMH_VREG("smps4",  "smp%s4",  &pmic5_ftsmps510, "vdd-s4"),
+       RPMH_VREG("smps5",  "smp%s5",  &pmic5_ftsmps510, "vdd-s5"),
+       RPMH_VREG("smps6",  "smp%s6",  &pmic5_ftsmps510, "vdd-s6"),
+       RPMH_VREG("smps7",  "smp%s7",  &pmic5_ftsmps510, "vdd-s7"),
+       RPMH_VREG("smps8",  "smp%s8",  &pmic5_hfsmps510, "vdd-s8"),
+       RPMH_VREG("ldo1",   "ldo%s1",  &pmic5_pldo_lv,   "vdd-l1-l8"),
+       RPMH_VREG("ldo2",   "ldo%s2",  &pmic5_nldo,      "vdd-l2-l3"),
+       RPMH_VREG("ldo3",   "ldo%s3",  &pmic5_nldo,      "vdd-l2-l3"),
+       RPMH_VREG("ldo4",   "ldo%s4",  &pmic5_pldo,      "vdd-l4-l5-l6"),
+       RPMH_VREG("ldo5",   "ldo%s5",  &pmic5_pldo,      "vdd-l4-l5-l6"),
+       RPMH_VREG("ldo6",   "ldo%s6",  &pmic5_pldo,      "vdd-l4-l5-l6"),
+       RPMH_VREG("ldo7",   "ldo%s7",  &pmic5_pldo,      "vdd-l7-l11"),
+       RPMH_VREG("ldo8",   "ldo%s8",  &pmic5_pldo_lv,   "vdd-l1-l8-l11"),
+       RPMH_VREG("ldo9",   "ldo%s9",  &pmic5_pldo,      "vdd-l9-l10"),
+       RPMH_VREG("ldo10",  "ldo%s10", &pmic5_pldo,      "vdd-l9-l10"),
+       RPMH_VREG("ldo11",  "ldo%s11", &pmic5_pldo,      "vdd-l7-l11"),
+       RPMH_VREG("bob",    "bob%s1",  &pmic5_bob,       "vdd-bob"),
+       {},
+};
+
+static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
+       RPMH_VREG("smps1",  "smp%s1",  &pmic5_hfsmps510, "vdd-s1"),
+       RPMH_VREG("smps2",  "smp%s2",  &pmic5_hfsmps515, "vdd-s2"),
+       RPMH_VREG("ldo1",   "ldo%s1",  &pmic5_nldo,      "vdd-l1"),
+       RPMH_VREG("ldo2",   "ldo%s2",  &pmic5_nldo,      "vdd-l2"),
+       RPMH_VREG("ldo3",   "ldo%s3",  &pmic5_nldo,      "vdd-l3"),
+       RPMH_VREG("ldo4",   "ldo%s4",  &pmic5_nldo,      "vdd-l4"),
+       RPMH_VREG("ldo5",   "ldo%s5",  &pmic5_pldo,      "vdd-l5-l6"),
+       RPMH_VREG("ldo6",   "ldo%s6",  &pmic5_pldo,      "vdd-l5-l6"),
+       RPMH_VREG("ldo7",   "ldo%s7",  &pmic5_pldo_lv,   "vdd-l7"),
+       {},
+};
+
 static int rpmh_regulator_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -743,6 +916,22 @@ static int rpmh_regulator_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id rpmh_regulator_match_table[] = {
+       {
+               .compatible = "qcom,pm8005-rpmh-regulators",
+               .data = pm8005_vreg_data,
+       },
+       {
+               .compatible = "qcom,pm8009-rpmh-regulators",
+               .data = pm8009_vreg_data,
+       },
+       {
+               .compatible = "qcom,pm8150-rpmh-regulators",
+               .data = pm8150_vreg_data,
+       },
+       {
+               .compatible = "qcom,pm8150l-rpmh-regulators",
+               .data = pm8150l_vreg_data,
+       },
        {
                .compatible = "qcom,pm8998-rpmh-regulators",
                .data = pm8998_vreg_data,
@@ -751,10 +940,6 @@ static const struct of_device_id rpmh_regulator_match_table[] = {
                .compatible = "qcom,pmi8998-rpmh-regulators",
                .data = pmi8998_vreg_data,
        },
-       {
-               .compatible = "qcom,pm8005-rpmh-regulators",
-               .data = pm8005_vreg_data,
-       },
        {}
 };
 MODULE_DEVICE_TABLE(of, rpmh_regulator_match_table);
index e7af0c53d4496f5ddafdfad971ff1c9aa66dc4dc..61bd5ef0806c22f4eba2f530a9264d9ee7fdd26b 100644 (file)
@@ -606,7 +606,7 @@ static unsigned int rk8xx_regulator_of_map_mode(unsigned int mode)
        case 2:
                return REGULATOR_MODE_NORMAL;
        default:
-               return -EINVAL;
+               return REGULATOR_MODE_INVALID;
        }
 }
 
index 054baaadfdfddc39c83f7ffd1fa58c6a29f2eb7f..5bc00884cf512d4ebecfe470603a97f9a3827a43 100644 (file)
@@ -1226,7 +1226,7 @@ common_reg:
                        goto out;
                }
 
-               if (s2mps11->ext_control_gpiod[i]) {
+               if (config.ena_gpiod) {
                        ret = s2mps14_pmic_enable_ext_control(s2mps11,
                                        regulator);
                        if (ret < 0) {
index 04b732991d6955f3cf917644725be7fff756acd3..a0565daecacef3bbf9971f23286e7f0151bea89a 100644 (file)
@@ -205,7 +205,7 @@ static int slg51000_of_parse_cb(struct device_node *np,
        ena_gpiod = devm_gpiod_get_from_of_node(chip->dev, np,
                                                "enable-gpios", 0,
                                                gflags, "gpio-en-ldo");
-       if (ena_gpiod) {
+       if (!IS_ERR(ena_gpiod)) {
                config->ena_gpiod = ena_gpiod;
                devm_gpiod_unhinge(chip->dev, config->ena_gpiod);
        }
@@ -447,18 +447,19 @@ static int slg51000_i2c_probe(struct i2c_client *client,
 {
        struct device *dev = &client->dev;
        struct slg51000 *chip;
-       struct gpio_desc *cs_gpiod = NULL;
+       struct gpio_desc *cs_gpiod;
        int error, ret;
 
        chip = devm_kzalloc(dev, sizeof(struct slg51000), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;
 
-       cs_gpiod = devm_gpiod_get_from_of_node(dev, dev->of_node,
-                                              "dlg,cs-gpios", 0,
-                                              GPIOD_OUT_HIGH
-                                              | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
-                                              "slg51000-cs");
+       cs_gpiod = devm_gpiod_get_optional(dev, "dlg,cs",
+                                          GPIOD_OUT_HIGH |
+                                               GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+       if (IS_ERR(cs_gpiod))
+               return PTR_ERR(cs_gpiod);
+
        if (cs_gpiod) {
                dev_info(dev, "Found chip selector property\n");
                chip->cs_gpiod = cs_gpiod;
index 2a897666c65064b42ac69f49c32be3f1d5a21454..03f162ffd1440f7b0aad52b62dbdd52b75b6f56b 100644 (file)
@@ -20,7 +20,6 @@
 #define STM32MP1_SYSCFG_EN_BOOSTER_MASK        BIT(8)
 
 static const struct regulator_ops stm32h7_booster_ops = {
-       .list_voltage   = regulator_list_voltage_linear,
        .enable         = regulator_enable_regmap,
        .disable        = regulator_disable_regmap,
        .is_enabled     = regulator_is_enabled_regmap,
@@ -31,7 +30,6 @@ static const struct regulator_desc stm32h7_booster_desc = {
        .supply_name = "vdda",
        .n_voltages = 1,
        .type = REGULATOR_VOLTAGE,
-       .min_uV = 3300000,
        .fixed_uV = 3300000,
        .ramp_delay = 66000, /* up to 50us to stabilize */
        .ops = &stm32h7_booster_ops,
@@ -53,7 +51,6 @@ static int stm32mp1_booster_disable(struct regulator_dev *rdev)
 }
 
 static const struct regulator_ops stm32mp1_booster_ops = {
-       .list_voltage   = regulator_list_voltage_linear,
        .enable         = stm32mp1_booster_enable,
        .disable        = stm32mp1_booster_disable,
        .is_enabled     = regulator_is_enabled_regmap,
@@ -64,7 +61,6 @@ static const struct regulator_desc stm32mp1_booster_desc = {
        .supply_name = "vdda",
        .n_voltages = 1,
        .type = REGULATOR_VOLTAGE,
-       .min_uV = 3300000,
        .fixed_uV = 3300000,
        .ramp_delay = 66000,
        .ops = &stm32mp1_booster_ops,
diff --git a/drivers/regulator/sy8824x.c b/drivers/regulator/sy8824x.c
new file mode 100644 (file)
index 0000000..92adb4f
--- /dev/null
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// SY8824C/SY8824E regulator driver
+//
+// Copyright (C) 2019 Synaptics Incorporated
+//
+// Author: Jisheng Zhang <jszhang@kernel.org>
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+#define SY8824C_BUCK_EN                (1 << 7)
+#define SY8824C_MODE           (1 << 6)
+
+struct sy8824_config {
+       /* registers */
+       unsigned int vol_reg;
+       unsigned int mode_reg;
+       unsigned int enable_reg;
+       /* Voltage range and step (linear) */
+       unsigned int vsel_min;
+       unsigned int vsel_step;
+       unsigned int vsel_count;
+};
+
+struct sy8824_device_info {
+       struct device *dev;
+       struct regulator_desc desc;
+       struct regulator_init_data *regulator;
+       const struct sy8824_config *cfg;
+};
+
+static int sy8824_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+       struct sy8824_device_info *di = rdev_get_drvdata(rdev);
+       const struct sy8824_config *cfg = di->cfg;
+
+       switch (mode) {
+       case REGULATOR_MODE_FAST:
+               regmap_update_bits(rdev->regmap, cfg->mode_reg,
+                                  SY8824C_MODE, SY8824C_MODE);
+               break;
+       case REGULATOR_MODE_NORMAL:
+               regmap_update_bits(rdev->regmap, cfg->mode_reg,
+                                  SY8824C_MODE, 0);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static unsigned int sy8824_get_mode(struct regulator_dev *rdev)
+{
+       struct sy8824_device_info *di = rdev_get_drvdata(rdev);
+       const struct sy8824_config *cfg = di->cfg;
+       u32 val;
+       int ret = 0;
+
+       ret = regmap_read(rdev->regmap, cfg->mode_reg, &val);
+       if (ret < 0)
+               return ret;
+       if (val & SY8824C_MODE)
+               return REGULATOR_MODE_FAST;
+       else
+               return REGULATOR_MODE_NORMAL;
+}
+
+static const struct regulator_ops sy8824_regulator_ops = {
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .map_voltage = regulator_map_voltage_linear,
+       .list_voltage = regulator_list_voltage_linear,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .set_mode = sy8824_set_mode,
+       .get_mode = sy8824_get_mode,
+};
+
+static int sy8824_regulator_register(struct sy8824_device_info *di,
+                       struct regulator_config *config)
+{
+       struct regulator_desc *rdesc = &di->desc;
+       const struct sy8824_config *cfg = di->cfg;
+       struct regulator_dev *rdev;
+
+       rdesc->name = "sy8824-reg";
+       rdesc->supply_name = "vin";
+       rdesc->ops = &sy8824_regulator_ops;
+       rdesc->type = REGULATOR_VOLTAGE;
+       rdesc->n_voltages = cfg->vsel_count;
+       rdesc->enable_reg = cfg->enable_reg;
+       rdesc->enable_mask = SY8824C_BUCK_EN;
+       rdesc->min_uV = cfg->vsel_min;
+       rdesc->uV_step = cfg->vsel_step;
+       rdesc->vsel_reg = cfg->vol_reg;
+       rdesc->vsel_mask = cfg->vsel_count - 1;
+       rdesc->owner = THIS_MODULE;
+
+       rdev = devm_regulator_register(di->dev, &di->desc, config);
+       return PTR_ERR_OR_ZERO(rdev);
+}
+
+static const struct regmap_config sy8824_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+};
+
+static int sy8824_i2c_probe(struct i2c_client *client,
+                           const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct device_node *np = dev->of_node;
+       struct sy8824_device_info *di;
+       struct regulator_config config = { };
+       struct regmap *regmap;
+       int ret;
+
+       di = devm_kzalloc(dev, sizeof(struct sy8824_device_info), GFP_KERNEL);
+       if (!di)
+               return -ENOMEM;
+
+       di->regulator = of_get_regulator_init_data(dev, np, &di->desc);
+       if (!di->regulator) {
+               dev_err(dev, "Platform data not found!\n");
+               return -EINVAL;
+       }
+
+       di->dev = dev;
+       di->cfg = of_device_get_match_data(dev);
+
+       regmap = devm_regmap_init_i2c(client, &sy8824_regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(dev, "Failed to allocate regmap!\n");
+               return PTR_ERR(regmap);
+       }
+       i2c_set_clientdata(client, di);
+
+       config.dev = di->dev;
+       config.init_data = di->regulator;
+       config.regmap = regmap;
+       config.driver_data = di;
+       config.of_node = np;
+
+       ret = sy8824_regulator_register(di, &config);
+       if (ret < 0)
+               dev_err(dev, "Failed to register regulator!\n");
+       return ret;
+}
+
+static const struct sy8824_config sy8824c_cfg = {
+       .vol_reg = 0x00,
+       .mode_reg = 0x00,
+       .enable_reg = 0x00,
+       .vsel_min = 762500,
+       .vsel_step = 12500,
+       .vsel_count = 64,
+};
+
+static const struct sy8824_config sy8824e_cfg = {
+       .vol_reg = 0x00,
+       .mode_reg = 0x00,
+       .enable_reg = 0x00,
+       .vsel_min = 700000,
+       .vsel_step = 12500,
+       .vsel_count = 64,
+};
+
+static const struct sy8824_config sy20276_cfg = {
+       .vol_reg = 0x00,
+       .mode_reg = 0x01,
+       .enable_reg = 0x01,
+       .vsel_min = 600000,
+       .vsel_step = 10000,
+       .vsel_count = 128,
+};
+
+static const struct sy8824_config sy20278_cfg = {
+       .vol_reg = 0x00,
+       .mode_reg = 0x01,
+       .enable_reg = 0x01,
+       .vsel_min = 762500,
+       .vsel_step = 12500,
+       .vsel_count = 64,
+};
+
+static const struct of_device_id sy8824_dt_ids[] = {
+       {
+               .compatible = "silergy,sy8824c",
+               .data = &sy8824c_cfg
+       },
+       {
+               .compatible = "silergy,sy8824e",
+               .data = &sy8824e_cfg
+       },
+       {
+               .compatible = "silergy,sy20276",
+               .data = &sy20276_cfg
+       },
+       {
+               .compatible = "silergy,sy20278",
+               .data = &sy20278_cfg
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(of, sy8824_dt_ids);
+
+static const struct i2c_device_id sy8824_id[] = {
+       { "sy8824", },
+       { },
+};
+MODULE_DEVICE_TABLE(i2c, sy8824_id);
+
+static struct i2c_driver sy8824_regulator_driver = {
+       .driver = {
+               .name = "sy8824-regulator",
+               .of_match_table = of_match_ptr(sy8824_dt_ids),
+       },
+       .probe = sy8824_i2c_probe,
+       .id_table = sy8824_id,
+};
+module_i2c_driver(sy8824_regulator_driver);
+
+MODULE_AUTHOR("Jisheng Zhang <jszhang@kernel.org>");
+MODULE_DESCRIPTION("SY8824C/SY8824E regulator driver");
+MODULE_LICENSE("GPL v2");
index 6e22f5ebba2e84358839731452e6b61d5549620e..e302bd01a0849e261e40b5024ff6836f449557ea 100644 (file)
@@ -138,7 +138,7 @@ static int tps65132_of_parse_cb(struct device_node *np,
 
        rpdata->en_gpiod = devm_fwnode_get_index_gpiod_from_child(tps->dev,
                                        "enable", 0, &np->fwnode, 0, "enable");
-       if (IS_ERR(rpdata->en_gpiod)) {
+       if (IS_ERR_OR_NULL(rpdata->en_gpiod)) {
                ret = PTR_ERR(rpdata->en_gpiod);
 
                /* Ignore the error other than probe defer */
@@ -150,7 +150,7 @@ static int tps65132_of_parse_cb(struct device_node *np,
        rpdata->act_dis_gpiod = devm_fwnode_get_index_gpiod_from_child(
                                        tps->dev, "active-discharge", 0,
                                        &np->fwnode, 0, "active-discharge");
-       if (IS_ERR(rpdata->act_dis_gpiod)) {
+       if (IS_ERR_OR_NULL(rpdata->act_dis_gpiod)) {
                ret = PTR_ERR(rpdata->act_dis_gpiod);
 
                /* Ignore the error other than probe defer */
index 6fa15b2d6fb33ef7353a8e5c3b37fe0f61ab825b..866b4dd01da9ea0911da113969f3c2a453aff6d0 100644 (file)
@@ -359,6 +359,17 @@ static const u16 VINTANA2_VSEL_table[] = {
        2500, 2750,
 };
 
+/* 600 mV to 1450 mV in 12.5 mV steps */
+static const struct regulator_linear_range VDD1_ranges[] = {
+       REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500)
+};
+
+/* 600 mV to 1450 mV in 12.5 mV steps; everything above = 1500 mV */
+static const struct regulator_linear_range VDD2_ranges[] = {
+       REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500),
+       REGULATOR_LINEAR_RANGE(1500000, 69, 69, 12500)
+};
+
 static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
 {
        struct twlreg_info      *info = rdev_get_drvdata(rdev);
@@ -427,6 +438,8 @@ static int twl4030smps_get_voltage(struct regulator_dev *rdev)
 }
 
 static const struct regulator_ops twl4030smps_ops = {
+       .list_voltage   = regulator_list_voltage_linear_range,
+
        .set_voltage    = twl4030smps_set_voltage,
        .get_voltage    = twl4030smps_get_voltage,
 };
@@ -466,7 +479,8 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
                }, \
        }
 
-#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
+#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf, \
+               n_volt) \
 static const struct twlreg_info TWL4030_INFO_##label = { \
        .base = offset, \
        .id = num, \
@@ -479,6 +493,9 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
                .owner = THIS_MODULE, \
                .enable_time = turnon_delay, \
                .of_map_mode = twl4030reg_map_mode, \
+               .n_voltages = n_volt, \
+               .n_linear_ranges = ARRAY_SIZE(label ## _ranges), \
+               .linear_ranges = label ## _ranges, \
                }, \
        }
 
@@ -518,8 +535,8 @@ TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00);
 TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08);
 TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08);
 TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08);
-TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08);
-TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08);
+TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08, 68);
+TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08, 69);
 /* VUSBCP is managed *only* by the USB subchip */
 TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);
 TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);
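
The new VDD1_ranges/VDD2_ranges tables plug into .list_voltage = regulator_list_voltage_linear_range, which walks the registered ranges to translate a selector into microvolts (for VDD2, selector 69 reports the 1.5 V plateau). A hedged, self-contained sketch of how such a multi-range descriptor fits together; the my_smps_* names and the selector count are illustrative, not taken from the patch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

static const struct regulator_linear_range my_smps_ranges[] = {
        REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500),   /* 0.6 V .. 1.45 V     */
        REGULATOR_LINEAR_RANGE(1500000, 69, 69, 12500), /* selector 69 = 1.5 V */
};

static const struct regulator_ops my_smps_ops = {
        .list_voltage   = regulator_list_voltage_linear_range,
};

static const struct regulator_desc my_smps_desc = {
        .name            = "my-smps",
        .n_voltages      = 70,          /* covers selectors 0..69 */
        .linear_ranges   = my_smps_ranges,
        .n_linear_ranges = ARRAY_SIZE(my_smps_ranges),
        .type            = REGULATOR_VOLTAGE,
        .owner           = THIS_MODULE,
        .ops             = &my_smps_ops,
};
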
index 5fe208b381eb26ab8ae55d2f6be3c0fc656c45ce..b8100c3cedad3fc91bebe9c2f09fb09b85d15b28 100644 (file)
@@ -57,6 +57,9 @@ struct twlreg_info {
 #define VREG_BC_PROC           3
 #define VREG_BC_CLK_RST                4
 
+/* TWL6030 LDO register values for VREG_VOLTAGE */
+#define TWL6030_VREG_VOLTAGE_WR_S   BIT(7)
+
 /* TWL6030 LDO register values for CFG_STATE */
 #define TWL6030_CFG_STATE_OFF  0x00
 #define TWL6030_CFG_STATE_ON   0x01
@@ -68,9 +71,10 @@ struct twlreg_info {
 #define TWL6030_CFG_STATE_APP(v)       (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
                                                TWL6030_CFG_STATE_APP_SHIFT)
 
-/* Flags for SMPS Voltage reading */
+/* Flags for SMPS Voltage reading and LDO reading */
 #define SMPS_OFFSET_EN         BIT(0)
 #define SMPS_EXTENDED_EN       BIT(1)
+#define TWL_6030_WARM_RESET    BIT(3)
 
 /* twl6032 SMPS EPROM values */
 #define TWL6030_SMPS_OFFSET            0xB0
@@ -250,6 +254,9 @@ twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
 {
        struct twlreg_info      *info = rdev_get_drvdata(rdev);
 
+       if (info->flags & TWL_6030_WARM_RESET)
+               selector |= TWL6030_VREG_VOLTAGE_WR_S;
+
        return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE,
                            selector);
 }
@@ -259,6 +266,9 @@ static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev)
        struct twlreg_info      *info = rdev_get_drvdata(rdev);
        int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE);
 
+       if (info->flags & TWL_6030_WARM_RESET)
+               vsel &= ~TWL6030_VREG_VOLTAGE_WR_S;
+
        return vsel;
 }
 
@@ -665,14 +675,14 @@ static int twlreg_probe(struct platform_device *pdev)
        struct regulation_constraints   *c;
        struct regulator_dev            *rdev;
        struct regulator_config         config = { };
+       struct device_node              *np = pdev->dev.of_node;
 
        template = of_device_get_match_data(&pdev->dev);
        if (!template)
                return -ENODEV;
 
        id = template->desc.id;
-       initdata = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
-                                               &template->desc);
+       initdata = of_get_regulator_init_data(&pdev->dev, np, &template->desc);
        if (!initdata)
                return -EINVAL;
 
@@ -710,10 +720,13 @@ static int twlreg_probe(struct platform_device *pdev)
                break;
        }
 
+       if (of_get_property(np, "ti,retain-on-reset", NULL))
+               info->flags |= TWL_6030_WARM_RESET;
+
        config.dev = &pdev->dev;
        config.init_data = initdata;
        config.driver_data = info;
-       config.of_node = pdev->dev.of_node;
+       config.of_node = np;
 
        rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
        if (IS_ERR(rdev)) {
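
The effect of the new "ti,retain-on-reset" handling above is purely an encoding detail of the VREG_VOLTAGE register: bit 7 is ORed in on every write and masked off on every read, so the selector value seen by the regulator core round-trips unchanged. Sketched in isolation, with hypothetical helper and macro names:

#include <linux/bits.h>
#include <linux/types.h>

#define MY_VREG_VOLTAGE_WR_S    BIT(7)

static u8 encode_sel(u8 sel)
{
        return sel | MY_VREG_VOLTAGE_WR_S;      /* written to the PMIC */
}

static u8 decode_sel(u8 raw)
{
        return raw & ~MY_VREG_VOLTAGE_WR_S;     /* returned to the core */
}
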
index 9026d5a3e964811cd1c7108903dc850c81b7e6ed..2311924c31039423652b71150614a617c0031504 100644 (file)
@@ -185,6 +185,10 @@ static const struct of_device_id uniphier_regulator_match[] = {
                .compatible = "socionext,uniphier-pro4-usb3-regulator",
                .data = &uniphier_pro4_usb3_data,
        },
+       {
+               .compatible = "socionext,uniphier-pro5-usb3-regulator",
+               .data = &uniphier_pro4_usb3_data,
+       },
        {
                .compatible = "socionext,uniphier-pxs2-usb3-regulator",
                .data = &uniphier_pxs2_usb3_data,
index 9c3310c4d61d670426bfa5a842b5f2c750119f04..6502b148541e3337e9044ed52d66f2d35fc9cce4 100644 (file)
@@ -4374,6 +4374,10 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
            get_user(req_len, &ureq->hdr.req_len))
                return -EFAULT;
 
+       /* Sanitize user input, to avoid overflows in iob size calculation: */
+       if (req_len > QETH_BUFSIZE)
+               return -EINVAL;
+
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
        if (!iob)
                return -ENOMEM;
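
The added check bounds the user-supplied req_len before it feeds the iob size calculation. The same defensive pattern, sketched generically; the helper name and max_len limit are hypothetical:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static void *copy_bounded_request(const void __user *udata, size_t req_len,
                                  size_t max_len)
{
        void *buf;

        if (req_len > max_len)          /* reject oversized user input early */
                return ERR_PTR(-EINVAL);

        buf = kmalloc(req_len, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(buf, udata, req_len)) {
                kfree(buf);
                return ERR_PTR(-EFAULT);
        }

        return buf;
}
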
index 8d8c495b5b60c87506526aeee498435b9950273b..d65558619ab057f0670af6187f8f5c353b749c06 100644 (file)
@@ -5715,7 +5715,7 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
  *      0    = Set nr_hw_queues by the number of CPUs or HW queues.
  *      1,128 = Manually specify the maximum nr_hw_queue value to be set,
  *
- * Value range is [0,128]. Default value is 8.
+ * Value range is [0,256]. Default value is 8.
  */
 LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
            LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
index 329f7aa7e169bb2e62d431360f799bab7f0d0b7f..a81ef0293696d34efe7da7e73ed5e0d738fdc826 100644 (file)
@@ -46,7 +46,7 @@
 
 /* FCP MQ queue count limiting */
 #define LPFC_FCP_MQ_THRESHOLD_MIN      0
-#define LPFC_FCP_MQ_THRESHOLD_MAX      128
+#define LPFC_FCP_MQ_THRESHOLD_MAX      256
 #define LPFC_FCP_MQ_THRESHOLD_DEF      8
 
 /* Common buffer size to accommodate SCSI and NVME IO buffers */
index de2e62c3310a37d006d50ec223da1883d97a4a6d..e3eb19b85fa4b358950f702d4a74f35889ee84eb 100644 (file)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
+if ARCH_IXP4XX || COMPILE_TEST
+
 menu "IXP4xx SoC drivers"
 
 config IXP4XX_QMGR
@@ -15,3 +17,5 @@ config IXP4XX_NPE
          and is automatically selected by Ethernet and HSS drivers.
 
 endmenu
+
+endif
index d5cf953b4337020acad2ee5fd83973640f50fe13..7d622ea1274ebc8aeb240041225a7a519df07468 100644 (file)
@@ -630,6 +630,9 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
        struct geni_wrapper *wrapper = se->wrapper;
        u32 val;
 
+       if (!wrapper)
+               return -EINVAL;
+
        *iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(wrapper->dev, *iova))
                return -EIO;
@@ -663,6 +666,9 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
        struct geni_wrapper *wrapper = se->wrapper;
        u32 val;
 
+       if (!wrapper)
+               return -EINVAL;
+
        *iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(wrapper->dev, *iova))
                return -EIO;
index bb77c220b6f82b34c81cdf9f2e43bef82bef10b3..ccc6d53fe788ff754a5a402d65f46985567a176a 100644 (file)
@@ -141,7 +141,7 @@ static int __init am43xx_map_gic(void)
 }
 
 #ifdef CONFIG_SUSPEND
-struct wkup_m3_wakeup_src rtc_wake_src(void)
+static struct wkup_m3_wakeup_src rtc_wake_src(void)
 {
        u32 i;
 
@@ -157,7 +157,7 @@ struct wkup_m3_wakeup_src rtc_wake_src(void)
        return rtc_ext_wakeup;
 }
 
-int am33xx_rtc_only_idle(unsigned long wfi_flags)
+static int am33xx_rtc_only_idle(unsigned long wfi_flags)
 {
        omap_rtc_power_off_program(&omap_rtc->dev);
        am33xx_do_wfi_sram(wfi_flags);
@@ -252,7 +252,7 @@ static int am33xx_pm_begin(suspend_state_t state)
        if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) {
                nvmem = devm_nvmem_device_get(&omap_rtc->dev,
                                              "omap_rtc_scratch0");
-               if (nvmem)
+               if (!IS_ERR(nvmem))
                        nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
                                           (void *)&rtc_magic_val);
                rtc_only_idle = 1;
@@ -278,9 +278,12 @@ static void am33xx_pm_end(void)
        struct nvmem_device *nvmem;
 
        nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0");
+       if (IS_ERR(nvmem))
+               return;
+
        m3_ipc->ops->finish_low_power(m3_ipc);
        if (rtc_only_idle) {
-               if (retrigger_irq)
+               if (retrigger_irq) {
                        /*
                         * 32 bits of Interrupt Set-Pending correspond to 32
                         * interrupts. Compute the bit offset of the
@@ -291,8 +294,10 @@ static void am33xx_pm_end(void)
                        writel_relaxed(1 << (retrigger_irq & 31),
                                       gic_dist_base + GIC_INT_SET_PENDING_BASE
                                       + retrigger_irq / 32 * 4);
-                       nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
-                                          (void *)&val);
+               }
+
+               nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
+                                  (void *)&val);
        }
 
        rtc_only_idle = 0;
@@ -415,7 +420,7 @@ static int am33xx_pm_rtc_setup(void)
 
                nvmem = devm_nvmem_device_get(&omap_rtc->dev,
                                              "omap_rtc_scratch0");
-               if (nvmem) {
+               if (!IS_ERR(nvmem)) {
                        nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
                                          4, (void *)&rtc_magic_val);
                        if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC)
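
devm_nvmem_device_get() reports failure with ERR_PTR() rather than NULL, so the old if (nvmem) tests could not catch errors; the hunks above switch them to IS_ERR(). A short sketch of the corrected pattern, with a hypothetical helper name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/types.h>

static int read_rtc_scratch(struct device *dev, unsigned int offset, u32 *val)
{
        struct nvmem_device *nvmem;
        int ret;

        nvmem = devm_nvmem_device_get(dev, "omap_rtc_scratch0");
        if (IS_ERR(nvmem))              /* never NULL on failure */
                return PTR_ERR(nvmem);

        ret = nvmem_device_read(nvmem, offset, sizeof(*val), val);
        return ret < 0 ? ret : 0;
}
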
index 3a1d8f1170de0c6ab1006f15fbf3e559b2bf94ff..6ee514fd09202dfe154ac0760beafd259959ce89 100644 (file)
@@ -433,6 +433,16 @@ config SPI_MT7621
        help
          This selects a driver for the MediaTek MT7621 SPI Controller.
 
+config SPI_NPCM_FIU
+       tristate "Nuvoton NPCM FLASH Interface Unit"
+       depends on ARCH_NPCM || COMPILE_TEST
+       depends on OF && HAS_IOMEM
+       help
+         This enables support for the Flash Interface Unit SPI controller
+         in master mode.
+         This driver does not support generic SPI. The implementation only
+         supports the spi-mem interface.
+
 config SPI_NPCM_PSPI
        tristate "Nuvoton NPCM PSPI Controller"
        depends on ARCH_NPCM || COMPILE_TEST
index 63dcab552bcbafb6a484fb3a23d60c4f803a0b3b..adbebee93a75a5063364cc9b624d25880945add0 100644 (file)
@@ -63,6 +63,7 @@ obj-$(CONFIG_SPI_MT65XX)                += spi-mt65xx.o
 obj-$(CONFIG_SPI_MT7621)               += spi-mt7621.o
 obj-$(CONFIG_SPI_MXIC)                 += spi-mxic.o
 obj-$(CONFIG_SPI_MXS)                  += spi-mxs.o
+obj-$(CONFIG_SPI_NPCM_FIU)             += spi-npcm-fiu.o
 obj-$(CONFIG_SPI_NPCM_PSPI)            += spi-npcm-pspi.o
 obj-$(CONFIG_SPI_NUC900)               += spi-nuc900.o
 obj-$(CONFIG_SPI_NXP_FLEXSPI)          += spi-nxp-fspi.o
index 6a7d7b553d956c4d2ba7c60a92092766486fb200..fd8007ebb14589ba6ef0d5419878ab5e573687ae 100644 (file)
@@ -526,7 +526,6 @@ static int atmel_qspi_probe(struct platform_device *pdev)
        /* Request the IRQ */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
-               dev_err(&pdev->dev, "missing IRQ\n");
                err = irq;
                goto disable_qspick;
        }
index ea160f117f883aa205896c0cf653a65a40d03706..41d71ba7fd3284b2eea2c0d1891909c8862d21eb 100644 (file)
@@ -170,7 +170,6 @@ static int altera_spi_probe(struct platform_device *pdev)
 {
        struct altera_spi *hw;
        struct spi_master *master;
-       struct resource *res;
        int err = -ENODEV;
 
        master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi));
@@ -189,8 +188,7 @@ static int altera_spi_probe(struct platform_device *pdev)
        hw = spi_master_get_devdata(master);
 
        /* find and map our resources */
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       hw->base = devm_ioremap_resource(&pdev->dev, res);
+       hw->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hw->base)) {
                err = PTR_ERR(hw->base);
                goto exit;
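
Several probe functions in this series also fold the platform_get_resource() + devm_ioremap_resource() pair into devm_platform_ioremap_resource(), which performs both steps for the given resource index. Sketched with a hypothetical wrapper:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_map_registers(struct platform_device *pdev,
                                 void __iomem **base)
{
        /* equivalent to platform_get_resource(pdev, IORESOURCE_MEM, 0)
         * followed by devm_ioremap_resource()
         */
        *base = devm_platform_ioremap_resource(pdev, 0);

        return PTR_ERR_OR_ZERO(*base);
}
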
index 032888344822c9b85969d534012288f7ff1d7b60..e450ee17787f0ecebb2c88e262722ea088736563 100644 (file)
@@ -817,7 +817,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct device_node *of_node = dev->of_node;
-       struct resource *res;
        struct spi_master *master;
        struct a3700_spi *spi;
        u32 num_cs = 0;
@@ -855,8 +854,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
 
        spi->master = master;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       spi->base = devm_ioremap_resource(dev, res);
+       spi->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(spi->base)) {
                ret = PTR_ERR(spi->base);
                goto error;
@@ -864,7 +862,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
-               dev_err(dev, "could not get irq: %d\n", irq);
                ret = -ENXIO;
                goto error;
        }
index 032a615e4ccd852f8ed6e658910062ce7f17679d..eb9a243e95265c3fb0c4780ebbbf7bbc6a65a53b 100644 (file)
@@ -139,7 +139,6 @@ static int ath79_spi_probe(struct platform_device *pdev)
        struct spi_master *master;
        struct ath79_spi *sp;
        struct ath79_spi_platform_data *pdata;
-       struct resource *r;
        unsigned long rate;
        int ret;
 
@@ -169,8 +168,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
        sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
        sp->bitbang.flags = SPI_CS_HIGH;
 
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       sp->base = devm_ioremap_resource(&pdev->dev, r);
+       sp->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(sp->base)) {
                ret = PTR_ERR(sp->base);
                goto err_put_master;
index f00b367523cd7722d1aa62f70a43ab4b30e82743..acf318e7330c4ae851385e8f9a641761cd5c504f 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/pm_runtime.h>
+#include <trace/events/spi.h>
 
 /* SPI register offsets */
 #define SPI_CR                                 0x0000
@@ -1409,9 +1410,13 @@ static int atmel_spi_transfer_one_message(struct spi_master *master,
        msg->actual_length = 0;
 
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               trace_spi_transfer_start(msg, xfer);
+
                ret = atmel_spi_one_transfer(master, msg, xfer);
                if (ret)
                        goto msg_done;
+
+               trace_spi_transfer_stop(msg, xfer);
        }
 
        if (as->use_pdc)
index 3b1833e6c7ad0d54e89516349bd8d47eee3843fb..74842f6019ed13a5640a7e451cca79584f8cb3bf 100644 (file)
@@ -460,7 +460,6 @@ static int spi_engine_probe(struct platform_device *pdev)
        struct spi_engine *spi_engine;
        struct spi_master *master;
        unsigned int version;
-       struct resource *res;
        int irq;
        int ret;
 
@@ -480,8 +479,7 @@ static int spi_engine_probe(struct platform_device *pdev)
 
        spin_lock_init(&spi_engine->lock);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       spi_engine->base = devm_ioremap_resource(&pdev->dev, res);
+       spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(spi_engine->base)) {
                ret = PTR_ERR(spi_engine->base);
                goto err_put_master;
index 902bdbfedea8752099c1772b4026b61fb4fa0464..7a3531856491269f15adf6926b6051908df74987 100644 (file)
@@ -343,7 +343,7 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
 {
        int bpc = 0, bpp = 0;
        u8 command = op->cmd.opcode;
-       int width  = op->cmd.buswidth ? op->cmd.buswidth : SPI_NBITS_SINGLE;
+       int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
        int addrlen = op->addr.nbytes;
        int flex_mode = 1;
 
@@ -897,6 +897,7 @@ static int bcm_qspi_transfer_one(struct spi_master *master,
 
                read_from_hw(qspi, slots);
        }
+       bcm_qspi_enable_bspi(qspi);
 
        return 0;
 }
@@ -981,7 +982,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
        if (mspi_read)
                return bcm_qspi_mspi_exec_mem_op(spi, op);
 
-       ret = bcm_qspi_bspi_set_mode(qspi, op, -1);
+       ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
 
        if (!ret)
                ret = bcm_qspi_bspi_exec_mem_op(spi, op);
index 840b1b8ff3dcbf644eff61ad239d9917dcae1406..b4070c0de3dff56fd172271a98109351c28824d3 100644 (file)
@@ -25,7 +25,9 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h> /* FIXME: using chip internals */
+#include <linux/gpio/driver.h> /* FIXME: using chip internals */
 #include <linux/of_irq.h>
 #include <linux/spi/spi.h>
 
@@ -66,6 +68,7 @@
 #define BCM2835_SPI_FIFO_SIZE          64
 #define BCM2835_SPI_FIFO_SIZE_3_4      48
 #define BCM2835_SPI_DMA_MIN_LENGTH     96
+#define BCM2835_SPI_NUM_CS             3   /* raise as necessary */
 #define BCM2835_SPI_MODE_BITS  (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
                                | SPI_NO_CS | SPI_3WIRE)
 
@@ -92,7 +95,8 @@ MODULE_PARM_DESC(polling_limit_us,
  * @rx_prologue: bytes received without DMA if first RX sglist entry's
  *     length is not a multiple of 4 (to overcome hardware limitation)
  * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
- * @dma_pending: whether a DMA transfer is in progress
+ * @prepare_cs: precalculated CS register value for ->prepare_message()
+ *     (uses slave-specific clock polarity and phase settings)
  * @debugfs_dir: the debugfs directory - needed to remove debugfs when
  *      unloading the module
  * @count_transfer_polling: count of how often polling mode is used
@@ -102,6 +106,19 @@ MODULE_PARM_DESC(polling_limit_us,
  *      These are counted as well in @count_transfer_polling and
  *      @count_transfer_irq
  * @count_transfer_dma: count how often dma mode is used
+ * @chip_select: SPI slave currently selected
+ *     (used by bcm2835_spi_dma_tx_done() to write @clear_rx_cs)
+ * @tx_dma_active: whether a TX DMA descriptor is in progress
+ * @rx_dma_active: whether a RX DMA descriptor is in progress
+ *     (used by bcm2835_spi_dma_tx_done() to handle a race)
+ * @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
+ *     (cyclically copies from zero page to TX FIFO)
+ * @fill_tx_addr: bus address of zero page
+ * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
+ *     (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
+ * @clear_rx_addr: bus address of @clear_rx_cs
+ * @clear_rx_cs: precalculated CS register value to clear RX FIFO
+ *     (uses slave-specific clock polarity and phase settings)
  */
 struct bcm2835_spi {
        void __iomem *regs;
@@ -115,13 +132,22 @@ struct bcm2835_spi {
        int tx_prologue;
        int rx_prologue;
        unsigned int tx_spillover;
-       unsigned int dma_pending;
+       u32 prepare_cs[BCM2835_SPI_NUM_CS];
 
        struct dentry *debugfs_dir;
        u64 count_transfer_polling;
        u64 count_transfer_irq;
        u64 count_transfer_irq_after_polling;
        u64 count_transfer_dma;
+
+       u8 chip_select;
+       unsigned int tx_dma_active;
+       unsigned int rx_dma_active;
+       struct dma_async_tx_descriptor *fill_tx_desc;
+       dma_addr_t fill_tx_addr;
+       struct dma_async_tx_descriptor *clear_rx_desc[BCM2835_SPI_NUM_CS];
+       dma_addr_t clear_rx_addr;
+       u32 clear_rx_cs[BCM2835_SPI_NUM_CS] ____cacheline_aligned;
 };
 
 #if defined(CONFIG_DEBUG_FS)
@@ -319,6 +345,13 @@ static void bcm2835_spi_reset_hw(struct spi_controller *ctlr)
                BCM2835_SPI_CS_INTD |
                BCM2835_SPI_CS_DMAEN |
                BCM2835_SPI_CS_TA);
+       /*
+        * Transmission sometimes breaks unless the DONE bit is written at the
+        * end of every transfer.  The spec says it's a RO bit.  Either the
+        * spec is wrong and the bit is actually of type RW1C, or it's a
+        * hardware erratum.
+        */
+       cs |= BCM2835_SPI_CS_DONE;
        /* and reset RX/TX FIFOS */
        cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
 
@@ -448,14 +481,14 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
        bs->rx_prologue  = 0;
        bs->tx_spillover = false;
 
-       if (!sg_is_last(&tfr->tx_sg.sgl[0]))
+       if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0]))
                bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;
 
-       if (!sg_is_last(&tfr->rx_sg.sgl[0])) {
+       if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) {
                bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;
 
                if (bs->rx_prologue > bs->tx_prologue) {
-                       if (sg_is_last(&tfr->tx_sg.sgl[0])) {
+                       if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) {
                                bs->tx_prologue  = bs->rx_prologue;
                        } else {
                                bs->tx_prologue += 4;
@@ -477,7 +510,9 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
                bcm2835_wr_fifo_count(bs, bs->rx_prologue);
                bcm2835_wait_tx_fifo_empty(bs);
                bcm2835_rd_fifo_count(bs, bs->rx_prologue);
-               bcm2835_spi_reset_hw(ctlr);
+               bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_RX
+                                                 | BCM2835_SPI_CS_CLEAR_TX
+                                                 | BCM2835_SPI_CS_DONE);
 
                dma_sync_single_for_device(ctlr->dma_rx->device->dev,
                                           sg_dma_address(&tfr->rx_sg.sgl[0]),
@@ -487,6 +522,9 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
                sg_dma_len(&tfr->rx_sg.sgl[0])     -= bs->rx_prologue;
        }
 
+       if (!bs->tx_buf)
+               return;
+
        /*
         * Write remaining TX prologue.  Adjust first entry in TX sglist.
         * Also adjust second entry if prologue spills over to it.
@@ -498,7 +536,8 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
                                                  | BCM2835_SPI_CS_DMAEN);
                bcm2835_wr_fifo_count(bs, tx_remaining);
                bcm2835_wait_tx_fifo_empty(bs);
-               bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX);
+               bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX
+                                                 | BCM2835_SPI_CS_DONE);
        }
 
        if (likely(!bs->tx_spillover)) {
@@ -531,6 +570,9 @@ static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
                sg_dma_len(&tfr->rx_sg.sgl[0])     += bs->rx_prologue;
        }
 
+       if (!bs->tx_buf)
+               goto out;
+
        if (likely(!bs->tx_spillover)) {
                sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
                sg_dma_len(&tfr->tx_sg.sgl[0])     += bs->tx_prologue;
@@ -539,32 +581,85 @@ static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
                sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4;
                sg_dma_len(&tfr->tx_sg.sgl[1])     += 4;
        }
+out:
+       bs->tx_prologue = 0;
 }
 
-static void bcm2835_spi_dma_done(void *data)
+/**
+ * bcm2835_spi_dma_rx_done() - callback for DMA RX channel
+ * @data: SPI master controller
+ *
+ * Used for bidirectional and RX-only transfers.
+ */
+static void bcm2835_spi_dma_rx_done(void *data)
 {
        struct spi_controller *ctlr = data;
        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 
-       /* reset fifo and HW */
-       bcm2835_spi_reset_hw(ctlr);
-
-       /* and terminate tx-dma as we do not have an irq for it
+       /* terminate tx-dma as we do not have an irq for it
         * because by the time the rx dma terminates and this callback
         * is called, the tx-dma must have finished - we can't get into
         * this situation otherwise...
         */
-       if (cmpxchg(&bs->dma_pending, true, false)) {
-               dmaengine_terminate_async(ctlr->dma_tx);
-               bcm2835_spi_undo_prologue(bs);
-       }
+       dmaengine_terminate_async(ctlr->dma_tx);
+       bs->tx_dma_active = false;
+       bs->rx_dma_active = false;
+       bcm2835_spi_undo_prologue(bs);
+
+       /* reset fifo and HW */
+       bcm2835_spi_reset_hw(ctlr);
 
        /* and mark as completed */
        complete(&ctlr->xfer_completion);
 }
 
+/**
+ * bcm2835_spi_dma_tx_done() - callback for DMA TX channel
+ * @data: SPI master controller
+ *
+ * Used for TX-only transfers.
+ */
+static void bcm2835_spi_dma_tx_done(void *data)
+{
+       struct spi_controller *ctlr = data;
+       struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+       /* busy-wait for TX FIFO to empty */
+       while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
+               bcm2835_wr(bs, BCM2835_SPI_CS,
+                          bs->clear_rx_cs[bs->chip_select]);
+
+       bs->tx_dma_active = false;
+       smp_wmb();
+
+       /*
+        * In case of a very short transfer, RX DMA may not have been
+        * issued yet.  The onus is then on bcm2835_spi_transfer_one_dma()
+        * to terminate it immediately after issuing.
+        */
+       if (cmpxchg(&bs->rx_dma_active, true, false))
+               dmaengine_terminate_async(ctlr->dma_rx);
+
+       bcm2835_spi_undo_prologue(bs);
+       bcm2835_spi_reset_hw(ctlr);
+       complete(&ctlr->xfer_completion);
+}
+
+/**
+ * bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
+ * @ctlr: SPI master controller
+ * @spi: SPI slave
+ * @tfr: SPI transfer
+ * @bs: BCM2835 SPI controller
+ * @is_tx: whether to submit DMA descriptor for TX or RX sglist
+ *
+ * Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
+ * Return 0 on success or a negative error number.
+ */
 static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
+                                 struct spi_device *spi,
                                  struct spi_transfer *tfr,
+                                 struct bcm2835_spi *bs,
                                  bool is_tx)
 {
        struct dma_chan *chan;
@@ -581,8 +676,7 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
                chan  = ctlr->dma_tx;
                nents = tfr->tx_sg.nents;
                sgl   = tfr->tx_sg.sgl;
-               flags = 0 /* no  tx interrupt */;
-
+               flags = tfr->rx_buf ? 0 : DMA_PREP_INTERRUPT;
        } else {
                dir   = DMA_DEV_TO_MEM;
                chan  = ctlr->dma_rx;
@@ -595,10 +689,17 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
        if (!desc)
                return -EINVAL;
 
-       /* set callback for rx */
+       /*
+        * Completion is signaled by the RX channel for bidirectional and
+        * RX-only transfers; else by the TX channel for TX-only transfers.
+        */
        if (!is_tx) {
-               desc->callback = bcm2835_spi_dma_done;
+               desc->callback = bcm2835_spi_dma_rx_done;
+               desc->callback_param = ctlr;
+       } else if (!tfr->rx_buf) {
+               desc->callback = bcm2835_spi_dma_tx_done;
                desc->callback_param = ctlr;
+               bs->chip_select = spi->chip_select;
        }
 
        /* submit it to DMA-engine */
@@ -607,12 +708,60 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
        return dma_submit_error(cookie);
 }
 
+/**
+ * bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
+ * @ctlr: SPI master controller
+ * @spi: SPI slave
+ * @tfr: SPI transfer
+ * @cs: CS register
+ *
+ * For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
+ * the TX and RX DMA channel to copy between memory and FIFO register.
+ *
+ * For *TX-only* transfers (rx_buf is %NULL), copying the RX FIFO's contents to
+ * memory is pointless.  However not reading the RX FIFO isn't an option either
+ * because transmission is halted once it's full.  As a workaround, cyclically
+ * clear the RX FIFO by setting the CLEAR_RX bit in the CS register.
+ *
+ * The CS register value is precalculated in bcm2835_spi_setup().  Normally
+ * this is called only once, on slave registration.  A DMA descriptor to write
+ * this value is preallocated in bcm2835_dma_init().  All that's left to do
+ * when performing a TX-only transfer is to submit this descriptor to the RX
+ * DMA channel.  Latency is thereby minimized.  The descriptor does not
+ * generate any interrupts while running.  It must be terminated once the
+ * TX DMA channel is done.
+ *
+ * Clearing the RX FIFO is paced by the DREQ signal.  The signal is asserted
+ * when the RX FIFO becomes half full, i.e. 32 bytes.  (Tuneable with the DC
+ * register.)  Reading 32 bytes from the RX FIFO would normally require 8 bus
+ * accesses, whereas clearing it requires only 1 bus access.  So an 8-fold
+ * reduction in bus traffic and thus energy consumption is achieved.
+ *
+ * For *RX-only* transfers (tx_buf is %NULL), fill the TX FIFO by cyclically
+ * copying from the zero page.  The DMA descriptor to do this is preallocated
+ * in bcm2835_dma_init().  It must be terminated once the RX DMA channel is
+ * done and can then be reused.
+ *
+ * The BCM2835 DMA driver autodetects when a transaction copies from the zero
+ * page and utilizes the DMA controller's ability to synthesize zeroes instead
+ * of copying them from memory.  This reduces traffic on the memory bus.  The
+ * feature is not available on so-called "lite" channels, but normally TX DMA
+ * is backed by a full-featured channel.
+ *
+ * Zero-filling the TX FIFO is paced by the DREQ signal.  Unfortunately the
+ * BCM2835 SPI controller continues to assert DREQ even after the DLEN register
+ * has been counted down to zero (hardware erratum).  Thus, when the transfer
+ * has finished, the DMA engine zero-fills the TX FIFO until it is half full.
+ * (Tuneable with the DC register.)  So up to 9 gratuitous bus accesses are
+ * performed at the end of an RX-only transfer.
+ */
 static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
                                        struct spi_device *spi,
                                        struct spi_transfer *tfr,
                                        u32 cs)
 {
        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+       dma_cookie_t cookie;
        int ret;
 
        /* update usage statistics */
@@ -625,16 +774,15 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
        bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);
 
        /* setup tx-DMA */
-       ret = bcm2835_spi_prepare_sg(ctlr, tfr, true);
+       if (bs->tx_buf) {
+               ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, true);
+       } else {
+               cookie = dmaengine_submit(bs->fill_tx_desc);
+               ret = dma_submit_error(cookie);
+       }
        if (ret)
                goto err_reset_hw;
 
-       /* start TX early */
-       dma_async_issue_pending(ctlr->dma_tx);
-
-       /* mark as dma pending */
-       bs->dma_pending = 1;
-
        /* set the DMA length */
        bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);
 
@@ -642,20 +790,43 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
        bcm2835_wr(bs, BCM2835_SPI_CS,
                   cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
 
+       bs->tx_dma_active = true;
+       smp_wmb();
+
+       /* start TX early */
+       dma_async_issue_pending(ctlr->dma_tx);
+
        /* setup rx-DMA late - to run transfers while
         * mapping of the rx buffers still takes place
         * this saves 10us or more.
         */
-       ret = bcm2835_spi_prepare_sg(ctlr, tfr, false);
+       if (bs->rx_buf) {
+               ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, false);
+       } else {
+               cookie = dmaengine_submit(bs->clear_rx_desc[spi->chip_select]);
+               ret = dma_submit_error(cookie);
+       }
        if (ret) {
                /* need to reset on errors */
                dmaengine_terminate_sync(ctlr->dma_tx);
-               bs->dma_pending = false;
+               bs->tx_dma_active = false;
                goto err_reset_hw;
        }
 
        /* start rx dma late */
        dma_async_issue_pending(ctlr->dma_rx);
+       bs->rx_dma_active = true;
+       smp_mb();
+
+       /*
+        * In case of a very short TX-only transfer, bcm2835_spi_dma_tx_done()
+        * may run before RX DMA is issued.  Terminate RX DMA if so.
+        */
+       if (!bs->rx_buf && !bs->tx_dma_active &&
+           cmpxchg(&bs->rx_dma_active, true, false)) {
+               dmaengine_terminate_async(ctlr->dma_rx);
+               bcm2835_spi_reset_hw(ctlr);
+       }
 
        /* wait for wakeup in framework */
        return 1;
@@ -678,26 +849,52 @@ static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
        return true;
 }
 
-static void bcm2835_dma_release(struct spi_controller *ctlr)
+static void bcm2835_dma_release(struct spi_controller *ctlr,
+                               struct bcm2835_spi *bs)
 {
+       int i;
+
        if (ctlr->dma_tx) {
                dmaengine_terminate_sync(ctlr->dma_tx);
+
+               if (bs->fill_tx_desc)
+                       dmaengine_desc_free(bs->fill_tx_desc);
+
+               if (bs->fill_tx_addr)
+                       dma_unmap_page_attrs(ctlr->dma_tx->device->dev,
+                                            bs->fill_tx_addr, sizeof(u32),
+                                            DMA_TO_DEVICE,
+                                            DMA_ATTR_SKIP_CPU_SYNC);
+
                dma_release_channel(ctlr->dma_tx);
                ctlr->dma_tx = NULL;
        }
+
        if (ctlr->dma_rx) {
                dmaengine_terminate_sync(ctlr->dma_rx);
+
+               for (i = 0; i < BCM2835_SPI_NUM_CS; i++)
+                       if (bs->clear_rx_desc[i])
+                               dmaengine_desc_free(bs->clear_rx_desc[i]);
+
+               if (bs->clear_rx_addr)
+                       dma_unmap_single(ctlr->dma_rx->device->dev,
+                                        bs->clear_rx_addr,
+                                        sizeof(bs->clear_rx_cs),
+                                        DMA_TO_DEVICE);
+
                dma_release_channel(ctlr->dma_rx);
                ctlr->dma_rx = NULL;
        }
 }
 
-static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
+static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
+                            struct bcm2835_spi *bs)
 {
        struct dma_slave_config slave_config;
        const __be32 *addr;
        dma_addr_t dma_reg_base;
-       int ret;
+       int ret, i;
 
        /* base address in dma-space */
        addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
@@ -719,7 +916,11 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
                goto err_release;
        }
 
-       /* configure DMAs */
+       /*
+        * The TX DMA channel either copies a transfer's TX buffer to the FIFO
+        * or, in case of an RX-only transfer, cyclically copies from the zero
+        * page to the FIFO using a preallocated, reusable descriptor.
+        */
        slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
        slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
@@ -727,17 +928,74 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
        if (ret)
                goto err_config;
 
+       bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev,
+                                             ZERO_PAGE(0), 0, sizeof(u32),
+                                             DMA_TO_DEVICE,
+                                             DMA_ATTR_SKIP_CPU_SYNC);
+       if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) {
+               dev_err(dev, "cannot map zero page - not using DMA mode\n");
+               bs->fill_tx_addr = 0;
+               goto err_release;
+       }
+
+       bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx,
+                                                    bs->fill_tx_addr,
+                                                    sizeof(u32), 0,
+                                                    DMA_MEM_TO_DEV, 0);
+       if (!bs->fill_tx_desc) {
+               dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n");
+               goto err_release;
+       }
+
+       ret = dmaengine_desc_set_reuse(bs->fill_tx_desc);
+       if (ret) {
+               dev_err(dev, "cannot reuse fill_tx_desc - not using DMA mode\n");
+               goto err_release;
+       }
+
+       /*
+        * The RX DMA channel is used bidirectionally:  It either reads the
+        * RX FIFO or, in case of a TX-only transfer, cyclically writes a
+        * precalculated value to the CS register to clear the RX FIFO.
+        */
        slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
        slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_CS);
+       slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
        ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
        if (ret)
                goto err_config;
 
+       bs->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
+                                          bs->clear_rx_cs,
+                                          sizeof(bs->clear_rx_cs),
+                                          DMA_TO_DEVICE);
+       if (dma_mapping_error(ctlr->dma_rx->device->dev, bs->clear_rx_addr)) {
+               dev_err(dev, "cannot map clear_rx_cs - not using DMA mode\n");
+               bs->clear_rx_addr = 0;
+               goto err_release;
+       }
+
+       for (i = 0; i < BCM2835_SPI_NUM_CS; i++) {
+               bs->clear_rx_desc[i] = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
+                                          bs->clear_rx_addr + i * sizeof(u32),
+                                          sizeof(u32), 0,
+                                          DMA_MEM_TO_DEV, 0);
+               if (!bs->clear_rx_desc[i]) {
+                       dev_err(dev, "cannot prepare clear_rx_desc - not using DMA mode\n");
+                       goto err_release;
+               }
+
+               ret = dmaengine_desc_set_reuse(bs->clear_rx_desc[i]);
+               if (ret) {
+                       dev_err(dev, "cannot reuse clear_rx_desc - not using DMA mode\n");
+                       goto err_release;
+               }
+       }
+
        /* all went well, so set can_dma */
        ctlr->can_dma = bcm2835_spi_can_dma;
-       /* need to do TX AND RX DMA, so we need dummy buffers */
-       ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
 
        return;
 
@@ -745,7 +1003,7 @@ err_config:
        dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
                ret);
 err_release:
-       bcm2835_dma_release(ctlr);
+       bcm2835_dma_release(ctlr, bs);
 err:
        return;
 }
@@ -812,7 +1070,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
        unsigned long spi_hz, clk_hz, cdiv, spi_used_hz;
        unsigned long hz_per_byte, byte_limit;
-       u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+       u32 cs = bs->prepare_cs[spi->chip_select];
 
        /* set clock */
        spi_hz = tfr->speed_hz;
@@ -834,18 +1092,8 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
        bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
 
        /* handle all the 3-wire mode */
-       if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
-           tfr->rx_buf != ctlr->dummy_rx)
+       if (spi->mode & SPI_3WIRE && tfr->rx_buf)
                cs |= BCM2835_SPI_CS_REN;
-       else
-               cs &= ~BCM2835_SPI_CS_REN;
-
-       /*
-        * The driver always uses software-controlled GPIO Chip Select.
-        * Set the hardware-controlled native Chip Select to an invalid
-        * value to prevent it from interfering.
-        */
-       cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
 
        /* set transmit buffers and length */
        bs->tx_buf = tfr->tx_buf;
@@ -882,7 +1130,6 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
 {
        struct spi_device *spi = msg->spi;
        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
-       u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
        int ret;
 
        if (ctlr->can_dma) {
@@ -897,14 +1144,11 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
                        return ret;
        }
 
-       cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
-
-       if (spi->mode & SPI_CPOL)
-               cs |= BCM2835_SPI_CS_CPOL;
-       if (spi->mode & SPI_CPHA)
-               cs |= BCM2835_SPI_CS_CPHA;
-
-       bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+       /*
+        * Set up clock polarity before spi_transfer_one_message() asserts
+        * chip select to avoid a gratuitous clock signal edge.
+        */
+       bcm2835_wr(bs, BCM2835_SPI_CS, bs->prepare_cs[spi->chip_select]);
 
        return 0;
 }
@@ -915,11 +1159,12 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 
        /* if an error occurred and we have an active dma, then terminate */
-       if (cmpxchg(&bs->dma_pending, true, false)) {
-               dmaengine_terminate_sync(ctlr->dma_tx);
-               dmaengine_terminate_sync(ctlr->dma_rx);
-               bcm2835_spi_undo_prologue(bs);
-       }
+       dmaengine_terminate_sync(ctlr->dma_tx);
+       bs->tx_dma_active = false;
+       dmaengine_terminate_sync(ctlr->dma_rx);
+       bs->rx_dma_active = false;
+       bcm2835_spi_undo_prologue(bs);
+
        /* and reset */
        bcm2835_spi_reset_hw(ctlr);
 }
@@ -931,14 +1176,50 @@ static int chip_match_name(struct gpio_chip *chip, void *data)
 
 static int bcm2835_spi_setup(struct spi_device *spi)
 {
-       int err;
+       struct spi_controller *ctlr = spi->controller;
+       struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
        struct gpio_chip *chip;
+       enum gpio_lookup_flags lflags;
+       u32 cs;
+
+       /*
+        * Precalculate SPI slave's CS register value for ->prepare_message():
+        * The driver always uses software-controlled GPIO chip select, hence
+        * set the hardware-controlled native chip select to an invalid value
+        * to prevent it from interfering.
+        */
+       cs = BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+       if (spi->mode & SPI_CPOL)
+               cs |= BCM2835_SPI_CS_CPOL;
+       if (spi->mode & SPI_CPHA)
+               cs |= BCM2835_SPI_CS_CPHA;
+       bs->prepare_cs[spi->chip_select] = cs;
+
+       /*
+        * Precalculate SPI slave's CS register value to clear RX FIFO
+        * in case of a TX-only DMA transfer.
+        */
+       if (ctlr->dma_rx) {
+               bs->clear_rx_cs[spi->chip_select] = cs |
+                                                   BCM2835_SPI_CS_TA |
+                                                   BCM2835_SPI_CS_DMAEN |
+                                                   BCM2835_SPI_CS_CLEAR_RX;
+               dma_sync_single_for_device(ctlr->dma_rx->device->dev,
+                                          bs->clear_rx_addr,
+                                          sizeof(bs->clear_rx_cs),
+                                          DMA_TO_DEVICE);
+       }
+
        /*
         * sanity checking the native-chipselects
         */
        if (spi->mode & SPI_NO_CS)
                return 0;
-       if (gpio_is_valid(spi->cs_gpio))
+       /*
+        * The SPI core has successfully requested the CS GPIO line from the
+        * device tree, so we are done.
+        */
+       if (spi->cs_gpiod)
                return 0;
        if (spi->chip_select > 1) {
                /* error in the case of native CS requested with CS > 1
@@ -949,29 +1230,43 @@ static int bcm2835_spi_setup(struct spi_device *spi)
                        "setup: only two native chip-selects are supported\n");
                return -EINVAL;
        }
-       /* now translate native cs to GPIO */
+
+       /*
+        * Translate native CS to GPIO
+        *
+        * FIXME: poking around in the gpiolib internals like this is
+        * not very good practice. Find a way to locate the real problem
+        * and fix it. Why is the GPIO descriptor in spi->cs_gpiod
+        * sometimes not assigned correctly? Erroneous device trees?
+        */
 
        /* get the gpio chip for the base */
        chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
        if (!chip)
                return 0;
 
-       /* and calculate the real CS */
-       spi->cs_gpio = chip->base + 8 - spi->chip_select;
+       /*
+        * Retrieve the corresponding GPIO line used for CS.
+        * The inversion semantics are handled by the GPIO core, so we
+        * pass GPIOD_OUT_LOW for "unasserted" together with the correct
+        * GPIO_ACTIVE_* lookup flag. SPI_CS_HIGH in spi->mode cannot be
+        * checked for polarity here, as the use_gpio_descriptors flag
+        * enforces SPI_CS_HIGH.
+        */
+       if (of_property_read_bool(spi->dev.of_node, "spi-cs-high"))
+               lflags = GPIO_ACTIVE_HIGH;
+       else
+               lflags = GPIO_ACTIVE_LOW;
+       spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
+                                                 DRV_NAME,
+                                                 lflags,
+                                                 GPIOD_OUT_LOW);
+       if (IS_ERR(spi->cs_gpiod))
+               return PTR_ERR(spi->cs_gpiod);
 
        /* and set up the "mode" and level */
-       dev_info(&spi->dev, "setting up native-CS%i as GPIO %i\n",
-                spi->chip_select, spi->cs_gpio);
-
-       /* set up GPIO as output and pull to the correct level */
-       err = gpio_direction_output(spi->cs_gpio,
-                                   (spi->mode & SPI_CS_HIGH) ? 0 : 1);
-       if (err) {
-               dev_err(&spi->dev,
-                       "could not set CS%i gpio %i as output: %i",
-                       spi->chip_select, spi->cs_gpio, err);
-               return err;
-       }
+       dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n",
+                spi->chip_select);
 
        return 0;
 }
@@ -980,18 +1275,19 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
 {
        struct spi_controller *ctlr;
        struct bcm2835_spi *bs;
-       struct resource *res;
        int err;
 
-       ctlr = spi_alloc_master(&pdev->dev, sizeof(*bs));
+       ctlr = spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
+                                                 dma_get_cache_alignment()));
        if (!ctlr)
                return -ENOMEM;
 
        platform_set_drvdata(pdev, ctlr);
 
+       ctlr->use_gpio_descriptors = true;
        ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
        ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
-       ctlr->num_chipselect = 3;
+       ctlr->num_chipselect = BCM2835_SPI_NUM_CS;
        ctlr->setup = bcm2835_spi_setup;
        ctlr->transfer_one = bcm2835_spi_transfer_one;
        ctlr->handle_err = bcm2835_spi_handle_err;
@@ -1000,8 +1296,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
 
        bs = spi_controller_get_devdata(ctlr);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       bs->regs = devm_ioremap_resource(&pdev->dev, res);
+       bs->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(bs->regs)) {
                err = PTR_ERR(bs->regs);
                goto out_controller_put;
@@ -1016,14 +1311,13 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
 
        bs->irq = platform_get_irq(pdev, 0);
        if (bs->irq <= 0) {
-               dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
                err = bs->irq ? bs->irq : -ENODEV;
                goto out_controller_put;
        }
 
        clk_prepare_enable(bs->clk);
 
-       bcm2835_dma_init(ctlr, &pdev->dev);
+       bcm2835_dma_init(ctlr, &pdev->dev, bs);
 
        /* initialise the hardware with the default polarities */
        bcm2835_wr(bs, BCM2835_SPI_CS,
@@ -1067,7 +1361,7 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
 
        clk_disable_unprepare(bs->clk);
 
-       bcm2835_dma_release(ctlr);
+       bcm2835_dma_release(ctlr, bs);
 
        return 0;
 }
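
The probe hunk above sizes the controller's driver data with ALIGN(sizeof(*bs), dma_get_cache_alignment()). A sketch of that allocation, under the assumption that the private structure embeds DMA-touched buffers and therefore wants to start on a cache-line boundary (my_priv and my_probe are illustrative names):

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct my_priv {
	u8 rx_prologue[4];	/* hypothetical DMA-touched member */
};

static int my_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	/* round the devdata size up to a full cache line */
	ctlr = spi_alloc_master(&pdev->dev,
				ALIGN(sizeof(struct my_priv),
				      dma_get_cache_alignment()));
	if (!ctlr)
		return -ENOMEM;

	platform_set_drvdata(pdev, ctlr);
	return 0;	/* remainder of probe omitted in this sketch */
}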
index bb57035c5770381e58d53133571f1350b0249d37..a2162ff56a121f1a5072d9c87251c8986cb80521 100644 (file)
@@ -491,7 +491,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
 {
        struct spi_master *master;
        struct bcm2835aux_spi *bs;
-       struct resource *res;
        unsigned long clk_hz;
        int err;
 
@@ -524,8 +523,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
        bs = spi_master_get_devdata(master);
 
        /* the main area */
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       bs->regs = devm_ioremap_resource(&pdev->dev, res);
+       bs->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(bs->regs)) {
                err = PTR_ERR(bs->regs);
                goto out_master_put;
@@ -540,7 +538,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
 
        bs->irq = platform_get_irq(pdev, 0);
        if (bs->irq <= 0) {
-               dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
                err = bs->irq ? bs->irq : -ENODEV;
                goto out_master_put;
        }
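
Like most files in this series, the hunks above replace the platform_get_resource()/devm_ioremap_resource() pair with the devm_platform_ioremap_resource() helper. The two forms are equivalent, as this illustrative fragment shows:

#include <linux/platform_device.h>

static void __iomem *my_map_regs(struct platform_device *pdev)
{
	/* Shorthand for:
	 *   struct resource *res =
	 *           platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   return devm_ioremap_resource(&pdev->dev, res);
	 */
	return devm_platform_ioremap_resource(pdev, 0);
}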
index 9a06ffdb73b88641e68da218e63f59ba15704ebf..c6836a931dbf957dd0b81fe01aa598953f975deb 100644 (file)
@@ -330,7 +330,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
 {
        struct spi_master *master;
        struct bcm63xx_hsspi *bs;
-       struct resource *res_mem;
        void __iomem *regs;
        struct device *dev = &pdev->dev;
        struct clk *clk, *pll_clk = NULL;
@@ -338,13 +337,10 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
        u32 reg, rate, num_cs = HSSPI_SPI_MAX_CS;
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "no irq: %d\n", irq);
+       if (irq < 0)
                return irq;
-       }
 
-       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       regs = devm_ioremap_resource(dev, res_mem);
+       regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(regs))
                return PTR_ERR(regs);
 
index df1c94a131e62842f1cbc431545582331382fd4a..fdd7eaa0b8ede36acde7834485d865448ff6d689 100644 (file)
@@ -520,10 +520,8 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "no irq: %d\n", irq);
+       if (irq < 0)
                return irq;
-       }
 
        clk = devm_clk_get(dev, "spi");
        if (IS_ERR(clk)) {
index 7c41e4e828492c52800bdf996547db86cdde9b8b..c36587b42e951eab806d4e9a12715db175619a8a 100644 (file)
@@ -474,7 +474,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
        int ret = 0, irq;
        struct spi_master *master;
        struct cdns_spi *xspi;
-       struct resource *res;
        u32 num_cs;
 
        master = spi_alloc_master(&pdev->dev, sizeof(*xspi));
@@ -485,8 +484,7 @@ static int cdns_spi_probe(struct platform_device *pdev)
        master->dev.of_node = pdev->dev.of_node;
        platform_set_drvdata(pdev, master);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       xspi->regs = devm_ioremap_resource(&pdev->dev, res);
+       xspi->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(xspi->regs)) {
                ret = PTR_ERR(xspi->regs);
                goto remove_master;
@@ -540,7 +538,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                ret = -ENXIO;
-               dev_err(&pdev->dev, "irq number is invalid\n");
                goto clk_dis_all;
        }
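
The dev_err() deletions around platform_get_irq() in this and the surrounding drivers follow the v5.4-cycle change that made platform_get_irq() log its own error on failure, so callers can simply propagate the code. An illustrative probe fragment (my_probe is a hypothetical name):

#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* platform_get_irq() already printed the error */

	return 0;
}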
 
index ee4703e84622edafb5e5304c536fbd028c60b1bb..1a2de6ce906475674e4b2d31e632a7369d8f56b9 100644 (file)
@@ -18,7 +18,6 @@
 
 static int octeon_spi_probe(struct platform_device *pdev)
 {
-       struct resource *res_mem;
        void __iomem *reg_base;
        struct spi_master *master;
        struct octeon_spi *p;
@@ -30,8 +29,7 @@ static int octeon_spi_probe(struct platform_device *pdev)
        p = spi_master_get_devdata(master);
        platform_set_drvdata(pdev, master);
 
-       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
+       reg_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(reg_base)) {
                err = PTR_ERR(reg_base);
                goto fail;
index 4daba12ec8436cbabf04f4d7b9abf7ebdcb66fb6..5e900f2289199b260f127ece4431c9bb0d84d2e0 100644 (file)
@@ -91,7 +91,6 @@ static int spi_clps711x_probe(struct platform_device *pdev)
 {
        struct spi_clps711x_data *hw;
        struct spi_master *master;
-       struct resource *res;
        int irq, ret;
 
        irq = platform_get_irq(pdev, 0);
@@ -125,8 +124,7 @@ static int spi_clps711x_probe(struct platform_device *pdev)
                goto err_out;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       hw->syncio = devm_ioremap_resource(&pdev->dev, res);
+       hw->syncio = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hw->syncio)) {
                ret = PTR_ERR(hw->syncio);
                goto err_out;
index 5ff48ab2f5344ec2b223ea020467faf18c7b94d6..f80e06c87fbe35924151f2d2c8abc053a47f6379 100644 (file)
@@ -339,7 +339,6 @@ static int mcfqspi_probe(struct platform_device *pdev)
 {
        struct spi_master *master;
        struct mcfqspi *mcfqspi;
-       struct resource *res;
        struct mcfqspi_platform_data *pdata;
        int status;
 
@@ -362,8 +361,7 @@ static int mcfqspi_probe(struct platform_device *pdev)
 
        mcfqspi = spi_master_get_devdata(master);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       mcfqspi->iobase = devm_ioremap_resource(&pdev->dev, res);
+       mcfqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(mcfqspi->iobase)) {
                status = PTR_ERR(mcfqspi->iobase);
                goto fail0;
index 18c06568805e76276619fc1d1a66d32780aaadab..bd46fca3f0944aa81d1958f6f0015d9262d3b3a5 100644 (file)
@@ -79,14 +79,12 @@ static int dw_spi_mscc_init(struct platform_device *pdev,
                            const char *cpu_syscon, u32 if_si_owner_offset)
 {
        struct dw_spi_mscc *dwsmscc;
-       struct resource *res;
 
        dwsmscc = devm_kzalloc(&pdev->dev, sizeof(*dwsmscc), GFP_KERNEL);
        if (!dwsmscc)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       dwsmscc->spi_mst = devm_ioremap_resource(&pdev->dev, res);
+       dwsmscc->spi_mst = devm_platform_ioremap_resource(pdev, 1);
        if (IS_ERR(dwsmscc->spi_mst)) {
                dev_err(&pdev->dev, "SPI_MST region map failed\n");
                return PTR_ERR(dwsmscc->spi_mst);
@@ -138,7 +136,6 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
                         struct dw_spi_mmio *dwsmmio);
        struct dw_spi_mmio *dwsmmio;
        struct dw_spi *dws;
-       struct resource *mem;
        int ret;
        int num_cs;
 
@@ -150,18 +147,15 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
        dws = &dwsmmio->dws;
 
        /* Get basic io resource and map it */
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       dws->regs = devm_ioremap_resource(&pdev->dev, mem);
+       dws->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(dws->regs)) {
                dev_err(&pdev->dev, "SPI region map failed\n");
                return PTR_ERR(dws->regs);
        }
 
        dws->irq = platform_get_irq(pdev, 0);
-       if (dws->irq < 0) {
-               dev_err(&pdev->dev, "no irq resource?\n");
+       if (dws->irq < 0)
                return dws->irq; /* -ENXIO */
-       }
 
        dwsmmio->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(dwsmmio->clk))
@@ -172,8 +166,10 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
 
        /* Optional clock needed to access the registers */
        dwsmmio->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
-       if (IS_ERR(dwsmmio->pclk))
-               return PTR_ERR(dwsmmio->pclk);
+       if (IS_ERR(dwsmmio->pclk)) {
+               ret = PTR_ERR(dwsmmio->pclk);
+               goto out_clk;
+       }
        ret = clk_prepare_enable(dwsmmio->pclk);
        if (ret)
                goto out_clk;
index 9651679ee7f7330a7f68ca469fb7d43226dfb515..140644913e6c5c4aafa1d56c3f5a2a1abf249d3a 100644 (file)
@@ -19,6 +19,7 @@ struct spi_pci_desc {
        int     (*setup)(struct dw_spi *);
        u16     num_cs;
        u16     bus_num;
+       u32     max_freq;
 };
 
 static struct spi_pci_desc spi_pci_mid_desc_1 = {
@@ -33,6 +34,12 @@ static struct spi_pci_desc spi_pci_mid_desc_2 = {
        .bus_num = 1,
 };
 
+static struct spi_pci_desc spi_pci_ehl_desc = {
+       .num_cs = 1,
+       .bus_num = -1,
+       .max_freq = 100000000,
+};
+
 static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct dw_spi *dws;
@@ -65,6 +72,7 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (desc) {
                dws->num_cs = desc->num_cs;
                dws->bus_num = desc->bus_num;
+               dws->max_freq = desc->max_freq;
 
                if (desc->setup) {
                        ret = desc->setup(dws);
@@ -98,16 +106,14 @@ static void spi_pci_remove(struct pci_dev *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int spi_suspend(struct device *dev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct dw_spi *dws = pci_get_drvdata(pdev);
+       struct dw_spi *dws = dev_get_drvdata(dev);
 
        return dw_spi_suspend_host(dws);
 }
 
 static int spi_resume(struct device *dev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct dw_spi *dws = pci_get_drvdata(pdev);
+       struct dw_spi *dws = dev_get_drvdata(dev);
 
        return dw_spi_resume_host(dws);
 }
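
The PM callbacks above drop the to_pci_dev()/pci_get_drvdata() detour in favour of dev_get_drvdata(), which works because pci_set_drvdata() stores the pointer on the embedded struct device. A small sketch with a hypothetical context structure:

#include <linux/device.h>

struct my_ctx {
	bool suspended;		/* hypothetical driver state */
};

static int my_suspend(struct device *dev)
{
	/* bus-agnostic accessor: same pointer that *_set_drvdata() stored */
	struct my_ctx *ctx = dev_get_drvdata(dev);

	ctx->suspended = true;
	return 0;
}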
@@ -125,8 +131,14 @@ static const struct pci_device_id pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc_1},
        /* Intel MID platform SPI controller 2 */
        { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&spi_pci_mid_desc_2},
+       /* Intel Elkhart Lake PSE SPI controllers */
+       { PCI_VDEVICE(INTEL, 0x4b84), (kernel_ulong_t)&spi_pci_ehl_desc},
+       { PCI_VDEVICE(INTEL, 0x4b85), (kernel_ulong_t)&spi_pci_ehl_desc},
+       { PCI_VDEVICE(INTEL, 0x4b86), (kernel_ulong_t)&spi_pci_ehl_desc},
+       { PCI_VDEVICE(INTEL, 0x4b87), (kernel_ulong_t)&spi_pci_ehl_desc},
        {},
 };
+MODULE_DEVICE_TABLE(pci, pci_ids);
 
 static struct pci_driver dw_spi_driver = {
        .name =         DRIVER_NAME,
index eb1f2142a335161fa4c5a037d9cc4f98394550e1..64d4c441b6419536bc813eebfcd66e80cd72a7cc 100644 (file)
@@ -400,10 +400,8 @@ static int efm32_spi_probe(struct platform_device *pdev)
        }
 
        ret = platform_get_irq(pdev, 0);
-       if (ret <= 0) {
-               dev_err(&pdev->dev, "failed to get rx irq (%d)\n", ret);
+       if (ret <= 0)
                goto err;
-       }
 
        ddata->rxirq = ret;
 
index 4034e3ec0ba2a8f23bbfc248f1c70e8906df803a..4e1ccd4e52b6c1644a66cae83eb705f6c37a3031 100644 (file)
@@ -656,10 +656,8 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(&pdev->dev, "failed to get irq resources\n");
+       if (irq < 0)
                return -EBUSY;
-       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
index e967ac564761ba106987f12ce15e454865b7b4de..858f0544289e6a7211c2e85fc1d82672923401a7 100644 (file)
@@ -305,12 +305,10 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
        }
 
        if (mspi->flags & SPI_CPM1) {
-               struct resource *res;
                void *pram;
 
-               res = platform_get_resource(to_platform_device(dev),
-                                           IORESOURCE_MEM, 1);
-               pram = devm_ioremap_resource(dev, res);
+               pram = devm_platform_ioremap_resource(to_platform_device(dev),
+                                                     1);
                if (IS_ERR(pram))
                        mspi->pram = NULL;
                else
index 53335ccc98f61812d30e78eea8e036b28314cfe0..bec758e978fb122e5b954081eda5394b2f4f3b2e 100644 (file)
@@ -9,26 +9,16 @@
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/errno.h>
 #include <linux/interrupt.h>
-#include <linux/io.h>
 #include <linux/kernel.h>
-#include <linux/math64.h>
 #include <linux/module.h>
-#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pinctrl/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
 #include <linux/regmap.h>
-#include <linux/sched.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/spi-fsl-dspi.h>
-#include <linux/spi/spi_bitbang.h>
-#include <linux/time.h>
 
-#define DRIVER_NAME "fsl-dspi"
+#define DRIVER_NAME                    "fsl-dspi"
 
 #ifdef CONFIG_M5441x
 #define DSPI_FIFO_SIZE                 16
 #endif
 #define DSPI_DMA_BUFSIZE               (DSPI_FIFO_SIZE * 1024)
 
-#define SPI_MCR                0x00
-#define SPI_MCR_MASTER         (1 << 31)
-#define SPI_MCR_PCSIS          (0x3F << 16)
-#define SPI_MCR_CLR_TXF        (1 << 11)
-#define SPI_MCR_CLR_RXF        (1 << 10)
-#define SPI_MCR_XSPI           (1 << 3)
-
-#define SPI_TCR                        0x08
-#define SPI_TCR_GET_TCNT(x)    (((x) & 0xffff0000) >> 16)
-
-#define SPI_CTAR(x)            (0x0c + (((x) & 0x3) * 4))
-#define SPI_CTAR_FMSZ(x)       (((x) & 0x0000000f) << 27)
-#define SPI_CTAR_CPOL(x)       ((x) << 26)
-#define SPI_CTAR_CPHA(x)       ((x) << 25)
-#define SPI_CTAR_LSBFE(x)      ((x) << 24)
-#define SPI_CTAR_PCSSCK(x)     (((x) & 0x00000003) << 22)
-#define SPI_CTAR_PASC(x)       (((x) & 0x00000003) << 20)
-#define SPI_CTAR_PDT(x)        (((x) & 0x00000003) << 18)
-#define SPI_CTAR_PBR(x)        (((x) & 0x00000003) << 16)
-#define SPI_CTAR_CSSCK(x)      (((x) & 0x0000000f) << 12)
-#define SPI_CTAR_ASC(x)        (((x) & 0x0000000f) << 8)
-#define SPI_CTAR_DT(x)         (((x) & 0x0000000f) << 4)
-#define SPI_CTAR_BR(x)         ((x) & 0x0000000f)
-#define SPI_CTAR_SCALE_BITS    0xf
-
-#define SPI_CTAR0_SLAVE        0x0c
-
-#define SPI_SR                 0x2c
-#define SPI_SR_EOQF            0x10000000
-#define SPI_SR_TCFQF           0x80000000
-#define SPI_SR_CLEAR           0x9aaf0000
-
-#define SPI_RSER_TFFFE         BIT(25)
-#define SPI_RSER_TFFFD         BIT(24)
-#define SPI_RSER_RFDFE         BIT(17)
-#define SPI_RSER_RFDFD         BIT(16)
-
-#define SPI_RSER               0x30
-#define SPI_RSER_EOQFE         0x10000000
-#define SPI_RSER_TCFQE         0x80000000
-
-#define SPI_PUSHR              0x34
-#define SPI_PUSHR_CMD_CONT     (1 << 15)
-#define SPI_PUSHR_CONT         (SPI_PUSHR_CMD_CONT << 16)
-#define SPI_PUSHR_CMD_CTAS(x)  (((x) & 0x0003) << 12)
-#define SPI_PUSHR_CTAS(x)      (SPI_PUSHR_CMD_CTAS(x) << 16)
-#define SPI_PUSHR_CMD_EOQ      (1 << 11)
-#define SPI_PUSHR_EOQ          (SPI_PUSHR_CMD_EOQ << 16)
-#define SPI_PUSHR_CMD_CTCNT    (1 << 10)
-#define SPI_PUSHR_CTCNT                (SPI_PUSHR_CMD_CTCNT << 16)
-#define SPI_PUSHR_CMD_PCS(x)   ((1 << x) & 0x003f)
-#define SPI_PUSHR_PCS(x)       (SPI_PUSHR_CMD_PCS(x) << 16)
-#define SPI_PUSHR_TXDATA(x)    ((x) & 0x0000ffff)
-
-#define SPI_PUSHR_SLAVE        0x34
-
-#define SPI_POPR               0x38
-#define SPI_POPR_RXDATA(x)     ((x) & 0x0000ffff)
-
-#define SPI_TXFR0              0x3c
-#define SPI_TXFR1              0x40
-#define SPI_TXFR2              0x44
-#define SPI_TXFR3              0x48
-#define SPI_RXFR0              0x7c
-#define SPI_RXFR1              0x80
-#define SPI_RXFR2              0x84
-#define SPI_RXFR3              0x88
-
-#define SPI_CTARE(x)           (0x11c + (((x) & 0x3) * 4))
-#define SPI_CTARE_FMSZE(x)     (((x) & 0x1) << 16)
-#define SPI_CTARE_DTCP(x)      ((x) & 0x7ff)
-
-#define SPI_SREX               0x13c
-
-#define SPI_FRAME_BITS(bits)   SPI_CTAR_FMSZ((bits) - 1)
-#define SPI_FRAME_BITS_MASK    SPI_CTAR_FMSZ(0xf)
-#define SPI_FRAME_BITS_16      SPI_CTAR_FMSZ(0xf)
-#define SPI_FRAME_BITS_8       SPI_CTAR_FMSZ(0x7)
-
-#define SPI_FRAME_EBITS(bits)  SPI_CTARE_FMSZE(((bits) - 1) >> 4)
-#define SPI_FRAME_EBITS_MASK   SPI_CTARE_FMSZE(1)
+#define SPI_MCR                                0x00
+#define SPI_MCR_MASTER                 BIT(31)
+#define SPI_MCR_PCSIS                  (0x3F << 16)
+#define SPI_MCR_CLR_TXF                        BIT(11)
+#define SPI_MCR_CLR_RXF                        BIT(10)
+#define SPI_MCR_XSPI                   BIT(3)
+
+#define SPI_TCR                                0x08
+#define SPI_TCR_GET_TCNT(x)            (((x) & GENMASK(31, 16)) >> 16)
+
+#define SPI_CTAR(x)                    (0x0c + (((x) & GENMASK(1, 0)) * 4))
+#define SPI_CTAR_FMSZ(x)               (((x) << 27) & GENMASK(30, 27))
+#define SPI_CTAR_CPOL                  BIT(26)
+#define SPI_CTAR_CPHA                  BIT(25)
+#define SPI_CTAR_LSBFE                 BIT(24)
+#define SPI_CTAR_PCSSCK(x)             (((x) << 22) & GENMASK(23, 22))
+#define SPI_CTAR_PASC(x)               (((x) << 20) & GENMASK(21, 20))
+#define SPI_CTAR_PDT(x)                        (((x) << 18) & GENMASK(19, 18))
+#define SPI_CTAR_PBR(x)                        (((x) << 16) & GENMASK(17, 16))
+#define SPI_CTAR_CSSCK(x)              (((x) << 12) & GENMASK(15, 12))
+#define SPI_CTAR_ASC(x)                        (((x) << 8) & GENMASK(11, 8))
+#define SPI_CTAR_DT(x)                 (((x) << 4) & GENMASK(7, 4))
+#define SPI_CTAR_BR(x)                 ((x) & GENMASK(3, 0))
+#define SPI_CTAR_SCALE_BITS            0xf
+
+#define SPI_CTAR0_SLAVE                        0x0c
+
+#define SPI_SR                         0x2c
+#define SPI_SR_TCFQF                   BIT(31)
+#define SPI_SR_EOQF                    BIT(28)
+#define SPI_SR_TFUF                    BIT(27)
+#define SPI_SR_TFFF                    BIT(25)
+#define SPI_SR_CMDTCF                  BIT(23)
+#define SPI_SR_SPEF                    BIT(21)
+#define SPI_SR_RFOF                    BIT(19)
+#define SPI_SR_TFIWF                   BIT(18)
+#define SPI_SR_RFDF                    BIT(17)
+#define SPI_SR_CMDFFF                  BIT(16)
+#define SPI_SR_CLEAR                   (SPI_SR_TCFQF | SPI_SR_EOQF | \
+                                       SPI_SR_TFUF | SPI_SR_TFFF | \
+                                       SPI_SR_CMDTCF | SPI_SR_SPEF | \
+                                       SPI_SR_RFOF | SPI_SR_TFIWF | \
+                                       SPI_SR_RFDF | SPI_SR_CMDFFF)
+
+#define SPI_RSER_TFFFE                 BIT(25)
+#define SPI_RSER_TFFFD                 BIT(24)
+#define SPI_RSER_RFDFE                 BIT(17)
+#define SPI_RSER_RFDFD                 BIT(16)
+
+#define SPI_RSER                       0x30
+#define SPI_RSER_TCFQE                 BIT(31)
+#define SPI_RSER_EOQFE                 BIT(28)
+
+#define SPI_PUSHR                      0x34
+#define SPI_PUSHR_CMD_CONT             BIT(15)
+#define SPI_PUSHR_CMD_CTAS(x)          (((x) << 12) & GENMASK(14, 12))
+#define SPI_PUSHR_CMD_EOQ              BIT(11)
+#define SPI_PUSHR_CMD_CTCNT            BIT(10)
+#define SPI_PUSHR_CMD_PCS(x)           (BIT(x) & GENMASK(5, 0))
+
+#define SPI_PUSHR_SLAVE                        0x34
+
+#define SPI_POPR                       0x38
+
+#define SPI_TXFR0                      0x3c
+#define SPI_TXFR1                      0x40
+#define SPI_TXFR2                      0x44
+#define SPI_TXFR3                      0x48
+#define SPI_RXFR0                      0x7c
+#define SPI_RXFR1                      0x80
+#define SPI_RXFR2                      0x84
+#define SPI_RXFR3                      0x88
+
+#define SPI_CTARE(x)                   (0x11c + (((x) & GENMASK(1, 0)) * 4))
+#define SPI_CTARE_FMSZE(x)             (((x) & 0x1) << 16)
+#define SPI_CTARE_DTCP(x)              ((x) & 0x7ff)
+
+#define SPI_SREX                       0x13c
+
+#define SPI_FRAME_BITS(bits)           SPI_CTAR_FMSZ((bits) - 1)
+#define SPI_FRAME_EBITS(bits)          SPI_CTARE_FMSZE(((bits) - 1) >> 4)
 
 /* Register offsets for regmap_pushr */
-#define PUSHR_CMD              0x0
-#define PUSHR_TX               0x2
+#define PUSHR_CMD                      0x0
+#define PUSHR_TX                       0x2
 
-#define SPI_CS_INIT            0x01
-#define SPI_CS_ASSERT          0x02
-#define SPI_CS_DROP            0x04
-
-#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
+#define DMA_COMPLETION_TIMEOUT         msecs_to_jiffies(3000)
 
 struct chip_data {
-       u32 ctar_val;
-       u16 void_write_data;
+       u32                     ctar_val;
+       u16                     void_write_data;
 };
 
 enum dspi_trans_mode {
@@ -141,75 +127,75 @@ enum dspi_trans_mode {
 };
 
 struct fsl_dspi_devtype_data {
-       enum dspi_trans_mode trans_mode;
-       u8 max_clock_factor;
-       bool xspi_mode;
+       enum dspi_trans_mode    trans_mode;
+       u8                      max_clock_factor;
+       bool                    xspi_mode;
 };
 
 static const struct fsl_dspi_devtype_data vf610_data = {
-       .trans_mode = DSPI_DMA_MODE,
-       .max_clock_factor = 2,
+       .trans_mode             = DSPI_DMA_MODE,
+       .max_clock_factor       = 2,
 };
 
 static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
-       .trans_mode = DSPI_TCFQ_MODE,
-       .max_clock_factor = 8,
-       .xspi_mode = true,
+       .trans_mode             = DSPI_TCFQ_MODE,
+       .max_clock_factor       = 8,
+       .xspi_mode              = true,
 };
 
 static const struct fsl_dspi_devtype_data ls2085a_data = {
-       .trans_mode = DSPI_TCFQ_MODE,
-       .max_clock_factor = 8,
+       .trans_mode             = DSPI_TCFQ_MODE,
+       .max_clock_factor       = 8,
 };
 
 static const struct fsl_dspi_devtype_data coldfire_data = {
-       .trans_mode = DSPI_EOQ_MODE,
-       .max_clock_factor = 8,
+       .trans_mode             = DSPI_EOQ_MODE,
+       .max_clock_factor       = 8,
 };
 
 struct fsl_dspi_dma {
        /* Length of transfer in words of DSPI_FIFO_SIZE */
-       u32 curr_xfer_len;
-
-       u32 *tx_dma_buf;
-       struct dma_chan *chan_tx;
-       dma_addr_t tx_dma_phys;
-       struct completion cmd_tx_complete;
-       struct dma_async_tx_descriptor *tx_desc;
-
-       u32 *rx_dma_buf;
-       struct dma_chan *chan_rx;
-       dma_addr_t rx_dma_phys;
-       struct completion cmd_rx_complete;
-       struct dma_async_tx_descriptor *rx_desc;
+       u32                                     curr_xfer_len;
+
+       u32                                     *tx_dma_buf;
+       struct dma_chan                         *chan_tx;
+       dma_addr_t                              tx_dma_phys;
+       struct completion                       cmd_tx_complete;
+       struct dma_async_tx_descriptor          *tx_desc;
+
+       u32                                     *rx_dma_buf;
+       struct dma_chan                         *chan_rx;
+       dma_addr_t                              rx_dma_phys;
+       struct completion                       cmd_rx_complete;
+       struct dma_async_tx_descriptor          *rx_desc;
 };
 
 struct fsl_dspi {
-       struct spi_master       *master;
-       struct platform_device  *pdev;
-
-       struct regmap           *regmap;
-       struct regmap           *regmap_pushr;
-       int                     irq;
-       struct clk              *clk;
-
-       struct spi_transfer     *cur_transfer;
-       struct spi_message      *cur_msg;
-       struct chip_data        *cur_chip;
-       size_t                  len;
-       const void              *tx;
-       void                    *rx;
-       void                    *rx_end;
-       u16                     void_write_data;
-       u16                     tx_cmd;
-       u8                      bits_per_word;
-       u8                      bytes_per_word;
-       const struct fsl_dspi_devtype_data *devtype_data;
-
-       wait_queue_head_t       waitq;
-       u32                     waitflags;
-
-       struct fsl_dspi_dma     *dma;
+       struct spi_controller                   *ctlr;
+       struct platform_device                  *pdev;
+
+       struct regmap                           *regmap;
+       struct regmap                           *regmap_pushr;
+       int                                     irq;
+       struct clk                              *clk;
+
+       struct spi_transfer                     *cur_transfer;
+       struct spi_message                      *cur_msg;
+       struct chip_data                        *cur_chip;
+       size_t                                  len;
+       const void                              *tx;
+       void                                    *rx;
+       void                                    *rx_end;
+       u16                                     void_write_data;
+       u16                                     tx_cmd;
+       u8                                      bits_per_word;
+       u8                                      bytes_per_word;
+       const struct fsl_dspi_devtype_data      *devtype_data;
+
+       wait_queue_head_t                       waitq;
+       u32                                     waitflags;
+
+       struct fsl_dspi_dma                     *dma;
 };
 
 static u32 dspi_pop_tx(struct fsl_dspi *dspi)
@@ -233,7 +219,7 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
 {
        u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
 
-       if (spi_controller_is_slave(dspi->master))
+       if (spi_controller_is_slave(dspi->ctlr))
                return data;
 
        if (dspi->len > 0)
@@ -246,7 +232,7 @@ static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
        if (!dspi->rx)
                return;
 
-       /* Mask of undefined bits */
+       /* Mask off undefined bits */
        rxdata &= (1 << dspi->bits_per_word) - 1;
 
        if (dspi->bytes_per_word == 1)
@@ -282,8 +268,8 @@ static void dspi_rx_dma_callback(void *arg)
 
 static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
 {
-       struct fsl_dspi_dma *dma = dspi->dma;
        struct device *dev = &dspi->pdev->dev;
+       struct fsl_dspi_dma *dma = dspi->dma;
        int time_left;
        int i;
 
@@ -332,13 +318,13 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
        dma_async_issue_pending(dma->chan_rx);
        dma_async_issue_pending(dma->chan_tx);
 
-       if (spi_controller_is_slave(dspi->master)) {
+       if (spi_controller_is_slave(dspi->ctlr)) {
                wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
                return 0;
        }
 
        time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
-                                       DMA_COMPLETION_TIMEOUT);
+                                               DMA_COMPLETION_TIMEOUT);
        if (time_left == 0) {
                dev_err(dev, "DMA tx timeout\n");
                dmaengine_terminate_all(dma->chan_tx);
@@ -347,7 +333,7 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
        }
 
        time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
-                                       DMA_COMPLETION_TIMEOUT);
+                                               DMA_COMPLETION_TIMEOUT);
        if (time_left == 0) {
                dev_err(dev, "DMA rx timeout\n");
                dmaengine_terminate_all(dma->chan_tx);
@@ -360,9 +346,9 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
 
 static int dspi_dma_xfer(struct fsl_dspi *dspi)
 {
-       struct fsl_dspi_dma *dma = dspi->dma;
-       struct device *dev = &dspi->pdev->dev;
        struct spi_message *message = dspi->cur_msg;
+       struct device *dev = &dspi->pdev->dev;
+       struct fsl_dspi_dma *dma = dspi->dma;
        int curr_remaining_bytes;
        int bytes_per_buffer;
        int ret = 0;
@@ -397,9 +383,9 @@ exit:
 
 static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
 {
-       struct fsl_dspi_dma *dma;
-       struct dma_slave_config cfg;
        struct device *dev = &dspi->pdev->dev;
+       struct dma_slave_config cfg;
+       struct fsl_dspi_dma *dma;
        int ret;
 
        dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
@@ -421,14 +407,14 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
        }
 
        dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
-                                       &dma->tx_dma_phys, GFP_KERNEL);
+                                            &dma->tx_dma_phys, GFP_KERNEL);
        if (!dma->tx_dma_buf) {
                ret = -ENOMEM;
                goto err_tx_dma_buf;
        }
 
        dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
-                                       &dma->rx_dma_phys, GFP_KERNEL);
+                                            &dma->rx_dma_phys, GFP_KERNEL);
        if (!dma->rx_dma_buf) {
                ret = -ENOMEM;
                goto err_rx_dma_buf;
@@ -485,30 +471,31 @@ static void dspi_release_dma(struct fsl_dspi *dspi)
        struct fsl_dspi_dma *dma = dspi->dma;
        struct device *dev = &dspi->pdev->dev;
 
-       if (dma) {
-               if (dma->chan_tx) {
-                       dma_unmap_single(dev, dma->tx_dma_phys,
-                                       DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
-                       dma_release_channel(dma->chan_tx);
-               }
+       if (!dma)
+               return;
 
-               if (dma->chan_rx) {
-                       dma_unmap_single(dev, dma->rx_dma_phys,
-                                       DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
-                       dma_release_channel(dma->chan_rx);
-               }
+       if (dma->chan_tx) {
+               dma_unmap_single(dev, dma->tx_dma_phys,
+                                DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
+               dma_release_channel(dma->chan_tx);
+       }
+
+       if (dma->chan_rx) {
+               dma_unmap_single(dev, dma->rx_dma_phys,
+                                DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
+               dma_release_channel(dma->chan_rx);
        }
 }
 
 static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
-               unsigned long clkrate)
+                          unsigned long clkrate)
 {
        /* Valid baud rate pre-scaler values */
        int pbr_tbl[4] = {2, 3, 5, 7};
        int brs[16] = { 2,      4,      6,      8,
-               16,     32,     64,     128,
-               256,    512,    1024,   2048,
-               4096,   8192,   16384,  32768 };
+                       16,     32,     64,     128,
+                       256,    512,    1024,   2048,
+                       4096,   8192,   16384,  32768 };
        int scale_needed, scale, minscale = INT_MAX;
        int i, j;
 
@@ -538,15 +525,15 @@ static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
 }
 
 static void ns_delay_scale(char *psc, char *sc, int delay_ns,
-               unsigned long clkrate)
+                          unsigned long clkrate)
 {
-       int pscale_tbl[4] = {1, 3, 5, 7};
        int scale_needed, scale, minscale = INT_MAX;
-       int i, j;
+       int pscale_tbl[4] = {1, 3, 5, 7};
        u32 remainder;
+       int i, j;
 
        scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
-                       &remainder);
+                                  &remainder);
        if (remainder)
                scale_needed++;
 
@@ -601,7 +588,7 @@ static void dspi_tcfq_write(struct fsl_dspi *dspi)
                 */
                u32 data = dspi_pop_tx(dspi);
 
-               if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE(1)) {
+               if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) {
                        /* LSB */
                        tx_fifo_write(dspi, data & 0xFFFF);
                        tx_fifo_write(dspi, data >> 16);
@@ -655,19 +642,90 @@ static void dspi_eoq_read(struct fsl_dspi *dspi)
 {
        int fifo_size = DSPI_FIFO_SIZE;
 
-       /* Read one FIFO entry at and push to rx buffer */
+       /* Read one FIFO entry and push to rx buffer */
        while ((dspi->rx < dspi->rx_end) && fifo_size--)
                dspi_push_rx(dspi, fifo_read(dspi));
 }
 
-static int dspi_transfer_one_message(struct spi_master *master,
-               struct spi_message *message)
+static int dspi_rxtx(struct fsl_dspi *dspi)
 {
-       struct fsl_dspi *dspi = spi_master_get_devdata(master);
+       struct spi_message *msg = dspi->cur_msg;
+       enum dspi_trans_mode trans_mode;
+       u16 spi_tcnt;
+       u32 spi_tcr;
+
+       /* Get transfer counter (in number of SPI transfers). It was
+        * reset to 0 when transfer(s) were started.
+        */
+       regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
+       spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
+       /* Update total number of bytes that were transferred */
+       msg->actual_length += spi_tcnt * dspi->bytes_per_word;
+
+       trans_mode = dspi->devtype_data->trans_mode;
+       if (trans_mode == DSPI_EOQ_MODE)
+               dspi_eoq_read(dspi);
+       else if (trans_mode == DSPI_TCFQ_MODE)
+               dspi_tcfq_read(dspi);
+
+       if (!dspi->len)
+               /* Success! */
+               return 0;
+
+       if (trans_mode == DSPI_EOQ_MODE)
+               dspi_eoq_write(dspi);
+       else if (trans_mode == DSPI_TCFQ_MODE)
+               dspi_tcfq_write(dspi);
+
+       return -EINPROGRESS;
+}
+
+static int dspi_poll(struct fsl_dspi *dspi)
+{
+       int tries = 1000;
+       u32 spi_sr;
+
+       do {
+               regmap_read(dspi->regmap, SPI_SR, &spi_sr);
+               regmap_write(dspi->regmap, SPI_SR, spi_sr);
+
+               if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF))
+                       break;
+       } while (--tries);
+
+       if (!tries)
+               return -ETIMEDOUT;
+
+       return dspi_rxtx(dspi);
+}
+
+static irqreturn_t dspi_interrupt(int irq, void *dev_id)
+{
+       struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
+       u32 spi_sr;
+
+       regmap_read(dspi->regmap, SPI_SR, &spi_sr);
+       regmap_write(dspi->regmap, SPI_SR, spi_sr);
+
+       if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)))
+               return IRQ_NONE;
+
+       if (dspi_rxtx(dspi) == 0) {
+               dspi->waitflags = 1;
+               wake_up_interruptible(&dspi->waitq);
+       }
+
+       return IRQ_HANDLED;
+}
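
dspi_rxtx() above is shared by the interrupt handler and the new polling path: it returns 0 once the message is finished and -EINPROGRESS while FIFO words remain, which is what lets dspi_poll() loop and dspi_interrupt() decide whether to wake the waiter. A stripped-down sketch of that convention, with hypothetical my_* names:

#include <linux/errno.h>

static int my_step(int *remaining)
{
	if (*remaining == 0)
		return 0;		/* transfer complete */

	(*remaining)--;			/* queue the next FIFO word here */
	return -EINPROGRESS;
}

static int my_wait_polled(int remaining)
{
	int status;

	do {
		status = my_step(&remaining);
	} while (status == -EINPROGRESS);

	return status;
}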
+
+static int dspi_transfer_one_message(struct spi_controller *ctlr,
+                                    struct spi_message *message)
+{
+       struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
        struct spi_device *spi = message->spi;
+       enum dspi_trans_mode trans_mode;
        struct spi_transfer *transfer;
        int status = 0;
-       enum dspi_trans_mode trans_mode;
 
        message->actual_length = 0;
 
@@ -677,7 +735,7 @@ static int dspi_transfer_one_message(struct spi_master *master,
                dspi->cur_chip = spi_get_ctldata(spi);
                /* Prepare command word for CMD FIFO */
                dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
-                       SPI_PUSHR_CMD_PCS(spi->chip_select);
+                              SPI_PUSHR_CMD_PCS(spi->chip_select);
                if (list_is_last(&dspi->cur_transfer->transfer_list,
                                 &dspi->cur_msg->transfers)) {
                        /* Leave PCS activated after last transfer when
@@ -718,8 +776,8 @@ static int dspi_transfer_one_message(struct spi_master *master,
                             SPI_FRAME_BITS(transfer->bits_per_word));
                if (dspi->devtype_data->xspi_mode)
                        regmap_write(dspi->regmap, SPI_CTARE(0),
-                                    SPI_FRAME_EBITS(transfer->bits_per_word)
-                                    SPI_CTARE_DTCP(1));
+                                    SPI_FRAME_EBITS(transfer->bits_per_word) |
+                                    SPI_CTARE_DTCP(1));
 
                trans_mode = dspi->devtype_data->trans_mode;
                switch (trans_mode) {
@@ -733,8 +791,8 @@ static int dspi_transfer_one_message(struct spi_master *master,
                        break;
                case DSPI_DMA_MODE:
                        regmap_write(dspi->regmap, SPI_RSER,
-                               SPI_RSER_TFFFE | SPI_RSER_TFFFD |
-                               SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+                                    SPI_RSER_TFFFE | SPI_RSER_TFFFD |
+                                    SPI_RSER_RFDFE | SPI_RSER_RFDFD);
                        status = dspi_dma_xfer(dspi);
                        break;
                default:
@@ -744,13 +802,18 @@ static int dspi_transfer_one_message(struct spi_master *master,
                        goto out;
                }
 
-               if (trans_mode != DSPI_DMA_MODE) {
-                       if (wait_event_interruptible(dspi->waitq,
-                                               dspi->waitflags))
-                               dev_err(&dspi->pdev->dev,
-                                       "wait transfer complete fail!\n");
+               if (!dspi->irq) {
+                       do {
+                               status = dspi_poll(dspi);
+                       } while (status == -EINPROGRESS);
+               } else if (trans_mode != DSPI_DMA_MODE) {
+                       status = wait_event_interruptible(dspi->waitq,
+                                                         dspi->waitflags);
                        dspi->waitflags = 0;
                }
+               if (status)
+                       dev_err(&dspi->pdev->dev,
+                               "Waiting for transfer to complete failed!\n");
 
                if (transfer->delay_usecs)
                        udelay(transfer->delay_usecs);
@@ -758,19 +821,19 @@ static int dspi_transfer_one_message(struct spi_master *master,
 
 out:
        message->status = status;
-       spi_finalize_current_message(master);
+       spi_finalize_current_message(ctlr);
 
        return status;
 }
 
 static int dspi_setup(struct spi_device *spi)
 {
-       struct chip_data *chip;
-       struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
-       struct fsl_dspi_platform_data *pdata;
-       u32 cs_sck_delay = 0, sck_cs_delay = 0;
+       struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
        unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
+       u32 cs_sck_delay = 0, sck_cs_delay = 0;
+       struct fsl_dspi_platform_data *pdata;
        unsigned char pasc = 0, asc = 0;
+       struct chip_data *chip;
        unsigned long clkrate;
 
        /* Only alloc on first setup */
@@ -785,10 +848,10 @@ static int dspi_setup(struct spi_device *spi)
 
        if (!pdata) {
                of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
-                               &cs_sck_delay);
+                                    &cs_sck_delay);
 
                of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
-                               &sck_cs_delay);
+                                    &sck_cs_delay);
        } else {
                cs_sck_delay = pdata->cs_sck_delay;
                sck_cs_delay = pdata->sck_cs_delay;
@@ -805,18 +868,22 @@ static int dspi_setup(struct spi_device *spi)
        /* Set After SCK delay scale values */
        ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);
 
-       chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
-               | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0);
-
-       if (!spi_controller_is_slave(dspi->master)) {
-               chip->ctar_val |= SPI_CTAR_LSBFE(spi->mode &
-                                                SPI_LSB_FIRST ? 1 : 0)
-                       | SPI_CTAR_PCSSCK(pcssck)
-                       | SPI_CTAR_CSSCK(cssck)
-                       | SPI_CTAR_PASC(pasc)
-                       | SPI_CTAR_ASC(asc)
-                       | SPI_CTAR_PBR(pbr)
-                       | SPI_CTAR_BR(br);
+       chip->ctar_val = 0;
+       if (spi->mode & SPI_CPOL)
+               chip->ctar_val |= SPI_CTAR_CPOL;
+       if (spi->mode & SPI_CPHA)
+               chip->ctar_val |= SPI_CTAR_CPHA;
+
+       if (!spi_controller_is_slave(dspi->ctlr)) {
+               chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
+                                 SPI_CTAR_CSSCK(cssck) |
+                                 SPI_CTAR_PASC(pasc) |
+                                 SPI_CTAR_ASC(asc) |
+                                 SPI_CTAR_PBR(pbr) |
+                                 SPI_CTAR_BR(br);
+
+               if (spi->mode & SPI_LSB_FIRST)
+                       chip->ctar_val |= SPI_CTAR_LSBFE;
        }
 
        spi_set_ctldata(spi, chip);
@@ -829,68 +896,11 @@ static void dspi_cleanup(struct spi_device *spi)
        struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
 
        dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
-                       spi->master->bus_num, spi->chip_select);
+               spi->controller->bus_num, spi->chip_select);
 
        kfree(chip);
 }
 
-static irqreturn_t dspi_interrupt(int irq, void *dev_id)
-{
-       struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
-       struct spi_message *msg = dspi->cur_msg;
-       enum dspi_trans_mode trans_mode;
-       u32 spi_sr, spi_tcr;
-       u16 spi_tcnt;
-
-       regmap_read(dspi->regmap, SPI_SR, &spi_sr);
-       regmap_write(dspi->regmap, SPI_SR, spi_sr);
-
-
-       if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) {
-               /* Get transfer counter (in number of SPI transfers). It was
-                * reset to 0 when transfer(s) were started.
-                */
-               regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
-               spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
-               /* Update total number of bytes that were transferred */
-               msg->actual_length += spi_tcnt * dspi->bytes_per_word;
-
-               trans_mode = dspi->devtype_data->trans_mode;
-               switch (trans_mode) {
-               case DSPI_EOQ_MODE:
-                       dspi_eoq_read(dspi);
-                       break;
-               case DSPI_TCFQ_MODE:
-                       dspi_tcfq_read(dspi);
-                       break;
-               default:
-                       dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
-                               trans_mode);
-                               return IRQ_HANDLED;
-               }
-
-               if (!dspi->len) {
-                       dspi->waitflags = 1;
-                       wake_up_interruptible(&dspi->waitq);
-               } else {
-                       switch (trans_mode) {
-                       case DSPI_EOQ_MODE:
-                               dspi_eoq_write(dspi);
-                               break;
-                       case DSPI_TCFQ_MODE:
-                               dspi_tcfq_write(dspi);
-                               break;
-                       default:
-                               dev_err(&dspi->pdev->dev,
-                                       "unsupported trans_mode %u\n",
-                                       trans_mode);
-                       }
-               }
-       }
-
-       return IRQ_HANDLED;
-}
-
 static const struct of_device_id fsl_dspi_dt_ids[] = {
        { .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
        { .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
@@ -902,10 +912,10 @@ MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
 #ifdef CONFIG_PM_SLEEP
 static int dspi_suspend(struct device *dev)
 {
-       struct spi_master *master = dev_get_drvdata(dev);
-       struct fsl_dspi *dspi = spi_master_get_devdata(master);
+       struct spi_controller *ctlr = dev_get_drvdata(dev);
+       struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
 
-       spi_master_suspend(master);
+       spi_controller_suspend(ctlr);
        clk_disable_unprepare(dspi->clk);
 
        pinctrl_pm_select_sleep_state(dev);
@@ -915,8 +925,8 @@ static int dspi_suspend(struct device *dev)
 
 static int dspi_resume(struct device *dev)
 {
-       struct spi_master *master = dev_get_drvdata(dev);
-       struct fsl_dspi *dspi = spi_master_get_devdata(master);
+       struct spi_controller *ctlr = dev_get_drvdata(dev);
+       struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
        int ret;
 
        pinctrl_pm_select_default_state(dev);
@@ -924,7 +934,7 @@ static int dspi_resume(struct device *dev)
        ret = clk_prepare_enable(dspi->clk);
        if (ret)
                return ret;
-       spi_master_resume(master);
+       spi_controller_resume(ctlr);
 
        return 0;
 }
@@ -939,16 +949,16 @@ static const struct regmap_range dspi_volatile_ranges[] = {
 };
 
 static const struct regmap_access_table dspi_volatile_table = {
-       .yes_ranges     = dspi_volatile_ranges,
-       .n_yes_ranges   = ARRAY_SIZE(dspi_volatile_ranges),
+       .yes_ranges     = dspi_volatile_ranges,
+       .n_yes_ranges   = ARRAY_SIZE(dspi_volatile_ranges),
 };
 
 static const struct regmap_config dspi_regmap_config = {
-       .reg_bits = 32,
-       .val_bits = 32,
-       .reg_stride = 4,
-       .max_register = 0x88,
-       .volatile_table = &dspi_volatile_table,
+       .reg_bits       = 32,
+       .val_bits       = 32,
+       .reg_stride     = 4,
+       .max_register   = 0x88,
+       .volatile_table = &dspi_volatile_table,
 };
 
 static const struct regmap_range dspi_xspi_volatile_ranges[] = {
@@ -959,33 +969,34 @@ static const struct regmap_range dspi_xspi_volatile_ranges[] = {
 };
 
 static const struct regmap_access_table dspi_xspi_volatile_table = {
-       .yes_ranges     = dspi_xspi_volatile_ranges,
-       .n_yes_ranges   = ARRAY_SIZE(dspi_xspi_volatile_ranges),
+       .yes_ranges     = dspi_xspi_volatile_ranges,
+       .n_yes_ranges   = ARRAY_SIZE(dspi_xspi_volatile_ranges),
 };
 
 static const struct regmap_config dspi_xspi_regmap_config[] = {
        {
-               .reg_bits = 32,
-               .val_bits = 32,
-               .reg_stride = 4,
-               .max_register = 0x13c,
-               .volatile_table = &dspi_xspi_volatile_table,
+               .reg_bits       = 32,
+               .val_bits       = 32,
+               .reg_stride     = 4,
+               .max_register   = 0x13c,
+               .volatile_table = &dspi_xspi_volatile_table,
        },
        {
-               .name = "pushr",
-               .reg_bits = 16,
-               .val_bits = 16,
-               .reg_stride = 2,
-               .max_register = 0x2,
+               .name           = "pushr",
+               .reg_bits       = 16,
+               .val_bits       = 16,
+               .reg_stride     = 2,
+               .max_register   = 0x2,
        },
 };
 
 static void dspi_init(struct fsl_dspi *dspi)
 {
-       unsigned int mcr = SPI_MCR_PCSIS |
-               (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0);
+       unsigned int mcr = SPI_MCR_PCSIS;
 
-       if (!spi_controller_is_slave(dspi->master))
+       if (dspi->devtype_data->xspi_mode)
+               mcr |= SPI_MCR_XSPI;
+       if (!spi_controller_is_slave(dspi->ctlr))
                mcr |= SPI_MCR_MASTER;
 
        regmap_write(dspi->regmap, SPI_MCR, mcr);
@@ -998,34 +1009,33 @@ static void dspi_init(struct fsl_dspi *dspi)
 static int dspi_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
-       struct spi_master *master;
+       const struct regmap_config *regmap_config;
+       struct fsl_dspi_platform_data *pdata;
+       struct spi_controller *ctlr;
+       int ret, cs_num, bus_num;
        struct fsl_dspi *dspi;
        struct resource *res;
-       const struct regmap_config *regmap_config;
        void __iomem *base;
-       struct fsl_dspi_platform_data *pdata;
-       int ret = 0, cs_num, bus_num;
 
-       master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
-       if (!master)
+       ctlr = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
+       if (!ctlr)
                return -ENOMEM;
 
-       dspi = spi_master_get_devdata(master);
+       dspi = spi_controller_get_devdata(ctlr);
        dspi->pdev = pdev;
-       dspi->master = master;
+       dspi->ctlr = ctlr;
 
-       master->transfer = NULL;
-       master->setup = dspi_setup;
-       master->transfer_one_message = dspi_transfer_one_message;
-       master->dev.of_node = pdev->dev.of_node;
+       ctlr->setup = dspi_setup;
+       ctlr->transfer_one_message = dspi_transfer_one_message;
+       ctlr->dev.of_node = pdev->dev.of_node;
 
-       master->cleanup = dspi_cleanup;
-       master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+       ctlr->cleanup = dspi_cleanup;
+       ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
 
        pdata = dev_get_platdata(&pdev->dev);
        if (pdata) {
-               master->num_chipselect = pdata->cs_num;
-               master->bus_num = pdata->bus_num;
+               ctlr->num_chipselect = pdata->cs_num;
+               ctlr->bus_num = pdata->bus_num;
 
                dspi->devtype_data = &coldfire_data;
        } else {
@@ -1033,38 +1043,38 @@ static int dspi_probe(struct platform_device *pdev)
                ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
                if (ret < 0) {
                        dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
-                       goto out_master_put;
+                       goto out_ctlr_put;
                }
-               master->num_chipselect = cs_num;
+               ctlr->num_chipselect = cs_num;
 
                ret = of_property_read_u32(np, "bus-num", &bus_num);
                if (ret < 0) {
                        dev_err(&pdev->dev, "can't get bus-num\n");
-                       goto out_master_put;
+                       goto out_ctlr_put;
                }
-               master->bus_num = bus_num;
+               ctlr->bus_num = bus_num;
 
                if (of_property_read_bool(np, "spi-slave"))
-                       master->slave = true;
+                       ctlr->slave = true;
 
                dspi->devtype_data = of_device_get_match_data(&pdev->dev);
                if (!dspi->devtype_data) {
                        dev_err(&pdev->dev, "can't get devtype_data\n");
                        ret = -EFAULT;
-                       goto out_master_put;
+                       goto out_ctlr_put;
                }
        }
 
        if (dspi->devtype_data->xspi_mode)
-               master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+               ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
        else
-               master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+               ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base)) {
                ret = PTR_ERR(base);
-               goto out_master_put;
+               goto out_ctlr_put;
        }
 
        if (dspi->devtype_data->xspi_mode)
@@ -1076,7 +1086,7 @@ static int dspi_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "failed to init regmap: %ld\n",
                                PTR_ERR(dspi->regmap));
                ret = PTR_ERR(dspi->regmap);
-               goto out_master_put;
+               goto out_ctlr_put;
        }
 
        if (dspi->devtype_data->xspi_mode) {
@@ -1088,7 +1098,7 @@ static int dspi_probe(struct platform_device *pdev)
                                "failed to init pushr regmap: %ld\n",
                                PTR_ERR(dspi->regmap_pushr));
                        ret = PTR_ERR(dspi->regmap_pushr);
-                       goto out_master_put;
+                       goto out_ctlr_put;
                }
        }
 
@@ -1096,18 +1106,20 @@ static int dspi_probe(struct platform_device *pdev)
        if (IS_ERR(dspi->clk)) {
                ret = PTR_ERR(dspi->clk);
                dev_err(&pdev->dev, "unable to get clock\n");
-               goto out_master_put;
+               goto out_ctlr_put;
        }
        ret = clk_prepare_enable(dspi->clk);
        if (ret)
-               goto out_master_put;
+               goto out_ctlr_put;
 
        dspi_init(dspi);
+
        dspi->irq = platform_get_irq(pdev, 0);
-       if (dspi->irq < 0) {
-               dev_err(&pdev->dev, "can't get platform irq\n");
-               ret = dspi->irq;
-               goto out_clk_put;
+       if (dspi->irq <= 0) {
+               dev_info(&pdev->dev,
+                        "can't get platform irq, using poll mode\n");
+               dspi->irq = 0;
+               goto poll_mode;
        }
 
        ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt,
@@ -1117,6 +1129,9 @@ static int dspi_probe(struct platform_device *pdev)
                goto out_clk_put;
        }
 
+       init_waitqueue_head(&dspi->waitq);
+
+poll_mode:
        if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
                ret = dspi_request_dma(dspi, res->start);
                if (ret < 0) {
@@ -1125,15 +1140,14 @@ static int dspi_probe(struct platform_device *pdev)
                }
        }
 
-       master->max_speed_hz =
+       ctlr->max_speed_hz =
                clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
 
-       init_waitqueue_head(&dspi->waitq);
-       platform_set_drvdata(pdev, master);
+       platform_set_drvdata(pdev, ctlr);
 
-       ret = spi_register_master(master);
+       ret = spi_register_controller(ctlr);
        if (ret != 0) {
-               dev_err(&pdev->dev, "Problem registering DSPI master\n");
+               dev_err(&pdev->dev, "Problem registering DSPI controller\n");
                goto out_clk_put;
        }
 
@@ -1141,32 +1155,32 @@ static int dspi_probe(struct platform_device *pdev)
 
 out_clk_put:
        clk_disable_unprepare(dspi->clk);
-out_master_put:
-       spi_master_put(master);
+out_ctlr_put:
+       spi_controller_put(ctlr);
 
        return ret;
 }
 
 static int dspi_remove(struct platform_device *pdev)
 {
-       struct spi_master *master = platform_get_drvdata(pdev);
-       struct fsl_dspi *dspi = spi_master_get_devdata(master);
+       struct spi_controller *ctlr = platform_get_drvdata(pdev);
+       struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
 
        /* Disconnect from the SPI framework */
        dspi_release_dma(dspi);
        clk_disable_unprepare(dspi->clk);
-       spi_unregister_master(dspi->master);
+       spi_unregister_controller(dspi->ctlr);
 
        return 0;
 }
 
 static struct platform_driver fsl_dspi_driver = {
-       .driver.name    = DRIVER_NAME,
-       .driver.of_match_table = fsl_dspi_dt_ids,
-       .driver.owner   = THIS_MODULE,
-       .driver.pm = &dspi_pm,
-       .probe          = dspi_probe,
-       .remove         = dspi_remove,
+       .driver.name            = DRIVER_NAME,
+       .driver.of_match_table  = fsl_dspi_dt_ids,
+       .driver.owner           = THIS_MODULE,
+       .driver.pm              = &dspi_pm,
+       .probe                  = dspi_probe,
+       .remove                 = dspi_remove,
 };
 module_platform_driver(fsl_dspi_driver);
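
The register-field macros earlier in this file were rewritten in terms of BIT() and GENMASK() from <linux/bits.h>. For reference, a few of the resulting expressions expand as follows (the EXAMPLE_* names are illustrative; the values match the definitions above):

#include <linux/bits.h>

#define EXAMPLE_MCR_MASTER	BIT(31)			/* 1UL << 31 */
#define EXAMPLE_PUSHR_CTAS	GENMASK(14, 12)		/* 0x7000 */
#define EXAMPLE_GET_TCNT(x)	(((x) & GENMASK(31, 16)) >> 16)	/* upper halfword */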
 
index 3576167283dc78473e483f901bb5056817f09b12..015a1abb6a84a760dbe0a985b49fabc3dc0e49fe 100644 (file)
@@ -91,9 +91,6 @@ static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
 
 struct mpc8xxx_spi_probe_info {
        struct fsl_spi_platform_data pdata;
-       int ngpios;
-       int *gpios;
-       bool *alow_flags;
        __be32 __iomem *immr_spi_cs;
 };
 
index 448c00e4065b86cad66e61c0db103f6f604bde80..c02e24c01136719799fb140c9a937cc011869a8f 100644 (file)
@@ -860,10 +860,8 @@ static int fsl_qspi_probe(struct platform_device *pdev)
 
        /* find the irq */
        ret = platform_get_irq(pdev, 0);
-       if (ret < 0) {
-               dev_err(dev, "failed to get the irq: %d\n", ret);
+       if (ret < 0)
                goto err_disable_clk;
-       }
 
        ret = devm_request_irq(dev, ret,
                        fsl_qspi_irq_handler, 0, pdev->name, q);
index 1d9b33aa1a3b53f8f4c8ff129afa8f15998c8af7..4b80ace1d137e9f39c722069c24fbc9faa919831 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/fsl_devices.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
@@ -28,7 +28,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <linux/of_gpio.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
@@ -481,32 +480,6 @@ static int fsl_spi_setup(struct spi_device *spi)
                return retval;
        }
 
-       if (mpc8xxx_spi->type == TYPE_GRLIB) {
-               if (gpio_is_valid(spi->cs_gpio)) {
-                       int desel;
-
-                       retval = gpio_request(spi->cs_gpio,
-                                             dev_name(&spi->dev));
-                       if (retval)
-                               return retval;
-
-                       desel = !(spi->mode & SPI_CS_HIGH);
-                       retval = gpio_direction_output(spi->cs_gpio, desel);
-                       if (retval) {
-                               gpio_free(spi->cs_gpio);
-                               return retval;
-                       }
-               } else if (spi->cs_gpio != -ENOENT) {
-                       if (spi->cs_gpio < 0)
-                               return spi->cs_gpio;
-                       return -EINVAL;
-               }
-               /* When spi->cs_gpio == -ENOENT, a hole in the phandle list
-                * indicates to use native chipselect if present, or allow for
-                * an always selected chip
-                */
-       }
-
        /* Initialize chipselect - might be active for SPI_CS_HIGH mode */
        fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
 
@@ -515,12 +488,8 @@ static int fsl_spi_setup(struct spi_device *spi)
 
 static void fsl_spi_cleanup(struct spi_device *spi)
 {
-       struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
        struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
 
-       if (mpc8xxx_spi->type == TYPE_GRLIB && gpio_is_valid(spi->cs_gpio))
-               gpio_free(spi->cs_gpio);
-
        kfree(cs);
        spi_set_ctldata(spi, NULL);
 }
@@ -586,8 +555,8 @@ static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
        u32 slvsel;
        u16 cs = spi->chip_select;
 
-       if (gpio_is_valid(spi->cs_gpio)) {
-               gpio_set_value(spi->cs_gpio, on);
+       if (spi->cs_gpiod) {
+               gpiod_set_value(spi->cs_gpiod, on);
        } else if (cs < mpc8xxx_spi->native_chipselects) {
                slvsel = mpc8xxx_spi_read_reg(&reg_base->slvsel);
                slvsel = on ? (slvsel | (1 << cs)) : (slvsel & ~(1 << cs));
@@ -718,139 +687,19 @@ err:
 
 static void fsl_spi_cs_control(struct spi_device *spi, bool on)
 {
-       struct device *dev = spi->dev.parent->parent;
-       struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
-       struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
-       u16 cs = spi->chip_select;
-
-       if (cs < pinfo->ngpios) {
-               int gpio = pinfo->gpios[cs];
-               bool alow = pinfo->alow_flags[cs];
-
-               gpio_set_value(gpio, on ^ alow);
+       if (spi->cs_gpiod) {
+               gpiod_set_value(spi->cs_gpiod, on);
        } else {
-               if (WARN_ON_ONCE(cs > pinfo->ngpios || !pinfo->immr_spi_cs))
+               struct device *dev = spi->dev.parent->parent;
+               struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+               struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
+
+               if (WARN_ON_ONCE(!pinfo->immr_spi_cs))
                        return;
                iowrite32be(on ? SPI_BOOT_SEL_BIT : 0, pinfo->immr_spi_cs);
        }
 }
 
-static int of_fsl_spi_get_chipselects(struct device *dev)
-{
-       struct device_node *np = dev->of_node;
-       struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
-       struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
-       bool spisel_boot = IS_ENABLED(CONFIG_FSL_SOC) &&
-               of_property_read_bool(np, "fsl,spisel_boot");
-       int ngpios;
-       int i = 0;
-       int ret;
-
-       ngpios = of_gpio_count(np);
-       ngpios = max(ngpios, 0);
-       if (ngpios == 0 && !spisel_boot) {
-               /*
-                * SPI w/o chip-select line. One SPI device is still permitted
-                * though.
-                */
-               pdata->max_chipselect = 1;
-               return 0;
-       }
-
-       pinfo->ngpios = ngpios;
-       pinfo->gpios = kmalloc_array(ngpios, sizeof(*pinfo->gpios),
-                                    GFP_KERNEL);
-       if (!pinfo->gpios)
-               return -ENOMEM;
-       memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));
-
-       pinfo->alow_flags = kcalloc(ngpios, sizeof(*pinfo->alow_flags),
-                                   GFP_KERNEL);
-       if (!pinfo->alow_flags) {
-               ret = -ENOMEM;
-               goto err_alloc_flags;
-       }
-
-       for (; i < ngpios; i++) {
-               int gpio;
-               enum of_gpio_flags flags;
-
-               gpio = of_get_gpio_flags(np, i, &flags);
-               if (!gpio_is_valid(gpio)) {
-                       dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
-                       ret = gpio;
-                       goto err_loop;
-               }
-
-               ret = gpio_request(gpio, dev_name(dev));
-               if (ret) {
-                       dev_err(dev, "can't request gpio #%d: %d\n", i, ret);
-                       goto err_loop;
-               }
-
-               pinfo->gpios[i] = gpio;
-               pinfo->alow_flags[i] = flags & OF_GPIO_ACTIVE_LOW;
-
-               ret = gpio_direction_output(pinfo->gpios[i],
-                                           pinfo->alow_flags[i]);
-               if (ret) {
-                       dev_err(dev,
-                               "can't set output direction for gpio #%d: %d\n",
-                               i, ret);
-                       goto err_loop;
-               }
-       }
-
-#if IS_ENABLED(CONFIG_FSL_SOC)
-       if (spisel_boot) {
-               pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
-               if (!pinfo->immr_spi_cs) {
-                       ret = -ENOMEM;
-                       i = ngpios - 1;
-                       goto err_loop;
-               }
-       }
-#endif
-
-       pdata->max_chipselect = ngpios + spisel_boot;
-       pdata->cs_control = fsl_spi_cs_control;
-
-       return 0;
-
-err_loop:
-       while (i >= 0) {
-               if (gpio_is_valid(pinfo->gpios[i]))
-                       gpio_free(pinfo->gpios[i]);
-               i--;
-       }
-
-       kfree(pinfo->alow_flags);
-       pinfo->alow_flags = NULL;
-err_alloc_flags:
-       kfree(pinfo->gpios);
-       pinfo->gpios = NULL;
-       return ret;
-}
-
-static int of_fsl_spi_free_chipselects(struct device *dev)
-{
-       struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
-       struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
-       int i;
-
-       if (!pinfo->gpios)
-               return 0;
-
-       for (i = 0; i < pdata->max_chipselect; i++) {
-               if (gpio_is_valid(pinfo->gpios[i]))
-                       gpio_free(pinfo->gpios[i]);
-       }
-
-       kfree(pinfo->gpios);
-       kfree(pinfo->alow_flags);
-       return 0;
-}
-
 static int of_fsl_spi_probe(struct platform_device *ofdev)
 {
        struct device *dev = &ofdev->dev;
@@ -866,9 +715,21 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
 
        type = fsl_spi_get_type(&ofdev->dev);
        if (type == TYPE_FSL) {
-               ret = of_fsl_spi_get_chipselects(dev);
-               if (ret)
-                       goto err;
+               struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+#if IS_ENABLED(CONFIG_FSL_SOC)
+               struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
+               bool spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
+
+               if (spisel_boot) {
+                       pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
+                       if (!pinfo->immr_spi_cs) {
+                               ret = -ENOMEM;
+                               goto err;
+                       }
+               }
+#endif
+
+               pdata->cs_control = fsl_spi_cs_control;
        }
 
        ret = of_address_to_resource(np, 0, &mem);
@@ -891,8 +752,6 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
 
 err:
        irq_dispose_mapping(irq);
-       if (type == TYPE_FSL)
-               of_fsl_spi_free_chipselects(dev);
        return ret;
 }
 
@@ -902,8 +761,6 @@ static int of_fsl_spi_remove(struct platform_device *ofdev)
        struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
 
        fsl_spi_cpm_free(mpc8xxx_spi);
-       if (mpc8xxx_spi->type == TYPE_FSL)
-               of_fsl_spi_free_chipselects(&ofdev->dev);
        return 0;
 }
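
The hunks above strip the driver's hand-rolled integer-GPIO chip-select bookkeeping (ngpios/gpios/alow_flags, of_get_gpio_flags(), gpio_request()): with descriptor-based chip selects the SPI core requests and releases the CS GPIOs, and the controller callback only toggles spi->cs_gpiod when one is present. A hedged sketch of that pattern (example_cs_control is an illustrative name):

	#include <linux/gpio/consumer.h>
	#include <linux/spi/spi.h>

	/*
	 * With GPIO descriptors the SPI core owns request/free of the
	 * chip-select line, so the controller callback only drives it.
	 */
	static void example_cs_control(struct spi_device *spi, bool on)
	{
		if (spi->cs_gpiod)
			gpiod_set_value(spi->cs_gpiod, on);
		/* otherwise fall back to the controller's native chip select */
	}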
 
index 5f0b0d5bfef48333fbf8fe36f3d2f2651e41bda0..6f3d64a1a2b3e2261d1dffeb17bd96babe4334b8 100644 (file)
@@ -534,18 +534,14 @@ static int spi_geni_probe(struct platform_device *pdev)
        int ret, irq;
        struct spi_master *spi;
        struct spi_geni_master *mas;
-       struct resource *res;
        void __iomem *base;
        struct clk *clk;
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(&pdev->dev, "Err getting IRQ %d\n", irq);
+       if (irq < 0)
                return irq;
-       }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       base = devm_ioremap_resource(&pdev->dev, res);
+       base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);
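
This spi-geni hunk, and many of the hunks that follow, replace the platform_get_resource() + devm_ioremap_resource() pair with devm_platform_ioremap_resource(), which looks up the MEM resource at the given index and maps it in one call. A hedged sketch of the conversion (example_map_regs is an illustrative name):

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_map_regs(struct platform_device *pdev,
				    void __iomem **base)
	{
		/* one call instead of platform_get_resource() + devm_ioremap_resource() */
		*base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(*base))
			return PTR_ERR(*base);

		return 0;
	}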
 
index 9eb82150666e88e98b83ba336123ce88267eab5b..1d3e23ec20a61fd926d29585fbd29d4d9d9d1593 100644 (file)
@@ -290,10 +290,7 @@ static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio)
                return PTR_ERR(spi_gpio->miso);
 
        spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
-       if (IS_ERR(spi_gpio->sck))
-               return PTR_ERR(spi_gpio->sck);
-
-       return 0;
+       return PTR_ERR_OR_ZERO(spi_gpio->sck);
 }
 
 #ifdef CONFIG_OF
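
The spi-gpio hunk above collapses the trailing IS_ERR()/PTR_ERR() tail into PTR_ERR_OR_ZERO(), which returns 0 for a valid pointer and the encoded errno otherwise. A one-line hedged sketch (example_check is an illustrative name):

	#include <linux/err.h>

	/* returns 0 when @ptr is a real pointer, the negative errno otherwise */
	static int example_check(const void *ptr)
	{
		return PTR_ERR_OR_ZERO(ptr);
	}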
index 8f01858c0ae62cf1240824d2d90f143b5de8ec81..9dfe8b04e6880cd355836732080bde209b01d266 100644 (file)
@@ -819,22 +819,16 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
        }
 
        rx_irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
-       if (rx_irq < 0) {
-               dev_err(dev, "failed to get %s\n", LTQ_SPI_RX_IRQ_NAME);
+       if (rx_irq < 0)
                return -ENXIO;
-       }
 
        tx_irq = platform_get_irq_byname(pdev, LTQ_SPI_TX_IRQ_NAME);
-       if (tx_irq < 0) {
-               dev_err(dev, "failed to get %s\n", LTQ_SPI_TX_IRQ_NAME);
+       if (tx_irq < 0)
                return -ENXIO;
-       }
 
        err_irq = platform_get_irq_byname(pdev, LTQ_SPI_ERR_IRQ_NAME);
-       if (err_irq < 0) {
-               dev_err(dev, "failed to get %s\n", LTQ_SPI_ERR_IRQ_NAME);
+       if (err_irq < 0)
                return -ENXIO;
-       }
 
        master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
        if (!master)
index f50779fd329cdef33010741bdeaebf2dd58b0e98..2d436541d6c2aa6bb86c9466495f7c03a92c73d7 100644 (file)
@@ -185,7 +185,6 @@ spi_lp8841_rtc_probe(struct platform_device *pdev)
        int                             ret;
        struct spi_master               *master;
        struct spi_lp8841_rtc           *data;
-       void                            *iomem;
 
        master = spi_alloc_master(&pdev->dev, sizeof(*data));
        if (!master)
@@ -207,8 +206,7 @@ spi_lp8841_rtc_probe(struct platform_device *pdev)
 
        data = spi_master_get_devdata(master);
 
-       iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       data->iomem = devm_ioremap_resource(&pdev->dev, iomem);
+       data->iomem = devm_platform_ioremap_resource(pdev, 0);
        ret = PTR_ERR_OR_ZERO(data->iomem);
        if (ret) {
                dev_err(&pdev->dev, "failed to get IO address\n");
index 7fe4488ace5790d93bb29f04d73fd81591fa225b..f3f10443f9e26f3fee9b67827d07758157f3a081 100644 (file)
@@ -503,7 +503,6 @@ static int meson_spicc_probe(struct platform_device *pdev)
 {
        struct spi_master *master;
        struct meson_spicc_device *spicc;
-       struct resource *res;
        int ret, irq, rate;
 
        master = spi_alloc_master(&pdev->dev, sizeof(*spicc));
@@ -517,8 +516,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
        spicc->pdev = pdev;
        platform_set_drvdata(pdev, spicc);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       spicc->base = devm_ioremap_resource(&pdev->dev, res);
+       spicc->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(spicc->base)) {
                dev_err(&pdev->dev, "io resource mapping failed\n");
                ret = PTR_ERR(spicc->base);
index f7fe9b13d122db67389c055d9909a84d46c9f9bd..c7b0399802913bc7a3949d973c32e8e2f2f416ff 100644 (file)
@@ -286,7 +286,6 @@ static int meson_spifc_probe(struct platform_device *pdev)
 {
        struct spi_master *master;
        struct meson_spifc *spifc;
-       struct resource *res;
        void __iomem *base;
        unsigned int rate;
        int ret = 0;
@@ -300,8 +299,7 @@ static int meson_spifc_probe(struct platform_device *pdev)
        spifc = spi_master_get_devdata(master);
        spifc->dev = &pdev->dev;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       base = devm_ioremap_resource(spifc->dev, res);
+       base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base)) {
                ret = PTR_ERR(base);
                goto out_err;
index 45d8a7048b6c684688a85318f55174ad7de658d1..6888a4dcff6de78699dc5d182100cc0714ddd5f1 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/platform_data/spi-mt65xx.h>
 #include <linux/pm_runtime.h>
 #include <linux/spi/spi.h>
+#include <linux/dma-mapping.h>
 
 #define SPI_CFG0_REG                      0x0000
 #define SPI_CFG1_REG                      0x0004
@@ -28,6 +29,8 @@
 #define SPI_STATUS0_REG                   0x001c
 #define SPI_PAD_SEL_REG                   0x0024
 #define SPI_CFG2_REG                      0x0028
+#define SPI_TX_SRC_REG_64                 0x002c
+#define SPI_RX_DST_REG_64                 0x0030
 
 #define SPI_CFG0_SCK_HIGH_OFFSET          0
 #define SPI_CFG0_SCK_LOW_OFFSET           8
 
 #define MTK_SPI_MAX_FIFO_SIZE 32U
 #define MTK_SPI_PACKET_SIZE 1024
+#define MTK_SPI_32BITS_MASK  (0xffffffff)
+
+#define DMA_ADDR_EXT_BITS (36)
+#define DMA_ADDR_DEF_BITS (32)
 
 struct mtk_spi_compatible {
        bool need_pad_sel;
@@ -80,6 +87,8 @@ struct mtk_spi_compatible {
        bool must_tx;
        /* some IC design adjust cfg register to enhance time accuracy */
        bool enhance_timing;
+       /* some IC support DMA addr extension */
+       bool dma_ext;
 };
 
 struct mtk_spi {
@@ -102,6 +111,13 @@ static const struct mtk_spi_compatible mt2712_compat = {
        .must_tx = true,
 };
 
+static const struct mtk_spi_compatible mt6765_compat = {
+       .need_pad_sel = true,
+       .must_tx = true,
+       .enhance_timing = true,
+       .dma_ext = true,
+};
+
 static const struct mtk_spi_compatible mt7622_compat = {
        .must_tx = true,
        .enhance_timing = true,
@@ -137,6 +153,9 @@ static const struct of_device_id mtk_spi_of_match[] = {
        { .compatible = "mediatek,mt6589-spi",
                .data = (void *)&mtk_common_compat,
        },
+       { .compatible = "mediatek,mt6765-spi",
+               .data = (void *)&mt6765_compat,
+       },
        { .compatible = "mediatek,mt7622-spi",
                .data = (void *)&mt7622_compat,
        },
@@ -371,10 +390,25 @@ static void mtk_spi_setup_dma_addr(struct spi_master *master,
 {
        struct mtk_spi *mdata = spi_master_get_devdata(master);
 
-       if (mdata->tx_sgl)
-               writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG);
-       if (mdata->rx_sgl)
-               writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG);
+       if (mdata->tx_sgl) {
+               writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
+                      mdata->base + SPI_TX_SRC_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+               if (mdata->dev_comp->dma_ext)
+                       writel((u32)(xfer->tx_dma >> 32),
+                              mdata->base + SPI_TX_SRC_REG_64);
+#endif
+       }
+
+       if (mdata->rx_sgl) {
+               writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
+                      mdata->base + SPI_RX_DST_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+               if (mdata->dev_comp->dma_ext)
+                       writel((u32)(xfer->rx_dma >> 32),
+                              mdata->base + SPI_RX_DST_REG_64);
+#endif
+       }
 }
 
 static int mtk_spi_fifo_transfer(struct spi_master *master,
@@ -586,7 +620,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
        struct mtk_spi *mdata;
        const struct of_device_id *of_id;
        struct resource *res;
-       int i, irq, ret;
+       int i, irq, ret, addr_bits;
 
        master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
        if (!master) {
@@ -664,7 +698,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
-               dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
                ret = irq;
                goto err_put_master;
        }
@@ -753,6 +786,15 @@ static int mtk_spi_probe(struct platform_device *pdev)
                }
        }
 
+       if (mdata->dev_comp->dma_ext)
+               addr_bits = DMA_ADDR_EXT_BITS;
+       else
+               addr_bits = DMA_ADDR_DEF_BITS;
+       ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(addr_bits));
+       if (ret)
+               dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
+                          addr_bits, ret);
+
        return 0;
 
 err_disable_runtime_pm:
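
The spi-mt65xx hunks above add 36-bit DMA addressing for parts whose compatible data sets dma_ext: the low 32 bits of a transfer address still go to the original source/destination registers, the upper bits go to the new *_REG_64 registers, and probe widens the device's DMA mask to match. A hedged sketch of the mask selection only (example_set_dma_mask and the EXAMPLE_* constants are illustrative; they mirror DMA_ADDR_EXT_BITS/DMA_ADDR_DEF_BITS above):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	#define EXAMPLE_DMA_ADDR_EXT_BITS	36
	#define EXAMPLE_DMA_ADDR_DEF_BITS	32

	static void example_set_dma_mask(struct device *dev, bool dma_ext)
	{
		int bits = dma_ext ? EXAMPLE_DMA_ADDR_EXT_BITS :
				     EXAMPLE_DMA_ADDR_DEF_BITS;

		/* a rejected mask is only reported; DMA keeps the default width */
		if (dma_set_mask(dev, DMA_BIT_MASK(bits)))
			dev_notice(dev, "dma_set_mask(%d) failed\n", bits);
	}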
index ff85982464d2e9623a8d5e7109473ebaea01dd88..2c3b7a2a1ec77a0f68733ec62ba1695abca6c5b7 100644 (file)
@@ -327,7 +327,6 @@ static int mt7621_spi_probe(struct platform_device *pdev)
        struct spi_controller *master;
        struct mt7621_spi *rs;
        void __iomem *base;
-       struct resource *r;
        int status = 0;
        struct clk *clk;
        int ret;
@@ -336,8 +335,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
        if (!match)
                return -EINVAL;
 
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       base = devm_ioremap_resource(&pdev->dev, r);
+       base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);
 
index 7bf53cfc25d6323681b356069aadc9f5b9750103..996c1c8a9c719e77372c0358025eaa7ed193d232 100644 (file)
@@ -532,7 +532,6 @@ static int mxs_spi_probe(struct platform_device *pdev)
        struct spi_master *master;
        struct mxs_spi *spi;
        struct mxs_ssp *ssp;
-       struct resource *iores;
        struct clk *clk;
        void __iomem *base;
        int devid, clk_freq;
@@ -545,12 +544,11 @@ static int mxs_spi_probe(struct platform_device *pdev)
         */
        const int clk_freq_default = 160000000;
 
-       iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq_err = platform_get_irq(pdev, 0);
        if (irq_err < 0)
                return irq_err;
 
-       base = devm_ioremap_resource(&pdev->dev, iores);
+       base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);
 
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
new file mode 100644 (file)
index 0000000..cb52fd8
--- /dev/null
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Nuvoton Technology corporation.
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/regmap.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/mfd/syscon.h>
+
+/* NPCM7xx GCR module */
+#define NPCM7XX_INTCR3_OFFSET          0x9C
+#define NPCM7XX_INTCR3_FIU_FIX         BIT(6)
+
+/* Flash Interface Unit (FIU) Registers */
+#define NPCM_FIU_DRD_CFG               0x00
+#define NPCM_FIU_DWR_CFG               0x04
+#define NPCM_FIU_UMA_CFG               0x08
+#define NPCM_FIU_UMA_CTS               0x0C
+#define NPCM_FIU_UMA_CMD               0x10
+#define NPCM_FIU_UMA_ADDR              0x14
+#define NPCM_FIU_PRT_CFG               0x18
+#define NPCM_FIU_UMA_DW0               0x20
+#define NPCM_FIU_UMA_DW1               0x24
+#define NPCM_FIU_UMA_DW2               0x28
+#define NPCM_FIU_UMA_DW3               0x2C
+#define NPCM_FIU_UMA_DR0               0x30
+#define NPCM_FIU_UMA_DR1               0x34
+#define NPCM_FIU_UMA_DR2               0x38
+#define NPCM_FIU_UMA_DR3               0x3C
+#define NPCM_FIU_MAX_REG_LIMIT         0x80
+
+/* FIU Direct Read Configuration Register */
+#define NPCM_FIU_DRD_CFG_LCK           BIT(31)
+#define NPCM_FIU_DRD_CFG_R_BURST       GENMASK(25, 24)
+#define NPCM_FIU_DRD_CFG_ADDSIZ                GENMASK(17, 16)
+#define NPCM_FIU_DRD_CFG_DBW           GENMASK(13, 12)
+#define NPCM_FIU_DRD_CFG_ACCTYPE       GENMASK(9, 8)
+#define NPCM_FIU_DRD_CFG_RDCMD         GENMASK(7, 0)
+#define NPCM_FIU_DRD_ADDSIZ_SHIFT      16
+#define NPCM_FIU_DRD_DBW_SHIFT         12
+#define NPCM_FIU_DRD_ACCTYPE_SHIFT     8
+
+/* FIU Direct Write Configuration Register */
+#define NPCM_FIU_DWR_CFG_LCK           BIT(31)
+#define NPCM_FIU_DWR_CFG_W_BURST       GENMASK(25, 24)
+#define NPCM_FIU_DWR_CFG_ADDSIZ                GENMASK(17, 16)
+#define NPCM_FIU_DWR_CFG_ABPCK         GENMASK(11, 10)
+#define NPCM_FIU_DWR_CFG_DBPCK         GENMASK(9, 8)
+#define NPCM_FIU_DWR_CFG_WRCMD         GENMASK(7, 0)
+#define NPCM_FIU_DWR_ADDSIZ_SHIFT      16
+#define NPCM_FIU_DWR_ABPCK_SHIFT       10
+#define NPCM_FIU_DWR_DBPCK_SHIFT       8
+
+/* FIU UMA Configuration Register */
+#define NPCM_FIU_UMA_CFG_LCK           BIT(31)
+#define NPCM_FIU_UMA_CFG_CMMLCK                BIT(30)
+#define NPCM_FIU_UMA_CFG_RDATSIZ       GENMASK(28, 24)
+#define NPCM_FIU_UMA_CFG_DBSIZ         GENMASK(23, 21)
+#define NPCM_FIU_UMA_CFG_WDATSIZ       GENMASK(20, 16)
+#define NPCM_FIU_UMA_CFG_ADDSIZ                GENMASK(13, 11)
+#define NPCM_FIU_UMA_CFG_CMDSIZ                BIT(10)
+#define NPCM_FIU_UMA_CFG_RDBPCK                GENMASK(9, 8)
+#define NPCM_FIU_UMA_CFG_DBPCK         GENMASK(7, 6)
+#define NPCM_FIU_UMA_CFG_WDBPCK                GENMASK(5, 4)
+#define NPCM_FIU_UMA_CFG_ADBPCK                GENMASK(3, 2)
+#define NPCM_FIU_UMA_CFG_CMBPCK                GENMASK(1, 0)
+#define NPCM_FIU_UMA_CFG_ADBPCK_SHIFT  2
+#define NPCM_FIU_UMA_CFG_WDBPCK_SHIFT  4
+#define NPCM_FIU_UMA_CFG_DBPCK_SHIFT   6
+#define NPCM_FIU_UMA_CFG_RDBPCK_SHIFT  8
+#define NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT  11
+#define NPCM_FIU_UMA_CFG_WDATSIZ_SHIFT 16
+#define NPCM_FIU_UMA_CFG_DBSIZ_SHIFT   21
+#define NPCM_FIU_UMA_CFG_RDATSIZ_SHIFT 24
+
+/* FIU UMA Control and Status Register */
+#define NPCM_FIU_UMA_CTS_RDYIE         BIT(25)
+#define NPCM_FIU_UMA_CTS_RDYST         BIT(24)
+#define NPCM_FIU_UMA_CTS_SW_CS         BIT(16)
+#define NPCM_FIU_UMA_CTS_DEV_NUM       GENMASK(9, 8)
+#define NPCM_FIU_UMA_CTS_EXEC_DONE     BIT(0)
+#define NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT 8
+
+/* FIU UMA Command Register */
+#define NPCM_FIU_UMA_CMD_DUM3          GENMASK(31, 24)
+#define NPCM_FIU_UMA_CMD_DUM2          GENMASK(23, 16)
+#define NPCM_FIU_UMA_CMD_DUM1          GENMASK(15, 8)
+#define NPCM_FIU_UMA_CMD_CMD           GENMASK(7, 0)
+
+/* FIU UMA Address Register */
+#define NPCM_FIU_UMA_ADDR_UMA_ADDR     GENMASK(31, 0)
+#define NPCM_FIU_UMA_ADDR_AB3          GENMASK(31, 24)
+#define NPCM_FIU_UMA_ADDR_AB2          GENMASK(23, 16)
+#define NPCM_FIU_UMA_ADDR_AB1          GENMASK(15, 8)
+#define NPCM_FIU_UMA_ADDR_AB0          GENMASK(7, 0)
+
+/* FIU UMA Write Data Bytes 0-3 Register */
+#define NPCM_FIU_UMA_DW0_WB3           GENMASK(31, 24)
+#define NPCM_FIU_UMA_DW0_WB2           GENMASK(23, 16)
+#define NPCM_FIU_UMA_DW0_WB1           GENMASK(15, 8)
+#define NPCM_FIU_UMA_DW0_WB0           GENMASK(7, 0)
+
+/* FIU UMA Write Data Bytes 4-7 Register */
+#define NPCM_FIU_UMA_DW1_WB7           GENMASK(31, 24)
+#define NPCM_FIU_UMA_DW1_WB6           GENMASK(23, 16)
+#define NPCM_FIU_UMA_DW1_WB5           GENMASK(15, 8)
+#define NPCM_FIU_UMA_DW1_WB4           GENMASK(7, 0)
+
+/* FIU UMA Write Data Bytes 8-11 Register */
+#define NPCM_FIU_UMA_DW2_WB11          GENMASK(31, 24)
+#define NPCM_FIU_UMA_DW2_WB10          GENMASK(23, 16)
+#define NPCM_FIU_UMA_DW2_WB9           GENMASK(15, 8)
+#define NPCM_FIU_UMA_DW2_WB8           GENMASK(7, 0)
+
+/* FIU UMA Write Data Bytes 12-15 Register */
+#define NPCM_FIU_UMA_DW3_WB15          GENMASK(31, 24)
+#define NPCM_FIU_UMA_DW3_WB14          GENMASK(23, 16)
+#define NPCM_FIU_UMA_DW3_WB13          GENMASK(15, 8)
+#define NPCM_FIU_UMA_DW3_WB12          GENMASK(7, 0)
+
+/* FIU UMA Read Data Bytes 0-3 Register */
+#define NPCM_FIU_UMA_DR0_RB3           GENMASK(31, 24)
+#define NPCM_FIU_UMA_DR0_RB2           GENMASK(23, 16)
+#define NPCM_FIU_UMA_DR0_RB1           GENMASK(15, 8)
+#define NPCM_FIU_UMA_DR0_RB0           GENMASK(7, 0)
+
+/* FIU UMA Read Data Bytes 4-7 Register */
+#define NPCM_FIU_UMA_DR1_RB15          GENMASK(31, 24)
+#define NPCM_FIU_UMA_DR1_RB14          GENMASK(23, 16)
+#define NPCM_FIU_UMA_DR1_RB13          GENMASK(15, 8)
+#define NPCM_FIU_UMA_DR1_RB12          GENMASK(7, 0)
+
+/* FIU UMA Read Data Bytes 8-11 Register */
+#define NPCM_FIU_UMA_DR2_RB15          GENMASK(31, 24)
+#define NPCM_FIU_UMA_DR2_RB14          GENMASK(23, 16)
+#define NPCM_FIU_UMA_DR2_RB13          GENMASK(15, 8)
+#define NPCM_FIU_UMA_DR2_RB12          GENMASK(7, 0)
+
+/* FIU UMA Read Data Bytes 12-15 Register */
+#define NPCM_FIU_UMA_DR3_RB15          GENMASK(31, 24)
+#define NPCM_FIU_UMA_DR3_RB14          GENMASK(23, 16)
+#define NPCM_FIU_UMA_DR3_RB13          GENMASK(15, 8)
+#define NPCM_FIU_UMA_DR3_RB12          GENMASK(7, 0)
+
+/* FIU Read Mode */
+enum {
+       DRD_SINGLE_WIRE_MODE    = 0,
+       DRD_DUAL_IO_MODE        = 1,
+       DRD_QUAD_IO_MODE        = 2,
+       DRD_SPI_X_MODE          = 3,
+};
+
+enum {
+       DWR_ABPCK_BIT_PER_CLK   = 0,
+       DWR_ABPCK_2_BIT_PER_CLK = 1,
+       DWR_ABPCK_4_BIT_PER_CLK = 2,
+};
+
+enum {
+       DWR_DBPCK_BIT_PER_CLK   = 0,
+       DWR_DBPCK_2_BIT_PER_CLK = 1,
+       DWR_DBPCK_4_BIT_PER_CLK = 2,
+};
+
+#define NPCM_FIU_DRD_16_BYTE_BURST     0x3000000
+#define NPCM_FIU_DWR_16_BYTE_BURST     0x3000000
+
+#define MAP_SIZE_128MB                 0x8000000
+#define MAP_SIZE_16MB                  0x1000000
+#define MAP_SIZE_8MB                   0x800000
+
+#define NUM_BITS_IN_BYTE               8
+#define FIU_DRD_MAX_DUMMY_NUMBER       3
+#define NPCM_MAX_CHIP_NUM              4
+#define CHUNK_SIZE                     16
+#define UMA_MICRO_SEC_TIMEOUT          150
+
+enum {
+       FIU0 = 0,
+       FIU3,
+       FIUX,
+};
+
+struct npcm_fiu_info {
+       char *name;
+       u32 fiu_id;
+       u32 max_map_size;
+       u32 max_cs;
+};
+
+struct fiu_data {
+       const struct npcm_fiu_info *npcm_fiu_data_info;
+       int fiu_max;
+};
+
+static const struct npcm_fiu_info npxm7xx_fiu_info[] = {
+       {.name = "FIU0", .fiu_id = FIU0,
+               .max_map_size = MAP_SIZE_128MB, .max_cs = 2},
+       {.name = "FIU3", .fiu_id = FIU3,
+               .max_map_size = MAP_SIZE_128MB, .max_cs = 4},
+       {.name = "FIUX", .fiu_id = FIUX,
+               .max_map_size = MAP_SIZE_16MB, .max_cs = 2} };
+
+static const struct fiu_data npxm7xx_fiu_data = {
+       .npcm_fiu_data_info = npxm7xx_fiu_info,
+       .fiu_max = 3,
+};
+
+struct npcm_fiu_spi;
+
+struct npcm_fiu_chip {
+       void __iomem *flash_region_mapped_ptr;
+       struct npcm_fiu_spi *fiu;
+       unsigned long clkrate;
+       u32 chipselect;
+};
+
+struct npcm_fiu_spi {
+       struct npcm_fiu_chip chip[NPCM_MAX_CHIP_NUM];
+       const struct npcm_fiu_info *info;
+       struct spi_mem_op drd_op;
+       struct resource *res_mem;
+       struct regmap *regmap;
+       unsigned long clkrate;
+       struct device *dev;
+       struct clk *clk;
+       bool spix_mode;
+};
+
+static const struct regmap_config npcm_mtd_regmap_config = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .max_register = NPCM_FIU_MAX_REG_LIMIT,
+};
+
+static void npcm_fiu_set_drd(struct npcm_fiu_spi *fiu,
+                            const struct spi_mem_op *op)
+{
+       regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+                          NPCM_FIU_DRD_CFG_ACCTYPE,
+                          ilog2(op->addr.buswidth) <<
+                          NPCM_FIU_DRD_ACCTYPE_SHIFT);
+       fiu->drd_op.addr.buswidth = op->addr.buswidth;
+       regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+                          NPCM_FIU_DRD_CFG_DBW,
+                          ((op->dummy.nbytes * ilog2(op->addr.buswidth))
+                           / NUM_BITS_IN_BYTE) << NPCM_FIU_DRD_DBW_SHIFT);
+       fiu->drd_op.dummy.nbytes = op->dummy.nbytes;
+       regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+                          NPCM_FIU_DRD_CFG_RDCMD, op->cmd.opcode);
+       fiu->drd_op.cmd.opcode = op->cmd.opcode;
+       regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+                          NPCM_FIU_DRD_CFG_ADDSIZ,
+                          (op->addr.nbytes - 3) << NPCM_FIU_DRD_ADDSIZ_SHIFT);
+       fiu->drd_op.addr.nbytes = op->addr.nbytes;
+}
+
+static ssize_t npcm_fiu_direct_read(struct spi_mem_dirmap_desc *desc,
+                                   u64 offs, size_t len, void *buf)
+{
+       struct npcm_fiu_spi *fiu =
+               spi_controller_get_devdata(desc->mem->spi->master);
+       struct npcm_fiu_chip *chip = &fiu->chip[desc->mem->spi->chip_select];
+       void __iomem *src = (void __iomem *)(chip->flash_region_mapped_ptr +
+                                            offs);
+       u8 *buf_rx = buf;
+       u32 i;
+
+       if (fiu->spix_mode) {
+               for (i = 0 ; i < len ; i++)
+                       *(buf_rx + i) = ioread8(src + i);
+       } else {
+               if (desc->info.op_tmpl.addr.buswidth != fiu->drd_op.addr.buswidth ||
+                   desc->info.op_tmpl.dummy.nbytes != fiu->drd_op.dummy.nbytes ||
+                   desc->info.op_tmpl.cmd.opcode != fiu->drd_op.cmd.opcode ||
+                   desc->info.op_tmpl.addr.nbytes != fiu->drd_op.addr.nbytes)
+                       npcm_fiu_set_drd(fiu, &desc->info.op_tmpl);
+
+               memcpy_fromio(buf_rx, src, len);
+       }
+
+       return len;
+}
+
+static ssize_t npcm_fiu_direct_write(struct spi_mem_dirmap_desc *desc,
+                                    u64 offs, size_t len, const void *buf)
+{
+       struct npcm_fiu_spi *fiu =
+               spi_controller_get_devdata(desc->mem->spi->master);
+       struct npcm_fiu_chip *chip = &fiu->chip[desc->mem->spi->chip_select];
+       void __iomem *dst = (void __iomem *)(chip->flash_region_mapped_ptr +
+                                            offs);
+       const u8 *buf_tx = buf;
+       u32 i;
+
+       if (fiu->spix_mode)
+               for (i = 0 ; i < len ; i++)
+                       iowrite8(*(buf_tx + i), dst + i);
+       else
+               memcpy_toio(dst, buf_tx, len);
+
+       return len;
+}
+
+static int npcm_fiu_uma_read(struct spi_mem *mem,
+                            const struct spi_mem_op *op, u32 addr,
+                             bool is_address_size, u8 *data, u32 data_size)
+{
+       struct npcm_fiu_spi *fiu =
+               spi_controller_get_devdata(mem->spi->master);
+       u32 uma_cfg = BIT(10);
+       u32 data_reg[4];
+       int ret;
+       u32 val;
+       u32 i;
+
+       regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+                          NPCM_FIU_UMA_CTS_DEV_NUM,
+                          (mem->spi->chip_select <<
+                           NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
+       regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CMD,
+                          NPCM_FIU_UMA_CMD_CMD, op->cmd.opcode);
+
+       if (is_address_size) {
+               uma_cfg |= ilog2(op->cmd.buswidth);
+               uma_cfg |= ilog2(op->addr.buswidth)
+                       << NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
+               uma_cfg |= ilog2(op->dummy.buswidth)
+                       << NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
+               uma_cfg |= ilog2(op->data.buswidth)
+                       << NPCM_FIU_UMA_CFG_RDBPCK_SHIFT;
+               uma_cfg |= op->dummy.nbytes << NPCM_FIU_UMA_CFG_DBSIZ_SHIFT;
+               uma_cfg |= op->addr.nbytes << NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT;
+               regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, addr);
+       } else {
+               regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, 0x0);
+       }
+
+       uma_cfg |= data_size << NPCM_FIU_UMA_CFG_RDATSIZ_SHIFT;
+       regmap_write(fiu->regmap, NPCM_FIU_UMA_CFG, uma_cfg);
+       regmap_write_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+                         NPCM_FIU_UMA_CTS_EXEC_DONE,
+                         NPCM_FIU_UMA_CTS_EXEC_DONE);
+       ret = regmap_read_poll_timeout(fiu->regmap, NPCM_FIU_UMA_CTS, val,
+                                      (!(val & NPCM_FIU_UMA_CTS_EXEC_DONE)), 0,
+                                      UMA_MICRO_SEC_TIMEOUT);
+       if (ret)
+               return ret;
+
+       if (data_size) {
+               for (i = 0; i < DIV_ROUND_UP(data_size, 4); i++)
+                       regmap_read(fiu->regmap, NPCM_FIU_UMA_DR0 + (i * 4),
+                                   &data_reg[i]);
+               memcpy(data, data_reg, data_size);
+       }
+
+       return 0;
+}
+
+static int npcm_fiu_uma_write(struct spi_mem *mem,
+                             const struct spi_mem_op *op, u8 cmd,
+                             bool is_address_size, u8 *data, u32 data_size)
+{
+       struct npcm_fiu_spi *fiu =
+               spi_controller_get_devdata(mem->spi->master);
+       u32 uma_cfg = BIT(10);
+       u32 data_reg[4] = {0};
+       u32 val;
+       u32 i;
+
+       regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+                          NPCM_FIU_UMA_CTS_DEV_NUM,
+                          (mem->spi->chip_select <<
+                           NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
+
+       regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CMD,
+                          NPCM_FIU_UMA_CMD_CMD, cmd);
+
+       if (data_size) {
+               memcpy(data_reg, data, data_size);
+               for (i = 0; i < DIV_ROUND_UP(data_size, 4); i++)
+                       regmap_write(fiu->regmap, NPCM_FIU_UMA_DW0 + (i * 4),
+                                    data_reg[i]);
+       }
+
+       if (is_address_size) {
+               uma_cfg |= ilog2(op->cmd.buswidth);
+               uma_cfg |= ilog2(op->addr.buswidth) <<
+                       NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
+               uma_cfg |= ilog2(op->data.buswidth) <<
+                       NPCM_FIU_UMA_CFG_WDBPCK_SHIFT;
+               uma_cfg |= op->addr.nbytes << NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT;
+               regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, op->addr.val);
+       } else {
+               regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, 0x0);
+       }
+
+       uma_cfg |= (data_size << NPCM_FIU_UMA_CFG_WDATSIZ_SHIFT);
+       regmap_write(fiu->regmap, NPCM_FIU_UMA_CFG, uma_cfg);
+
+       regmap_write_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+                         NPCM_FIU_UMA_CTS_EXEC_DONE,
+                         NPCM_FIU_UMA_CTS_EXEC_DONE);
+
+       return regmap_read_poll_timeout(fiu->regmap, NPCM_FIU_UMA_CTS, val,
+                                      (!(val & NPCM_FIU_UMA_CTS_EXEC_DONE)), 0,
+                                       UMA_MICRO_SEC_TIMEOUT);
+}
+
+static int npcm_fiu_manualwrite(struct spi_mem *mem,
+                               const struct spi_mem_op *op)
+{
+       struct npcm_fiu_spi *fiu =
+               spi_controller_get_devdata(mem->spi->master);
+       u8 *data = (u8 *)op->data.buf.out;
+       u32 num_data_chunks;
+       u32 remain_data;
+       u32 idx = 0;
+       int ret;
+
+       num_data_chunks  = op->data.nbytes / CHUNK_SIZE;
+       remain_data  = op->data.nbytes % CHUNK_SIZE;
+
+       regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+                          NPCM_FIU_UMA_CTS_DEV_NUM,
+                          (mem->spi->chip_select <<
+                           NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
+       regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+                          NPCM_FIU_UMA_CTS_SW_CS, 0);
+
+       ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, true, NULL, 0);
+       if (ret)
+               return ret;
+
+       /* Starting the data writing loop in multiples of 8 */
+       for (idx = 0; idx < num_data_chunks; ++idx) {
+               ret = npcm_fiu_uma_write(mem, op, data[0], false,
+                                        &data[1], CHUNK_SIZE - 1);
+               if (ret)
+                       return ret;
+
+               data += CHUNK_SIZE;
+       }
+
+       /* Handling chunk remains */
+       if (remain_data > 0) {
+               ret = npcm_fiu_uma_write(mem, op, data[0], false,
+                                        &data[1], remain_data - 1);
+               if (ret)
+                       return ret;
+       }
+
+       regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+                          NPCM_FIU_UMA_CTS_SW_CS, NPCM_FIU_UMA_CTS_SW_CS);
+
+       return 0;
+}
+
+static int npcm_fiu_read(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+       u8 *data = op->data.buf.in;
+       int i, readlen, currlen;
+       u8 *buf_ptr;
+       u32 addr;
+       int ret;
+
+       i = 0;
+       currlen = op->data.nbytes;
+
+       do {
+               addr = ((u32)op->addr.val + i);
+               if (currlen < 16)
+                       readlen = currlen;
+               else
+                       readlen = 16;
+
+               buf_ptr = data + i;
+               ret = npcm_fiu_uma_read(mem, op, addr, true, buf_ptr,
+                                       readlen);
+               if (ret)
+                       return ret;
+
+               i += readlen;
+               currlen -= 16;
+       } while (currlen > 0);
+
+       return 0;
+}
+
+static void npcm_fiux_set_direct_wr(struct npcm_fiu_spi *fiu)
+{
+       regmap_write(fiu->regmap, NPCM_FIU_DWR_CFG,
+                    NPCM_FIU_DWR_16_BYTE_BURST);
+       regmap_update_bits(fiu->regmap, NPCM_FIU_DWR_CFG,
+                          NPCM_FIU_DWR_CFG_ABPCK,
+                          DWR_ABPCK_4_BIT_PER_CLK << NPCM_FIU_DWR_ABPCK_SHIFT);
+       regmap_update_bits(fiu->regmap, NPCM_FIU_DWR_CFG,
+                          NPCM_FIU_DWR_CFG_DBPCK,
+                          DWR_DBPCK_4_BIT_PER_CLK << NPCM_FIU_DWR_DBPCK_SHIFT);
+}
+
+static void npcm_fiux_set_direct_rd(struct npcm_fiu_spi *fiu)
+{
+       u32 rx_dummy = 0;
+
+       regmap_write(fiu->regmap, NPCM_FIU_DRD_CFG,
+                    NPCM_FIU_DRD_16_BYTE_BURST);
+       regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+                          NPCM_FIU_DRD_CFG_ACCTYPE,
+                          DRD_SPI_X_MODE << NPCM_FIU_DRD_ACCTYPE_SHIFT);
+       regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+                          NPCM_FIU_DRD_CFG_DBW,
+                          rx_dummy << NPCM_FIU_DRD_DBW_SHIFT);
+}
+
+static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+       struct npcm_fiu_spi *fiu =
+               spi_controller_get_devdata(mem->spi->master);
+       struct npcm_fiu_chip *chip = &fiu->chip[mem->spi->chip_select];
+       int ret = 0;
+       u8 *buf;
+
+       dev_dbg(fiu->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
+               op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+               op->dummy.buswidth, op->data.buswidth, op->addr.val,
+               op->data.nbytes);
+
+       if (fiu->spix_mode || op->addr.nbytes > 4)
+               return -ENOTSUPP;
+
+       if (fiu->clkrate != chip->clkrate) {
+               ret = clk_set_rate(fiu->clk, chip->clkrate);
+               if (ret < 0)
+                       dev_warn(fiu->dev, "Failed setting %lu frequency, stay at %lu frequency\n",
+                                chip->clkrate, fiu->clkrate);
+               else
+                       fiu->clkrate = chip->clkrate;
+       }
+
+       if (op->data.dir == SPI_MEM_DATA_IN) {
+               if (!op->addr.nbytes) {
+                       buf = op->data.buf.in;
+                       ret = npcm_fiu_uma_read(mem, op, op->addr.val, false,
+                                               buf, op->data.nbytes);
+               } else {
+                       ret = npcm_fiu_read(mem, op);
+               }
+       } else  {
+               if (!op->addr.nbytes && !op->data.nbytes)
+                       ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
+                                                NULL, 0);
+               if (op->addr.nbytes && !op->data.nbytes) {
+                       int i;
+                       u8 buf_addr[4];
+                       u32 addr = op->addr.val;
+
+                       for (i = op->addr.nbytes - 1; i >= 0; i--) {
+                               buf_addr[i] = addr & 0xff;
+                               addr >>= 8;
+                       }
+                       ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
+                                                buf_addr, op->addr.nbytes);
+               }
+               if (!op->addr.nbytes && op->data.nbytes)
+                       ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
+                                                (u8 *)op->data.buf.out,
+                                                op->data.nbytes);
+               if (op->addr.nbytes && op->data.nbytes)
+                       ret = npcm_fiu_manualwrite(mem, op);
+       }
+
+       return ret;
+}
+
+static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+       struct npcm_fiu_spi *fiu =
+               spi_controller_get_devdata(desc->mem->spi->master);
+       struct npcm_fiu_chip *chip = &fiu->chip[desc->mem->spi->chip_select];
+       struct regmap *gcr_regmap;
+
+       if (!fiu->res_mem) {
+               dev_warn(fiu->dev, "Reserved memory not defined, direct read disabled\n");
+               desc->nodirmap = true;
+               return 0;
+       }
+
+       if (!fiu->spix_mode &&
+           desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT) {
+               desc->nodirmap = true;
+               return 0;
+       }
+
+       if (!chip->flash_region_mapped_ptr) {
+               chip->flash_region_mapped_ptr =
+                       devm_ioremap_nocache(fiu->dev, (fiu->res_mem->start +
+                                                       (fiu->info->max_map_size *
+                                                   desc->mem->spi->chip_select)),
+                                            (u32)desc->info.length);
+               if (!chip->flash_region_mapped_ptr) {
+                       dev_warn(fiu->dev, "Error mapping memory region, direct read disabled\n");
+                       desc->nodirmap = true;
+                       return 0;
+               }
+       }
+
+       if (of_device_is_compatible(fiu->dev->of_node, "nuvoton,npcm750-fiu")) {
+               gcr_regmap =
+                       syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+               if (IS_ERR(gcr_regmap)) {
+                       dev_warn(fiu->dev, "Didn't find nuvoton,npcm750-gcr, direct read disabled\n");
+                       desc->nodirmap = true;
+                       return 0;
+               }
+               regmap_update_bits(gcr_regmap, NPCM7XX_INTCR3_OFFSET,
+                                  NPCM7XX_INTCR3_FIU_FIX,
+                                  NPCM7XX_INTCR3_FIU_FIX);
+       }
+
+       if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN) {
+               if (!fiu->spix_mode)
+                       npcm_fiu_set_drd(fiu, &desc->info.op_tmpl);
+               else
+                       npcm_fiux_set_direct_rd(fiu);
+
+       } else {
+               npcm_fiux_set_direct_wr(fiu);
+       }
+
+       return 0;
+}
+
+static int npcm_fiu_setup(struct spi_device *spi)
+{
+       struct spi_controller *ctrl = spi->master;
+       struct npcm_fiu_spi *fiu = spi_controller_get_devdata(ctrl);
+       struct npcm_fiu_chip *chip;
+
+       chip = &fiu->chip[spi->chip_select];
+       chip->fiu = fiu;
+       chip->chipselect = spi->chip_select;
+       chip->clkrate = spi->max_speed_hz;
+
+       fiu->clkrate = clk_get_rate(fiu->clk);
+
+       return 0;
+}
+
+static const struct spi_controller_mem_ops npcm_fiu_mem_ops = {
+       .exec_op = npcm_fiu_exec_op,
+       .dirmap_create = npcm_fiu_dirmap_create,
+       .dirmap_read = npcm_fiu_direct_read,
+       .dirmap_write = npcm_fiu_direct_write,
+};
+
+static const struct of_device_id npcm_fiu_dt_ids[] = {
+       { .compatible = "nuvoton,npcm750-fiu", .data = &npxm7xx_fiu_data  },
+       { /* sentinel */ }
+};
+
+static int npcm_fiu_probe(struct platform_device *pdev)
+{
+       const struct fiu_data *fiu_data_match;
+       const struct of_device_id *match;
+       struct device *dev = &pdev->dev;
+       struct spi_controller *ctrl;
+       struct npcm_fiu_spi *fiu;
+       void __iomem *regbase;
+       struct resource *res;
+       int ret;
+       int id;
+
+       ctrl = spi_alloc_master(dev, sizeof(*fiu));
+       if (!ctrl)
+               return -ENOMEM;
+
+       fiu = spi_controller_get_devdata(ctrl);
+
+       match = of_match_device(npcm_fiu_dt_ids, dev);
+       if (!match || !match->data) {
+               dev_err(dev, "No compatible OF match\n");
+               return -ENODEV;
+       }
+
+       fiu_data_match = match->data;
+       id = of_alias_get_id(dev->of_node, "fiu");
+       if (id < 0 || id >= fiu_data_match->fiu_max) {
+               dev_err(dev, "Invalid platform device id: %d\n", id);
+               return -EINVAL;
+       }
+
+       fiu->info = &fiu_data_match->npcm_fiu_data_info[id];
+
+       platform_set_drvdata(pdev, fiu);
+       fiu->dev = dev;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+       regbase = devm_ioremap_resource(dev, res);
+       if (IS_ERR(regbase))
+               return PTR_ERR(regbase);
+
+       fiu->regmap = devm_regmap_init_mmio(dev, regbase,
+                                           &npcm_mtd_regmap_config);
+       if (IS_ERR(fiu->regmap)) {
+               dev_err(dev, "Failed to create regmap\n");
+               return PTR_ERR(fiu->regmap);
+       }
+
+       fiu->res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                                   "memory");
+       fiu->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(fiu->clk))
+               return PTR_ERR(fiu->clk);
+
+       fiu->spix_mode = of_property_read_bool(dev->of_node,
+                                              "nuvoton,spix-mode");
+
+       platform_set_drvdata(pdev, fiu);
+       clk_prepare_enable(fiu->clk);
+
+       ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
+               | SPI_TX_DUAL | SPI_TX_QUAD;
+       ctrl->setup = npcm_fiu_setup;
+       ctrl->bus_num = -1;
+       ctrl->mem_ops = &npcm_fiu_mem_ops;
+       ctrl->num_chipselect = fiu->info->max_cs;
+       ctrl->dev.of_node = dev->of_node;
+
+       ret = devm_spi_register_master(dev, ctrl);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int npcm_fiu_remove(struct platform_device *pdev)
+{
+       struct npcm_fiu_spi *fiu = platform_get_drvdata(pdev);
+
+       clk_disable_unprepare(fiu->clk);
+       return 0;
+}
+
+MODULE_DEVICE_TABLE(of, npcm_fiu_dt_ids);
+
+static struct platform_driver npcm_fiu_driver = {
+       .driver = {
+               .name   = "NPCM-FIU",
+               .bus    = &platform_bus_type,
+               .of_match_table = npcm_fiu_dt_ids,
+       },
+       .probe      = npcm_fiu_probe,
+       .remove     = npcm_fiu_remove,
+};
+module_platform_driver(npcm_fiu_driver);
+
+MODULE_DESCRIPTION("Nuvoton FLASH Interface Unit SPI Controller Driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
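
The new spi-npcm-fiu driver above is a spi-mem controller: clients describe an operation as a struct spi_mem_op and the core dispatches it to npcm_fiu_exec_op(), or to the dirmap hooks for direct-mapped reads and writes. As a hedged illustration of what such a caller looks like — not code from this series, with the JEDEC READ ID opcode 0x9f used purely as an example:

	#include <linux/spi/spi-mem.h>

	static int example_read_jedec_id(struct spi_mem *mem, u8 *id,
					 unsigned int len)
	{
		/* 1-byte command, no address, no dummy cycles, @len bytes read back */
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(len, id, 1));

		return spi_mem_exec_op(mem, &op);
	}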
index 734a2b9569596fea7117a1e6150cbedeb478381f..b191d57d1dc07f2a24ccc3fbde229e13c1fac700 100644 (file)
@@ -341,7 +341,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
 {
        struct npcm_pspi *priv;
        struct spi_master *master;
-       struct resource *res;
        unsigned long clk_hz;
        struct device_node *np = pdev->dev.of_node;
        int num_cs, i;
@@ -368,8 +367,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
        priv->is_save_param = false;
        priv->id = pdev->id;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       priv->base = devm_ioremap_resource(&pdev->dev, res);
+       priv->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv->base)) {
                ret = PTR_ERR(priv->base);
                goto out_master_put;
@@ -388,7 +386,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
-               dev_err(&pdev->dev, "failed to get IRQ\n");
                ret = irq;
                goto out_disable_clk;
        }
index 37e2034ad4d512a94994541f0ad41dfd1ad11b17..61400358f4be77eeb7fd72ac47945d40c3bda65d 100644 (file)
@@ -327,7 +327,6 @@ static int nuc900_spi_probe(struct platform_device *pdev)
 {
        struct nuc900_spi *hw;
        struct spi_master *master;
-       struct resource *res;
        int err = 0;
 
        master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi));
@@ -358,8 +357,7 @@ static int nuc900_spi_probe(struct platform_device *pdev)
        hw->bitbang.chipselect     = nuc900_spi_chipsel;
        hw->bitbang.txrx_bufs      = nuc900_spi_txrx;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       hw->regs = devm_ioremap_resource(&pdev->dev, res);
+       hw->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hw->regs)) {
                err = PTR_ERR(hw->regs);
                goto err_pdata;
@@ -367,7 +365,6 @@ static int nuc900_spi_probe(struct platform_device *pdev)
 
        hw->irq = platform_get_irq(pdev, 0);
        if (hw->irq < 0) {
-               dev_err(&pdev->dev, "No IRQ specified\n");
                err = -ENOENT;
                goto err_pdata;
        }
index 8894f98cc99c6ce36475218d6fc44292ba1ae4de..501b923f2c27ade9f060d0e368f5f2e0e771f808 100644 (file)
@@ -1007,10 +1007,8 @@ static int nxp_fspi_probe(struct platform_device *pdev)
 
        /* find the irq */
        ret = platform_get_irq(pdev, 0);
-       if (ret < 0) {
-               dev_err(dev, "failed to get the irq: %d\n", ret);
+       if (ret < 0)
                goto err_disable_clk;
-       }
 
        ret = devm_request_irq(dev, ret,
                        nxp_fspi_irq_handler, 0, pdev->name, f);
index bbc4ba66571f28419e9d4edef29752daa19ab010..e2331eb7b47a515ca12387782e417620461ab1af 100644 (file)
@@ -240,7 +240,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
        struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
        struct tiny_spi *hw;
        struct spi_master *master;
-       struct resource *res;
        unsigned int i;
        int err = -ENODEV;
 
@@ -264,8 +263,7 @@ static int tiny_spi_probe(struct platform_device *pdev)
        hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
 
        /* find and map our resources */
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       hw->base = devm_ioremap_resource(&pdev->dev, res);
+       hw->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hw->base)) {
                err = PTR_ERR(hw->base);
                goto exit;
index b635526ad4143912e5d2f1b03cac9c6c5bea6587..86ad17597f5fd8f0a58c827fa68fc05bda920fc9 100644 (file)
@@ -570,7 +570,6 @@ static int pic32_sqi_probe(struct platform_device *pdev)
 {
        struct spi_master *master;
        struct pic32_sqi *sqi;
-       struct resource *reg;
        int ret;
 
        master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
@@ -580,8 +579,7 @@ static int pic32_sqi_probe(struct platform_device *pdev)
        sqi = spi_master_get_devdata(master);
        sqi->master = master;
 
-       reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       sqi->regs = devm_ioremap_resource(&pdev->dev, reg);
+       sqi->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(sqi->regs)) {
                ret = PTR_ERR(sqi->regs);
                goto err_free_master;
@@ -590,7 +588,6 @@ static int pic32_sqi_probe(struct platform_device *pdev)
        /* irq */
        sqi->irq = platform_get_irq(pdev, 0);
        if (sqi->irq < 0) {
-               dev_err(&pdev->dev, "no irq found\n");
                ret = sqi->irq;
                goto err_free_master;
        }
index 10cebeaa1e6bc4dea20811332aa0686de819d5f9..69f517ec59c68b7156cd0efbe93229593cee7907 100644 (file)
@@ -711,22 +711,16 @@ static int pic32_spi_hw_probe(struct platform_device *pdev,
 
        /* get irq resources: err-irq, rx-irq, tx-irq */
        pic32s->fault_irq = platform_get_irq_byname(pdev, "fault");
-       if (pic32s->fault_irq < 0) {
-               dev_err(&pdev->dev, "fault-irq not found\n");
+       if (pic32s->fault_irq < 0)
                return pic32s->fault_irq;
-       }
 
        pic32s->rx_irq = platform_get_irq_byname(pdev, "rx");
-       if (pic32s->rx_irq < 0) {
-               dev_err(&pdev->dev, "rx-irq not found\n");
+       if (pic32s->rx_irq < 0)
                return pic32s->rx_irq;
-       }
 
        pic32s->tx_irq = platform_get_irq_byname(pdev, "tx");
-       if (pic32s->tx_irq < 0) {
-               dev_err(&pdev->dev, "tx-irq not found\n");
+       if (pic32s->tx_irq < 0)
                return pic32s->tx_irq;
-       }
 
        /* get clock */
        pic32s->clk = devm_clk_get(&pdev->dev, "mck0");
index e0f061139c8f5b97d6bed7e1846fadfe79290941..250fd60e1678211d718b3e3684bdc0b7f9c6f9fc 100644 (file)
@@ -424,7 +424,6 @@ static int qcom_qspi_probe(struct platform_device *pdev)
 {
        int ret;
        struct device *dev;
-       struct resource *res;
        struct spi_master *master;
        struct qcom_qspi *ctrl;
 
@@ -440,8 +439,7 @@ static int qcom_qspi_probe(struct platform_device *pdev)
 
        spin_lock_init(&ctrl->lock);
        ctrl->dev = dev;
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ctrl->base = devm_ioremap_resource(dev, res);
+       ctrl->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctrl->base)) {
                ret = PTR_ERR(ctrl->base);
                goto exit_probe_master_put;
@@ -454,10 +452,8 @@ static int qcom_qspi_probe(struct platform_device *pdev)
                goto exit_probe_master_put;
 
        ret = platform_get_irq(pdev, 0);
-       if (ret < 0) {
-               dev_err(dev, "Failed to get irq %d\n", ret);
+       if (ret < 0)
                goto exit_probe_master_put;
-       }
        ret = devm_request_irq(dev, ret, qcom_qspi_irq,
                        IRQF_TRIGGER_HIGH, dev_name(dev), ctrl);
        if (ret) {
index 51f03d977ad62351cc5aeedf614c27b073b2e30c..4c9620e0d18ccdbb1555c65eb4851c44ea1bb513 100644 (file)
@@ -135,12 +135,10 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
        struct spi_master *master;
        struct clk *ahb_clk;
        struct rb4xx_spi *rbspi;
-       struct resource *r;
        int err;
        void __iomem *spi_base;
 
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       spi_base = devm_ioremap_resource(&pdev->dev, r);
+       spi_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(spi_base))
                return PTR_ERR(spi_base);
 
index 48d8dff05a3ad881a908cf770bc05d70ee2d3e8e..2d6e37f25e2d835735964022f45413eabcd512aa 100644 (file)
@@ -487,7 +487,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
        struct s3c2410_spi_info *pdata;
        struct s3c24xx_spi *hw;
        struct spi_master *master;
-       struct resource *res;
        int err = 0;
 
        master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
@@ -536,8 +535,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
        dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
 
        /* find and map our resources */
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       hw->regs = devm_ioremap_resource(&pdev->dev, res);
+       hw->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hw->regs)) {
                err = PTR_ERR(hw->regs);
                goto err_no_pdata;
@@ -545,7 +543,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
 
        hw->irq = platform_get_irq(pdev, 0);
        if (hw->irq < 0) {
-               dev_err(&pdev->dev, "No IRQ specified\n");
                err = -ENOENT;
                goto err_no_pdata;
        }
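
This file also shows the second recurring conversion: the platform_get_resource() + devm_ioremap_resource() pair collapses into a single devm_platform_ioremap_resource() call for MEM resource 0. A hedged sketch of the replacement pattern (the probe function and its body are placeholders):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *regs;

	/*
	 * Equivalent to platform_get_resource(pdev, IORESOURCE_MEM, 0)
	 * followed by devm_ioremap_resource(); the core reports any
	 * failure and the mapping is released when the device is unbound.
	 */
	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* ... program the registers here ... */
	return 0;
}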
index b50bdbc27e5899cc906255f31f32df21446e012e..8f134735291f14c31541b486b252aa5da3b2f23f 100644 (file)
@@ -1297,7 +1297,6 @@ static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
 
 static int sh_msiof_spi_probe(struct platform_device *pdev)
 {
-       struct resource *r;
        struct spi_controller *ctlr;
        const struct sh_msiof_chipdata *chipdata;
        struct sh_msiof_spi_info *info;
@@ -1346,13 +1345,11 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
 
        i = platform_get_irq(pdev, 0);
        if (i < 0) {
-               dev_err(&pdev->dev, "cannot get IRQ\n");
                ret = i;
                goto err1;
        }
 
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       p->mapbase = devm_ioremap_resource(&pdev->dev, r);
+       p->mapbase = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(p->mapbase)) {
                ret = PTR_ERR(p->mapbase);
                goto err1;
index f1ee58208216ca632c8c115e820f4011d4193d8c..20bdae5fdf3b827669bf33c107748890c80164d7 100644 (file)
@@ -437,10 +437,8 @@ static int spi_sh_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(&pdev->dev, "platform_get_irq error: %d\n", irq);
+       if (irq < 0)
                return irq;
-       }
 
        master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
        if (master == NULL) {
index 93ec2c6cdbfd58c73c2ddc9f705629495333f518..35254bdc42c4890facdde5bbf683cd78c1a81f7a 100644 (file)
@@ -292,7 +292,6 @@ sifive_spi_transfer_one(struct spi_master *master, struct spi_device *device,
 static int sifive_spi_probe(struct platform_device *pdev)
 {
        struct sifive_spi *spi;
-       struct resource *res;
        int ret, irq, num_cs;
        u32 cs_bits, max_bits_per_word;
        struct spi_master *master;
@@ -307,8 +306,7 @@ static int sifive_spi_probe(struct platform_device *pdev)
        init_completion(&spi->done);
        platform_set_drvdata(pdev, master);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       spi->regs = devm_ioremap_resource(&pdev->dev, res);
+       spi->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(spi->regs)) {
                ret = PTR_ERR(spi->regs);
                goto put_master;
@@ -323,7 +321,6 @@ static int sifive_spi_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
-               dev_err(&pdev->dev, "Unable to find interrupt\n");
                ret = irq;
                goto put_master;
        }
index 71b882ab31b959c2b00fc490a057c88b5374689a..e1e6391915577914990afad2f9e7b9ca997478fc 100644 (file)
@@ -1070,7 +1070,6 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
 {
        struct sirfsoc_spi *sspi;
        struct spi_master *master;
-       struct resource *mem_res;
        const struct sirf_spi_comp_data *spi_comp_data;
        int irq;
        int ret;
@@ -1097,8 +1096,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
        sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
        sspi->dat_max_frm_len = spi_comp_data->dat_max_frm_len;
        sspi->fifo_size = spi_comp_data->fifo_size;
-       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
+       sspi->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(sspi->base)) {
                ret = PTR_ERR(sspi->base);
                goto free_master;
index d1075433f6a6f682057e930360ab92ce3686903a..61bc43b0fe5701215184e8d4a83bb81c79a0684e 100644 (file)
@@ -410,7 +410,6 @@ static int mtk_spi_slave_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
-               dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
                ret = irq;
                goto err_put_ctlr;
        }
index df5960bddfe6129afde034f70c363fcdca3f3c67..9a051286f120708b05590f50853442789927b0f0 100644 (file)
@@ -86,6 +86,7 @@
 #define BIT_WDG_EN                     BIT(2)
 
 /* Definition of PMIC reset status register */
+#define HWRST_STATUS_SECURITY          0x02
 #define HWRST_STATUS_RECOVERY          0x20
 #define HWRST_STATUS_NORMAL            0x40
 #define HWRST_STATUS_ALARM             0x50
@@ -97,6 +98,8 @@
 #define HWRST_STATUS_AUTODLOADER       0xa0
 #define HWRST_STATUS_IQMODE            0xb0
 #define HWRST_STATUS_SPRDISK           0xc0
+#define HWRST_STATUS_FACTORYTEST       0xe0
+#define HWRST_STATUS_WATCHDOG          0xf0
 
 /* Use default timeout 50 ms that converts to watchdog values */
 #define WDG_LOAD_VAL                   ((50 * 1000) / 32768)
@@ -162,14 +165,16 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
        int read_timeout = ADI_READ_TIMEOUT;
        unsigned long flags;
        u32 val, rd_addr;
-       int ret;
-
-       ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
-                                         ADI_HWSPINLOCK_TIMEOUT,
-                                         &flags);
-       if (ret) {
-               dev_err(sadi->dev, "get the hw lock failed\n");
-               return ret;
+       int ret = 0;
+
+       if (sadi->hwlock) {
+               ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+                                                 ADI_HWSPINLOCK_TIMEOUT,
+                                                 &flags);
+               if (ret) {
+                       dev_err(sadi->dev, "get the hw lock failed\n");
+                       return ret;
+               }
        }
 
        /*
@@ -216,7 +221,8 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
        *read_val = val & RD_VALUE_MASK;
 
 out:
-       hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+       if (sadi->hwlock)
+               hwspin_unlock_irqrestore(sadi->hwlock, &flags);
        return ret;
 }
 
@@ -227,12 +233,14 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
        unsigned long flags;
        int ret;
 
-       ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
-                                         ADI_HWSPINLOCK_TIMEOUT,
-                                         &flags);
-       if (ret) {
-               dev_err(sadi->dev, "get the hw lock failed\n");
-               return ret;
+       if (sadi->hwlock) {
+               ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+                                                 ADI_HWSPINLOCK_TIMEOUT,
+                                                 &flags);
+               if (ret) {
+                       dev_err(sadi->dev, "get the hw lock failed\n");
+                       return ret;
+               }
        }
 
        ret = sprd_adi_drain_fifo(sadi);
@@ -258,7 +266,8 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
        }
 
 out:
-       hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+       if (sadi->hwlock)
+               hwspin_unlock_irqrestore(sadi->hwlock, &flags);
        return ret;
 }
 
@@ -307,6 +316,18 @@ static int sprd_adi_transfer_one(struct spi_controller *ctlr,
        return 0;
 }
 
+static void sprd_adi_set_wdt_rst_mode(struct sprd_adi *sadi)
+{
+#ifdef CONFIG_SPRD_WATCHDOG
+       u32 val;
+
+       /* Set default watchdog reboot mode */
+       sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
+       val |= HWRST_STATUS_WATCHDOG;
+       sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
+#endif
+}
+
 static int sprd_adi_restart_handler(struct notifier_block *this,
                                    unsigned long mode, void *cmd)
 {
@@ -336,11 +357,16 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
                reboot_mode = HWRST_STATUS_IQMODE;
        else if (!strncmp(cmd, "sprdisk", 7))
                reboot_mode = HWRST_STATUS_SPRDISK;
+       else if (!strncmp(cmd, "tospanic", 8))
+               reboot_mode = HWRST_STATUS_SECURITY;
+       else if (!strncmp(cmd, "factorytest", 11))
+               reboot_mode = HWRST_STATUS_FACTORYTEST;
        else
                reboot_mode = HWRST_STATUS_NORMAL;
 
        /* Record the reboot mode */
        sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
+       val &= ~HWRST_STATUS_WATCHDOG;
        val |= reboot_mode;
        sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
 
@@ -380,9 +406,6 @@ static void sprd_adi_hw_init(struct sprd_adi *sadi)
        const __be32 *list;
        u32 tmp;
 
-       /* Address bits select default 12 bits */
-       writel_relaxed(0, sadi->base + REG_ADI_CTRL0);
-
        /* Set all channels as default priority */
        writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIL);
        writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIH);
@@ -459,19 +482,30 @@ static int sprd_adi_probe(struct platform_device *pdev)
        sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET;
        sadi->ctlr = ctlr;
        sadi->dev = &pdev->dev;
-       ret = of_hwspin_lock_get_id_byname(np, "adi");
-       if (ret < 0) {
-               dev_err(&pdev->dev, "can not get the hardware spinlock\n");
-               goto put_ctlr;
-       }
-
-       sadi->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
-       if (!sadi->hwlock) {
-               ret = -ENXIO;
-               goto put_ctlr;
+       ret = of_hwspin_lock_get_id(np, 0);
+       if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
+               sadi->hwlock =
+                       devm_hwspin_lock_request_specific(&pdev->dev, ret);
+               if (!sadi->hwlock) {
+                       ret = -ENXIO;
+                       goto put_ctlr;
+               }
+       } else {
+               switch (ret) {
+               case -ENOENT:
+                       dev_info(&pdev->dev, "no hardware spinlock supplied\n");
+                       break;
+               default:
+                       dev_err(&pdev->dev,
+                               "failed to find hwlock id, %d\n", ret);
+                       /* fall-through */
+               case -EPROBE_DEFER:
+                       goto put_ctlr;
+               }
        }
 
        sprd_adi_hw_init(sadi);
+       sprd_adi_set_wdt_rst_mode(sadi);
 
        ctlr->dev.of_node = pdev->dev.of_node;
        ctlr->bus_num = pdev->id;
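
The sprd-adi hunks make the hardware spinlock optional: -ENOENT from of_hwspin_lock_get_id() is tolerated with a dev_info(), and every lock/unlock site gains a NULL check so the controller still works on boards whose device tree supplies no hwlock. A condensed sketch of that guard pattern (struct layout and timeout value are illustrative, not the driver's):

#include <linux/hwspinlock.h>

#define FOO_HWLOCK_TIMEOUT_MS	5000	/* placeholder timeout */

struct foo_adi {
	struct device *dev;
	struct hwspinlock *hwlock;	/* NULL when DT provides no hwlock */
};

static int foo_adi_lock(struct foo_adi *adi, unsigned long *flags)
{
	if (!adi->hwlock)
		return 0;	/* nothing to take on hwlock-less boards */

	return hwspin_lock_timeout_irqsave(adi->hwlock,
					   FOO_HWLOCK_TIMEOUT_MS, flags);
}

static void foo_adi_unlock(struct foo_adi *adi, unsigned long *flags)
{
	if (adi->hwlock)
		hwspin_unlock_irqrestore(adi->hwlock, flags);
}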
index 1b7eebb72c078e6e1d143372470f3fbb363e3436..8c9021b7f7a981346bd4c01cc05fa96fee5f83a7 100644 (file)
@@ -843,10 +843,8 @@ static int sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss)
        int ret;
 
        ss->irq = platform_get_irq(pdev, 0);
-       if (ss->irq < 0) {
-               dev_err(&pdev->dev, "failed to get irq resource\n");
+       if (ss->irq < 0)
                return ss->irq;
-       }
 
        ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq,
                                0, pdev->name, ss);
index 840a6bf81336d50374b083cf62f8464054bdfb53..0c24c494f3865ff4ce2defd0eba1b46876a46c7c 100644 (file)
@@ -298,7 +298,6 @@ static int spi_st_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct spi_master *master;
-       struct resource *res;
        struct spi_st *spi_st;
        int irq, ret = 0;
        u32 var;
@@ -331,8 +330,7 @@ static int spi_st_probe(struct platform_device *pdev)
        init_completion(&spi_st->done);
 
        /* Get resources */
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       spi_st->base = devm_ioremap_resource(&pdev->dev, res);
+       spi_st->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(spi_st->base)) {
                ret = PTR_ERR(spi_st->base);
                goto clk_disable;
index 655e4afbfb2a185932dda13722ae9c8e13931054..9ac6f9fe13cf679f292c1ae2957917851bc5f34c 100644 (file)
@@ -570,11 +570,8 @@ static int stm32_qspi_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               if (irq != -EPROBE_DEFER)
-                       dev_err(dev, "IRQ error missing or invalid\n");
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
                               dev_name(dev), qspi);
index 5194bc07fd607870aedc2bc84493efff59d52a64..cbfac6596fad5ba7fbd77e0617015ce90e1a01f7 100644 (file)
@@ -428,7 +428,6 @@ static int sun4i_spi_probe(struct platform_device *pdev)
 {
        struct spi_master *master;
        struct sun4i_spi *sspi;
-       struct resource *res;
        int ret = 0, irq;
 
        master = spi_alloc_master(&pdev->dev, sizeof(struct sun4i_spi));
@@ -440,8 +439,7 @@ static int sun4i_spi_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, master);
        sspi = spi_master_get_devdata(master);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
+       sspi->base_addr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(sspi->base_addr)) {
                ret = PTR_ERR(sspi->base_addr);
                goto err_free_master;
@@ -449,7 +447,6 @@ static int sun4i_spi_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
-               dev_err(&pdev->dev, "No spi IRQ specified\n");
                ret = -ENXIO;
                goto err_free_master;
        }
index ee2bdaf5b856014524bf9bc72feaf72affae38d9..ec7967be9e2f5622fe1e8091e89c10e6d9399a17 100644 (file)
@@ -435,7 +435,6 @@ static int sun6i_spi_probe(struct platform_device *pdev)
 {
        struct spi_master *master;
        struct sun6i_spi *sspi;
-       struct resource *res;
        int ret = 0, irq;
 
        master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi));
@@ -447,8 +446,7 @@ static int sun6i_spi_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, master);
        sspi = spi_master_get_devdata(master);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
+       sspi->base_addr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(sspi->base_addr)) {
                ret = PTR_ERR(sspi->base_addr);
                goto err_free_master;
@@ -456,7 +454,6 @@ static int sun6i_spi_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
-               dev_err(&pdev->dev, "No spi IRQ specified\n");
                ret = -ENXIO;
                goto err_free_master;
        }
index f99abd85c50a62e6590b7ce194303b73afef0f91..ae17c99cce037ad1bf5ea1323690a22164a9cd15 100644 (file)
@@ -670,7 +670,6 @@ static int synquacer_spi_probe(struct platform_device *pdev)
 
        rx_irq = platform_get_irq(pdev, 0);
        if (rx_irq <= 0) {
-               dev_err(&pdev->dev, "get rx_irq failed (%d)\n", rx_irq);
                ret = rx_irq;
                goto put_spi;
        }
@@ -685,7 +684,6 @@ static int synquacer_spi_probe(struct platform_device *pdev)
 
        tx_irq = platform_get_irq(pdev, 1);
        if (tx_irq <= 0) {
-               dev_err(&pdev->dev, "get tx_irq failed (%d)\n", tx_irq);
                ret = tx_irq;
                goto put_spi;
        }
index cd714a4f52c64b6b72c282dea10562b302fc9a57..a841a7250d14bf4863384fbd4c5f2e77fefc2d6e 100644 (file)
@@ -419,7 +419,6 @@ static int tegra_sflash_probe(struct platform_device *pdev)
 {
        struct spi_master       *master;
        struct tegra_sflash_data        *tsd;
-       struct resource         *r;
        int ret;
        const struct of_device_id *match;
 
@@ -451,8 +450,7 @@ static int tegra_sflash_probe(struct platform_device *pdev)
                                 &master->max_speed_hz))
                master->max_speed_hz = 25000000; /* 25MHz */
 
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       tsd->base = devm_ioremap_resource(&pdev->dev, r);
+       tsd->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(tsd->base)) {
                ret = PTR_ERR(tsd->base);
                goto exit_free_master;
index 6ca6007024706e02a4868de73908087842cc819b..3cb65371ae3bd45873f573fef70de052dfca373b 100644 (file)
@@ -717,7 +717,6 @@ static int ti_qspi_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
-               dev_err(&pdev->dev, "no irq resource?\n");
                ret = irq;
                goto free_master;
        }
index b32c77df5d49cf8acd5c253d9f6965909383e834..47cde1864630e02b5e3c8d253bb636c698cebdb9 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -16,6 +17,7 @@
 #include <asm/unaligned.h>
 
 #define SSI_TIMEOUT_MS         2000
+#define SSI_POLL_TIMEOUT_US    200
 #define SSI_MAX_CLK_DIVIDER    254
 #define SSI_MIN_CLK_DIVIDER    4
 
@@ -214,6 +216,7 @@ static void uniphier_spi_setup_transfer(struct spi_device *spi,
        if (!priv->is_save_param || priv->mode != spi->mode) {
                uniphier_spi_set_mode(spi);
                priv->mode = spi->mode;
+               priv->is_save_param = false;
        }
 
        if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
@@ -226,8 +229,7 @@ static void uniphier_spi_setup_transfer(struct spi_device *spi,
                priv->speed_hz = t->speed_hz;
        }
 
-       if (!priv->is_save_param)
-               priv->is_save_param = true;
+       priv->is_save_param = true;
 
        /* reset FIFOs */
        val = SSI_FC_TXFFL | SSI_FC_RXFFL;
@@ -290,21 +292,23 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
 
 static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
 {
-       unsigned int tx_count;
+       unsigned int fifo_threshold, fill_bytes;
        u32 val;
 
-       tx_count = DIV_ROUND_UP(priv->tx_bytes,
+       fifo_threshold = DIV_ROUND_UP(priv->rx_bytes,
                                bytes_per_word(priv->bits_per_word));
-       tx_count = min(tx_count, SSI_FIFO_DEPTH);
+       fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
+
+       fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes);
 
        /* set fifo threshold */
        val = readl(priv->base + SSI_FC);
        val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
-       val |= FIELD_PREP(SSI_FC_TXFTH_MASK, tx_count);
-       val |= FIELD_PREP(SSI_FC_RXFTH_MASK, tx_count);
+       val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold);
+       val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold);
        writel(val, priv->base + SSI_FC);
 
-       while (tx_count--)
+       while (fill_bytes--)
                uniphier_spi_send(priv);
 }
 
@@ -323,20 +327,14 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
        writel(val, priv->base + SSI_FPS);
 }
 
-static int uniphier_spi_transfer_one(struct spi_master *master,
-                                    struct spi_device *spi,
-                                    struct spi_transfer *t)
+static int uniphier_spi_transfer_one_irq(struct spi_master *master,
+                                        struct spi_device *spi,
+                                        struct spi_transfer *t)
 {
        struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
        struct device *dev = master->dev.parent;
        unsigned long time_left;
 
-       /* Terminate and return success for 0 byte length transfer */
-       if (!t->len)
-               return 0;
-
-       uniphier_spi_setup_transfer(spi, t);
-
        reinit_completion(&priv->xfer_done);
 
        uniphier_spi_fill_tx_fifo(priv);
@@ -356,6 +354,59 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
        return priv->error;
 }
 
+static int uniphier_spi_transfer_one_poll(struct spi_master *master,
+                                         struct spi_device *spi,
+                                         struct spi_transfer *t)
+{
+       struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+       int loop = SSI_POLL_TIMEOUT_US * 10;
+
+       while (priv->tx_bytes) {
+               uniphier_spi_fill_tx_fifo(priv);
+
+               while ((priv->rx_bytes - priv->tx_bytes) > 0) {
+                       while (!(readl(priv->base + SSI_SR) & SSI_SR_RNE)
+                                                               && loop--)
+                               ndelay(100);
+
+                       if (loop == -1)
+                               goto irq_transfer;
+
+                       uniphier_spi_recv(priv);
+               }
+       }
+
+       return 0;
+
+irq_transfer:
+       return uniphier_spi_transfer_one_irq(master, spi, t);
+}
+
+static int uniphier_spi_transfer_one(struct spi_master *master,
+                                    struct spi_device *spi,
+                                    struct spi_transfer *t)
+{
+       struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+       unsigned long threshold;
+
+       /* Terminate and return success for 0 byte length transfer */
+       if (!t->len)
+               return 0;
+
+       uniphier_spi_setup_transfer(spi, t);
+
+       /*
+        * If the transfer operation will take longer than
+        * SSI_POLL_TIMEOUT_US, it should use irq.
+        */
+       threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz,
+                                       USEC_PER_SEC * BITS_PER_BYTE);
+       if (t->len > threshold)
+               return uniphier_spi_transfer_one_irq(master, spi, t);
+       else
+               return uniphier_spi_transfer_one_poll(master, spi, t);
+}
+
 static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
 {
        struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
@@ -419,7 +470,6 @@ static int uniphier_spi_probe(struct platform_device *pdev)
 {
        struct uniphier_spi_priv *priv;
        struct spi_master *master;
-       struct resource *res;
        unsigned long clk_rate;
        int irq;
        int ret;
@@ -434,8 +484,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
        priv->master = master;
        priv->is_save_param = false;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       priv->base = devm_ioremap_resource(&pdev->dev, res);
+       priv->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv->base)) {
                ret = PTR_ERR(priv->base);
                goto out_master_put;
@@ -454,7 +503,6 @@ static int uniphier_spi_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
-               dev_err(&pdev->dev, "failed to get IRQ\n");
                ret = irq;
                goto out_disable_clk;
        }
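
The new uniphier transfer_one() only polls when the whole transfer fits inside SSI_POLL_TIMEOUT_US; longer transfers take the IRQ path, and the polling loop itself falls back to uniphier_spi_transfer_one_irq() if the RX FIFO stays empty too long. As a worked example of the threshold: at 10 MHz, DIV_ROUND_UP(200 us * 10000000 Hz, 1000000 us/s * 8 bits) = 250, so transfers up to 250 bytes are polled. A standalone sketch of that arithmetic (helper name is hypothetical):

#include <linux/bits.h>		/* BITS_PER_BYTE */
#include <linux/kernel.h>	/* DIV_ROUND_UP */
#include <linux/time64.h>	/* USEC_PER_SEC */

#define SSI_POLL_TIMEOUT_US	200

/* Largest transfer, in bytes, that still fits the polling budget. */
static unsigned long foo_poll_threshold(u32 speed_hz)
{
	return DIV_ROUND_UP((unsigned long)SSI_POLL_TIMEOUT_US * speed_hz,
			    USEC_PER_SEC * BITS_PER_BYTE);
}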
index 1dc479fab98cd5795efd29c7dc12d8cada5ee23b..797ac0ea8fa37762e13a8e95de1701b6bed7fea5 100644 (file)
@@ -370,7 +370,6 @@ static int xlp_spi_probe(struct platform_device *pdev)
 {
        struct spi_master *master;
        struct xlp_spi_priv *xspi;
-       struct resource *res;
        struct clk *clk;
        int irq, err;
 
@@ -378,16 +377,13 @@ static int xlp_spi_probe(struct platform_device *pdev)
        if (!xspi)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       xspi->base = devm_ioremap_resource(&pdev->dev, res);
+       xspi->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(xspi->base))
                return PTR_ERR(xspi->base);
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(&pdev->dev, "no IRQ resource found: %d\n", irq);
+       if (irq < 0)
                return irq;
-       }
        err = devm_request_irq(&pdev->dev, irq, xlp_spi_interrupt, 0,
                        pdev->name, xspi);
        if (err) {
index c6bee67decb5012ed0abb97f717da80df04da2b8..5cf6993ddce57fa80450104be47351d769f54f23 100644 (file)
@@ -620,7 +620,6 @@ static int zynq_qspi_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct zynq_qspi *xqspi;
-       struct resource *res;
        u32 num_cs;
 
        ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
@@ -630,8 +629,7 @@ static int zynq_qspi_probe(struct platform_device *pdev)
        xqspi = spi_controller_get_devdata(ctlr);
        xqspi->dev = dev;
        platform_set_drvdata(pdev, xqspi);
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
+       xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(xqspi->regs)) {
                ret = PTR_ERR(xqspi->regs);
                goto remove_master;
@@ -671,7 +669,6 @@ static int zynq_qspi_probe(struct platform_device *pdev)
        xqspi->irq = platform_get_irq(pdev, 0);
        if (xqspi->irq <= 0) {
                ret = -ENXIO;
-               dev_err(&pdev->dev, "irq resource not found\n");
                goto remove_master;
        }
        ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
@@ -695,7 +692,7 @@ static int zynq_qspi_probe(struct platform_device *pdev)
        ctlr->setup = zynq_qspi_setup_op;
        ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
        ctlr->dev.of_node = np;
-       ret = spi_register_controller(ctlr);
+       ret = devm_spi_register_controller(&pdev->dev, ctlr);
        if (ret) {
                dev_err(&pdev->dev, "spi_register_master failed\n");
                goto clk_dis_all;
index 07a83ca164c22b07923716b727979cf491aa4ba0..60c4de4e448568f2a2b54ce1da5f21ea510401da 100644 (file)
@@ -1016,7 +1016,6 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
        int ret = 0;
        struct spi_master *master;
        struct zynqmp_qspi *xqspi;
-       struct resource *res;
        struct device *dev = &pdev->dev;
 
        eemi_ops = zynqmp_pm_get_eemi_ops();
@@ -1031,8 +1030,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
        master->dev.of_node = pdev->dev.of_node;
        platform_set_drvdata(pdev, master);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
+       xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(xqspi->regs)) {
                ret = PTR_ERR(xqspi->regs);
                goto remove_master;
@@ -1077,7 +1075,6 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
        xqspi->irq = platform_get_irq(pdev, 0);
        if (xqspi->irq <= 0) {
                ret = -ENXIO;
-               dev_err(dev, "irq resource not found\n");
                goto clk_dis_all;
        }
        ret = devm_request_irq(&pdev->dev, xqspi->irq, zynqmp_qspi_irq,
index 75ac046cae5267b712fef17dec0ba1f2d381838d..f8b4654a57d3950f9678e0b3df0de8ddbfe4bee5 100644 (file)
@@ -1265,8 +1265,9 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
  */
 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 {
-       unsigned long flags;
+       struct spi_message *msg;
        bool was_busy = false;
+       unsigned long flags;
        int ret;
 
        /* Lock queue */
@@ -1325,10 +1326,10 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
        }
 
        /* Extract head of queue */
-       ctlr->cur_msg =
-               list_first_entry(&ctlr->queue, struct spi_message, queue);
+       msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
+       ctlr->cur_msg = msg;
 
-       list_del_init(&ctlr->cur_msg->queue);
+       list_del_init(&msg->queue);
        if (ctlr->busy)
                was_busy = true;
        else
@@ -1361,7 +1362,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
                        if (ctlr->auto_runtime_pm)
                                pm_runtime_put(ctlr->dev.parent);
 
-                       ctlr->cur_msg->status = ret;
+                       msg->status = ret;
                        spi_finalize_current_message(ctlr);
 
                        mutex_unlock(&ctlr->io_mutex);
@@ -1369,28 +1370,28 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
                }
        }
 
-       trace_spi_message_start(ctlr->cur_msg);
+       trace_spi_message_start(msg);
 
        if (ctlr->prepare_message) {
-               ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
+               ret = ctlr->prepare_message(ctlr, msg);
                if (ret) {
                        dev_err(&ctlr->dev, "failed to prepare message: %d\n",
                                ret);
-                       ctlr->cur_msg->status = ret;
+                       msg->status = ret;
                        spi_finalize_current_message(ctlr);
                        goto out;
                }
                ctlr->cur_msg_prepared = true;
        }
 
-       ret = spi_map_msg(ctlr, ctlr->cur_msg);
+       ret = spi_map_msg(ctlr, msg);
        if (ret) {
-               ctlr->cur_msg->status = ret;
+               msg->status = ret;
                spi_finalize_current_message(ctlr);
                goto out;
        }
 
-       ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
+       ret = ctlr->transfer_one_message(ctlr, msg);
        if (ret) {
                dev_err(&ctlr->dev,
                        "failed to transfer one message from queue\n");
@@ -1434,7 +1435,7 @@ static void spi_pump_messages(struct kthread_work *work)
  */
 static void spi_set_thread_rt(struct spi_controller *ctlr)
 {
-       struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+       struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
 
        dev_info(&ctlr->dev,
                "will run message pump with realtime priority\n");
@@ -2105,8 +2106,8 @@ static int match_true(struct device *dev, void *data)
        return 1;
 }
 
-static ssize_t spi_slave_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
 {
        struct spi_controller *ctlr = container_of(dev, struct spi_controller,
                                                   dev);
@@ -2117,9 +2118,8 @@ static ssize_t spi_slave_show(struct device *dev,
                       child ? to_spi_device(child)->modalias : NULL);
 }
 
-static ssize_t spi_slave_store(struct device *dev,
-                              struct device_attribute *attr, const char *buf,
-                              size_t count)
+static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
 {
        struct spi_controller *ctlr = container_of(dev, struct spi_controller,
                                                   dev);
@@ -2157,7 +2157,7 @@ static ssize_t spi_slave_store(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
+static DEVICE_ATTR_RW(slave);
 
 static struct attribute *spi_slave_attrs[] = {
        &dev_attr_slave.attr,
@@ -2188,8 +2188,10 @@ extern struct class spi_slave_class;     /* dummy */
  * __spi_alloc_controller - allocate an SPI master or slave controller
  * @dev: the controller, possibly using the platform_bus
  * @size: how much zeroed driver-private data to allocate; the pointer to this
- *     memory is in the driver_data field of the returned device,
- *     accessible with spi_controller_get_devdata().
+ *     memory is in the driver_data field of the returned device, accessible
+ *     with spi_controller_get_devdata(); the memory is cacheline aligned;
+ *     drivers granting DMA access to portions of their private data need to
+ *     round up @size using ALIGN(size, dma_get_cache_alignment()).
  * @slave: flag indicating whether to allocate an SPI master (false) or SPI
  *     slave (true) controller
  * Context: can sleep
@@ -2211,11 +2213,12 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
                                              unsigned int size, bool slave)
 {
        struct spi_controller   *ctlr;
+       size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
 
        if (!dev)
                return NULL;
 
-       ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
+       ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
        if (!ctlr)
                return NULL;
 
@@ -2229,14 +2232,14 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
                ctlr->dev.class = &spi_master_class;
        ctlr->dev.parent = dev;
        pm_suspend_ignore_children(&ctlr->dev, true);
-       spi_controller_set_devdata(ctlr, &ctlr[1]);
+       spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
 
        return ctlr;
 }
 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
 
 #ifdef CONFIG_OF
-static int of_spi_register_master(struct spi_controller *ctlr)
+static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
 {
        int nb, i, *cs;
        struct device_node *np = ctlr->dev.of_node;
@@ -2269,7 +2272,7 @@ static int of_spi_register_master(struct spi_controller *ctlr)
        return 0;
 }
 #else
-static int of_spi_register_master(struct spi_controller *ctlr)
+static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
 {
        return 0;
 }
@@ -2456,7 +2459,7 @@ int spi_register_controller(struct spi_controller *ctlr)
                        ctlr->mode_bits |= SPI_CS_HIGH;
                } else {
                        /* Legacy code path for GPIOs from DT */
-                       status = of_spi_register_master(ctlr);
+                       status = of_spi_get_gpio_numbers(ctlr);
                        if (status)
                                return status;
                }
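
__spi_alloc_controller() now places the driver-private area at a cacheline-aligned offset behind struct spi_controller, and the updated kerneldoc asks drivers that let a DMA engine touch part of that area to round @size up themselves. A sketch of a caller following that advice (foo names are placeholders and the setup body is elided):

#include <linux/dma-mapping.h>	/* dma_get_cache_alignment() */
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct foo_priv {
	u8 rx_bounce[64];	/* buffer that may be handed to DMA */
};

static int foo_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	int ret;

	/*
	 * Round the private-data size up to a cacheline so the DMA'd
	 * buffer never shares a cacheline with the controller struct.
	 */
	ctlr = spi_alloc_master(&pdev->dev,
				ALIGN(sizeof(struct foo_priv),
				      dma_get_cache_alignment()));
	if (!ctlr)
		return -ENOMEM;

	/*
	 * spi_controller_get_devdata(ctlr) now returns cacheline-aligned
	 * memory; initialise it and ctlr here before registering.
	 */
	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret)
		spi_controller_put(ctlr);

	return ret;
}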
index 6a5ee8e6da10574ce9cb96e1bf082e8751a8aac7..67ad40b0a05b5bd6ff563e96e7ab8a0782c43807 100644 (file)
@@ -709,12 +709,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
        struct ci_hdrc    *ci = container_of(gadget, struct ci_hdrc, gadget);
        unsigned long flags;
 
-       spin_lock_irqsave(&ci->lock, flags);
-       ci->gadget.speed = USB_SPEED_UNKNOWN;
-       ci->remote_wakeup = 0;
-       ci->suspended = 0;
-       spin_unlock_irqrestore(&ci->lock, flags);
-
        /* flush all endpoints */
        gadget_for_each_ep(ep, gadget) {
                usb_ep_fifo_flush(ep);
@@ -732,6 +726,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
                ci->status = NULL;
        }
 
+       spin_lock_irqsave(&ci->lock, flags);
+       ci->gadget.speed = USB_SPEED_UNKNOWN;
+       ci->remote_wakeup = 0;
+       ci->suspended = 0;
+       spin_unlock_irqrestore(&ci->lock, flags);
+
        return 0;
 }
 
@@ -1303,6 +1303,10 @@ static int ep_disable(struct usb_ep *ep)
                return -EBUSY;
 
        spin_lock_irqsave(hwep->lock, flags);
+       if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+               spin_unlock_irqrestore(hwep->lock, flags);
+               return 0;
+       }
 
        /* only internal SW should disable ctrl endpts */
 
@@ -1392,6 +1396,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
                return -EINVAL;
 
        spin_lock_irqsave(hwep->lock, flags);
+       if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+               spin_unlock_irqrestore(hwep->lock, flags);
+               return 0;
+       }
        retval = _ep_queue(ep, req, gfp_flags);
        spin_unlock_irqrestore(hwep->lock, flags);
        return retval;
@@ -1415,8 +1423,8 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
                return -EINVAL;
 
        spin_lock_irqsave(hwep->lock, flags);
-
-       hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+       if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
+               hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
        list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
                dma_pool_free(hwep->td_pool, node->ptr, node->dma);
@@ -1487,6 +1495,10 @@ static void ep_fifo_flush(struct usb_ep *ep)
        }
 
        spin_lock_irqsave(hwep->lock, flags);
+       if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+               spin_unlock_irqrestore(hwep->lock, flags);
+               return;
+       }
 
        hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
@@ -1559,6 +1571,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget)
        int ret = 0;
 
        spin_lock_irqsave(&ci->lock, flags);
+       if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
+               spin_unlock_irqrestore(&ci->lock, flags);
+               return 0;
+       }
        if (!ci->remote_wakeup) {
                ret = -EOPNOTSUPP;
                goto out;
index a7824a51f86d2ee99c2da3410d719f3a6be20235..70afb2ca1eabde070ff163ce7dd96fce5da93f71 100644 (file)
@@ -587,10 +587,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
 {
        struct wdm_device *desc = file->private_data;
 
-       wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
+       wait_event(desc->wait,
+                       /*
+                        * needs both flags. We cannot do with one
+                        * because resetting it would cause a race
+                        * with write() yet we need to signal
+                        * a disconnect
+                        */
+                       !test_bit(WDM_IN_USE, &desc->flags) ||
+                       test_bit(WDM_DISCONNECTING, &desc->flags));
 
        /* cannot dereference desc->intf if WDM_DISCONNECTING */
-       if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
+       if (test_bit(WDM_DISCONNECTING, &desc->flags))
+               return -ENODEV;
+       if (desc->werr < 0)
                dev_err(&desc->intf->dev, "Error in flush path: %d\n",
                        desc->werr);
 
@@ -974,8 +984,6 @@ static void wdm_disconnect(struct usb_interface *intf)
        spin_lock_irqsave(&desc->iuspin, flags);
        set_bit(WDM_DISCONNECTING, &desc->flags);
        set_bit(WDM_READ, &desc->flags);
-       /* to terminate pending flushes */
-       clear_bit(WDM_IN_USE, &desc->flags);
        spin_unlock_irqrestore(&desc->iuspin, flags);
        wake_up_all(&desc->wait);
        mutex_lock(&desc->rlock);
index 4942122b2346d4804f2279fa8eb033296e1dd25b..36858ddd8d9bb2d7b383c9672a9c2c64ee82ecc4 100644 (file)
@@ -2362,8 +2362,11 @@ static int usbtmc_probe(struct usb_interface *intf,
                goto err_put;
        }
 
+       retcode = -EINVAL;
        data->bulk_in = bulk_in->bEndpointAddress;
        data->wMaxPacketSize = usb_endpoint_maxp(bulk_in);
+       if (!data->wMaxPacketSize)
+               goto err_put;
        dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in);
 
        data->bulk_out = bulk_out->bEndpointAddress;
index 03432467b05fb12810d7cac77d954a11ed0a002a..7537681355f6799c00b73fa6062565257e37d8e3 100644 (file)
@@ -216,17 +216,18 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                /* EHCI, OHCI */
                hcd->rsrc_start = pci_resource_start(dev, 0);
                hcd->rsrc_len = pci_resource_len(dev, 0);
-               if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
-                               driver->description)) {
+               if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start,
+                               hcd->rsrc_len, driver->description)) {
                        dev_dbg(&dev->dev, "controller already in use\n");
                        retval = -EBUSY;
                        goto put_hcd;
                }
-               hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+               hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
+                               hcd->rsrc_len);
                if (hcd->regs == NULL) {
                        dev_dbg(&dev->dev, "error mapping memory\n");
                        retval = -EFAULT;
-                       goto release_mem_region;
+                       goto put_hcd;
                }
 
        } else {
@@ -240,8 +241,8 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
                        hcd->rsrc_start = pci_resource_start(dev, region);
                        hcd->rsrc_len = pci_resource_len(dev, region);
-                       if (request_region(hcd->rsrc_start, hcd->rsrc_len,
-                                       driver->description))
+                       if (devm_request_region(&dev->dev, hcd->rsrc_start,
+                                       hcd->rsrc_len, driver->description))
                                break;
                }
                if (region == PCI_ROM_RESOURCE) {
@@ -275,20 +276,13 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
        }
 
        if (retval != 0)
-               goto unmap_registers;
+               goto put_hcd;
        device_wakeup_enable(hcd->self.controller);
 
        if (pci_dev_run_wake(dev))
                pm_runtime_put_noidle(&dev->dev);
        return retval;
 
-unmap_registers:
-       if (driver->flags & HCD_MEMORY) {
-               iounmap(hcd->regs);
-release_mem_region:
-               release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-       } else
-               release_region(hcd->rsrc_start, hcd->rsrc_len);
 put_hcd:
        usb_put_hcd(hcd);
 disable_pci:
@@ -347,14 +341,6 @@ void usb_hcd_pci_remove(struct pci_dev *dev)
                dev_set_drvdata(&dev->dev, NULL);
                up_read(&companions_rwsem);
        }
-
-       if (hcd->driver->flags & HCD_MEMORY) {
-               iounmap(hcd->regs);
-               release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-       } else {
-               release_region(hcd->rsrc_start, hcd->rsrc_len);
-       }
-
        usb_put_hcd(hcd);
        pci_disable_device(dev);
 }
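
usb_hcd_pci_probe() switches the BAR claim and mapping to the device-managed devm_* variants, which is why both the unmap_registers/release_mem_region unwind labels in probe and the matching teardown in usb_hcd_pci_remove() disappear. A generic sketch of that managed pattern, not tied to this file (devm_ioremap_nocache is the call the 5.4-era code uses):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/pci.h>

static void __iomem *foo_map_bar0(struct pci_dev *pdev, const char *name)
{
	resource_size_t start = pci_resource_start(pdev, 0);
	resource_size_t len = pci_resource_len(pdev, 0);
	void __iomem *regs;

	/*
	 * Managed region claim and mapping: both are released automatically
	 * when the PCI device is unbound, so no error-path or remove-path
	 * cleanup is required.
	 */
	if (!devm_request_mem_region(&pdev->dev, start, len, name))
		return ERR_PTR(-EBUSY);

	regs = devm_ioremap_nocache(&pdev->dev, start, len);
	if (!regs)
		return ERR_PTR(-EFAULT);

	return regs;
}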
index 5f1b14f3e5a07ba0282cbf0ec56535e1aef95608..bb6af6b5ac975ac54d2ecaf71704e8269a62faf4 100644 (file)
@@ -2265,7 +2265,7 @@ static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
                default:
                        break;
                }
-
+               break;
 
        case USB_REQ_SET_ADDRESS:
                if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
index b457fdaff29746a0e70794b06308289290b2baf9..1fe3deec35cf5495360f11bdc6a451f03abd650e 100644 (file)
@@ -419,8 +419,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
  * other cases where the next software may expect clean state from the
  * "firmware".  this is bus-neutral, unlike shutdown() methods.
  */
-static void
-ohci_shutdown (struct usb_hcd *hcd)
+static void _ohci_shutdown(struct usb_hcd *hcd)
 {
        struct ohci_hcd *ohci;
 
@@ -436,6 +435,16 @@ ohci_shutdown (struct usb_hcd *hcd)
        ohci->rh_state = OHCI_RH_HALTED;
 }
 
+static void ohci_shutdown(struct usb_hcd *hcd)
+{
+       struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ohci->lock, flags);
+       _ohci_shutdown(hcd);
+       spin_unlock_irqrestore(&ohci->lock, flags);
+}
+
 /*-------------------------------------------------------------------------*
  * HC functions
  *-------------------------------------------------------------------------*/
@@ -760,7 +769,7 @@ static void io_watchdog_func(struct timer_list *t)
  died:
                        usb_hc_died(ohci_to_hcd(ohci));
                        ohci_dump(ohci);
-                       ohci_shutdown(ohci_to_hcd(ohci));
+                       _ohci_shutdown(ohci_to_hcd(ohci));
                        goto done;
                } else {
                        /* No write back because the done queue was empty */
index 8616c52849c6d2a8f6ac30dd887be966b3cf9eab..2b0ccd150209fe3a1f55bb73d29a965fe1cd6f26 100644 (file)
@@ -104,7 +104,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
        return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
                of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
                of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
-               of_device_is_compatible(node, "renensas,rcar-gen2-xhci");
+               of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
 }
 
 static int xhci_rcar_is_gen3(struct device *dev)
index dafc65911fc02625ae49be7197cc6e164fc874f3..2ff7c911fbd0d694f8c937f36418bf0949676fd3 100644 (file)
@@ -1194,6 +1194,16 @@ static int tegra_xusb_probe(struct platform_device *pdev)
 
        tegra_xusb_config(tegra, regs);
 
+       /*
+        * The XUSB Falcon microcontroller can only address 40 bits, so set
+        * the DMA mask accordingly.
+        */
+       err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+               goto put_rpm;
+       }
+
        err = tegra_xusb_load_firmware(tegra);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
index cc794e25a0b6ed043149685eb1400492a977b2c3..1d9ce9cbc831d1035a6ad086b10f1f99e3188a63 100644 (file)
@@ -38,7 +38,7 @@ MODULE_LICENSE("GPL");
 
 static int auto_delink_en = 1;
 module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
+MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
 
 #ifdef CONFIG_REALTEK_AUTOPM
 static int ss_en = 1;
@@ -996,12 +996,15 @@ static int init_realtek_cr(struct us_data *us)
                        goto INIT_FAIL;
        }
 
-       if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
-           CHECK_FW_VER(chip, 0x5901))
-               SET_AUTO_DELINK(chip);
-       if (STATUS_LEN(chip) == 16) {
-               if (SUPPORT_AUTO_DELINK(chip))
+       if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
+           CHECK_PID(chip, 0x0159)) {
+               if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
+                               CHECK_FW_VER(chip, 0x5901))
                        SET_AUTO_DELINK(chip);
+               if (STATUS_LEN(chip) == 16) {
+                       if (SUPPORT_AUTO_DELINK(chip))
+                               SET_AUTO_DELINK(chip);
+               }
        }
 #ifdef CONFIG_REALTEK_AUTOPM
        if (ss_en)
index ea0d27a94afe058b3671ad6c66989ecbbdb98568..1cd9b6305b06042fecf75942c4f79af41747feea 100644 (file)
@@ -2100,7 +2100,7 @@ UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
                US_FL_IGNORE_RESIDUE ),
 
 /* Reported by Michael Büsch <m@bues.ch> */
-UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0116,
+UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0117,
                "JMicron",
                "USB to ATA/ATAPI Bridge",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
index 15abe1d9958fdb402f9025248b3836b995c3313b..bcfdb55fd1985e942eda1e5f8c95ae868104b208 100644 (file)
@@ -1446,7 +1446,7 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
                                else if ((pdo_min_voltage(pdo[i]) ==
                                          pdo_min_voltage(pdo[i - 1])) &&
                                         (pdo_max_voltage(pdo[i]) ==
-                                         pdo_min_voltage(pdo[i - 1])))
+                                         pdo_max_voltage(pdo[i - 1])))
                                        return PDO_ERR_DUPE_PDO;
                                break;
                        /*
index 054391f30fa836e013394c0f64917b6fa767ab8b..ad830abe10212a37b41f24fb23be7f0e54d1f24b 100644 (file)
@@ -650,12 +650,13 @@ unpin_exit:
 }
 
 static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
-                               struct list_head *regions)
+                           struct list_head *regions,
+                           struct iommu_iotlb_gather *iotlb_gather)
 {
        long unlocked = 0;
        struct vfio_regions *entry, *next;
 
-       iommu_tlb_sync(domain->domain);
+       iommu_tlb_sync(domain->domain, iotlb_gather);
 
        list_for_each_entry_safe(entry, next, regions, list) {
                unlocked += vfio_unpin_pages_remote(dma,
@@ -685,18 +686,19 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
                               struct vfio_dma *dma, dma_addr_t *iova,
                               size_t len, phys_addr_t phys, long *unlocked,
                               struct list_head *unmapped_list,
-                              int *unmapped_cnt)
+                              int *unmapped_cnt,
+                              struct iommu_iotlb_gather *iotlb_gather)
 {
        size_t unmapped = 0;
        struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 
        if (entry) {
-               unmapped = iommu_unmap_fast(domain->domain, *iova, len);
+               unmapped = iommu_unmap_fast(domain->domain, *iova, len,
+                                           iotlb_gather);
 
                if (!unmapped) {
                        kfree(entry);
                } else {
-                       iommu_tlb_range_add(domain->domain, *iova, unmapped);
                        entry->iova = *iova;
                        entry->phys = phys;
                        entry->len  = unmapped;
@@ -712,8 +714,8 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
         * or in case of errors.
         */
        if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
-               *unlocked += vfio_sync_unpin(dma, domain,
-                                            unmapped_list);
+               *unlocked += vfio_sync_unpin(dma, domain, unmapped_list,
+                                            iotlb_gather);
                *unmapped_cnt = 0;
        }
 
@@ -744,6 +746,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
        dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
        struct vfio_domain *domain, *d;
        LIST_HEAD(unmapped_region_list);
+       struct iommu_iotlb_gather iotlb_gather;
        int unmapped_region_cnt = 0;
        long unlocked = 0;
 
@@ -768,6 +771,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
                cond_resched();
        }
 
+       iommu_iotlb_gather_init(&iotlb_gather);
        while (iova < end) {
                size_t unmapped, len;
                phys_addr_t phys, next;
@@ -796,7 +800,8 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
                 */
                unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
                                            &unlocked, &unmapped_region_list,
-                                           &unmapped_region_cnt);
+                                           &unmapped_region_cnt,
+                                           &iotlb_gather);
                if (!unmapped) {
                        unmapped = unmap_unpin_slow(domain, dma, &iova, len,
                                                    phys, &unlocked);
@@ -807,8 +812,10 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 
        dma->iommu_mapped = false;
 
-       if (unmapped_region_cnt)
-               unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list);
+       if (unmapped_region_cnt) {
+               unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list,
+                                           &iotlb_gather);
+       }
 
        if (do_accounting) {
                vfio_lock_acct(dma, -unlocked, true);
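
The vfio_iommu_type1 changes track pending TLB invalidations in an on-stack struct iommu_iotlb_gather, passing it through iommu_unmap_fast() and flushing once in iommu_tlb_sync(), which replaces the old iommu_tlb_range_add() call. A reduced sketch of the gather/sync flow (hypothetical helper; the real code also unpins pages between unmap and sync):

#include <linux/iommu.h>
#include <linux/mm.h>	/* PAGE_SIZE */

/* Unmap [iova, iova + size) page by page, flushing the IOTLB once. */
static size_t foo_unmap_batched(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather gather;
	size_t unmapped = 0;

	iommu_iotlb_gather_init(&gather);

	while (unmapped < size) {
		size_t len = iommu_unmap_fast(domain, iova + unmapped,
					      PAGE_SIZE, &gather);
		if (!len)
			break;	/* nothing mapped at this address */
		unmapped += len;
	}

	/* One sync covers everything accumulated in the gather. */
	iommu_tlb_sync(domain, &gather);

	return unmapped;
}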
index 9e90e969af5550254a8d2b68e58b1fc6c3033427..7804869c6a31398ffbcc9cd1c5786b76f1f54274 100644 (file)
  * Using this limit prevents one virtqueue from starving others. */
 #define VHOST_TEST_WEIGHT 0x80000
 
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * pkts.
+ */
+#define VHOST_TEST_PKT_WEIGHT 256
+
 enum {
        VHOST_TEST_VQ = 0,
        VHOST_TEST_VQ_MAX = 1,
@@ -80,10 +86,8 @@ static void handle_vq(struct vhost_test *n)
                }
                vhost_add_used_and_signal(&n->dev, vq, head, 0);
                total_len += len;
-               if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
-                       vhost_poll_queue(&vq->poll);
+               if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
                        break;
-               }
        }
 
        mutex_unlock(&vq->mutex);
@@ -115,7 +119,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
        dev = &n->dev;
        vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
        n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
-       vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
+       vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
+                      VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
 
        f->private_data = n;
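
vhost_test now uses the common weight accounting: vhost_dev_init() takes the iov limit plus a packet weight and a byte weight, and the handler loop calls vhost_exceeds_weight() (which requeues the poll work itself) instead of open-coding the VHOST_TEST_WEIGHT check. A trimmed sketch of the loop shape, assuming drivers/vhost/vhost.h is in scope (descriptor handling elided):

/* Kernel-internal sketch; requires drivers/vhost/vhost.h. */
static void foo_handle_vq(struct vhost_virtqueue *vq)
{
	size_t total_len = 0;
	int pkts = 0;

	for (;;) {
		size_t len = 0;

		/* ... pop a descriptor, process it, set len;
		 *     break out when the ring is empty (elided) ... */

		total_len += len;
		/*
		 * vhost_exceeds_weight() returns true and queues the poll
		 * work again once either weight handed to vhost_dev_init()
		 * is exceeded, so other virtqueues get a turn.
		 */
		if (unlikely(vhost_exceeds_weight(vq, ++pkts, total_len)))
			break;
	}
}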
 
index 0536f85263599722b45fd9eaeb05da88085ea9c1..36ca2cf419bfebf475021d995640f383b154ee5e 100644 (file)
@@ -203,7 +203,6 @@ EXPORT_SYMBOL_GPL(vhost_poll_init);
 int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 {
        __poll_t mask;
-       int ret = 0;
 
        if (poll->wqh)
                return 0;
@@ -213,10 +212,10 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
                vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
        if (mask & EPOLLERR) {
                vhost_poll_stop(poll);
-               ret = -EINVAL;
+               return -EINVAL;
        }
 
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(vhost_poll_start);
 
@@ -298,160 +297,6 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
                __vhost_vq_meta_reset(d->vqs[i]);
 }
 
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-static void vhost_map_unprefetch(struct vhost_map *map)
-{
-       kfree(map->pages);
-       map->pages = NULL;
-       map->npages = 0;
-       map->addr = NULL;
-}
-
-static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
-{
-       struct vhost_map *map[VHOST_NUM_ADDRS];
-       int i;
-
-       spin_lock(&vq->mmu_lock);
-       for (i = 0; i < VHOST_NUM_ADDRS; i++) {
-               map[i] = rcu_dereference_protected(vq->maps[i],
-                                 lockdep_is_held(&vq->mmu_lock));
-               if (map[i])
-                       rcu_assign_pointer(vq->maps[i], NULL);
-       }
-       spin_unlock(&vq->mmu_lock);
-
-       synchronize_rcu();
-
-       for (i = 0; i < VHOST_NUM_ADDRS; i++)
-               if (map[i])
-                       vhost_map_unprefetch(map[i]);
-
-}
-
-static void vhost_reset_vq_maps(struct vhost_virtqueue *vq)
-{
-       int i;
-
-       vhost_uninit_vq_maps(vq);
-       for (i = 0; i < VHOST_NUM_ADDRS; i++)
-               vq->uaddrs[i].size = 0;
-}
-
-static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr,
-                                    unsigned long start,
-                                    unsigned long end)
-{
-       if (unlikely(!uaddr->size))
-               return false;
-
-       return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
-}
-
-static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
-                                     int index,
-                                     unsigned long start,
-                                     unsigned long end)
-{
-       struct vhost_uaddr *uaddr = &vq->uaddrs[index];
-       struct vhost_map *map;
-       int i;
-
-       if (!vhost_map_range_overlap(uaddr, start, end))
-               return;
-
-       spin_lock(&vq->mmu_lock);
-       ++vq->invalidate_count;
-
-       map = rcu_dereference_protected(vq->maps[index],
-                                       lockdep_is_held(&vq->mmu_lock));
-       if (map) {
-               if (uaddr->write) {
-                       for (i = 0; i < map->npages; i++)
-                               set_page_dirty(map->pages[i]);
-               }
-               rcu_assign_pointer(vq->maps[index], NULL);
-       }
-       spin_unlock(&vq->mmu_lock);
-
-       if (map) {
-               synchronize_rcu();
-               vhost_map_unprefetch(map);
-       }
-}
-
-static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
-                                   int index,
-                                   unsigned long start,
-                                   unsigned long end)
-{
-       if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
-               return;
-
-       spin_lock(&vq->mmu_lock);
-       --vq->invalidate_count;
-       spin_unlock(&vq->mmu_lock);
-}
-
-static int vhost_invalidate_range_start(struct mmu_notifier *mn,
-                                       const struct mmu_notifier_range *range)
-{
-       struct vhost_dev *dev = container_of(mn, struct vhost_dev,
-                                            mmu_notifier);
-       int i, j;
-
-       if (!mmu_notifier_range_blockable(range))
-               return -EAGAIN;
-
-       for (i = 0; i < dev->nvqs; i++) {
-               struct vhost_virtqueue *vq = dev->vqs[i];
-
-               for (j = 0; j < VHOST_NUM_ADDRS; j++)
-                       vhost_invalidate_vq_start(vq, j,
-                                                 range->start,
-                                                 range->end);
-       }
-
-       return 0;
-}
-
-static void vhost_invalidate_range_end(struct mmu_notifier *mn,
-                                      const struct mmu_notifier_range *range)
-{
-       struct vhost_dev *dev = container_of(mn, struct vhost_dev,
-                                            mmu_notifier);
-       int i, j;
-
-       for (i = 0; i < dev->nvqs; i++) {
-               struct vhost_virtqueue *vq = dev->vqs[i];
-
-               for (j = 0; j < VHOST_NUM_ADDRS; j++)
-                       vhost_invalidate_vq_end(vq, j,
-                                               range->start,
-                                               range->end);
-       }
-}
-
-static const struct mmu_notifier_ops vhost_mmu_notifier_ops = {
-       .invalidate_range_start = vhost_invalidate_range_start,
-       .invalidate_range_end = vhost_invalidate_range_end,
-};
-
-static void vhost_init_maps(struct vhost_dev *dev)
-{
-       struct vhost_virtqueue *vq;
-       int i, j;
-
-       dev->mmu_notifier.ops = &vhost_mmu_notifier_ops;
-
-       for (i = 0; i < dev->nvqs; ++i) {
-               vq = dev->vqs[i];
-               for (j = 0; j < VHOST_NUM_ADDRS; j++)
-                       RCU_INIT_POINTER(vq->maps[j], NULL);
-       }
-}
-#endif
-
 static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
 {
@@ -480,11 +325,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
        vq->busyloop_timeout = 0;
        vq->umem = NULL;
        vq->iotlb = NULL;
-       vq->invalidate_count = 0;
        __vhost_vq_meta_reset(vq);
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       vhost_reset_vq_maps(vq);
-#endif
 }
 
 static int vhost_worker(void *data)
@@ -634,9 +475,7 @@ void vhost_dev_init(struct vhost_dev *dev,
        INIT_LIST_HEAD(&dev->read_list);
        INIT_LIST_HEAD(&dev->pending_list);
        spin_lock_init(&dev->iotlb_lock);
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       vhost_init_maps(dev);
-#endif
+
 
        for (i = 0; i < dev->nvqs; ++i) {
                vq = dev->vqs[i];
@@ -645,7 +484,6 @@ void vhost_dev_init(struct vhost_dev *dev,
                vq->heads = NULL;
                vq->dev = dev;
                mutex_init(&vq->mutex);
-               spin_lock_init(&vq->mmu_lock);
                vhost_vq_reset(dev, vq);
                if (vq->handle_kick)
                        vhost_poll_init(&vq->poll, vq->handle_kick,
@@ -725,18 +563,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
        if (err)
                goto err_cgroup;
 
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       err = mmu_notifier_register(&dev->mmu_notifier, dev->mm);
-       if (err)
-               goto err_mmu_notifier;
-#endif
-
        return 0;
-
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-err_mmu_notifier:
-       vhost_dev_free_iovecs(dev);
-#endif
 err_cgroup:
        kthread_stop(worker);
        dev->worker = NULL;
@@ -827,107 +654,6 @@ static void vhost_clear_msg(struct vhost_dev *dev)
        spin_unlock(&dev->iotlb_lock);
 }
 
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-static void vhost_setup_uaddr(struct vhost_virtqueue *vq,
-                             int index, unsigned long uaddr,
-                             size_t size, bool write)
-{
-       struct vhost_uaddr *addr = &vq->uaddrs[index];
-
-       addr->uaddr = uaddr;
-       addr->size = size;
-       addr->write = write;
-}
-
-static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq)
-{
-       vhost_setup_uaddr(vq, VHOST_ADDR_DESC,
-                         (unsigned long)vq->desc,
-                         vhost_get_desc_size(vq, vq->num),
-                         false);
-       vhost_setup_uaddr(vq, VHOST_ADDR_AVAIL,
-                         (unsigned long)vq->avail,
-                         vhost_get_avail_size(vq, vq->num),
-                         false);
-       vhost_setup_uaddr(vq, VHOST_ADDR_USED,
-                         (unsigned long)vq->used,
-                         vhost_get_used_size(vq, vq->num),
-                         true);
-}
-
-static int vhost_map_prefetch(struct vhost_virtqueue *vq,
-                              int index)
-{
-       struct vhost_map *map;
-       struct vhost_uaddr *uaddr = &vq->uaddrs[index];
-       struct page **pages;
-       int npages = DIV_ROUND_UP(uaddr->size, PAGE_SIZE);
-       int npinned;
-       void *vaddr, *v;
-       int err;
-       int i;
-
-       spin_lock(&vq->mmu_lock);
-
-       err = -EFAULT;
-       if (vq->invalidate_count)
-               goto err;
-
-       err = -ENOMEM;
-       map = kmalloc(sizeof(*map), GFP_ATOMIC);
-       if (!map)
-               goto err;
-
-       pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC);
-       if (!pages)
-               goto err_pages;
-
-       err = EFAULT;
-       npinned = __get_user_pages_fast(uaddr->uaddr, npages,
-                                       uaddr->write, pages);
-       if (npinned > 0)
-               release_pages(pages, npinned);
-       if (npinned != npages)
-               goto err_gup;
-
-       for (i = 0; i < npinned; i++)
-               if (PageHighMem(pages[i]))
-                       goto err_gup;
-
-       vaddr = v = page_address(pages[0]);
-
-       /* For simplicity, fallback to userspace address if VA is not
-        * contigious.
-        */
-       for (i = 1; i < npinned; i++) {
-               v += PAGE_SIZE;
-               if (v != page_address(pages[i]))
-                       goto err_gup;
-       }
-
-       map->addr = vaddr + (uaddr->uaddr & (PAGE_SIZE - 1));
-       map->npages = npages;
-       map->pages = pages;
-
-       rcu_assign_pointer(vq->maps[index], map);
-       /* No need for a synchronize_rcu(). This function should be
-        * called by dev->worker so we are serialized with all
-        * readers.
-        */
-       spin_unlock(&vq->mmu_lock);
-
-       return 0;
-
-err_gup:
-       kfree(pages);
-err_pages:
-       kfree(map);
-err:
-       spin_unlock(&vq->mmu_lock);
-       return err;
-}
-#endif
-
 void vhost_dev_cleanup(struct vhost_dev *dev)
 {
        int i;
@@ -957,16 +683,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
                kthread_stop(dev->worker);
                dev->worker = NULL;
        }
-       if (dev->mm) {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-               mmu_notifier_unregister(&dev->mmu_notifier, dev->mm);
-#endif
+       if (dev->mm)
                mmput(dev->mm);
-       }
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       for (i = 0; i < dev->nvqs; i++)
-               vhost_uninit_vq_maps(dev->vqs[i]);
-#endif
        dev->mm = NULL;
 }
 EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
@@ -1195,26 +913,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 
 static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       struct vhost_map *map;
-       struct vring_used *used;
-
-       if (!vq->iotlb) {
-               rcu_read_lock();
-
-               map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
-               if (likely(map)) {
-                       used = map->addr;
-                       *((__virtio16 *)&used->ring[vq->num]) =
-                               cpu_to_vhost16(vq, vq->avail_idx);
-                       rcu_read_unlock();
-                       return 0;
-               }
-
-               rcu_read_unlock();
-       }
-#endif
-
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
                              vhost_avail_event(vq));
 }
@@ -1223,27 +921,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
                                 struct vring_used_elem *head, int idx,
                                 int count)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       struct vhost_map *map;
-       struct vring_used *used;
-       size_t size;
-
-       if (!vq->iotlb) {
-               rcu_read_lock();
-
-               map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
-               if (likely(map)) {
-                       used = map->addr;
-                       size = count * sizeof(*head);
-                       memcpy(used->ring + idx, head, size);
-                       rcu_read_unlock();
-                       return 0;
-               }
-
-               rcu_read_unlock();
-       }
-#endif
-
        return vhost_copy_to_user(vq, vq->used->ring + idx, head,
                                  count * sizeof(*head));
 }
@@ -1251,25 +928,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
 static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
 
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       struct vhost_map *map;
-       struct vring_used *used;
-
-       if (!vq->iotlb) {
-               rcu_read_lock();
-
-               map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
-               if (likely(map)) {
-                       used = map->addr;
-                       used->flags = cpu_to_vhost16(vq, vq->used_flags);
-                       rcu_read_unlock();
-                       return 0;
-               }
-
-               rcu_read_unlock();
-       }
-#endif
-
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
                              &vq->used->flags);
 }
@@ -1277,25 +935,6 @@ static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
 static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
 
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       struct vhost_map *map;
-       struct vring_used *used;
-
-       if (!vq->iotlb) {
-               rcu_read_lock();
-
-               map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
-               if (likely(map)) {
-                       used = map->addr;
-                       used->idx = cpu_to_vhost16(vq, vq->last_used_idx);
-                       rcu_read_unlock();
-                       return 0;
-               }
-
-               rcu_read_unlock();
-       }
-#endif
-
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
                              &vq->used->idx);
 }
@@ -1341,50 +980,12 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
 static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
                                      __virtio16 *idx)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       struct vhost_map *map;
-       struct vring_avail *avail;
-
-       if (!vq->iotlb) {
-               rcu_read_lock();
-
-               map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
-               if (likely(map)) {
-                       avail = map->addr;
-                       *idx = avail->idx;
-                       rcu_read_unlock();
-                       return 0;
-               }
-
-               rcu_read_unlock();
-       }
-#endif
-
        return vhost_get_avail(vq, *idx, &vq->avail->idx);
 }
 
 static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
                                       __virtio16 *head, int idx)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       struct vhost_map *map;
-       struct vring_avail *avail;
-
-       if (!vq->iotlb) {
-               rcu_read_lock();
-
-               map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
-               if (likely(map)) {
-                       avail = map->addr;
-                       *head = avail->ring[idx & (vq->num - 1)];
-                       rcu_read_unlock();
-                       return 0;
-               }
-
-               rcu_read_unlock();
-       }
-#endif
-
        return vhost_get_avail(vq, *head,
                               &vq->avail->ring[idx & (vq->num - 1)]);
 }
@@ -1392,98 +993,24 @@ static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
 static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
                                        __virtio16 *flags)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       struct vhost_map *map;
-       struct vring_avail *avail;
-
-       if (!vq->iotlb) {
-               rcu_read_lock();
-
-               map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
-               if (likely(map)) {
-                       avail = map->addr;
-                       *flags = avail->flags;
-                       rcu_read_unlock();
-                       return 0;
-               }
-
-               rcu_read_unlock();
-       }
-#endif
-
        return vhost_get_avail(vq, *flags, &vq->avail->flags);
 }
 
 static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
                                       __virtio16 *event)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       struct vhost_map *map;
-       struct vring_avail *avail;
-
-       if (!vq->iotlb) {
-               rcu_read_lock();
-               map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
-               if (likely(map)) {
-                       avail = map->addr;
-                       *event = (__virtio16)avail->ring[vq->num];
-                       rcu_read_unlock();
-                       return 0;
-               }
-               rcu_read_unlock();
-       }
-#endif
-
        return vhost_get_avail(vq, *event, vhost_used_event(vq));
 }
 
 static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
                                     __virtio16 *idx)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       struct vhost_map *map;
-       struct vring_used *used;
-
-       if (!vq->iotlb) {
-               rcu_read_lock();
-
-               map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
-               if (likely(map)) {
-                       used = map->addr;
-                       *idx = used->idx;
-                       rcu_read_unlock();
-                       return 0;
-               }
-
-               rcu_read_unlock();
-       }
-#endif
-
        return vhost_get_used(vq, *idx, &vq->used->idx);
 }
 
 static inline int vhost_get_desc(struct vhost_virtqueue *vq,
                                 struct vring_desc *desc, int idx)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       struct vhost_map *map;
-       struct vring_desc *d;
-
-       if (!vq->iotlb) {
-               rcu_read_lock();
-
-               map = rcu_dereference(vq->maps[VHOST_ADDR_DESC]);
-               if (likely(map)) {
-                       d = map->addr;
-                       *desc = *(d + idx);
-                       rcu_read_unlock();
-                       return 0;
-               }
-
-               rcu_read_unlock();
-       }
-#endif
-
        return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
 }
 
@@ -1824,32 +1351,12 @@ static bool iotlb_access_ok(struct vhost_virtqueue *vq,
        return true;
 }
 
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-static void vhost_vq_map_prefetch(struct vhost_virtqueue *vq)
-{
-       struct vhost_map __rcu *map;
-       int i;
-
-       for (i = 0; i < VHOST_NUM_ADDRS; i++) {
-               rcu_read_lock();
-               map = rcu_dereference(vq->maps[i]);
-               rcu_read_unlock();
-               if (unlikely(!map))
-                       vhost_map_prefetch(vq, i);
-       }
-}
-#endif
-
 int vq_meta_prefetch(struct vhost_virtqueue *vq)
 {
        unsigned int num = vq->num;
 
-       if (!vq->iotlb) {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-               vhost_vq_map_prefetch(vq);
-#endif
+       if (!vq->iotlb)
                return 1;
-       }
 
        return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
                               vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
@@ -2060,16 +1567,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,
 
        mutex_lock(&vq->mutex);
 
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       /* Unregister MMU notifer to allow invalidation callback
-        * can access vq->uaddrs[] without holding a lock.
-        */
-       if (d->mm)
-               mmu_notifier_unregister(&d->mmu_notifier, d->mm);
-
-       vhost_uninit_vq_maps(vq);
-#endif
-
        switch (ioctl) {
        case VHOST_SET_VRING_NUM:
                r = vhost_vring_set_num(d, vq, argp);
@@ -2081,13 +1578,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,
                BUG();
        }
 
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       vhost_setup_vq_uaddr(vq);
-
-       if (d->mm)
-               mmu_notifier_register(&d->mmu_notifier, d->mm);
-#endif
-
        mutex_unlock(&vq->mutex);
 
        return r;
@@ -2688,7 +2178,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
                /* If this is an input descriptor, increment that count. */
                if (access == VHOST_ACCESS_WO) {
                        *in_num += ret;
-                       if (unlikely(log)) {
+                       if (unlikely(log && ret)) {
                                log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
                                log[*log_num].len = vhost32_to_cpu(vq, desc.len);
                                ++*log_num;
@@ -2829,7 +2319,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
                        /* If this is an input descriptor,
                         * increment that count. */
                        *in_num += ret;
-                       if (unlikely(log)) {
+                       if (unlikely(log && ret)) {
                                log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
                                log[*log_num].len = vhost32_to_cpu(vq, desc.len);
                                ++*log_num;
index 42a8c2a13ab1b1fc5b90f3f6b43f69572fa242ac..e9ed2722b63333404b444f55dd4638a5358c59fa 100644
@@ -12,9 +12,6 @@
 #include <linux/virtio_config.h>
 #include <linux/virtio_ring.h>
 #include <linux/atomic.h>
-#include <linux/pagemap.h>
-#include <linux/mmu_notifier.h>
-#include <asm/cacheflush.h>
 
 struct vhost_work;
 typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -83,24 +80,6 @@ enum vhost_uaddr_type {
        VHOST_NUM_ADDRS = 3,
 };
 
-struct vhost_map {
-       int npages;
-       void *addr;
-       struct page **pages;
-};
-
-struct vhost_uaddr {
-       unsigned long uaddr;
-       size_t size;
-       bool write;
-};
-
-#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
-#else
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
-#endif
-
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
        struct vhost_dev *dev;
@@ -111,22 +90,7 @@ struct vhost_virtqueue {
        struct vring_desc __user *desc;
        struct vring_avail __user *avail;
        struct vring_used __user *used;
-
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-       /* Read by memory accessors, modified by meta data
-        * prefetching, MMU notifier and vring ioctl().
-        * Synchonrized through mmu_lock (writers) and RCU (writers
-        * and readers).
-        */
-       struct vhost_map __rcu *maps[VHOST_NUM_ADDRS];
-       /* Read by MMU notifier, modified by vring ioctl(),
-        * synchronized through MMU notifier
-        * registering/unregistering.
-        */
-       struct vhost_uaddr uaddrs[VHOST_NUM_ADDRS];
-#endif
        const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
-
        struct file *kick;
        struct eventfd_ctx *call_ctx;
        struct eventfd_ctx *error_ctx;
@@ -181,8 +145,6 @@ struct vhost_virtqueue {
        bool user_be;
 #endif
        u32 busyloop_timeout;
-       spinlock_t mmu_lock;
-       int invalidate_count;
 };
 
 struct vhost_msg_node {
@@ -196,9 +158,6 @@ struct vhost_msg_node {
 
 struct vhost_dev {
        struct mm_struct *mm;
-#ifdef CONFIG_MMU_NOTIFIER
-       struct mmu_notifier mmu_notifier;
-#endif
        struct mutex mutex;
        struct vhost_virtqueue **vqs;
        int nvqs;
index fc9dfb0a95afc03e3c9e5633011073c9e79d2074..51f5d1c56fd9c6753079e78ecb4b59605e3e8f6f 100644
@@ -763,17 +763,17 @@ static void tt_get_par(struct atafb_par *par)
 {
        unsigned long addr;
        par->hw.tt.mode = shifter_tt.tt_shiftmode;
-       par->hw.tt.sync = shifter.syncmode;
-       addr = ((shifter.bas_hi & 0xff) << 16) |
-              ((shifter.bas_md & 0xff) << 8)  |
-              ((shifter.bas_lo & 0xff));
+       par->hw.tt.sync = shifter_st.syncmode;
+       addr = ((shifter_st.bas_hi & 0xff) << 16) |
+              ((shifter_st.bas_md & 0xff) << 8)  |
+              ((shifter_st.bas_lo & 0xff));
        par->screen_base = atari_stram_to_virt(addr);
 }
 
 static void tt_set_par(struct atafb_par *par)
 {
        shifter_tt.tt_shiftmode = par->hw.tt.mode;
-       shifter.syncmode = par->hw.tt.sync;
+       shifter_st.syncmode = par->hw.tt.sync;
        /* only set screen_base if really necessary */
        if (current_par.screen_base != par->screen_base)
                fbhw->set_screen_base(par->screen_base);
@@ -1543,7 +1543,7 @@ static void falcon_get_par(struct atafb_par *par)
        hw->f_shift = videl.f_shift;
        hw->vid_control = videl.control;
        hw->vid_mode = videl.mode;
-       hw->sync = shifter.syncmode & 0x1;
+       hw->sync = shifter_st.syncmode & 0x1;
        hw->xoffset = videl.xoffset & 0xf;
        hw->hht = videl.hht;
        hw->hbb = videl.hbb;
@@ -1558,9 +1558,9 @@ static void falcon_get_par(struct atafb_par *par)
        hw->vde = videl.vde;
        hw->vss = videl.vss;
 
-       addr = (shifter.bas_hi & 0xff) << 16 |
-              (shifter.bas_md & 0xff) << 8  |
-              (shifter.bas_lo & 0xff);
+       addr = (shifter_st.bas_hi & 0xff) << 16 |
+              (shifter_st.bas_md & 0xff) << 8  |
+              (shifter_st.bas_lo & 0xff);
        par->screen_base = atari_stram_to_virt(addr);
 
        /* derived parameters */
@@ -1605,7 +1605,7 @@ static irqreturn_t falcon_vbl_switcher(int irq, void *dummy)
                        /* Turn off external clocks. Read sets all output bits to 1. */
                        *(volatile unsigned short *)0xffff9202;
                }
-               shifter.syncmode = hw->sync;
+               shifter_st.syncmode = hw->sync;
 
                videl.hht = hw->hht;
                videl.hbb = hw->hbb;
@@ -1952,18 +1952,18 @@ static void stste_get_par(struct atafb_par *par)
 {
        unsigned long addr;
        par->hw.st.mode = shifter_tt.st_shiftmode;
-       par->hw.st.sync = shifter.syncmode;
-       addr = ((shifter.bas_hi & 0xff) << 16) |
-              ((shifter.bas_md & 0xff) << 8);
+       par->hw.st.sync = shifter_st.syncmode;
+       addr = ((shifter_st.bas_hi & 0xff) << 16) |
+              ((shifter_st.bas_md & 0xff) << 8);
        if (ATARIHW_PRESENT(EXTD_SHIFTER))
-               addr |= (shifter.bas_lo & 0xff);
+               addr |= (shifter_st.bas_lo & 0xff);
        par->screen_base = atari_stram_to_virt(addr);
 }
 
 static void stste_set_par(struct atafb_par *par)
 {
        shifter_tt.st_shiftmode = par->hw.st.mode;
-       shifter.syncmode = par->hw.st.sync;
+       shifter_st.syncmode = par->hw.st.sync;
        /* only set screen_base if really necessary */
        if (current_par.screen_base != par->screen_base)
                fbhw->set_screen_base(par->screen_base);
@@ -2018,10 +2018,10 @@ static void stste_set_screen_base(void *s_base)
        unsigned long addr;
        addr = atari_stram_to_phys(s_base);
        /* Setup Screen Memory */
-       shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16);
-       shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8);
+       shifter_st.bas_hi = (unsigned char)((addr & 0xff0000) >> 16);
+       shifter_st.bas_md = (unsigned char)((addr & 0x00ff00) >> 8);
        if (ATARIHW_PRESENT(EXTD_SHIFTER))
-               shifter.bas_lo = (unsigned char)(addr & 0x0000ff);
+               shifter_st.bas_lo = (unsigned char)(addr & 0x0000ff);
 }
 
 #endif /* ATAFB_STE */
@@ -2265,9 +2265,9 @@ static void set_screen_base(void *s_base)
 
        addr = atari_stram_to_phys(s_base);
        /* Setup Screen Memory */
-       shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16);
-       shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8);
-       shifter.bas_lo = (unsigned char)(addr & 0x0000ff);
+       shifter_st.bas_hi = (unsigned char)((addr & 0xff0000) >> 16);
+       shifter_st.bas_md = (unsigned char)((addr & 0x00ff00) >> 8);
+       shifter_st.bas_lo = (unsigned char)(addr & 0x0000ff);
 }
 
 static int pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
index c8be1c4f5b5566929d9dc1601bd8b05b6552402e..bdc08244a648dbf0ee58fe8b9dc4e5e36c15dcb8 100644
@@ -566,13 +566,17 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 
 unmap_release:
        err_idx = i;
-       i = head;
+
+       if (indirect)
+               i = 0;
+       else
+               i = head;
 
        for (n = 0; n < total_sg; n++) {
                if (i == err_idx)
                        break;
                vring_unmap_one_split(vq, &desc[i]);
-               i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
+               i = virtio16_to_cpu(_vq->vdev, desc[i].next);
        }
 
        if (indirect)
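
The unmap_release change above makes the error path unwind the same descriptor table it was filling: for an indirect transfer that is the separately allocated desc[] array, walked from slot 0 through its own next links, instead of the main ring walked from head. A minimal sketch of that unwind loop, using placeholder demo_* types and helpers rather than the virtio API:

        #include <stdint.h>
        #include <stdio.h>

        struct demo_desc {
                uint64_t addr;
                uint32_t len;
                uint16_t flags;
                uint16_t next;          /* index of the next descriptor in this table */
        };

        /* Placeholder for vring_unmap_one_split(): undo one slot's DMA mapping. */
        static void demo_unmap_one(const struct demo_desc *d)
        {
                printf("unmapping addr=0x%llx len=%u\n",
                       (unsigned long long)d->addr, (unsigned)d->len);
        }

        /* Unwind the slots mapped so far, following next links until err_idx. */
        static void demo_unwind(const struct demo_desc *desc, int start,
                                int err_idx, int total)
        {
                int i = start;
                int n;

                for (n = 0; n < total; n++) {
                        if (i == err_idx)
                                break;
                        demo_unmap_one(&desc[i]);
                        i = desc[i].next;
                }
        }

        int main(void)
        {
                const struct demo_desc table[3] = {
                        { .addr = 0x1000, .len = 64, .next = 1 },
                        { .addr = 0x2000, .len = 64, .next = 2 },
                        { .addr = 0x3000, .len = 64, .next = 0 },
                };

                /* Mapping slot 2 failed: unwind slots 0 and 1 of the indirect
                 * table, starting at 0 rather than at the main ring's head. */
                demo_unwind(table, 0, 2, 3);
                return 0;
        }
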
index ae1df496bf384bdb3d8f9c37769326f4c95f01cb..adcabd9473eb65c3d6dc7486c5213f49c5f69369 100644
@@ -386,8 +386,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         */
        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-       map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
-                                    attrs);
+       map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
+                                    size, size, dir, attrs);
        if (map == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;
 
@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         * Ensure that the address returned is DMA'ble
         */
        if (unlikely(!dma_capable(dev, dev_addr, size))) {
-               swiotlb_tbl_unmap_single(dev, map, size, dir,
+               swiotlb_tbl_unmap_single(dev, map, size, size, dir,
                                attrs | DMA_ATTR_SKIP_CPU_SYNC);
                return DMA_MAPPING_ERROR;
        }
@@ -433,7 +433,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr))
-               swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
+               swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
 }
 
 static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
index 1ff438fd5bc2906499a0eb075e8c38740e64be7b..eeb75281894ed6a3ad41dfe53d9f1fa7df2d84dc 100644
@@ -3628,6 +3628,13 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
                       TASK_UNINTERRUPTIBLE);
 }
 
+static void end_extent_buffer_writeback(struct extent_buffer *eb)
+{
+       clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+       smp_mb__after_atomic();
+       wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
+}
+
 /*
  * Lock eb pages and flush the bio if we can't the locks
  *
@@ -3699,8 +3706,11 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
 
                if (!trylock_page(p)) {
                        if (!flush) {
-                               ret = flush_write_bio(epd);
-                               if (ret < 0) {
+                               int err;
+
+                               err = flush_write_bio(epd);
+                               if (err < 0) {
+                                       ret = err;
                                        failed_page_nr = i;
                                        goto err_unlock;
                                }
@@ -3715,16 +3725,23 @@ err_unlock:
        /* Unlock already locked pages */
        for (i = 0; i < failed_page_nr; i++)
                unlock_page(eb->pages[i]);
+       /*
+        * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
+        * Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can
+        * be made and undo everything done before.
+        */
+       btrfs_tree_lock(eb);
+       spin_lock(&eb->refs_lock);
+       set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
+       end_extent_buffer_writeback(eb);
+       spin_unlock(&eb->refs_lock);
+       percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
+                                fs_info->dirty_metadata_batch);
+       btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+       btrfs_tree_unlock(eb);
        return ret;
 }
 
-static void end_extent_buffer_writeback(struct extent_buffer *eb)
-{
-       clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
-       smp_mb__after_atomic();
-       wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
-}
-
 static void set_btree_ioerr(struct page *page)
 {
        struct extent_buffer *eb = (struct extent_buffer *)page->private;
index 6c8297bcfeb7caa63bc2b39bdd06b5a1f7708489..1bfd7e34f31e33709f0e937f83c6d978d74564c6 100644
@@ -4985,7 +4985,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
                                                      BTRFS_I(inode),
                                                      LOG_OTHER_INODE_ALL,
                                                      0, LLONG_MAX, ctx);
-                                       iput(inode);
+                                       btrfs_add_delayed_iput(inode);
                                }
                        }
                        continue;
@@ -5000,7 +5000,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
                ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
                                      LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
                if (ret) {
-                       iput(inode);
+                       btrfs_add_delayed_iput(inode);
                        continue;
                }
 
@@ -5009,7 +5009,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
                key.offset = 0;
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0) {
-                       iput(inode);
+                       btrfs_add_delayed_iput(inode);
                        continue;
                }
 
@@ -5056,7 +5056,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
                        }
                        path->slots[0]++;
                }
-               iput(inode);
+               btrfs_add_delayed_iput(inode);
        }
 
        return ret;
@@ -5689,7 +5689,7 @@ process_leaf:
                        }
 
                        if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
-                               iput(di_inode);
+                               btrfs_add_delayed_iput(di_inode);
                                break;
                        }
 
@@ -5701,7 +5701,7 @@ process_leaf:
                        if (!ret &&
                            btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
                                ret = 1;
-                       iput(di_inode);
+                       btrfs_add_delayed_iput(di_inode);
                        if (ret)
                                goto next_dir_inode;
                        if (ctx->log_new_dentries) {
@@ -5848,7 +5848,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
                        if (!ret && ctx && ctx->log_new_dentries)
                                ret = log_new_dir_dentries(trans, root,
                                                   BTRFS_I(dir_inode), ctx);
-                       iput(dir_inode);
+                       btrfs_add_delayed_iput(dir_inode);
                        if (ret)
                                goto out;
                }
@@ -5891,7 +5891,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
                        ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
                                              LOG_INODE_EXISTS,
                                              0, LLONG_MAX, ctx);
-               iput(inode);
+               btrfs_add_delayed_iput(inode);
                if (ret)
                        return ret;
 
index 4b21a90015a96e647de60b99dc20cd1f7ccce5a7..99caf77df4a23edd9291e0f9336ca4a0d6df323e 100644
@@ -152,5 +152,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.21"
+#define CIFS_VERSION   "2.22"
 #endif                         /* _CIFSFS_H */
index e23234207fc2cfbedaa7460eda6fabf0cd8e8210..592a6cea2b79f52df986bd9df7c1dcced837ccdd 100644
@@ -579,6 +579,7 @@ extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
                                unsigned int *len, unsigned int *offset);
 
 void extract_unc_hostname(const char *unc, const char **h, size_t *len);
+int copy_path_name(char *dst, const char *src);
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
 static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
index e2f95965065d1d7fd42c8116d0d57cecb097bd6c..3907653e63c7784e8f70fb1f48da7bd3dcf68b73 100644
@@ -942,10 +942,8 @@ PsxDelete:
                                       PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else { /* BB add path length overrun check */
-               name_len = strnlen(fileName, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, fileName, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, fileName);
        }
 
        params = 6 + name_len;
@@ -1015,10 +1013,8 @@ DelFileRetry:
                                              remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {                /* BB improve check for buffer overruns BB */
-               name_len = strnlen(name, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->fileName, name, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->fileName, name);
        }
        pSMB->SearchAttributes =
            cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
@@ -1062,10 +1058,8 @@ RmDirRetry:
                                              remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {                /* BB improve check for buffer overruns BB */
-               name_len = strnlen(name, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->DirName, name, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->DirName, name);
        }
 
        pSMB->BufferFormat = 0x04;
@@ -1107,10 +1101,8 @@ MkDirRetry:
                                              remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {                /* BB improve check for buffer overruns BB */
-               name_len = strnlen(name, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->DirName, name, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->DirName, name);
        }
 
        pSMB->BufferFormat = 0x04;
@@ -1157,10 +1149,8 @@ PsxCreat:
                                       PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(name, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, name, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, name);
        }
 
        params = 6 + name_len;
@@ -1324,11 +1314,9 @@ OldOpenRetry:
                                      fileName, PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {                /* BB improve check for buffer overruns BB */
+       } else {
                count = 0;      /* no pad */
-               name_len = strnlen(fileName, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->fileName, fileName, name_len);
+               name_len = copy_path_name(pSMB->fileName, fileName);
        }
        if (*pOplock & REQ_OPLOCK)
                pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK);
@@ -1442,11 +1430,8 @@ openRetry:
                /* BB improve check for buffer overruns BB */
                /* no pad */
                count = 0;
-               name_len = strnlen(path, PATH_MAX);
-               /* trailing null */
-               name_len++;
+               name_len = copy_path_name(req->fileName, path);
                req->NameLength = cpu_to_le16(name_len);
-               strncpy(req->fileName, path, name_len);
        }
 
        if (*oplock & REQ_OPLOCK)
@@ -2812,15 +2797,10 @@ renameRetry:
                                       remap);
                name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
                name_len2 *= 2; /* convert to bytes */
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(from_name, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->OldFileName, from_name, name_len);
-               name_len2 = strnlen(to_name, PATH_MAX);
-               name_len2++;    /* trailing null */
+       } else {
+               name_len = copy_path_name(pSMB->OldFileName, from_name);
+               name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
                pSMB->OldFileName[name_len] = 0x04;  /* 2nd buffer format */
-               strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
-               name_len2++;    /* trailing null */
                name_len2++;    /* signature byte */
        }
 
@@ -2962,15 +2942,10 @@ copyRetry:
                                       toName, PATH_MAX, nls_codepage, remap);
                name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
                name_len2 *= 2; /* convert to bytes */
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(fromName, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->OldFileName, fromName, name_len);
-               name_len2 = strnlen(toName, PATH_MAX);
-               name_len2++;    /* trailing null */
+       } else {
+               name_len = copy_path_name(pSMB->OldFileName, fromName);
                pSMB->OldFileName[name_len] = 0x04;  /* 2nd buffer format */
-               strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
-               name_len2++;    /* trailing null */
+               name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, toName);
                name_len2++;    /* signature byte */
        }
 
@@ -3021,10 +2996,8 @@ createSymLinkRetry:
                name_len++;     /* trailing null */
                name_len *= 2;
 
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(fromName, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, fromName, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, fromName);
        }
        params = 6 + name_len;
        pSMB->MaxSetupCount = 0;
@@ -3044,10 +3017,8 @@ createSymLinkRetry:
                                        PATH_MAX, nls_codepage, remap);
                name_len_target++;      /* trailing null */
                name_len_target *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len_target = strnlen(toName, PATH_MAX);
-               name_len_target++;      /* trailing null */
-               strncpy(data_offset, toName, name_len_target);
+       } else {
+               name_len_target = copy_path_name(data_offset, toName);
        }
 
        pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3109,10 +3080,8 @@ createHardLinkRetry:
                name_len++;     /* trailing null */
                name_len *= 2;
 
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(toName, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, toName, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, toName);
        }
        params = 6 + name_len;
        pSMB->MaxSetupCount = 0;
@@ -3131,10 +3100,8 @@ createHardLinkRetry:
                                       PATH_MAX, nls_codepage, remap);
                name_len_target++;      /* trailing null */
                name_len_target *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len_target = strnlen(fromName, PATH_MAX);
-               name_len_target++;      /* trailing null */
-               strncpy(data_offset, fromName, name_len_target);
+       } else {
+               name_len_target = copy_path_name(data_offset, fromName);
        }
 
        pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3213,15 +3180,10 @@ winCreateHardLinkRetry:
                                       remap);
                name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
                name_len2 *= 2; /* convert to bytes */
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(from_name, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->OldFileName, from_name, name_len);
-               name_len2 = strnlen(to_name, PATH_MAX);
-               name_len2++;    /* trailing null */
+       } else {
+               name_len = copy_path_name(pSMB->OldFileName, from_name);
                pSMB->OldFileName[name_len] = 0x04;     /* 2nd buffer format */
-               strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
-               name_len2++;    /* trailing null */
+               name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
                name_len2++;    /* signature byte */
        }
 
@@ -3271,10 +3233,8 @@ querySymLinkRetry:
                                           remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(searchName, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, searchName, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, searchName);
        }
 
        params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
@@ -3691,10 +3651,8 @@ queryAclRetry:
                name_len *= 2;
                pSMB->FileName[name_len] = 0;
                pSMB->FileName[name_len+1] = 0;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(searchName, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, searchName, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, searchName);
        }
 
        params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
@@ -3776,10 +3734,8 @@ setAclRetry:
                                           PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(fileName, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, fileName, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, fileName);
        }
        params = 6 + name_len;
        pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -4184,9 +4140,7 @@ QInfRetry:
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {
-               name_len = strnlen(search_name, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, search_name, name_len);
+               name_len = copy_path_name(pSMB->FileName, search_name);
        }
        pSMB->BufferFormat = 0x04;
        name_len++; /* account for buffer type byte */
@@ -4321,10 +4275,8 @@ QPathInfoRetry:
                                       PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(search_name, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, search_name, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, search_name);
        }
 
        params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4490,10 +4442,8 @@ UnixQPathInfoRetry:
                                       PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(searchName, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, searchName, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, searchName);
        }
 
        params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4593,17 +4543,16 @@ findFirstRetry:
                        pSMB->FileName[name_len+1] = 0;
                        name_len += 2;
                }
-       } else {        /* BB add check for overrun of SMB buf BB */
-               name_len = strnlen(searchName, PATH_MAX);
-/* BB fix here and in unicode clause above ie
-               if (name_len > buffersize-header)
-                       free buffer exit; BB */
-               strncpy(pSMB->FileName, searchName, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, searchName);
                if (msearch) {
-                       pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb);
-                       pSMB->FileName[name_len+1] = '*';
-                       pSMB->FileName[name_len+2] = 0;
-                       name_len += 3;
+                       if (WARN_ON_ONCE(name_len > PATH_MAX-2))
+                               name_len = PATH_MAX-2;
+                       /* overwrite nul byte */
+                       pSMB->FileName[name_len-1] = CIFS_DIR_SEP(cifs_sb);
+                       pSMB->FileName[name_len] = '*';
+                       pSMB->FileName[name_len+1] = 0;
+                       name_len += 2;
                }
        }
 
@@ -4898,10 +4847,8 @@ GetInodeNumberRetry:
                                           remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(search_name, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, search_name, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, search_name);
        }
 
        params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
@@ -5008,9 +4955,7 @@ getDFSRetry:
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(search_name, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->RequestFileName, search_name, name_len);
+               name_len = copy_path_name(pSMB->RequestFileName, search_name);
        }
 
        if (ses->server->sign)
@@ -5663,10 +5608,8 @@ SetEOFRetry:
                                       PATH_MAX, cifs_sb->local_nls, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(file_name, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, file_name, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, file_name);
        }
        params = 6 + name_len;
        data_count = sizeof(struct file_end_of_file_info);
@@ -5959,10 +5902,8 @@ SetTimesRetry:
                                       PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(fileName, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, fileName, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, fileName);
        }
 
        params = 6 + name_len;
@@ -6040,10 +5981,8 @@ SetAttrLgcyRetry:
                                       PATH_MAX, nls_codepage);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(fileName, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->fileName, fileName, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->fileName, fileName);
        }
        pSMB->attr = cpu_to_le16(dos_attrs);
        pSMB->BufferFormat = 0x04;
@@ -6203,10 +6142,8 @@ setPermsRetry:
                                       PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(file_name, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, file_name, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, file_name);
        }
 
        params = 6 + name_len;
@@ -6298,10 +6235,8 @@ QAllEAsRetry:
                                       PATH_MAX, nls_codepage, remap);
                list_len++;     /* trailing null */
                list_len *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               list_len = strnlen(searchName, PATH_MAX);
-               list_len++;     /* trailing null */
-               strncpy(pSMB->FileName, searchName, list_len);
+       } else {
+               list_len = copy_path_name(pSMB->FileName, searchName);
        }
 
        params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
@@ -6480,10 +6415,8 @@ SetEARetry:
                                       PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
-       } else {        /* BB improve the check for buffer overruns BB */
-               name_len = strnlen(fileName, PATH_MAX);
-               name_len++;     /* trailing null */
-               strncpy(pSMB->FileName, fileName, name_len);
+       } else {
+               name_len = copy_path_name(pSMB->FileName, fileName);
        }
 
        params = 6 + name_len;
index 1795e80cbdf7b246c12f4bbdf758b14a1999d96a..5299effa6f7d42d940a76057395fb7d9bd6ce99f 100644
@@ -2981,6 +2981,7 @@ static int
 cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
 {
        int rc = 0;
+       int is_domain = 0;
        const char *delim, *payload;
        char *desc;
        ssize_t len;
@@ -3028,6 +3029,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
                        rc = PTR_ERR(key);
                        goto out_err;
                }
+               is_domain = 1;
        }
 
        down_read(&key->sem);
@@ -3085,6 +3087,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
                goto out_key_put;
        }
 
+       /*
+        * If we have a domain key then we must set the domainName in the
+        * for the request.
+        */
+       if (is_domain && ses->domainName) {
+               vol->domainname = kstrndup(ses->domainName,
+                                          strlen(ses->domainName),
+                                          GFP_KERNEL);
+               if (!vol->domainname) {
+                       cifs_dbg(FYI, "Unable to allocate %zd bytes for "
+                                "domain\n", len);
+                       rc = -ENOMEM;
+                       kfree(vol->username);
+                       vol->username = NULL;
+                       kzfree(vol->password);
+                       vol->password = NULL;
+                       goto out_key_put;
+               }
+       }
+
 out_key_put:
        up_read(&key->sem);
        key_put(key);
@@ -4209,16 +4231,19 @@ build_unc_path_to_root(const struct smb_vol *vol,
                strlen(vol->prepath) + 1 : 0;
        unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);
 
+       if (unc_len > MAX_TREE_SIZE)
+               return ERR_PTR(-EINVAL);
+
        full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
        if (full_path == NULL)
                return ERR_PTR(-ENOMEM);
 
-       strncpy(full_path, vol->UNC, unc_len);
+       memcpy(full_path, vol->UNC, unc_len);
        pos = full_path + unc_len;
 
        if (pplen) {
                *pos = CIFS_DIR_SEP(cifs_sb);
-               strncpy(pos + 1, vol->prepath, pplen);
+               memcpy(pos + 1, vol->prepath, pplen);
                pos += pplen;
        }
 
index f26a48dd2e39508ab01c11c91d85fbb471beb16c..be424e81e3ad98e20f65de41572d87a569a28c7b 100644
@@ -69,11 +69,10 @@ cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
                return full_path;
 
        if (dfsplen)
-               strncpy(full_path, tcon->treeName, dfsplen);
+               memcpy(full_path, tcon->treeName, dfsplen);
        full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb);
-       strncpy(full_path + dfsplen + 1, vol->prepath, pplen);
+       memcpy(full_path + dfsplen + 1, vol->prepath, pplen);
        convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
-       full_path[dfsplen + pplen] = 0; /* add trailing null */
        return full_path;
 }
 
index f383877a65117d5d42f40e99eae9c8127dcf8747..5ad83bdb9bea3ce3597238e862cd447ca878b40a 100644
@@ -1011,3 +1011,25 @@ void extract_unc_hostname(const char *unc, const char **h, size_t *len)
        *h = unc;
        *len = end - unc;
 }
+
+/**
+ * copy_path_name - copy src path to dst, possibly truncating
+ *
+ * returns number of bytes written (including trailing nul)
+ */
+int copy_path_name(char *dst, const char *src)
+{
+       int name_len;
+
+       /*
+        * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
+        * will truncate and strlen(dst) will be PATH_MAX-1
+        */
+       name_len = strscpy(dst, src, PATH_MAX);
+       if (WARN_ON_ONCE(name_len < 0))
+               name_len = PATH_MAX-1;
+
+       /* we count the trailing nul */
+       name_len++;
+       return name_len;
+}
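
copy_path_name() above leans on strscpy()'s contract: the destination is always NUL-terminated and truncation is reported, so the caller can count on at most PATH_MAX-1 characters plus the trailing NUL. Userspace has no strscpy(), but the same contract can be sketched with a bounded copy; this stand-in assumes PATH_MAX from <limits.h> and is illustrative only:

    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    #ifndef PATH_MAX
    #define PATH_MAX 4096
    #endif

    /* Userspace stand-in for the kernel helper: copy src into a PATH_MAX
     * buffer, always NUL-terminate, and return the number of bytes written
     * including the trailing NUL (a truncated copy returns PATH_MAX). */
    static int copy_path_name(char dst[PATH_MAX], const char *src)
    {
            size_t len = strlen(src);

            if (len >= PATH_MAX)
                    len = PATH_MAX - 1;     /* truncate, as strscpy() would */
            memcpy(dst, src, len);
            dst[len] = '\0';
            return (int)len + 1;            /* count the trailing NUL */
    }

    int main(void)
    {
            char buf[PATH_MAX];
            char huge[PATH_MAX + 16];

            memset(huge, 'a', sizeof(huge) - 1);
            huge[sizeof(huge) - 1] = '\0';

            printf("short: %d bytes\n", copy_path_name(buf, "\\srv\\share"));
            printf("long:  %d bytes (capped at PATH_MAX)\n",
                   copy_path_name(buf, huge));
            return 0;
    }
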
index dcd49ad60c83d4b668f0d748ed8601147da74b63..4c764ff7edd217b73c35853db0c12543013a5cbe 100644 (file)
@@ -159,13 +159,16 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
                                 const struct nls_table *nls_cp)
 {
        char *bcc_ptr = *pbcc_area;
+       int len;
 
        /* copy user */
        /* BB what about null user mounts - check that we do this BB */
        /* copy user */
        if (ses->user_name != NULL) {
-               strncpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
-               bcc_ptr += strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
+               len = strscpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
+               if (WARN_ON_ONCE(len < 0))
+                       len = CIFS_MAX_USERNAME_LEN - 1;
+               bcc_ptr += len;
        }
        /* else null user mount */
        *bcc_ptr = 0;
@@ -173,8 +176,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
 
        /* copy domain */
        if (ses->domainName != NULL) {
-               strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
-               bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+               len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+               if (WARN_ON_ONCE(len < 0))
+                       len = CIFS_MAX_DOMAINNAME_LEN - 1;
+               bcc_ptr += len;
        } /* else we will send a null domain name
             so the server will default to its own domain */
        *bcc_ptr = 0;
@@ -242,9 +247,10 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
 
        kfree(ses->serverOS);
 
-       ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
+       ses->serverOS = kmalloc(len + 1, GFP_KERNEL);
        if (ses->serverOS) {
-               strncpy(ses->serverOS, bcc_ptr, len);
+               memcpy(ses->serverOS, bcc_ptr, len);
+               ses->serverOS[len] = 0;
                if (strncmp(ses->serverOS, "OS/2", 4) == 0)
                        cifs_dbg(FYI, "OS/2 server\n");
        }
@@ -258,9 +264,11 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
 
        kfree(ses->serverNOS);
 
-       ses->serverNOS = kzalloc(len + 1, GFP_KERNEL);
-       if (ses->serverNOS)
-               strncpy(ses->serverNOS, bcc_ptr, len);
+       ses->serverNOS = kmalloc(len + 1, GFP_KERNEL);
+       if (ses->serverNOS) {
+               memcpy(ses->serverNOS, bcc_ptr, len);
+               ses->serverNOS[len] = 0;
+       }
 
        bcc_ptr += len + 1;
        bleft -= len + 1;
index f752d83a9c44807818d12ad7f010cdaf43b83c8d..520f1813e789c0553e84edc6f7e2255fde174bf1 100644 (file)
 #include <linux/list.h>
 #include <linux/spinlock.h>
 
+struct configfs_fragment {
+       atomic_t frag_count;
+       struct rw_semaphore frag_sem;
+       bool frag_dead;
+};
+
+void put_fragment(struct configfs_fragment *);
+struct configfs_fragment *get_fragment(struct configfs_fragment *);
+
 struct configfs_dirent {
        atomic_t                s_count;
        int                     s_dependent_count;
@@ -34,6 +43,7 @@ struct configfs_dirent {
 #ifdef CONFIG_LOCKDEP
        int                     s_depth;
 #endif
+       struct configfs_fragment *s_frag;
 };
 
 #define CONFIGFS_ROOT          0x0001
@@ -61,8 +71,8 @@ extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct in
 extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
 extern int configfs_create_bin_file(struct config_item *,
                                    const struct configfs_bin_attribute *);
-extern int configfs_make_dirent(struct configfs_dirent *,
-                               struct dentry *, void *, umode_t, int);
+extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *,
+                               void *, umode_t, int, struct configfs_fragment *);
 extern int configfs_dirent_is_ready(struct configfs_dirent *);
 
 extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
@@ -137,6 +147,7 @@ static inline void release_configfs_dirent(struct configfs_dirent * sd)
 {
        if (!(sd->s_type & CONFIGFS_ROOT)) {
                kfree(sd->s_iattr);
+               put_fragment(sd->s_frag);
                kmem_cache_free(configfs_dir_cachep, sd);
        }
 }
index 92112915de8e752a1a9ae1b548513e41c7d0f7e4..79fc25aaa8cd5e8168d32a7ada8bb0ec40af4c1d 100644 (file)
@@ -151,11 +151,38 @@ configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
 
 #endif /* CONFIG_LOCKDEP */
 
+static struct configfs_fragment *new_fragment(void)
+{
+       struct configfs_fragment *p;
+
+       p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
+       if (p) {
+               atomic_set(&p->frag_count, 1);
+               init_rwsem(&p->frag_sem);
+               p->frag_dead = false;
+       }
+       return p;
+}
+
+void put_fragment(struct configfs_fragment *frag)
+{
+       if (frag && atomic_dec_and_test(&frag->frag_count))
+               kfree(frag);
+}
+
+struct configfs_fragment *get_fragment(struct configfs_fragment *frag)
+{
+       if (likely(frag))
+               atomic_inc(&frag->frag_count);
+       return frag;
+}
+
 /*
  * Allocates a new configfs_dirent and links it to the parent configfs_dirent
  */
 static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
-                                                  void *element, int type)
+                                                  void *element, int type,
+                                                  struct configfs_fragment *frag)
 {
        struct configfs_dirent * sd;
 
@@ -175,6 +202,7 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren
                kmem_cache_free(configfs_dir_cachep, sd);
                return ERR_PTR(-ENOENT);
        }
+       sd->s_frag = get_fragment(frag);
        list_add(&sd->s_sibling, &parent_sd->s_children);
        spin_unlock(&configfs_dirent_lock);
 
@@ -209,11 +237,11 @@ static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
 
 int configfs_make_dirent(struct configfs_dirent * parent_sd,
                         struct dentry * dentry, void * element,
-                        umode_t mode, int type)
+                        umode_t mode, int type, struct configfs_fragment *frag)
 {
        struct configfs_dirent * sd;
 
-       sd = configfs_new_dirent(parent_sd, element, type);
+       sd = configfs_new_dirent(parent_sd, element, type, frag);
        if (IS_ERR(sd))
                return PTR_ERR(sd);
 
@@ -260,7 +288,8 @@ static void init_symlink(struct inode * inode)
  *     until it is validated by configfs_dir_set_ready()
  */
 
-static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
+static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
+                               struct configfs_fragment *frag)
 {
        int error;
        umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
@@ -273,7 +302,8 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
                return error;
 
        error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
-                                    CONFIGFS_DIR | CONFIGFS_USET_CREATING);
+                                    CONFIGFS_DIR | CONFIGFS_USET_CREATING,
+                                    frag);
        if (unlikely(error))
                return error;
 
@@ -338,9 +368,10 @@ int configfs_create_link(struct configfs_symlink *sl,
 {
        int err = 0;
        umode_t mode = S_IFLNK | S_IRWXUGO;
+       struct configfs_dirent *p = parent->d_fsdata;
 
-       err = configfs_make_dirent(parent->d_fsdata, dentry, sl, mode,
-                                  CONFIGFS_ITEM_LINK);
+       err = configfs_make_dirent(p, dentry, sl, mode,
+                                  CONFIGFS_ITEM_LINK, p->s_frag);
        if (!err) {
                err = configfs_create(dentry, mode, init_symlink);
                if (err) {
@@ -599,7 +630,8 @@ static int populate_attrs(struct config_item *item)
 
 static int configfs_attach_group(struct config_item *parent_item,
                                 struct config_item *item,
-                                struct dentry *dentry);
+                                struct dentry *dentry,
+                                struct configfs_fragment *frag);
 static void configfs_detach_group(struct config_item *item);
 
 static void detach_groups(struct config_group *group)
@@ -647,7 +679,8 @@ static void detach_groups(struct config_group *group)
  * try using vfs_mkdir.  Just a thought.
  */
 static int create_default_group(struct config_group *parent_group,
-                               struct config_group *group)
+                               struct config_group *group,
+                               struct configfs_fragment *frag)
 {
        int ret;
        struct configfs_dirent *sd;
@@ -663,7 +696,7 @@ static int create_default_group(struct config_group *parent_group,
                d_add(child, NULL);
 
                ret = configfs_attach_group(&parent_group->cg_item,
-                                           &group->cg_item, child);
+                                           &group->cg_item, child, frag);
                if (!ret) {
                        sd = child->d_fsdata;
                        sd->s_type |= CONFIGFS_USET_DEFAULT;
@@ -677,13 +710,14 @@ static int create_default_group(struct config_group *parent_group,
        return ret;
 }
 
-static int populate_groups(struct config_group *group)
+static int populate_groups(struct config_group *group,
+                          struct configfs_fragment *frag)
 {
        struct config_group *new_group;
        int ret = 0;
 
        list_for_each_entry(new_group, &group->default_groups, group_entry) {
-               ret = create_default_group(group, new_group);
+               ret = create_default_group(group, new_group, frag);
                if (ret) {
                        detach_groups(group);
                        break;
@@ -797,11 +831,12 @@ static void link_group(struct config_group *parent_group, struct config_group *g
  */
 static int configfs_attach_item(struct config_item *parent_item,
                                struct config_item *item,
-                               struct dentry *dentry)
+                               struct dentry *dentry,
+                               struct configfs_fragment *frag)
 {
        int ret;
 
-       ret = configfs_create_dir(item, dentry);
+       ret = configfs_create_dir(item, dentry, frag);
        if (!ret) {
                ret = populate_attrs(item);
                if (ret) {
@@ -831,12 +866,13 @@ static void configfs_detach_item(struct config_item *item)
 
 static int configfs_attach_group(struct config_item *parent_item,
                                 struct config_item *item,
-                                struct dentry *dentry)
+                                struct dentry *dentry,
+                                struct configfs_fragment *frag)
 {
        int ret;
        struct configfs_dirent *sd;
 
-       ret = configfs_attach_item(parent_item, item, dentry);
+       ret = configfs_attach_item(parent_item, item, dentry, frag);
        if (!ret) {
                sd = dentry->d_fsdata;
                sd->s_type |= CONFIGFS_USET_DIR;
@@ -852,7 +888,7 @@ static int configfs_attach_group(struct config_item *parent_item,
                 */
                inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
                configfs_adjust_dir_dirent_depth_before_populate(sd);
-               ret = populate_groups(to_config_group(item));
+               ret = populate_groups(to_config_group(item), frag);
                if (ret) {
                        configfs_detach_item(item);
                        d_inode(dentry)->i_flags |= S_DEAD;
@@ -1247,6 +1283,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
        struct configfs_dirent *sd;
        const struct config_item_type *type;
        struct module *subsys_owner = NULL, *new_item_owner = NULL;
+       struct configfs_fragment *frag;
        char *name;
 
        sd = dentry->d_parent->d_fsdata;
@@ -1265,6 +1302,12 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
                goto out;
        }
 
+       frag = new_fragment();
+       if (!frag) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
        /* Get a working ref for the duration of this function */
        parent_item = configfs_get_config_item(dentry->d_parent);
        type = parent_item->ci_type;
@@ -1367,9 +1410,9 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
        spin_unlock(&configfs_dirent_lock);
 
        if (group)
-               ret = configfs_attach_group(parent_item, item, dentry);
+               ret = configfs_attach_group(parent_item, item, dentry, frag);
        else
-               ret = configfs_attach_item(parent_item, item, dentry);
+               ret = configfs_attach_item(parent_item, item, dentry, frag);
 
        spin_lock(&configfs_dirent_lock);
        sd->s_type &= ~CONFIGFS_USET_IN_MKDIR;
@@ -1406,6 +1449,7 @@ out_put:
         * reference.
         */
        config_item_put(parent_item);
+       put_fragment(frag);
 
 out:
        return ret;
@@ -1417,6 +1461,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
        struct config_item *item;
        struct configfs_subsystem *subsys;
        struct configfs_dirent *sd;
+       struct configfs_fragment *frag;
        struct module *subsys_owner = NULL, *dead_item_owner = NULL;
        int ret;
 
@@ -1474,6 +1519,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
                }
        } while (ret == -EAGAIN);
 
+       frag = sd->s_frag;
+       if (down_write_killable(&frag->frag_sem)) {
+               spin_lock(&configfs_dirent_lock);
+               configfs_detach_rollback(dentry);
+               spin_unlock(&configfs_dirent_lock);
+               return -EINTR;
+       }
+       frag->frag_dead = true;
+       up_write(&frag->frag_sem);
+
        /* Get a working ref for the duration of this function */
        item = configfs_get_config_item(dentry);
 
@@ -1574,7 +1629,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
         */
        err = -ENOENT;
        if (configfs_dirent_is_ready(parent_sd)) {
-               file->private_data = configfs_new_dirent(parent_sd, NULL, 0);
+               file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
                if (IS_ERR(file->private_data))
                        err = PTR_ERR(file->private_data);
                else
@@ -1732,8 +1787,13 @@ int configfs_register_group(struct config_group *parent_group,
 {
        struct configfs_subsystem *subsys = parent_group->cg_subsys;
        struct dentry *parent;
+       struct configfs_fragment *frag;
        int ret;
 
+       frag = new_fragment();
+       if (!frag)
+               return -ENOMEM;
+
        mutex_lock(&subsys->su_mutex);
        link_group(parent_group, group);
        mutex_unlock(&subsys->su_mutex);
@@ -1741,7 +1801,7 @@ int configfs_register_group(struct config_group *parent_group,
        parent = parent_group->cg_item.ci_dentry;
 
        inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
-       ret = create_default_group(parent_group, group);
+       ret = create_default_group(parent_group, group, frag);
        if (ret)
                goto err_out;
 
@@ -1749,12 +1809,14 @@ int configfs_register_group(struct config_group *parent_group,
        configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
        spin_unlock(&configfs_dirent_lock);
        inode_unlock(d_inode(parent));
+       put_fragment(frag);
        return 0;
 err_out:
        inode_unlock(d_inode(parent));
        mutex_lock(&subsys->su_mutex);
        unlink_group(group);
        mutex_unlock(&subsys->su_mutex);
+       put_fragment(frag);
        return ret;
 }
 EXPORT_SYMBOL(configfs_register_group);
@@ -1770,16 +1832,12 @@ void configfs_unregister_group(struct config_group *group)
        struct configfs_subsystem *subsys = group->cg_subsys;
        struct dentry *dentry = group->cg_item.ci_dentry;
        struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
+       struct configfs_dirent *sd = dentry->d_fsdata;
+       struct configfs_fragment *frag = sd->s_frag;
 
-       mutex_lock(&subsys->su_mutex);
-       if (!group->cg_item.ci_parent->ci_group) {
-               /*
-                * The parent has already been unlinked and detached
-                * due to a rmdir.
-                */
-               goto unlink_group;
-       }
-       mutex_unlock(&subsys->su_mutex);
+       down_write(&frag->frag_sem);
+       frag->frag_dead = true;
+       up_write(&frag->frag_sem);
 
        inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
        spin_lock(&configfs_dirent_lock);
@@ -1796,7 +1854,6 @@ void configfs_unregister_group(struct config_group *group)
        dput(dentry);
 
        mutex_lock(&subsys->su_mutex);
-unlink_group:
        unlink_group(group);
        mutex_unlock(&subsys->su_mutex);
 }
@@ -1853,10 +1910,17 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
        struct dentry *dentry;
        struct dentry *root;
        struct configfs_dirent *sd;
+       struct configfs_fragment *frag;
+
+       frag = new_fragment();
+       if (!frag)
+               return -ENOMEM;
 
        root = configfs_pin_fs();
-       if (IS_ERR(root))
+       if (IS_ERR(root)) {
+               put_fragment(frag);
                return PTR_ERR(root);
+       }
 
        if (!group->cg_item.ci_name)
                group->cg_item.ci_name = group->cg_item.ci_namebuf;
@@ -1872,7 +1936,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
                d_add(dentry, NULL);
 
                err = configfs_attach_group(sd->s_element, &group->cg_item,
-                                           dentry);
+                                           dentry, frag);
                if (err) {
                        BUG_ON(d_inode(dentry));
                        d_drop(dentry);
@@ -1890,6 +1954,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
                unlink_group(group);
                configfs_release_fs();
        }
+       put_fragment(frag);
 
        return err;
 }
@@ -1899,12 +1964,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
        struct config_group *group = &subsys->su_group;
        struct dentry *dentry = group->cg_item.ci_dentry;
        struct dentry *root = dentry->d_sb->s_root;
+       struct configfs_dirent *sd = dentry->d_fsdata;
+       struct configfs_fragment *frag = sd->s_frag;
 
        if (dentry->d_parent != root) {
                pr_err("Tried to unregister non-subsystem!\n");
                return;
        }
 
+       down_write(&frag->frag_sem);
+       frag->frag_dead = true;
+       up_write(&frag->frag_sem);
+
        inode_lock_nested(d_inode(root),
                          I_MUTEX_PARENT);
        inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
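
The configfs_fragment introduced above is a small revoke object: every dirent created under a registration pins one fragment via a refcount, teardown marks it dead under the write side of frag_sem, and (as the fs/configfs/file.c changes below show) attribute show()/store() calls only run under the read side while the fragment is still alive. A rough userspace sketch of that pattern, assuming pthreads and C11 atomics; all names here are illustrative, not the configfs API:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative analogue of struct configfs_fragment. */
    struct fragment {
            atomic_int       refcount;
            pthread_rwlock_t sem;
            bool             dead;
    };

    static struct fragment *new_fragment(void)
    {
            struct fragment *f = malloc(sizeof(*f));

            if (f) {
                    atomic_init(&f->refcount, 1);
                    pthread_rwlock_init(&f->sem, NULL);
                    f->dead = false;
            }
            return f;
    }

    static struct fragment *get_fragment(struct fragment *f)
    {
            if (f)
                    atomic_fetch_add(&f->refcount, 1);
            return f;
    }

    static void put_fragment(struct fragment *f)
    {
            if (f && atomic_fetch_sub(&f->refcount, 1) == 1) {
                    pthread_rwlock_destroy(&f->sem);
                    free(f);
            }
    }

    /* Consumer side: only call into the "attribute" while the fragment lives. */
    static int show_attr(struct fragment *f)
    {
            int ret = -2;                           /* stand-in for -ENOENT */

            pthread_rwlock_rdlock(&f->sem);
            if (!f->dead)
                    ret = printf("attribute body\n");   /* the ->show() call */
            pthread_rwlock_unlock(&f->sem);
            return ret;
    }

    /* Teardown side: mark dead under the write lock, then drop the creator ref. */
    static void kill_fragment(struct fragment *f)
    {
            pthread_rwlock_wrlock(&f->sem);
            f->dead = true;
            pthread_rwlock_unlock(&f->sem);
            put_fragment(f);
    }

    int main(void)
    {
            struct fragment *f = new_fragment();
            struct fragment *open_ref;

            if (!f)
                    return 1;
            open_ref = get_fragment(f);     /* an open file pinning the fragment */
            show_attr(open_ref);            /* succeeds while alive */
            kill_fragment(f);               /* teardown: mark dead, drop ref */
            printf("after teardown: %d\n", show_attr(open_ref));   /* -2 */
            put_fragment(open_ref);         /* last reference frees it */
            return 0;
    }
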
index 61e4db4390a1f2878a4a3bcb9c7fabcd129309dd..fb65b706cc0db9976b552c884a8ae056f27f3a68 100644 (file)
@@ -39,40 +39,44 @@ struct configfs_buffer {
        bool                    write_in_progress;
        char                    *bin_buffer;
        int                     bin_buffer_size;
+       int                     cb_max_size;
+       struct config_item      *item;
+       struct module           *owner;
+       union {
+               struct configfs_attribute       *attr;
+               struct configfs_bin_attribute   *bin_attr;
+       };
 };
 
+static inline struct configfs_fragment *to_frag(struct file *file)
+{
+       struct configfs_dirent *sd = file->f_path.dentry->d_fsdata;
 
-/**
- *     fill_read_buffer - allocate and fill buffer from item.
- *     @dentry:        dentry pointer.
- *     @buffer:        data buffer for file.
- *
- *     Allocate @buffer->page, if it hasn't been already, then call the
- *     config_item's show() method to fill the buffer with this attribute's
- *     data.
- *     This is called only once, on the file's first read.
- */
-static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buffer)
+       return sd->s_frag;
+}
+
+static int fill_read_buffer(struct file *file, struct configfs_buffer *buffer)
 {
-       struct configfs_attribute * attr = to_attr(dentry);
-       struct config_item * item = to_item(dentry->d_parent);
-       int ret = 0;
-       ssize_t count;
+       struct configfs_fragment *frag = to_frag(file);
+       ssize_t count = -ENOENT;
 
        if (!buffer->page)
                buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
        if (!buffer->page)
                return -ENOMEM;
 
-       count = attr->show(item, buffer->page);
-
-       BUG_ON(count > (ssize_t)SIMPLE_ATTR_SIZE);
-       if (count >= 0) {
-               buffer->needs_read_fill = 0;
-               buffer->count = count;
-       } else
-               ret = count;
-       return ret;
+       down_read(&frag->frag_sem);
+       if (!frag->frag_dead)
+               count = buffer->attr->show(buffer->item, buffer->page);
+       up_read(&frag->frag_sem);
+
+       if (count < 0)
+               return count;
+       if (WARN_ON_ONCE(count > (ssize_t)SIMPLE_ATTR_SIZE))
+               return -EIO;
+       buffer->needs_read_fill = 0;
+       buffer->count = count;
+       return 0;
 }
 
 /**
@@ -97,12 +101,13 @@ static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buf
 static ssize_t
 configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
-       struct configfs_buffer * buffer = file->private_data;
+       struct configfs_buffer *buffer = file->private_data;
        ssize_t retval = 0;
 
        mutex_lock(&buffer->mutex);
        if (buffer->needs_read_fill) {
-               if ((retval = fill_read_buffer(file->f_path.dentry,buffer)))
+               retval = fill_read_buffer(file, buffer);
+               if (retval)
                        goto out;
        }
        pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
@@ -138,10 +143,8 @@ static ssize_t
 configfs_read_bin_file(struct file *file, char __user *buf,
                       size_t count, loff_t *ppos)
 {
+       struct configfs_fragment *frag = to_frag(file);
        struct configfs_buffer *buffer = file->private_data;
-       struct dentry *dentry = file->f_path.dentry;
-       struct config_item *item = to_item(dentry->d_parent);
-       struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
        ssize_t retval = 0;
        ssize_t len = min_t(size_t, count, PAGE_SIZE);
 
@@ -156,14 +159,19 @@ configfs_read_bin_file(struct file *file, char __user *buf,
 
        if (buffer->needs_read_fill) {
                /* perform first read with buf == NULL to get extent */
-               len = bin_attr->read(item, NULL, 0);
+               down_read(&frag->frag_sem);
+               if (!frag->frag_dead)
+                       len = buffer->bin_attr->read(buffer->item, NULL, 0);
+               else
+                       len = -ENOENT;
+               up_read(&frag->frag_sem);
                if (len <= 0) {
                        retval = len;
                        goto out;
                }
 
                /* do not exceed the maximum value */
-               if (bin_attr->cb_max_size && len > bin_attr->cb_max_size) {
+               if (buffer->cb_max_size && len > buffer->cb_max_size) {
                        retval = -EFBIG;
                        goto out;
                }
@@ -176,7 +184,13 @@ configfs_read_bin_file(struct file *file, char __user *buf,
                buffer->bin_buffer_size = len;
 
                /* perform second read to fill buffer */
-               len = bin_attr->read(item, buffer->bin_buffer, len);
+               down_read(&frag->frag_sem);
+               if (!frag->frag_dead)
+                       len = buffer->bin_attr->read(buffer->item,
+                                                    buffer->bin_buffer, len);
+               else
+                       len = -ENOENT;
+               up_read(&frag->frag_sem);
                if (len < 0) {
                        retval = len;
                        vfree(buffer->bin_buffer);
@@ -226,25 +240,17 @@ fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size
        return error ? -EFAULT : count;
 }
 
-
-/**
- *     flush_write_buffer - push buffer to config_item.
- *     @dentry:        dentry to the attribute
- *     @buffer:        data buffer for file.
- *     @count:         number of bytes
- *
- *     Get the correct pointers for the config_item and the attribute we're
- *     dealing with, then call the store() method for the attribute,
- *     passing the buffer that we acquired in fill_write_buffer().
- */
-
 static int
-flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size_t count)
+flush_write_buffer(struct file *file, struct configfs_buffer *buffer, size_t count)
 {
-       struct configfs_attribute * attr = to_attr(dentry);
-       struct config_item * item = to_item(dentry->d_parent);
-
-       return attr->store(item, buffer->page, count);
+       struct configfs_fragment *frag = to_frag(file);
+       int res = -ENOENT;
+
+       down_read(&frag->frag_sem);
+       if (!frag->frag_dead)
+               res = buffer->attr->store(buffer->item, buffer->page, count);
+       up_read(&frag->frag_sem);
+       return res;
 }
 
 
@@ -268,13 +274,13 @@ flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size
 static ssize_t
 configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
-       struct configfs_buffer * buffer = file->private_data;
+       struct configfs_buffer *buffer = file->private_data;
        ssize_t len;
 
        mutex_lock(&buffer->mutex);
        len = fill_write_buffer(buffer, buf, count);
        if (len > 0)
-               len = flush_write_buffer(file->f_path.dentry, buffer, len);
+               len = flush_write_buffer(file, buffer, len);
        if (len > 0)
                *ppos += len;
        mutex_unlock(&buffer->mutex);
@@ -299,8 +305,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
                        size_t count, loff_t *ppos)
 {
        struct configfs_buffer *buffer = file->private_data;
-       struct dentry *dentry = file->f_path.dentry;
-       struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
        void *tbuf = NULL;
        ssize_t len;
 
@@ -316,8 +320,8 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
        /* buffer grows? */
        if (*ppos + count > buffer->bin_buffer_size) {
 
-               if (bin_attr->cb_max_size &&
-                       *ppos + count > bin_attr->cb_max_size) {
+               if (buffer->cb_max_size &&
+                       *ppos + count > buffer->cb_max_size) {
                        len = -EFBIG;
                        goto out;
                }
@@ -349,31 +353,51 @@ out:
        return len;
 }
 
-static int check_perm(struct inode * inode, struct file * file, int type)
+static int __configfs_open_file(struct inode *inode, struct file *file, int type)
 {
-       struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent);
-       struct configfs_attribute * attr = to_attr(file->f_path.dentry);
-       struct configfs_bin_attribute *bin_attr = NULL;
-       struct configfs_buffer * buffer;
-       struct configfs_item_operations * ops = NULL;
-       int error = 0;
+       struct dentry *dentry = file->f_path.dentry;
+       struct configfs_fragment *frag = to_frag(file);
+       struct configfs_attribute *attr;
+       struct configfs_buffer *buffer;
+       int error;
 
-       if (!item || !attr)
-               goto Einval;
+       error = -ENOMEM;
+       buffer = kzalloc(sizeof(struct configfs_buffer), GFP_KERNEL);
+       if (!buffer)
+               goto out;
 
-       if (type & CONFIGFS_ITEM_BIN_ATTR)
-               bin_attr = to_bin_attr(file->f_path.dentry);
+       error = -ENOENT;
+       down_read(&frag->frag_sem);
+       if (unlikely(frag->frag_dead))
+               goto out_free_buffer;
 
-       /* Grab the module reference for this attribute if we have one */
-       if (!try_module_get(attr->ca_owner)) {
-               error = -ENODEV;
-               goto Done;
+       error = -EINVAL;
+       buffer->item = to_item(dentry->d_parent);
+       if (!buffer->item)
+               goto out_free_buffer;
+
+       attr = to_attr(dentry);
+       if (!attr)
+               goto out_put_item;
+
+       if (type & CONFIGFS_ITEM_BIN_ATTR) {
+               buffer->bin_attr = to_bin_attr(dentry);
+               buffer->cb_max_size = buffer->bin_attr->cb_max_size;
+       } else {
+               buffer->attr = attr;
        }
 
-       if (item->ci_type)
-               ops = item->ci_type->ct_item_ops;
-       else
-               goto Eaccess;
+       buffer->owner = attr->ca_owner;
+       /* Grab the module reference for this attribute if we have one */
+       error = -ENODEV;
+       if (!try_module_get(buffer->owner))
+               goto out_put_item;
+
+       error = -EACCES;
+       if (!buffer->item->ci_type)
+               goto out_put_module;
+
+       buffer->ops = buffer->item->ci_type->ct_item_ops;
 
        /* File needs write support.
         * The inode's perms must say it's ok,
@@ -381,13 +405,11 @@ static int check_perm(struct inode * inode, struct file * file, int type)
         */
        if (file->f_mode & FMODE_WRITE) {
                if (!(inode->i_mode & S_IWUGO))
-                       goto Eaccess;
-
+                       goto out_put_module;
                if ((type & CONFIGFS_ITEM_ATTR) && !attr->store)
-                       goto Eaccess;
-
-               if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->write)
-                       goto Eaccess;
+                       goto out_put_module;
+               if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->write)
+                       goto out_put_module;
        }
 
        /* File needs read support.
@@ -396,92 +418,72 @@ static int check_perm(struct inode * inode, struct file * file, int type)
         */
        if (file->f_mode & FMODE_READ) {
                if (!(inode->i_mode & S_IRUGO))
-                       goto Eaccess;
-
+                       goto out_put_module;
                if ((type & CONFIGFS_ITEM_ATTR) && !attr->show)
-                       goto Eaccess;
-
-               if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->read)
-                       goto Eaccess;
+                       goto out_put_module;
+               if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->read)
+                       goto out_put_module;
        }
 
-       /* No error? Great, allocate a buffer for the file, and store it
-        * it in file->private_data for easy access.
-        */
-       buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
-       if (!buffer) {
-               error = -ENOMEM;
-               goto Enomem;
-       }
        mutex_init(&buffer->mutex);
        buffer->needs_read_fill = 1;
        buffer->read_in_progress = false;
        buffer->write_in_progress = false;
-       buffer->ops = ops;
        file->private_data = buffer;
-       goto Done;
+       up_read(&frag->frag_sem);
+       return 0;
 
- Einval:
-       error = -EINVAL;
-       goto Done;
- Eaccess:
-       error = -EACCES;
- Enomem:
-       module_put(attr->ca_owner);
- Done:
-       if (error && item)
-               config_item_put(item);
+out_put_module:
+       module_put(buffer->owner);
+out_put_item:
+       config_item_put(buffer->item);
+out_free_buffer:
+       up_read(&frag->frag_sem);
+       kfree(buffer);
+out:
        return error;
 }
 
 static int configfs_release(struct inode *inode, struct file *filp)
 {
-       struct config_item * item = to_item(filp->f_path.dentry->d_parent);
-       struct configfs_attribute * attr = to_attr(filp->f_path.dentry);
-       struct module * owner = attr->ca_owner;
-       struct configfs_buffer * buffer = filp->private_data;
-
-       if (item)
-               config_item_put(item);
-       /* After this point, attr should not be accessed. */
-       module_put(owner);
-
-       if (buffer) {
-               if (buffer->page)
-                       free_page((unsigned long)buffer->page);
-               mutex_destroy(&buffer->mutex);
-               kfree(buffer);
-       }
+       struct configfs_buffer *buffer = filp->private_data;
+
+       module_put(buffer->owner);
+       if (buffer->page)
+               free_page((unsigned long)buffer->page);
+       mutex_destroy(&buffer->mutex);
+       kfree(buffer);
        return 0;
 }
 
 static int configfs_open_file(struct inode *inode, struct file *filp)
 {
-       return check_perm(inode, filp, CONFIGFS_ITEM_ATTR);
+       return __configfs_open_file(inode, filp, CONFIGFS_ITEM_ATTR);
 }
 
 static int configfs_open_bin_file(struct inode *inode, struct file *filp)
 {
-       return check_perm(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
+       return __configfs_open_file(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
 }
 
-static int configfs_release_bin_file(struct inode *inode, struct file *filp)
+static int configfs_release_bin_file(struct inode *inode, struct file *file)
 {
-       struct configfs_buffer *buffer = filp->private_data;
-       struct dentry *dentry = filp->f_path.dentry;
-       struct config_item *item = to_item(dentry->d_parent);
-       struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
-       ssize_t len = 0;
-       int ret;
+       struct configfs_buffer *buffer = file->private_data;
 
        buffer->read_in_progress = false;
 
        if (buffer->write_in_progress) {
+               struct configfs_fragment *frag = to_frag(file);
                buffer->write_in_progress = false;
 
-               len = bin_attr->write(item, buffer->bin_buffer,
-                               buffer->bin_buffer_size);
-
+               down_read(&frag->frag_sem);
+               if (!frag->frag_dead) {
+                       /* result of ->release() is ignored */
+                       buffer->bin_attr->write(buffer->item,
+                                       buffer->bin_buffer,
+                                       buffer->bin_buffer_size);
+               }
+               up_read(&frag->frag_sem);
                /* vfree on NULL is safe */
                vfree(buffer->bin_buffer);
                buffer->bin_buffer = NULL;
@@ -489,10 +491,8 @@ static int configfs_release_bin_file(struct inode *inode, struct file *filp)
                buffer->needs_read_fill = 1;
        }
 
-       ret = configfs_release(inode, filp);
-       if (len < 0)
-               return len;
-       return ret;
+       configfs_release(inode, file);
+       return 0;
 }
 
 
@@ -527,7 +527,7 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib
 
        inode_lock_nested(d_inode(dir), I_MUTEX_NORMAL);
        error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
-                                    CONFIGFS_ITEM_ATTR);
+                                    CONFIGFS_ITEM_ATTR, parent_sd->s_frag);
        inode_unlock(d_inode(dir));
 
        return error;
@@ -549,7 +549,7 @@ int configfs_create_bin_file(struct config_item *item,
 
        inode_lock_nested(dir->d_inode, I_MUTEX_NORMAL);
        error = configfs_make_dirent(parent_sd, NULL, (void *) bin_attr, mode,
-                                    CONFIGFS_ITEM_BIN_ATTR);
+                                    CONFIGFS_ITEM_BIN_ATTR, parent_sd->s_frag);
        inode_unlock(dir->d_inode);
 
        return error;
index 420fe3deed39702b93a5567dbebe77a8e4b6b992..006b7a2070bf6869d2f7c42500eea135919def74 100644 (file)
@@ -4586,7 +4586,6 @@ static int __ext4_get_inode_loc(struct inode *inode,
        struct buffer_head      *bh;
        struct super_block      *sb = inode->i_sb;
        ext4_fsblk_t            block;
-       struct blk_plug         plug;
        int                     inodes_per_block, inode_offset;
 
        iloc->bh = NULL;
@@ -4675,7 +4674,6 @@ make_io:
                 * If we need to do any I/O, try to pre-readahead extra
                 * blocks from the inode table.
                 */
-               blk_start_plug(&plug);
                if (EXT4_SB(sb)->s_inode_readahead_blks) {
                        ext4_fsblk_t b, end, table;
                        unsigned num;
@@ -4706,7 +4704,6 @@ make_io:
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
-               blk_finish_plug(&plug);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        EXT4_ERROR_INODE_BLOCK(inode, block,
index 8d501093660f4a052a781d61c46a507fa4c7c1ca..0adfd88401108029c2fb065db3fd8307580aa427 100644 (file)
@@ -1487,7 +1487,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
        if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
                nfs_file_set_open_context(file, ctx);
        else
-               err = -ESTALE;
+               err = -EOPENSTALE;
 out:
        return err;
 }
index 0cb44240616889f62616f7804be7dd909c3b9de1..222d7115db713a76d1e4312ec1c22752e38f3709 100644 (file)
@@ -401,15 +401,21 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
        unsigned long bytes = 0;
        struct nfs_direct_req *dreq = hdr->dreq;
 
-       if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
-               goto out_put;
-
        spin_lock(&dreq->lock);
-       if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
+       if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
                dreq->error = hdr->error;
-       else
+
+       if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+               spin_unlock(&dreq->lock);
+               goto out_put;
+       }
+
+       if (hdr->good_bytes != 0)
                nfs_direct_good_bytes(dreq, hdr);
 
+       if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
+               dreq->error = 0;
+
        spin_unlock(&dreq->lock);
 
        while (!list_empty(&hdr->pages)) {
@@ -782,16 +788,19 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
        bool request_commit = false;
        struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 
-       if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
-               goto out_put;
-
        nfs_init_cinfo_from_dreq(&cinfo, dreq);
 
        spin_lock(&dreq->lock);
 
        if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
                dreq->error = hdr->error;
-       if (dreq->error == 0) {
+
+       if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+               spin_unlock(&dreq->lock);
+               goto out_put;
+       }
+
+       if (hdr->good_bytes != 0) {
                nfs_direct_good_bytes(dreq, hdr);
                if (nfs_write_need_commit(hdr)) {
                        if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
index b04e20d28162d489424606df91f452a0a64c2d4c..5657b7f2611f19e8449e7e51e8b363ef20d573f1 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
 #include <linux/nfs_page.h>
 #include <linux/module.h>
 #include <linux/sched/mm.h>
@@ -928,7 +929,9 @@ retry:
        pgm = &pgio->pg_mirrors[0];
        pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
 
-       pgio->pg_maxretrans = io_maxretrans;
+       if (NFS_SERVER(pgio->pg_inode)->flags &
+                       (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+               pgio->pg_maxretrans = io_maxretrans;
        return;
 out_nolseg:
        if (pgio->pg_error < 0)
@@ -940,6 +943,7 @@ out_mds:
                        pgio->pg_lseg);
        pnfs_put_lseg(pgio->pg_lseg);
        pgio->pg_lseg = NULL;
+       pgio->pg_maxretrans = 0;
        nfs_pageio_reset_read_mds(pgio);
 }
 
@@ -1000,7 +1004,9 @@ retry:
                pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
        }
 
-       pgio->pg_maxretrans = io_maxretrans;
+       if (NFS_SERVER(pgio->pg_inode)->flags &
+                       (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+               pgio->pg_maxretrans = io_maxretrans;
        return;
 
 out_mds:
@@ -1010,6 +1016,7 @@ out_mds:
                        pgio->pg_lseg);
        pnfs_put_lseg(pgio->pg_lseg);
        pgio->pg_lseg = NULL;
+       pgio->pg_maxretrans = 0;
        nfs_pageio_reset_write_mds(pgio);
 }
 
@@ -1148,8 +1155,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
                break;
        case -NFS4ERR_RETRY_UNCACHED_REP:
                break;
-       case -EAGAIN:
-               return -NFS4ERR_RESET_TO_PNFS;
        /* Invalidate Layout errors */
        case -NFS4ERR_PNFS_NO_LAYOUT:
        case -ESTALE:           /* mapped NFS4ERR_STALE */
@@ -1210,7 +1215,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
        case -EBADHANDLE:
        case -ELOOP:
        case -ENOSPC:
-       case -EAGAIN:
                break;
        case -EJUKEBOX:
                nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
@@ -1445,16 +1449,6 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
        ff_layout_read_prepare_common(task, hdr);
 }
 
-static void
-ff_layout_io_prepare_transmit(struct rpc_task *task,
-               void *data)
-{
-       struct nfs_pgio_header *hdr = data;
-
-       if (!pnfs_is_valid_lseg(hdr->lseg))
-               rpc_exit(task, -EAGAIN);
-}
-
 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
 {
        struct nfs_pgio_header *hdr = data;
@@ -1740,7 +1734,6 @@ static void ff_layout_commit_release(void *data)
 
 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
        .rpc_call_prepare = ff_layout_read_prepare_v3,
-       .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
        .rpc_call_done = ff_layout_read_call_done,
        .rpc_count_stats = ff_layout_read_count_stats,
        .rpc_release = ff_layout_read_release,
@@ -1748,7 +1741,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
 
 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
        .rpc_call_prepare = ff_layout_read_prepare_v4,
-       .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
        .rpc_call_done = ff_layout_read_call_done,
        .rpc_count_stats = ff_layout_read_count_stats,
        .rpc_release = ff_layout_read_release,
@@ -1756,7 +1748,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
 
 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
        .rpc_call_prepare = ff_layout_write_prepare_v3,
-       .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
        .rpc_call_done = ff_layout_write_call_done,
        .rpc_count_stats = ff_layout_write_count_stats,
        .rpc_release = ff_layout_write_release,
@@ -1764,7 +1755,6 @@ static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
 
 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
        .rpc_call_prepare = ff_layout_write_prepare_v4,
-       .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
        .rpc_call_done = ff_layout_write_call_done,
        .rpc_count_stats = ff_layout_write_count_stats,
        .rpc_release = ff_layout_write_release,
index 8a1758200b579198d24b562905a24765006f87ca..2a03bfeec10a409a577eb2d773648c032d692d6e 100644 (file)
@@ -1403,12 +1403,22 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
        if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
                return 0;
 
+       if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
+               /* Only a mounted-on-fileid? Just exit */
+               if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+                       return 0;
        /* Has the inode gone and changed behind our back? */
-       if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
+       } else if (nfsi->fileid != fattr->fileid) {
+               /* Is this perhaps the mounted-on fileid? */
+               if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
+                   nfsi->fileid == fattr->mounted_on_fileid)
+                       return 0;
                return -ESTALE;
+       }
        if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
                return -ESTALE;
 
+
        if (!nfs_file_has_buffered_writers(nfsi)) {
                /* Verify a few of the more important attributes */
                if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
@@ -1768,18 +1778,6 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa
 EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);
 
 
-static inline bool nfs_fileid_valid(struct nfs_inode *nfsi,
-                                   struct nfs_fattr *fattr)
-{
-       bool ret1 = true, ret2 = true;
-
-       if (fattr->valid & NFS_ATTR_FATTR_FILEID)
-               ret1 = (nfsi->fileid == fattr->fileid);
-       if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
-               ret2 = (nfsi->fileid == fattr->mounted_on_fileid);
-       return ret1 || ret2;
-}
-
 /*
  * Many nfs protocol calls return the new file attributes after
  * an operation.  Here we update the inode to reflect the state
@@ -1810,7 +1808,16 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                        nfs_display_fhandle_hash(NFS_FH(inode)),
                        atomic_read(&inode->i_count), fattr->valid);
 
-       if (!nfs_fileid_valid(nfsi, fattr)) {
+       if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
+               /* Only a mounted-on-fileid? Just exit */
+               if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+                       return 0;
+       /* Has the inode gone and changed behind our back? */
+       } else if (nfsi->fileid != fattr->fileid) {
+               /* Is this perhaps the mounted-on fileid? */
+               if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
+                   nfsi->fileid == fattr->mounted_on_fileid)
+                       return 0;
                printk(KERN_ERR "NFS: server %s error: fileid changed\n"
                        "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
                        NFS_SERVER(inode)->nfs_client->cl_hostname,
index a2346a2f8361d56c0608af517adf5c74a7d52404..e64f810223be6b961c865e92b45afa40800a45c2 100644 (file)
@@ -775,3 +775,13 @@ static inline bool nfs_error_is_fatal(int err)
        }
 }
 
+static inline bool nfs_error_is_fatal_on_server(int err)
+{
+       switch (err) {
+       case 0:
+       case -ERESTARTSYS:
+       case -EINTR:
+               return false;
+       }
+       return nfs_error_is_fatal(err);
+}
index 96db471ca2e5c5413edf5ffe120097cb88d079ee..339663d04bf83b28f3f4426eba8419d4e750146f 100644 (file)
@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                switch (err) {
-               case -EPERM:
-               case -EACCES:
-               case -EDQUOT:
-               case -ENOSPC:
-               case -EROFS:
-                       goto out_put_ctx;
                default:
+                       goto out_put_ctx;
+               case -ENOENT:
+               case -ESTALE:
+               case -EISDIR:
+               case -ENOTDIR:
+               case -ELOOP:
                        goto out_drop;
                }
        }
index ed4e1b07447bde1280a41a98120689aafc258ab6..20b3717cd7ca83b8bb9efad78c9099b0d7a144a2 100644 (file)
@@ -590,7 +590,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
        }
 
        hdr->res.fattr   = &hdr->fattr;
-       hdr->res.count   = count;
+       hdr->res.count   = 0;
        hdr->res.eof     = 0;
        hdr->res.verf    = &hdr->verf;
        nfs_fattr_init(&hdr->fattr);
@@ -1251,20 +1251,23 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
 int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
                      struct nfs_pgio_header *hdr)
 {
-       LIST_HEAD(failed);
+       LIST_HEAD(pages);
 
        desc->pg_io_completion = hdr->io_completion;
        desc->pg_dreq = hdr->dreq;
-       while (!list_empty(&hdr->pages)) {
-               struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+       list_splice_init(&hdr->pages, &pages);
+       while (!list_empty(&pages)) {
+               struct nfs_page *req = nfs_list_entry(pages.next);
 
                if (!nfs_pageio_add_request(desc, req))
-                       nfs_list_move_request(req, &failed);
+                       break;
        }
        nfs_pageio_complete(desc);
-       if (!list_empty(&failed)) {
-               list_move(&failed, &hdr->pages);
-               return desc->pg_error < 0 ? desc->pg_error : -EIO;
+       if (!list_empty(&pages)) {
+               int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
+               hdr->completion_ops->error_cleanup(&pages, err);
+               nfs_set_pgio_error(hdr, err, hdr->io_start);
+               return err;
        }
        return 0;
 }
index c0046c3489103c3ed9e4e8760d6db67b8bc1bca8..82af4809b869a63851ce8d49f4e0d982bdc6305c 100644 (file)
@@ -627,11 +627,16 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
                        /* Add this address as an alias */
                        rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
                                        rpc_clnt_test_and_add_xprt, NULL);
-               } else
-                       clp = get_v3_ds_connect(mds_srv,
-                                       (struct sockaddr *)&da->da_addr,
-                                       da->da_addrlen, IPPROTO_TCP,
-                                       timeo, retrans);
+                       continue;
+               }
+               clp = get_v3_ds_connect(mds_srv,
+                               (struct sockaddr *)&da->da_addr,
+                               da->da_addrlen, IPPROTO_TCP,
+                               timeo, retrans);
+               if (IS_ERR(clp))
+                       continue;
+               clp->cl_rpcclient->cl_softerr = 0;
+               clp->cl_rpcclient->cl_softrtry = 0;
        }
 
        if (IS_ERR(clp)) {
index 5552fa8b6e1289afe35a9b62595f1fe533cc1ecb..0f7288b94633b66b37718a6b2a8a8167961285d3 100644 (file)
@@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
                /* Emulate the eof flag, which isn't normally needed in NFSv2
                 * as it is guaranteed to always return the file attributes
                 */
-               if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
+               if ((hdr->res.count == 0 && hdr->args.count > 0) ||
+                   hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
                        hdr->res.eof = 1;
        }
        return 0;
@@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
 
 static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
 {
-       if (task->tk_status >= 0)
+       if (task->tk_status >= 0) {
+               hdr->res.count = hdr->args.count;
                nfs_writeback_update_inode(hdr);
+       }
        return 0;
 }
 
index c19841c82b6a3213203d1e964e5fcd85e8c96e1a..cfe0b586eadd4d0ed5bae36a00bfabf85ea90fe9 100644 (file)
@@ -91,19 +91,25 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
 }
 EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
 
-static void nfs_readpage_release(struct nfs_page *req)
+static void nfs_readpage_release(struct nfs_page *req, int error)
 {
        struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
+       struct page *page = req->wb_page;
 
        dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
                (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
                (long long)req_offset(req));
 
+       if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
+               SetPageError(page);
        if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
-               if (PageUptodate(req->wb_page))
-                       nfs_readpage_to_fscache(inode, req->wb_page, 0);
+               struct address_space *mapping = page_file_mapping(page);
 
-               unlock_page(req->wb_page);
+               if (PageUptodate(page))
+                       nfs_readpage_to_fscache(inode, page, 0);
+               else if (!PageError(page) && !PagePrivate(page))
+                       generic_error_remove_page(mapping, page);
+               unlock_page(page);
        }
        nfs_release_request(req);
 }
@@ -131,7 +137,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
                             &nfs_async_read_completion_ops);
        if (!nfs_pageio_add_request(&pgio, new)) {
                nfs_list_remove_request(new);
-               nfs_readpage_release(new);
+               nfs_readpage_release(new, pgio.pg_error);
        }
        nfs_pageio_complete(&pgio);
 
@@ -153,6 +159,7 @@ static void nfs_page_group_set_uptodate(struct nfs_page *req)
 static void nfs_read_completion(struct nfs_pgio_header *hdr)
 {
        unsigned long bytes = 0;
+       int error;
 
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
@@ -179,14 +186,19 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
                                zero_user_segment(page, start, end);
                        }
                }
+               error = 0;
                bytes += req->wb_bytes;
                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
                        if (bytes <= hdr->good_bytes)
                                nfs_page_group_set_uptodate(req);
+                       else {
+                               error = hdr->error;
+                               xchg(&nfs_req_openctx(req)->error, error);
+                       }
                } else
                        nfs_page_group_set_uptodate(req);
                nfs_list_remove_request(req);
-               nfs_readpage_release(req);
+               nfs_readpage_release(req, error);
        }
 out:
        hdr->release(hdr);
@@ -213,7 +225,7 @@ nfs_async_read_error(struct list_head *head, int error)
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
-               nfs_readpage_release(req);
+               nfs_readpage_release(req, error);
        }
 }
 
@@ -337,8 +349,13 @@ int nfs_readpage(struct file *file, struct page *page)
                        goto out;
        }
 
+       xchg(&ctx->error, 0);
        error = nfs_readpage_async(ctx, inode, page);
-
+       if (!error) {
+               error = wait_on_page_locked_killable(page);
+               if (!PageUptodate(page) && !error)
+                       error = xchg(&ctx->error, 0);
+       }
 out:
        put_nfs_open_context(ctx);
        return error;
@@ -372,8 +389,8 @@ readpage_async_filler(void *data, struct page *page)
                zero_user_segment(page, len, PAGE_SIZE);
        if (!nfs_pageio_add_request(desc->pgio, new)) {
                nfs_list_remove_request(new);
-               nfs_readpage_release(new);
                error = desc->pgio->pg_error;
+               nfs_readpage_release(new, error);
                goto out;
        }
        return 0;
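
The nfs_readpage() change earlier in this file arms the open context's error field with xchg(&ctx->error, 0) before issuing the read, lets the completion path latch a fatal error into it with another xchg(), and then consumes the value after waiting for the page lock. The same fetch-and-clear latch can be sketched with C11 atomics; ctx_error, latch_error and read_page are hypothetical names, not the NFS structures:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-in for the error slot in an nfs_open_context. */
    static atomic_int ctx_error;

    /* Completion path: record the most recent fatal error. */
    static void latch_error(int err)
    {
            atomic_exchange(&ctx_error, err);
    }

    /* Submission path: arm the latch, simulate completion, then fetch-and-clear. */
    static int read_page(int simulate_error)
    {
            atomic_exchange(&ctx_error, 0);         /* arm the latch */

            if (simulate_error)
                    latch_error(-5);                /* completion reports -EIO */

            /* After waiting on the page lock, consume the error exactly once. */
            return atomic_exchange(&ctx_error, 0);
    }

    int main(void)
    {
            printf("failed read:     %d\n", read_page(1));  /* -5 */
            printf("successful read: %d\n", read_page(0));  /*  0 */
            return 0;
    }
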
index 92d9cadc61027c642bcab5755be7c5d221e7b0b1..85ca49549b39bdf5b9bea4e65dbb52d15945231b 100644 (file)
@@ -57,6 +57,7 @@ static const struct rpc_call_ops nfs_commit_ops;
 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
 static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
 static const struct nfs_rw_ops nfs_rw_write_ops;
+static void nfs_inode_remove_request(struct nfs_page *req);
 static void nfs_clear_request_commit(struct nfs_page *req);
 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
                                      struct inode *inode);
@@ -591,23 +592,13 @@ release_request:
 
 static void nfs_write_error(struct nfs_page *req, int error)
 {
+       nfs_set_pageerror(page_file_mapping(req->wb_page));
        nfs_mapping_set_error(req->wb_page, error);
+       nfs_inode_remove_request(req);
        nfs_end_page_writeback(req);
        nfs_release_request(req);
 }
 
-static bool
-nfs_error_is_fatal_on_server(int err)
-{
-       switch (err) {
-       case 0:
-       case -ERESTARTSYS:
-       case -EINTR:
-               return false;
-       }
-       return nfs_error_is_fatal(err);
-}
-
 /*
  * Find an associated nfs write request, and prepare to flush it out
  * May return an error if the user signalled nfs_wait_on_request().
@@ -615,7 +606,6 @@ nfs_error_is_fatal_on_server(int err)
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                                struct page *page)
 {
-       struct address_space *mapping;
        struct nfs_page *req;
        int ret = 0;
 
@@ -630,12 +620,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
        WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
 
        /* If there is a fatal error that covers this write, just exit */
-       ret = 0;
-       mapping = page_file_mapping(page);
-       if (test_bit(AS_ENOSPC, &mapping->flags) ||
-           test_bit(AS_EIO, &mapping->flags))
+       ret = pgio->pg_error;
+       if (nfs_error_is_fatal_on_server(ret))
                goto out_launder;
 
+       ret = 0;
        if (!nfs_pageio_add_request(pgio, req)) {
                ret = pgio->pg_error;
                /*
@@ -647,6 +636,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                } else
                        ret = -EAGAIN;
                nfs_redirty_request(req);
+               pgio->pg_error = 0;
        } else
                nfs_add_stats(page_file_mapping(page)->host,
                                NFSIOS_WRITEPAGES, 1);
@@ -666,7 +656,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
        ret = nfs_page_async_flush(pgio, page);
        if (ret == -EAGAIN) {
                redirty_page_for_writepage(wbc, page);
-               ret = 0;
+               ret = AOP_WRITEPAGE_ACTIVATE;
        }
        return ret;
 }
@@ -685,10 +675,11 @@ static int nfs_writepage_locked(struct page *page,
        nfs_pageio_init_write(&pgio, inode, 0,
                                false, &nfs_async_write_completion_ops);
        err = nfs_do_writepage(page, wbc, &pgio);
+       pgio.pg_error = 0;
        nfs_pageio_complete(&pgio);
        if (err < 0)
                return err;
-       if (pgio.pg_error < 0)
+       if (nfs_error_is_fatal(pgio.pg_error))
                return pgio.pg_error;
        return 0;
 }
@@ -698,7 +689,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
        int ret;
 
        ret = nfs_writepage_locked(page, wbc);
-       unlock_page(page);
+       if (ret != AOP_WRITEPAGE_ACTIVATE)
+               unlock_page(page);
        return ret;
 }
 
@@ -707,7 +699,8 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
        int ret;
 
        ret = nfs_do_writepage(page, wbc, data);
-       unlock_page(page);
+       if (ret != AOP_WRITEPAGE_ACTIVATE)
+               unlock_page(page);
        return ret;
 }
 
@@ -733,13 +726,14 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
                                &nfs_async_write_completion_ops);
        pgio.pg_io_completion = ioc;
        err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
+       pgio.pg_error = 0;
        nfs_pageio_complete(&pgio);
        nfs_io_completion_put(ioc);
 
        if (err < 0)
                goto out_err;
        err = pgio.pg_error;
-       if (err < 0)
+       if (nfs_error_is_fatal(err))
                goto out_err;
        return 0;
 out_err:
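
Both wrappers above now honour the AOP_WRITEPAGE_ACTIVATE convention: when a ->writepage (or write_cache_pages() callback) returns that value, the page is left locked and the caller re-activates and unlocks it. A hedged sketch of the convention from the implementation side; demo_would_block() and demo_start_io() are assumed helpers:

/* Sketch: a ->writepage obeying the AOP_WRITEPAGE_ACTIVATE convention. */
static int demo_writepage(struct page *page, struct writeback_control *wbc)
{
        if (wbc->sync_mode == WB_SYNC_NONE && demo_would_block(page)) {
                /* Keep the data dirty and ask the caller to re-activate
                 * the page; it stays locked and the caller unlocks it. */
                redirty_page_for_writepage(wbc, page);
                return AOP_WRITEPAGE_ACTIVATE;
        }

        set_page_writeback(page);
        demo_start_io(page);            /* assumed: ends with end_page_writeback() */
        unlock_page(page);              /* normal path unlocks itself */
        return 0;
}
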
index 95a159a4137fb645f3fbe0ed5e4278d55ad58fd2..80ca61058dd20b9f1234bbec0c9c7431eabb1293 100644 (file)
@@ -16,6 +16,8 @@ struct error_injection_entry {
        int             etype;
 };
 
+struct pt_regs;
+
 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
 /*
  * Whitelist generating macro. Specify functions which can be

@@ -28,8 +30,12 @@ static struct error_injection_entry __used                           \
                .addr = (unsigned long)fname,                           \
                .etype = EI_ETYPE_##_etype,                             \
        };
+
+void override_function_with_return(struct pt_regs *regs);
 #else
 #define ALLOW_ERROR_INJECTION(fname, _etype)
+
+static inline void override_function_with_return(struct pt_regs *regs) { }
 #endif
 #endif
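
For reference, the whitelisting macro declared in this header is used by annotating a function whose return value may be overridden when CONFIG_FUNCTION_ERROR_INJECTION is enabled; the injected value is interpreted according to the EI_ETYPE. A hedged usage sketch with a made-up function:

#include <linux/error-injection.h>

/* Hypothetical helper whose failure path we want to exercise in testing. */
static int demo_reserve_slot(void *dev)
{
        if (!dev)
                return -EINVAL;
        /* ... real work ... */
        return 0;
}
/* Whitelist it; injected return values are treated as errnos. */
ALLOW_ERROR_INJECTION(demo_reserve_slot, ERRNO);
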
 
diff --git a/include/dt-bindings/memory/mt8183-larb-port.h b/include/dt-bindings/memory/mt8183-larb-port.h
new file mode 100644 (file)
index 0000000..2c579f3
--- /dev/null
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef __DTS_IOMMU_PORT_MT8183_H
+#define __DTS_IOMMU_PORT_MT8183_H
+
+#define MTK_M4U_ID(larb, port)         (((larb) << 5) | (port))
+
+#define M4U_LARB0_ID                   0
+#define M4U_LARB1_ID                   1
+#define M4U_LARB2_ID                   2
+#define M4U_LARB3_ID                   3
+#define M4U_LARB4_ID                   4
+#define M4U_LARB5_ID                   5
+#define M4U_LARB6_ID                   6
+#define M4U_LARB7_ID                   7
+
+/* larb0 */
+#define        M4U_PORT_DISP_OVL0              MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define        M4U_PORT_DISP_2L_OVL0_LARB0     MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define        M4U_PORT_DISP_2L_OVL1_LARB0     MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define        M4U_PORT_DISP_RDMA0             MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define        M4U_PORT_DISP_RDMA1             MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define        M4U_PORT_DISP_WDMA0             MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define        M4U_PORT_MDP_RDMA0              MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define        M4U_PORT_MDP_WROT0              MTK_M4U_ID(M4U_LARB0_ID, 7)
+#define        M4U_PORT_MDP_WDMA0              MTK_M4U_ID(M4U_LARB0_ID, 8)
+#define        M4U_PORT_DISP_FAKE0             MTK_M4U_ID(M4U_LARB0_ID, 9)
+
+/* larb1 */
+#define        M4U_PORT_HW_VDEC_MC_EXT         MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define        M4U_PORT_HW_VDEC_PP_EXT         MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define        M4U_PORT_HW_VDEC_VLD_EXT        MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define        M4U_PORT_HW_VDEC_AVC_MV_EXT     MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define        M4U_PORT_HW_VDEC_PRED_RD_EXT    MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define        M4U_PORT_HW_VDEC_PRED_WR_EXT    MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define        M4U_PORT_HW_VDEC_PPWRAP_EXT     MTK_M4U_ID(M4U_LARB1_ID, 6)
+
+/* larb2 VPU0 */
+#define        M4U_PORT_IMG_IPUO               MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define        M4U_PORT_IMG_IPU3O              MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define        M4U_PORT_IMG_IPUI               MTK_M4U_ID(M4U_LARB2_ID, 2)
+
+/* larb3 VPU1 */
+#define        M4U_PORT_CAM_IPUO               MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define        M4U_PORT_CAM_IPU2O              MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define        M4U_PORT_CAM_IPU3O              MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define        M4U_PORT_CAM_IPUI               MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define        M4U_PORT_CAM_IPU2I              MTK_M4U_ID(M4U_LARB3_ID, 4)
+
+/* larb4 */
+#define        M4U_PORT_VENC_RCPU              MTK_M4U_ID(M4U_LARB4_ID, 0)
+#define        M4U_PORT_VENC_REC               MTK_M4U_ID(M4U_LARB4_ID, 1)
+#define        M4U_PORT_VENC_BSDMA             MTK_M4U_ID(M4U_LARB4_ID, 2)
+#define        M4U_PORT_VENC_SV_COMV           MTK_M4U_ID(M4U_LARB4_ID, 3)
+#define        M4U_PORT_VENC_RD_COMV           MTK_M4U_ID(M4U_LARB4_ID, 4)
+#define        M4U_PORT_JPGENC_RDMA            MTK_M4U_ID(M4U_LARB4_ID, 5)
+#define        M4U_PORT_JPGENC_BSDMA           MTK_M4U_ID(M4U_LARB4_ID, 6)
+#define        M4U_PORT_VENC_CUR_LUMA          MTK_M4U_ID(M4U_LARB4_ID, 7)
+#define        M4U_PORT_VENC_CUR_CHROMA        MTK_M4U_ID(M4U_LARB4_ID, 8)
+#define        M4U_PORT_VENC_REF_LUMA          MTK_M4U_ID(M4U_LARB4_ID, 9)
+#define        M4U_PORT_VENC_REF_CHROMA        MTK_M4U_ID(M4U_LARB4_ID, 10)
+
+/* larb5 */
+#define        M4U_PORT_CAM_IMGI               MTK_M4U_ID(M4U_LARB5_ID, 0)
+#define        M4U_PORT_CAM_IMG2O              MTK_M4U_ID(M4U_LARB5_ID, 1)
+#define        M4U_PORT_CAM_IMG3O              MTK_M4U_ID(M4U_LARB5_ID, 2)
+#define        M4U_PORT_CAM_VIPI               MTK_M4U_ID(M4U_LARB5_ID, 3)
+#define        M4U_PORT_CAM_LCEI               MTK_M4U_ID(M4U_LARB5_ID, 4)
+#define        M4U_PORT_CAM_SMXI               MTK_M4U_ID(M4U_LARB5_ID, 5)
+#define        M4U_PORT_CAM_SMXO               MTK_M4U_ID(M4U_LARB5_ID, 6)
+#define        M4U_PORT_CAM_WPE0_RDMA1         MTK_M4U_ID(M4U_LARB5_ID, 7)
+#define        M4U_PORT_CAM_WPE0_RDMA0         MTK_M4U_ID(M4U_LARB5_ID, 8)
+#define        M4U_PORT_CAM_WPE0_WDMA          MTK_M4U_ID(M4U_LARB5_ID, 9)
+#define        M4U_PORT_CAM_FDVT_RP            MTK_M4U_ID(M4U_LARB5_ID, 10)
+#define        M4U_PORT_CAM_FDVT_WR            MTK_M4U_ID(M4U_LARB5_ID, 11)
+#define        M4U_PORT_CAM_FDVT_RB            MTK_M4U_ID(M4U_LARB5_ID, 12)
+#define        M4U_PORT_CAM_WPE1_RDMA0         MTK_M4U_ID(M4U_LARB5_ID, 13)
+#define        M4U_PORT_CAM_WPE1_RDMA1         MTK_M4U_ID(M4U_LARB5_ID, 14)
+#define        M4U_PORT_CAM_WPE1_WDMA          MTK_M4U_ID(M4U_LARB5_ID, 15)
+#define        M4U_PORT_CAM_DPE_RDMA           MTK_M4U_ID(M4U_LARB5_ID, 16)
+#define        M4U_PORT_CAM_DPE_WDMA           MTK_M4U_ID(M4U_LARB5_ID, 17)
+#define        M4U_PORT_CAM_MFB_RDMA0          MTK_M4U_ID(M4U_LARB5_ID, 18)
+#define        M4U_PORT_CAM_MFB_RDMA1          MTK_M4U_ID(M4U_LARB5_ID, 19)
+#define        M4U_PORT_CAM_MFB_WDMA           MTK_M4U_ID(M4U_LARB5_ID, 20)
+#define        M4U_PORT_CAM_RSC_RDMA0          MTK_M4U_ID(M4U_LARB5_ID, 21)
+#define        M4U_PORT_CAM_RSC_WDMA           MTK_M4U_ID(M4U_LARB5_ID, 22)
+#define        M4U_PORT_CAM_OWE_RDMA           MTK_M4U_ID(M4U_LARB5_ID, 23)
+#define        M4U_PORT_CAM_OWE_WDMA           MTK_M4U_ID(M4U_LARB5_ID, 24)
+
+/* larb6 */
+#define        M4U_PORT_CAM_IMGO               MTK_M4U_ID(M4U_LARB6_ID, 0)
+#define        M4U_PORT_CAM_RRZO               MTK_M4U_ID(M4U_LARB6_ID, 1)
+#define        M4U_PORT_CAM_AAO                MTK_M4U_ID(M4U_LARB6_ID, 2)
+#define        M4U_PORT_CAM_AFO                MTK_M4U_ID(M4U_LARB6_ID, 3)
+#define        M4U_PORT_CAM_LSCI0              MTK_M4U_ID(M4U_LARB6_ID, 4)
+#define        M4U_PORT_CAM_LSCI1              MTK_M4U_ID(M4U_LARB6_ID, 5)
+#define        M4U_PORT_CAM_PDO                MTK_M4U_ID(M4U_LARB6_ID, 6)
+#define        M4U_PORT_CAM_BPCI               MTK_M4U_ID(M4U_LARB6_ID, 7)
+#define        M4U_PORT_CAM_LCSO               MTK_M4U_ID(M4U_LARB6_ID, 8)
+#define        M4U_PORT_CAM_CAM_RSSO_A         MTK_M4U_ID(M4U_LARB6_ID, 9)
+#define        M4U_PORT_CAM_UFEO               MTK_M4U_ID(M4U_LARB6_ID, 10)
+#define        M4U_PORT_CAM_SOCO               MTK_M4U_ID(M4U_LARB6_ID, 11)
+#define        M4U_PORT_CAM_SOC1               MTK_M4U_ID(M4U_LARB6_ID, 12)
+#define        M4U_PORT_CAM_SOC2               MTK_M4U_ID(M4U_LARB6_ID, 13)
+#define        M4U_PORT_CAM_CCUI               MTK_M4U_ID(M4U_LARB6_ID, 14)
+#define        M4U_PORT_CAM_CCUO               MTK_M4U_ID(M4U_LARB6_ID, 15)
+#define        M4U_PORT_CAM_RAWI_A             MTK_M4U_ID(M4U_LARB6_ID, 16)
+#define        M4U_PORT_CAM_CCUG               MTK_M4U_ID(M4U_LARB6_ID, 17)
+#define        M4U_PORT_CAM_PSO                MTK_M4U_ID(M4U_LARB6_ID, 18)
+#define        M4U_PORT_CAM_AFO_1              MTK_M4U_ID(M4U_LARB6_ID, 19)
+#define        M4U_PORT_CAM_LSCI_2             MTK_M4U_ID(M4U_LARB6_ID, 20)
+#define        M4U_PORT_CAM_PDI                MTK_M4U_ID(M4U_LARB6_ID, 21)
+#define        M4U_PORT_CAM_FLKO               MTK_M4U_ID(M4U_LARB6_ID, 22)
+#define        M4U_PORT_CAM_LMVO               MTK_M4U_ID(M4U_LARB6_ID, 23)
+#define        M4U_PORT_CAM_UFGO               MTK_M4U_ID(M4U_LARB6_ID, 24)
+#define        M4U_PORT_CAM_SPARE              MTK_M4U_ID(M4U_LARB6_ID, 25)
+#define        M4U_PORT_CAM_SPARE_2            MTK_M4U_ID(M4U_LARB6_ID, 26)
+#define        M4U_PORT_CAM_SPARE_3            MTK_M4U_ID(M4U_LARB6_ID, 27)
+#define        M4U_PORT_CAM_SPARE_4            MTK_M4U_ID(M4U_LARB6_ID, 28)
+#define        M4U_PORT_CAM_SPARE_5            MTK_M4U_ID(M4U_LARB6_ID, 29)
+#define        M4U_PORT_CAM_SPARE_6            MTK_M4U_ID(M4U_LARB6_ID, 30)
+
+/* CCU */
+#define        M4U_PORT_CCU0                   MTK_M4U_ID(M4U_LARB7_ID, 0)
+#define        M4U_PORT_CCU1                   MTK_M4U_ID(M4U_LARB7_ID, 1)
+
+#endif
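
The MTK_M4U_ID() scheme above packs the local arbiter (larb) index into bits 5 and up and the port index into the low 5 bits. A small sketch of matching decode helpers a consumer might keep locally; the DEMO_* names are illustrative, not part of the binding:

#include <dt-bindings/memory/mt8183-larb-port.h>

/* Illustrative decode helpers mirroring MTK_M4U_ID(larb, port). */
#define DEMO_M4U_TO_LARB(id)    ((id) >> 5)
#define DEMO_M4U_TO_PORT(id)    ((id) & 0x1f)

/* Example: M4U_PORT_DISP_RDMA0 decodes to larb 0, port 3. */
static inline unsigned int demo_larb_of(unsigned int id)
{
        return DEMO_M4U_TO_LARB(id);
}

static inline unsigned int demo_port_of(unsigned int id)
{
        return DEMO_M4U_TO_PORT(id);
}
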
diff --git a/include/dt-bindings/regulator/active-semi,8865-regulator.h b/include/dt-bindings/regulator/active-semi,8865-regulator.h
new file mode 100644 (file)
index 0000000..15473db
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device Tree binding constants for the ACT8865 PMIC regulators
+ */
+
+#ifndef _DT_BINDINGS_REGULATOR_ACT8865_H
+#define _DT_BINDINGS_REGULATOR_ACT8865_H
+
+/*
+ * These constants should be used to specify regulator modes in device tree for
+ * ACT8865 regulators as follows:
+ * ACT8865_REGULATOR_MODE_FIXED:       It is specific to DCDC regulators and it
+ *                                     specifies the usage of fixed-frequency
+ *                                     PWM.
+ *
+ * ACT8865_REGULATOR_MODE_NORMAL:      It is specific to LDO regulators and it
+ *                                     specifies the usage of normal mode.
+ *
+ * ACT8865_REGULATOR_MODE_LOWPOWER:    For DCDC and LDO regulators; it specifies
+ *                                     the usage of proprietary power-saving
+ *                                     mode.
+ */
+
+#define ACT8865_REGULATOR_MODE_FIXED           1
+#define ACT8865_REGULATOR_MODE_NORMAL          2
+#define ACT8865_REGULATOR_MODE_LOWPOWER        3
+
+#endif
index 9426b9aaed86f7db346aa4b0cd7010b7b75be62d..0fecacca51e889f6d05966b0e843fd84cb638f60 100644 (file)
@@ -994,62 +994,11 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c
 #endif
 #endif
 
-struct acpi_gpio_params {
-       unsigned int crs_entry_index;
-       unsigned int line_index;
-       bool active_low;
-};
-
-struct acpi_gpio_mapping {
-       const char *name;
-       const struct acpi_gpio_params *data;
-       unsigned int size;
-
-/* Ignore IoRestriction field */
-#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION      BIT(0)
-/*
- * When ACPI GPIO mapping table is in use the index parameter inside it
- * refers to the GPIO resource in _CRS method. That index has no
- * distinction of actual type of the resource. When consumer wants to
- * get GpioIo type explicitly, this quirk may be used.
- */
-#define ACPI_GPIO_QUIRK_ONLY_GPIOIO            BIT(1)
-
-       unsigned int quirks;
-};
-
 #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
-int acpi_dev_add_driver_gpios(struct acpi_device *adev,
-                             const struct acpi_gpio_mapping *gpios);
-
-static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
-{
-       if (adev)
-               adev->driver_gpios = NULL;
-}
-
-int devm_acpi_dev_add_driver_gpios(struct device *dev,
-                                  const struct acpi_gpio_mapping *gpios);
-void devm_acpi_dev_remove_driver_gpios(struct device *dev);
-
 bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
                                struct acpi_resource_gpio **agpio);
 int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
 #else
-static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
-                             const struct acpi_gpio_mapping *gpios)
-{
-       return -ENXIO;
-}
-static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
-
-static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
-                             const struct acpi_gpio_mapping *gpios)
-{
-       return -ENXIO;
-}
-static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {}
-
 static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
                                              struct acpi_resource_gpio **agpio)
 {
@@ -1302,11 +1251,16 @@ static inline int lpit_read_residency_count_address(u64 *address)
 #endif
 
 #ifdef CONFIG_ACPI_PPTT
+int acpi_pptt_cpu_is_thread(unsigned int cpu);
 int find_acpi_cpu_topology(unsigned int cpu, int level);
 int find_acpi_cpu_topology_package(unsigned int cpu);
 int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
 int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
 #else
+static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+       return -EINVAL;
+}
 static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
 {
        return -EINVAL;
index 4a4d00646040a2eef6290cd22c1caad7c25f8f4a..21e950e4ab623ee3ca9ecce468ee31da60dd6971 100644 (file)
@@ -184,6 +184,9 @@ extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
 extern int
 amd_iommu_update_ga(int cpu, bool is_run, void *data);
 
+extern int amd_iommu_activate_guest_mode(void *data);
+extern int amd_iommu_deactivate_guest_mode(void *data);
+
 #else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
 
 static inline int
@@ -198,6 +201,15 @@ amd_iommu_update_ga(int cpu, bool is_run, void *data)
        return 0;
 }
 
+static inline int amd_iommu_activate_guest_mode(void *data)
+{
+       return 0;
+}
+
+static inline int amd_iommu_deactivate_guest_mode(void *data)
+{
+       return 0;
+}
 #endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
 
 #endif /* _ASM_X86_AMD_IOMMU_H */
index feff3fe4467ec97ce92aa7e6b5b13ef05012bb47..1b1fa1557e68aefe5cf5df252485d398b8a38e35 100644 (file)
@@ -311,6 +311,7 @@ enum req_flag_bits {
        __REQ_RAHEAD,           /* read ahead, can fail anytime */
        __REQ_BACKGROUND,       /* background IO */
        __REQ_NOWAIT,           /* Don't wait if request will block */
+       __REQ_NOWAIT_INLINE,    /* Return would-block error inline */
        /*
         * When a shared kthread needs to issue a bio for a cgroup, doing
         * so synchronously can lead to priority inversions as the kthread
@@ -345,6 +346,7 @@ enum req_flag_bits {
 #define REQ_RAHEAD             (1ULL << __REQ_RAHEAD)
 #define REQ_BACKGROUND         (1ULL << __REQ_BACKGROUND)
 #define REQ_NOWAIT             (1ULL << __REQ_NOWAIT)
+#define REQ_NOWAIT_INLINE      (1ULL << __REQ_NOWAIT_INLINE)
 #define REQ_CGROUP_PUNT                (1ULL << __REQ_CGROUP_PUNT)
 
 #define REQ_NOUNMAP            (1ULL << __REQ_NOUNMAP)
@@ -418,12 +420,13 @@ static inline int op_stat_group(unsigned int op)
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE          -1U
+#define BLK_QC_T_EAGAIN                -2U
 #define BLK_QC_T_SHIFT         16
 #define BLK_QC_T_INTERNAL      (1U << 31)
 
 static inline bool blk_qc_t_valid(blk_qc_t cookie)
 {
-       return cookie != BLK_QC_T_NONE;
+       return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
 }
 
 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
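
The new flag and cookie value above give a submitter a synchronous way to learn that a REQ_NOWAIT bio would have blocked, instead of receiving BLK_STS_AGAIN through the completion path. A hedged sketch of the caller side, assuming the blk_qc_t-returning submit_bio() of this kernel version:

/* Sketch: submit a bio that must not block, detecting the inline
 * would-block result via the returned cookie. */
static int demo_submit_nowait(struct bio *bio)
{
        blk_qc_t cookie;

        bio->bi_opf |= REQ_NOWAIT | REQ_NOWAIT_INLINE;
        cookie = submit_bio(bio);
        if (cookie == BLK_QC_T_EAGAIN)
                return -EAGAIN;         /* would block; caller may retry later */
        return 0;
}
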
index f0fd5636fddbebd8a55d235dbd3cc8a41e398ae0..5e88e7e33abecd9ec88003b51fdd34ab611f6eb7 100644 (file)
@@ -24,7 +24,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
                        long ______r;                                   \
                        static struct ftrace_likely_data                \
                                __aligned(4)                            \
-                               __section("_ftrace_annotated_branch")   \
+                               __section(_ftrace_annotated_branch)     \
                                ______f = {                             \
                                .data.func = __func__,                  \
                                .data.file = __FILE__,                  \
@@ -60,7 +60,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 #define __trace_if_value(cond) ({                      \
        static struct ftrace_branch_data                \
                __aligned(4)                            \
-               __section("_ftrace_branch")             \
+               __section(_ftrace_branch)               \
                __if_trace = {                          \
                        .func = __func__,               \
                        .file = __FILE__,               \
@@ -118,7 +118,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
        ".popsection\n\t"
 
 /* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(".rodata..c_jump_table")
+#define __annotate_jump_table __section(.rodata..c_jump_table)
 
 #else
 #define annotate_reachable()
@@ -298,7 +298,7 @@ unsigned long read_word_at_a_time(const void *addr)
  * visible to the compiler.
  */
 #define __ADDRESSABLE(sym) \
-       static void * __section(".discard.addressable") __used \
+       static void * __section(.discard.addressable) __used \
                __PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
 
 /**
index bb9a0db89f1ab8e8d5387c93e4a98e81cba5da60..12ae4b87494e4eb3f265e7385458afbbd00f6195 100644 (file)
@@ -256,7 +256,10 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
 {return 0;}
 #endif
 
-#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \
+#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter,                  \
+                               idx,                                    \
+                               state,                                  \
+                               is_retention)                           \
 ({                                                                     \
        int __ret = 0;                                                  \
                                                                        \
@@ -268,7 +271,7 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
        if (!is_retention)                                              \
                __ret =  cpu_pm_enter();                                \
        if (!__ret) {                                                   \
-               __ret = low_level_idle_enter(idx);                      \
+               __ret = low_level_idle_enter(state);                    \
                if (!is_retention)                                      \
                        cpu_pm_exit();                                  \
        }                                                               \
@@ -277,9 +280,15 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
 })
 
 #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)       \
-       __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0)
+       __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0)
 
 #define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx)     \
-       __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1)
+       __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1)
+
+#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state)  \
+       __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state)        \
+       __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1)
 
 #endif /* _LINUX_CPUIDLE_H */
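
The _PARAM variants above decouple the value handed to the low-level idle hook from the cpuidle state index, so a driver can pass a platform-specific parameter (the PSCI driver passes a composed power_state value, for instance). A hedged sketch of an ->enter() callback using it; demo_suspend() (an int-returning hook taking a u32) and demo_state_param[] are assumptions:

static u32 demo_state_param[8];         /* assumed: filled from firmware/DT */

static int demo_enter_idle(struct cpuidle_device *dev,
                           struct cpuidle_driver *drv, int idx)
{
        /* The index selects the state; the hook receives the platform value. */
        return CPU_PM_CPU_IDLE_ENTER_PARAM(demo_suspend, idx,
                                           demo_state_param[idx]);
}
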
index 342dabda9c7ef23cdeb4f7da161bb648de423c84..c19483b900794cd8a478f265a6873bea054ac712 100644 (file)
@@ -440,7 +440,7 @@ struct dimm_info {
        char label[EDAC_MC_LABEL_LEN + 1];      /* DIMM label on motherboard */
 
        /* Memory location data */
-       unsigned location[EDAC_MAX_LAYERS];
+       unsigned int location[EDAC_MAX_LAYERS];
 
        struct mem_ctl_info *mci;       /* the parent */
 
@@ -451,7 +451,7 @@ struct dimm_info {
 
        u32 nr_pages;                   /* number of pages on this dimm */
 
-       unsigned csrow, cschannel;      /* Points to the old API data */
+       unsigned int csrow, cschannel;  /* Points to the old API data */
 
        u16 smbios_handle;              /* Handle for SMBIOS type 17 */
 };
@@ -597,7 +597,7 @@ struct mem_ctl_info {
                                           unsigned long page);
        int mc_idx;
        struct csrow_info **csrows;
-       unsigned nr_csrows, num_cschannel;
+       unsigned int nr_csrows, num_cschannel;
 
        /*
         * Memory Controller hierarchy
@@ -608,14 +608,14 @@ struct mem_ctl_info {
         * of the recent drivers enumerate memories per DIMM, instead.
         * When the memory controller is per rank, csbased is true.
         */
-       unsigned n_layers;
+       unsigned int n_layers;
        struct edac_mc_layer *layers;
        bool csbased;
 
        /*
         * DIMM info. Will eventually remove the entire csrows_info some day
         */
-       unsigned tot_dimms;
+       unsigned int tot_dimms;
        struct dimm_info **dimms;
 
        /*
index 280c61ecbf20552b3fa68a99150b55e52c524905..635a95caf29f3254cfdcbc69102fb96a1faf4c51 100644 (file)
@@ -2,16 +2,16 @@
 #ifndef _LINUX_ERROR_INJECTION_H
 #define _LINUX_ERROR_INJECTION_H
 
-#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+#include <linux/compiler.h>
+#include <asm-generic/error-injection.h>
 
-#include <asm/error-injection.h>
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
 
 extern bool within_error_injection_list(unsigned long addr);
 extern int get_injectable_error_type(unsigned long addr);
 
 #else /* !CONFIG_FUNCTION_ERROR_INJECTION */
 
-#include <asm-generic/error-injection.h>
 static inline bool within_error_injection_list(unsigned long addr)
 {
        return false;
index f757a58191a6256368e9e0f7b17623b0500e2c0e..2157717c213683e62680720e9b66c232239c10a6 100644 (file)
@@ -221,19 +221,6 @@ static inline int gpio_to_irq(unsigned gpio)
        return -EINVAL;
 }
 
-static inline int gpiochip_lock_as_irq(struct gpio_chip *chip,
-                                      unsigned int offset)
-{
-       WARN_ON(1);
-       return -EINVAL;
-}
-
-static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip,
-                                         unsigned int offset)
-{
-       WARN_ON(1);
-}
-
 static inline int irq_to_gpio(unsigned irq)
 {
        /* irq can never have been returned from gpio_to_irq() */
index a7f08fb0f8653224ec2f87b11fad89d554b771c3..b70af921c6145527322bd00cb97fa5749d4a335a 100644 (file)
@@ -170,18 +170,8 @@ struct gpio_desc *gpio_to_desc(unsigned gpio);
 int desc_to_gpio(const struct gpio_desc *desc);
 
 /* Child properties interface */
-struct device_node;
 struct fwnode_handle;
 
-struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
-                                        const char *propname, int index,
-                                        enum gpiod_flags dflags,
-                                        const char *label);
-struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
-                                             struct device_node *node,
-                                             const char *propname, int index,
-                                             enum gpiod_flags dflags,
-                                             const char *label);
 struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
                                         const char *propname, int index,
                                         enum gpiod_flags dflags,
@@ -530,28 +520,8 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
 }
 
 /* Child properties interface */
-struct device_node;
 struct fwnode_handle;
 
-static inline
-struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
-                                        const char *propname, int index,
-                                        enum gpiod_flags dflags,
-                                        const char *label)
-{
-       return ERR_PTR(-ENOSYS);
-}
-
-static inline
-struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
-                                             struct device_node *node,
-                                             const char *propname, int index,
-                                             enum gpiod_flags dflags,
-                                             const char *label)
-{
-       return ERR_PTR(-ENOSYS);
-}
-
 static inline
 struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
                                         const char *propname, int index,
@@ -584,6 +554,111 @@ struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev,
                                                      flags, label);
 }
 
+#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO)
+struct device_node;
+
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+                                        const char *propname, int index,
+                                        enum gpiod_flags dflags,
+                                        const char *label);
+
+#else  /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */
+
+struct device_node;
+
+static inline
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+                                        const char *propname, int index,
+                                        enum gpiod_flags dflags,
+                                        const char *label)
+{
+       return ERR_PTR(-ENOSYS);
+}
+
+#endif /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */
+
+#ifdef CONFIG_GPIOLIB
+struct device_node;
+
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+                                             struct device_node *node,
+                                             const char *propname, int index,
+                                             enum gpiod_flags dflags,
+                                             const char *label);
+
+#else  /* CONFIG_GPIOLIB */
+
+struct device_node;
+
+static inline
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+                                             struct device_node *node,
+                                             const char *propname, int index,
+                                             enum gpiod_flags dflags,
+                                             const char *label)
+{
+       return ERR_PTR(-ENOSYS);
+}
+
+#endif /* CONFIG_GPIOLIB */
+
+struct acpi_gpio_params {
+       unsigned int crs_entry_index;
+       unsigned int line_index;
+       bool active_low;
+};
+
+struct acpi_gpio_mapping {
+       const char *name;
+       const struct acpi_gpio_params *data;
+       unsigned int size;
+
+/* Ignore IoRestriction field */
+#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION      BIT(0)
+/*
+ * When ACPI GPIO mapping table is in use the index parameter inside it
+ * refers to the GPIO resource in _CRS method. That index has no
+ * distinction of actual type of the resource. When consumer wants to
+ * get GpioIo type explicitly, this quirk may be used.
+ */
+#define ACPI_GPIO_QUIRK_ONLY_GPIOIO            BIT(1)
+
+       unsigned int quirks;
+};
+
+#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI)
+
+struct acpi_device;
+
+int acpi_dev_add_driver_gpios(struct acpi_device *adev,
+                             const struct acpi_gpio_mapping *gpios);
+void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
+
+int devm_acpi_dev_add_driver_gpios(struct device *dev,
+                                  const struct acpi_gpio_mapping *gpios);
+void devm_acpi_dev_remove_driver_gpios(struct device *dev);
+
+#else  /* CONFIG_GPIOLIB && CONFIG_ACPI */
+
+struct acpi_device;
+
+static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
+                             const struct acpi_gpio_mapping *gpios)
+{
+       return -ENXIO;
+}
+static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+
+static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
+                             const struct acpi_gpio_mapping *gpios)
+{
+       return -ENXIO;
+}
+static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {}
+
+#endif /* CONFIG_GPIOLIB && CONFIG_ACPI */
+
+
 #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
 
 int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
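
The ACPI GPIO mapping table and its registration helpers now live here rather than in acpi.h (see the earlier hunk removing them). For reference, the usual driver-side pattern is to describe each named GPIO with an acpi_gpio_params entry indexed into _CRS and register the table at probe time; the property names and demo_* identifiers below are illustrative:

/* Illustrative ACPI GPIO mapping registration (names are made up). */
static const struct acpi_gpio_params demo_reset_gpio = { 0, 0, false };
static const struct acpi_gpio_params demo_irq_gpio   = { 1, 0, true };

static const struct acpi_gpio_mapping demo_acpi_gpios[] = {
        { "reset-gpios", &demo_reset_gpio, 1 },
        { "irq-gpios",   &demo_irq_gpio,   1 },
        { }
};

static int demo_probe(struct device *dev)
{
        int ret;

        ret = devm_acpi_dev_add_driver_gpios(dev, demo_acpi_gpios);
        if (ret)
                return ret;
        /* gpiod_get(dev, "reset", ...) can now resolve via _CRS entry 0. */
        return 0;
}
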
index 6a0e420915a3cba73fc3cdf31b52eeb33adccd85..f8245d67f07089774bca10017df85fb82ee39835 100644 (file)
@@ -20,9 +20,8 @@ struct module;
 enum gpiod_flags;
 enum gpio_lookup_flags;
 
-#ifdef CONFIG_GPIOLIB
+struct gpio_chip;
 
-#ifdef CONFIG_GPIOLIB_IRQCHIP
 /**
  * struct gpio_irq_chip - GPIO interrupt controller
  */
@@ -49,6 +48,84 @@ struct gpio_irq_chip {
         */
        const struct irq_domain_ops *domain_ops;
 
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+       /**
+        * @fwnode:
+        *
+        * Firmware node corresponding to this gpiochip/irqchip, necessary
+        * for hierarchical irqdomain support.
+        */
+       struct fwnode_handle *fwnode;
+
+       /**
+        * @parent_domain:
+        *
+        * If non-NULL, will be set as the parent of this GPIO interrupt
+        * controller's IRQ domain to establish a hierarchical interrupt
+        * domain. The presence of this will activate the hierarchical
+        * interrupt support.
+        */
+       struct irq_domain *parent_domain;
+
+       /**
+        * @child_to_parent_hwirq:
+        *
+        * This callback translates a child hardware IRQ offset to a parent
+        * hardware IRQ offset on a hierarchical interrupt chip. The child
+        * hardware IRQs correspond to the GPIO index 0..ngpio-1 (see the
+        * ngpio field of struct gpio_chip) and the corresponding parent
+        * hardware IRQ and type (such as IRQ_TYPE_*) shall be returned by
+        * the driver. The driver can calculate this from an offset or using
+        * a lookup table or whatever method is best for this chip. Return
+        * 0 on successful translation in the driver.
+        *
+        * If some ranges of hardware IRQs do not have a corresponding parent
+        * HWIRQ, return -EINVAL, but also make sure to fill in @valid_mask and
+        * @need_valid_mask to make these GPIO lines unavailable for
+        * translation.
+        */
+       int (*child_to_parent_hwirq)(struct gpio_chip *chip,
+                                    unsigned int child_hwirq,
+                                    unsigned int child_type,
+                                    unsigned int *parent_hwirq,
+                                    unsigned int *parent_type);
+
+       /**
+        * @populate_parent_fwspec:
+        *
+        * This optional callback populates the &struct irq_fwspec for the
+        * parent's IRQ domain. If this is not specified, then
+        * &gpiochip_populate_parent_fwspec_twocell will be used. A four-cell
+        * variant named &gpiochip_populate_parent_fwspec_fourcell is also
+        * available.
+        */
+       void (*populate_parent_fwspec)(struct gpio_chip *chip,
+                                      struct irq_fwspec *fwspec,
+                                      unsigned int parent_hwirq,
+                                      unsigned int parent_type);
+
+       /**
+        * @child_offset_to_irq:
+        *
+        * This optional callback is used to translate the child's GPIO line
+        * offset on the GPIO chip to an IRQ number for the GPIO to_irq()
+        * callback. If this is not specified, then a default callback will be
+        * provided that returns the line offset.
+        */
+       unsigned int (*child_offset_to_irq)(struct gpio_chip *chip,
+                                           unsigned int pin);
+
+       /**
+        * @child_irq_domain_ops:
+        *
+        * The IRQ domain operations that will be used for this GPIO IRQ
+        * chip. If no operations are provided, then default callbacks will
+        * be populated to setup the IRQ hierarchy. Some drivers need to
+        * supply their own translate function.
+        */
+       struct irq_domain_ops child_irq_domain_ops;
+#endif
+
        /**
         * @handler:
         *
@@ -125,11 +202,17 @@ struct gpio_irq_chip {
        bool threaded;
 
        /**
-        * @need_valid_mask:
-        *
-        * If set core allocates @valid_mask with all bits set to one.
+        * @init_valid_mask: optional routine to initialize @valid_mask, to be
+        * used if not all GPIO lines are valid interrupts. Sometimes some
+        * lines just cannot fire interrupts, and this routine, when defined,
+        * is passed a bitmap in "valid_mask" and it will have ngpios
+        * bits from 0..(ngpios-1) set to "1" as in valid. The callback can
+        * then directly set some bits to "0" if they cannot be used for
+        * interrupts.
         */
-       bool need_valid_mask;
+       void (*init_valid_mask)(struct gpio_chip *chip,
+                               unsigned long *valid_mask,
+                               unsigned int ngpios);
 
        /**
         * @valid_mask:
@@ -161,7 +244,6 @@ struct gpio_irq_chip {
         */
        void            (*irq_disable)(struct irq_data *data);
 };
-#endif /* CONFIG_GPIOLIB_IRQCHIP */
 
 /**
  * struct gpio_chip - abstract a GPIO controller
@@ -282,7 +364,9 @@ struct gpio_chip {
        void                    (*dbg_show)(struct seq_file *s,
                                                struct gpio_chip *chip);
 
-       int                     (*init_valid_mask)(struct gpio_chip *chip);
+       int                     (*init_valid_mask)(struct gpio_chip *chip,
+                                                  unsigned long *valid_mask,
+                                                  unsigned int ngpios);
 
        int                     base;
        u16                     ngpio;
@@ -320,15 +404,6 @@ struct gpio_chip {
        struct gpio_irq_chip irq;
 #endif /* CONFIG_GPIOLIB_IRQCHIP */
 
-       /**
-        * @need_valid_mask:
-        *
-        * If set core allocates @valid_mask with all its values initialized
-        * with init_valid_mask() or set to one if init_valid_mask() is not
-        * defined
-        */
-       bool need_valid_mask;
-
        /**
         * @valid_mask:
         *
@@ -421,9 +496,6 @@ extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
 extern struct gpio_chip *gpiochip_find(void *data,
                              int (*match)(struct gpio_chip *chip, void *data));
 
-/* lock/unlock as IRQ */
-int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
-void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
 bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset);
 int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset);
 void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset);
@@ -441,15 +513,40 @@ bool gpiochip_line_is_valid(const struct gpio_chip *chip, unsigned int offset);
 /* get driver data */
 void *gpiochip_get_data(struct gpio_chip *chip);
 
-struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
-
 struct bgpio_pdata {
        const char *label;
        int base;
        int ngpio;
 };
 
-#if IS_ENABLED(CONFIG_GPIO_GENERIC)
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+
+void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
+                                            struct irq_fwspec *fwspec,
+                                            unsigned int parent_hwirq,
+                                            unsigned int parent_type);
+void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
+                                             struct irq_fwspec *fwspec,
+                                             unsigned int parent_hwirq,
+                                             unsigned int parent_type);
+
+#else
+
+static inline void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
+                                                   struct irq_fwspec *fwspec,
+                                                   unsigned int parent_hwirq,
+                                                   unsigned int parent_type)
+{
+}
+
+static inline void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
+                                                    struct irq_fwspec *fwspec,
+                                                    unsigned int parent_hwirq,
+                                                    unsigned int parent_type)
+{
+}
+
+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
 
 int bgpio_init(struct gpio_chip *gc, struct device *dev,
               unsigned long sz, void __iomem *dat, void __iomem *set,
@@ -463,10 +560,6 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
 #define BGPIOF_READ_OUTPUT_REG_SET     BIT(4) /* reg_set stores output value */
 #define BGPIOF_NO_OUTPUT               BIT(5) /* only input */
 
-#endif /* CONFIG_GPIO_GENERIC */
-
-#ifdef CONFIG_GPIOLIB_IRQCHIP
-
 int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
                     irq_hw_number_t hwirq);
 void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq);
@@ -555,15 +648,11 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
 }
 #endif /* CONFIG_LOCKDEP */
 
-#endif /* CONFIG_GPIOLIB_IRQCHIP */
-
 int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset);
 void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset);
 int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
                            unsigned long config);
 
-#ifdef CONFIG_PINCTRL
-
 /**
  * struct gpio_pin_range - pin range controlled by a gpio chip
  * @node: list for maintaining set of pin ranges, used internally
@@ -576,6 +665,8 @@ struct gpio_pin_range {
        struct pinctrl_gpio_range range;
 };
 
+#ifdef CONFIG_PINCTRL
+
 int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
                           unsigned int gpio_offset, unsigned int pin_offset,
                           unsigned int npins);
@@ -586,8 +677,6 @@ void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
 
 #else /* ! CONFIG_PINCTRL */
 
-struct pinctrl_dev;
-
 static inline int
 gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
                       unsigned int gpio_offset, unsigned int pin_offset,
@@ -619,6 +708,15 @@ void gpiochip_free_own_desc(struct gpio_desc *desc);
 void devprop_gpiochip_set_names(struct gpio_chip *chip,
                                const struct fwnode_handle *fwnode);
 
+#ifdef CONFIG_GPIOLIB
+
+/* lock/unlock as IRQ */
+int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
+void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
+
+
+struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
+
 #else /* CONFIG_GPIOLIB */
 
 static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
@@ -628,6 +726,18 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
        return ERR_PTR(-ENODEV);
 }
 
+static inline int gpiochip_lock_as_irq(struct gpio_chip *chip,
+                                      unsigned int offset)
+{
+       WARN_ON(1);
+       return -EINVAL;
+}
+
+static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip,
+                                         unsigned int offset)
+{
+       WARN_ON(1);
+}
 #endif /* CONFIG_GPIOLIB */
 
-#endif
+#endif /* __LINUX_GPIO_DRIVER_H */
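
The hierarchical irqdomain support added above centres on the child_to_parent_hwirq callback, which maps a GPIO line offset to a hardware IRQ on the parent interrupt controller. A hedged sketch of a simple linear mapping; a real chip would use its own routing table, and the fixed DEMO_PARENT_BASE offset is an assumption:

/* Sketch: GPIO line n is wired to parent hwirq DEMO_PARENT_BASE + n. */
#define DEMO_PARENT_BASE        32      /* assumed offset on the parent irqchip */

static int demo_child_to_parent_hwirq(struct gpio_chip *chip,
                                      unsigned int child_hwirq,
                                      unsigned int child_type,
                                      unsigned int *parent_hwirq,
                                      unsigned int *parent_type)
{
        *parent_hwirq = child_hwirq + DEMO_PARENT_BASE;
        *parent_type = child_type;
        return 0;
}

/* Wiring it up in probe, using the struct gpio_irq_chip fields above:
 *   girq->fwnode = of_node_to_fwnode(np);
 *   girq->parent_domain = parent_irq_domain;
 *   girq->child_to_parent_hwirq = demo_child_to_parent_hwirq;
 */
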
index 5ecb055fd3759f49aa34c9c019b245e47965035b..de102e4418ab4118f00cf5fd5b6adda4c67eb73e 100644 (file)
@@ -188,6 +188,10 @@ static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
 struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
 struct i3c_device *dev_to_i3cdev(struct device *dev);
 
+const struct i3c_device_id *
+i3c_device_match_id(struct i3c_device *i3cdev,
+                   const struct i3c_device_id *id_table);
+
 static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev,
                                      void *data)
 {
index 1f08fa8d69d2725e204f761260692b610b89b31e..9cb39d901cd5f95070adc82609f50134ff9d8937 100644 (file)
@@ -71,6 +71,9 @@ struct i2c_dev_boardinfo {
  * @common: common part of the I2C device descriptor
  * @boardinfo: pointer to the boardinfo attached to this I2C device
  * @dev: I2C device object registered to the I2C framework
+ * @addr: I2C device address
+ * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
+ *      the I2C device limitations
  *
  * Each I2C device connected on the bus will have an i2c_dev_desc.
  * This object is created by the core and later attached to the controller
@@ -84,6 +87,8 @@ struct i2c_dev_desc {
        struct i3c_i2c_dev_desc common;
        const struct i2c_dev_boardinfo *boardinfo;
        struct i2c_client *dev;
+       u16 addr;
+       u8 lvr;
 };
 
 /**
index ceabb01a6a7d939b8d7149e1e06877bd635829cd..1ecb6b45812c9dfd70d9eb77a48baf5eafef74d9 100644 (file)
@@ -48,7 +48,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0618", 0 },
        { "ELAN0619", 0 },
        { "ELAN061A", 0 },
-       { "ELAN061B", 0 },
+/*     { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
        { "ELAN061C", 0 },
        { "ELAN061D", 0 },
        { "ELAN061E", 0 },
index f2ae8a006ff8b279b9b86d68d49fcb8b62d70531..ed11ef594378d8e6ecec23071754ff8552ebe4f1 100644 (file)
 #define dma_frcd_type(d) ((d >> 30) & 1)
 #define dma_frcd_fault_reason(c) (c & 0xff)
 #define dma_frcd_source_id(c) (c & 0xffff)
+#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
+#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
 /* low 64 bit */
 #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
 
@@ -346,7 +348,6 @@ enum {
 #define QI_PC_PASID_SEL                (QI_PC_TYPE | QI_PC_GRAN(1))
 
 #define QI_EIOTLB_ADDR(addr)   ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_GL(gl)       (((u64)gl) << 7)
 #define QI_EIOTLB_IH(ih)       (((u64)ih) << 6)
 #define QI_EIOTLB_AM(am)       (((u64)am))
 #define QI_EIOTLB_PASID(pasid)         (((u64)pasid) << 32)
@@ -378,8 +379,6 @@ enum {
 #define QI_RESP_INVALID                0x1
 #define QI_RESP_FAILURE                0xf
 
-#define QI_GRAN_ALL_ALL                        0
-#define QI_GRAN_NONG_ALL               1
 #define QI_GRAN_NONG_PASID             2
 #define QI_GRAN_PSI_PASID              3
 
index b5a450a3bb47a6f25beafa9510902af070f14dbd..ec7a13405f10bbece1606f51f7f6b27df1dcfe48 100644 (file)
@@ -1,7 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __IO_PGTABLE_H
 #define __IO_PGTABLE_H
+
 #include <linux/bitops.h>
+#include <linux/iommu.h>
 
 /*
  * Public API for use by IOMMU drivers
@@ -17,22 +19,31 @@ enum io_pgtable_fmt {
 };
 
 /**
- * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
+ * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
  *
- * @tlb_flush_all: Synchronously invalidate the entire TLB context.
- * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
- * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
- *                 any corresponding page table updates are visible to the
- *                 IOMMU.
+ * @tlb_flush_all:  Synchronously invalidate the entire TLB context.
+ * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
+ *                  (sometimes referred to as the "walk cache") for a virtual
+ *                  address range.
+ * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
+ *                  address range.
+ * @tlb_add_page:   Optional callback to queue up leaf TLB invalidation for a
+ *                  single page.  IOMMUs that cannot batch TLB invalidation
+ *                  operations efficiently will typically issue them here, but
+ *                  others may decide to update the iommu_iotlb_gather structure
+ *                  and defer the invalidation until iommu_tlb_sync() instead.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
  */
-struct iommu_gather_ops {
+struct iommu_flush_ops {
        void (*tlb_flush_all)(void *cookie);
-       void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
-                             bool leaf, void *cookie);
-       void (*tlb_sync)(void *cookie);
+       void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
+                              void *cookie);
+       void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
+                              void *cookie);
+       void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
+                            unsigned long iova, size_t granule, void *cookie);
 };
 
 /**
@@ -65,10 +76,9 @@ struct io_pgtable_cfg {
         *      (unmapped) entries but the hardware might do so anyway, perform
         *      TLB maintenance when mapping as well as when unmapping.
         *
-        * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
-        *      PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
-        *      when the SoC is in "4GB mode" and they can only access the high
-        *      remap of DRAM (0x1_00000000 to 0x1_ffffffff).
+        * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend
+        *      to support up to 34 bits PA where the bit32 and bit33 are
+        *      encoded in the bit9 and bit4 of the PTE respectively.
         *
         * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
         *      on unmap, for DMA domains using the flush queue mechanism for
@@ -77,14 +87,14 @@ struct io_pgtable_cfg {
        #define IO_PGTABLE_QUIRK_ARM_NS         BIT(0)
        #define IO_PGTABLE_QUIRK_NO_PERMS       BIT(1)
        #define IO_PGTABLE_QUIRK_TLBI_ON_MAP    BIT(2)
-       #define IO_PGTABLE_QUIRK_ARM_MTK_4GB    BIT(3)
+       #define IO_PGTABLE_QUIRK_ARM_MTK_EXT    BIT(3)
        #define IO_PGTABLE_QUIRK_NON_STRICT     BIT(4)
        unsigned long                   quirks;
        unsigned long                   pgsize_bitmap;
        unsigned int                    ias;
        unsigned int                    oas;
        bool                            coherent_walk;
-       const struct iommu_gather_ops   *tlb;
+       const struct iommu_flush_ops    *tlb;
        struct device                   *iommu_dev;
 
        /* Low-level data specific to the table format */
@@ -128,7 +138,7 @@ struct io_pgtable_ops {
        int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
                   phys_addr_t paddr, size_t size, int prot);
        size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
-                       size_t size);
+                       size_t size, struct iommu_iotlb_gather *gather);
        phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
                                    unsigned long iova);
 };
@@ -184,15 +194,27 @@ static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
        iop->cfg.tlb->tlb_flush_all(iop->cookie);
 }
 
-static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
-               unsigned long iova, size_t size, size_t granule, bool leaf)
+static inline void
+io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
+                         size_t size, size_t granule)
+{
+       iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
+}
+
+static inline void
+io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
+                         size_t size, size_t granule)
 {
-       iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
+       iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
 }
 
-static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
+static inline void
+io_pgtable_tlb_add_page(struct io_pgtable *iop,
+                       struct iommu_iotlb_gather * gather, unsigned long iova,
+                       size_t granule)
 {
-       iop->cfg.tlb->tlb_sync(iop->cookie);
+       if (iop->cfg.tlb->tlb_add_page)
+               iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
 }
 
 /**
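
The renamed iommu_flush_ops above splits invalidation into walk, leaf and per-page paths, with tlb_add_page optionally batching pages into the new iommu_iotlb_gather instead of flushing immediately. A hedged sketch of a driver-side implementation; the demo_inv_* hardware hooks and demo_domain() cookie lookup are assumptions:

/* Sketch of iommu_flush_ops for a hypothetical IOMMU driver. */
static void demo_tlb_flush_all(void *cookie)
{
        demo_inv_all(cookie);                   /* assumed hw invalidate-all */
}

static void demo_tlb_flush_walk(unsigned long iova, size_t size,
                                size_t granule, void *cookie)
{
        demo_inv_range(cookie, iova, size);     /* walk cache + leaf entries */
}

static void demo_tlb_flush_leaf(unsigned long iova, size_t size,
                                size_t granule, void *cookie)
{
        demo_inv_range(cookie, iova, size);     /* leaf entries only */
}

static void demo_tlb_add_page(struct iommu_iotlb_gather *gather,
                              unsigned long iova, size_t granule, void *cookie)
{
        /* Batch the page; ->iotlb_sync() will flush the gathered range. */
        iommu_iotlb_gather_add_page(demo_domain(cookie), gather, iova, granule);
}

static const struct iommu_flush_ops demo_flush_ops = {
        .tlb_flush_all  = demo_tlb_flush_all,
        .tlb_flush_walk = demo_tlb_flush_walk,
        .tlb_flush_leaf = demo_tlb_flush_leaf,
        .tlb_add_page   = demo_tlb_add_page,
};
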
index fdc355ccc5701e59d5894bdd648ccb1c19d26d95..29bac5345563aca434bfb86ab00df539ab80d080 100644 (file)
@@ -191,6 +191,23 @@ struct iommu_sva_ops {
 
 #ifdef CONFIG_IOMMU_API
 
+/**
+ * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
+ *
+ * @start: IOVA representing the start of the range to be flushed
+ * @end: IOVA representing the end of the range to be flushed (exclusive)
+ * @pgsize: The interval at which to perform the flush
+ *
+ * This structure is intended to be updated by multiple calls to the
+ * ->unmap() function in struct iommu_ops before eventually being passed
+ * into ->iotlb_sync().
+ */
+struct iommu_iotlb_gather {
+       unsigned long           start;
+       unsigned long           end;
+       size_t                  pgsize;
+};
+
 /**
  * struct iommu_ops - iommu ops and capabilities
  * @capable: check capability
@@ -201,7 +218,6 @@ struct iommu_sva_ops {
  * @map: map a physically contiguous memory region to an iommu domain
  * @unmap: unmap a physically contiguous memory region from an iommu domain
  * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
- * @iotlb_range_add: Add a given iova range to the flush queue for this domain
  * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
  * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
  *            queue
@@ -242,12 +258,11 @@ struct iommu_ops {
        int (*map)(struct iommu_domain *domain, unsigned long iova,
                   phys_addr_t paddr, size_t size, int prot);
        size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
-                    size_t size);
+                    size_t size, struct iommu_iotlb_gather *iotlb_gather);
        void (*flush_iotlb_all)(struct iommu_domain *domain);
-       void (*iotlb_range_add)(struct iommu_domain *domain,
-                               unsigned long iova, size_t size);
        void (*iotlb_sync_map)(struct iommu_domain *domain);
-       void (*iotlb_sync)(struct iommu_domain *domain);
+       void (*iotlb_sync)(struct iommu_domain *domain,
+                          struct iommu_iotlb_gather *iotlb_gather);
        phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
        int (*add_device)(struct device *dev);
        void (*remove_device)(struct device *dev);
@@ -378,6 +393,13 @@ static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
        return (struct iommu_device *)dev_get_drvdata(dev);
 }
 
+static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
+{
+       *gather = (struct iommu_iotlb_gather) {
+               .start  = ULONG_MAX,
+       };
+}
+
 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE          1 /* Device added */
 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE          2 /* Pre Device removed */
 #define IOMMU_GROUP_NOTIFY_BIND_DRIVER         3 /* Pre Driver bind */
@@ -402,7 +424,8 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                          size_t size);
 extern size_t iommu_unmap_fast(struct iommu_domain *domain,
-                              unsigned long iova, size_t size);
+                              unsigned long iova, size_t size,
+                              struct iommu_iotlb_gather *iotlb_gather);
 extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                           struct scatterlist *sg,unsigned int nents, int prot);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
@@ -413,6 +436,9 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
 extern int iommu_request_dma_domain_for_dev(struct device *dev);
+extern void iommu_set_default_passthrough(bool cmd_line);
+extern void iommu_set_default_translated(bool cmd_line);
+extern bool iommu_default_passthrough(void);
 extern struct iommu_resv_region *
 iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
                        enum iommu_resv_type type);
@@ -476,17 +502,38 @@ static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
                domain->ops->flush_iotlb_all(domain);
 }
 
-static inline void iommu_tlb_range_add(struct iommu_domain *domain,
-                                      unsigned long iova, size_t size)
+static inline void iommu_tlb_sync(struct iommu_domain *domain,
+                                 struct iommu_iotlb_gather *iotlb_gather)
 {
-       if (domain->ops->iotlb_range_add)
-               domain->ops->iotlb_range_add(domain, iova, size);
+       if (domain->ops->iotlb_sync)
+               domain->ops->iotlb_sync(domain, iotlb_gather);
+
+       iommu_iotlb_gather_init(iotlb_gather);
 }
 
-static inline void iommu_tlb_sync(struct iommu_domain *domain)
+static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+                                              struct iommu_iotlb_gather *gather,
+                                              unsigned long iova, size_t size)
 {
-       if (domain->ops->iotlb_sync)
-               domain->ops->iotlb_sync(domain);
+       unsigned long start = iova, end = start + size;
+
+       /*
+        * If the new page is disjoint from the current range or is mapped at
+        * a different granularity, then sync the TLB so that the gather
+        * structure can be rewritten.
+        */
+       if (gather->pgsize != size ||
+           end < gather->start || start > gather->end) {
+               if (gather->pgsize)
+                       iommu_tlb_sync(domain, gather);
+               gather->pgsize = size;
+       }
+
+       if (gather->end < end)
+               gather->end = end;
+
+       if (gather->start > start)
+               gather->start = start;
 }
 
 /* PCI device grouping function */
@@ -567,6 +614,7 @@ struct iommu_group {};
 struct iommu_fwspec {};
 struct iommu_device {};
 struct iommu_fault_param {};
+struct iommu_iotlb_gather {};
 
 static inline bool iommu_present(struct bus_type *bus)
 {
@@ -621,7 +669,8 @@ static inline size_t iommu_unmap(struct iommu_domain *domain,
 }
 
 static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
-                                     unsigned long iova, int gfp_order)
+                                     unsigned long iova, int gfp_order,
+                                     struct iommu_iotlb_gather *iotlb_gather)
 {
        return 0;
 }
@@ -637,12 +686,8 @@ static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
 {
 }
 
-static inline void iommu_tlb_range_add(struct iommu_domain *domain,
-                                      unsigned long iova, size_t size)
-{
-}
-
-static inline void iommu_tlb_sync(struct iommu_domain *domain)
+static inline void iommu_tlb_sync(struct iommu_domain *domain,
+                                 struct iommu_iotlb_gather *iotlb_gather)
 {
 }
 
@@ -694,6 +739,19 @@ static inline int iommu_request_dma_domain_for_dev(struct device *dev)
        return -ENODEV;
 }
 
+static inline void iommu_set_default_passthrough(bool cmd_line)
+{
+}
+
+static inline void iommu_set_default_translated(bool cmd_line)
+{
+}
+
+static inline bool iommu_default_passthrough(void)
+{
+       return true;
+}
+
 static inline int iommu_attach_group(struct iommu_domain *domain,
                                     struct iommu_group *group)
 {
@@ -827,6 +885,16 @@ static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
        return NULL;
 }
 
+static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
+{
+}
+
+static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+                                              struct iommu_iotlb_gather *gather,
+                                              unsigned long iova, size_t size)
+{
+}
+
 static inline void iommu_device_unregister(struct iommu_device *iommu)
 {
 }
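
The net effect of the <linux/iommu.h> hunks above is a batched unmap/flush pattern: the caller keeps an iommu_iotlb_gather on the stack, drivers accumulate the dirty IOVA range into it from their ->unmap() callbacks (via iommu_iotlb_gather_add_page()), and a single iommu_tlb_sync() flushes the hardware TLBs. A minimal caller-side sketch of that pattern; the helper name unmap_batched is illustrative, not part of the patch:

#include <linux/iommu.h>

/* Illustrative helper, not from the patch: unmap a range with one TLB sync. */
static size_t unmap_batched(struct iommu_domain *domain, unsigned long iova,
                            size_t size)
{
        struct iommu_iotlb_gather gather;
        size_t unmapped;

        iommu_iotlb_gather_init(&gather);               /* start = ULONG_MAX */
        unmapped = iommu_unmap_fast(domain, iova, size, &gather);
        iommu_tlb_sync(domain, &gather);                /* flush and re-init */

        return unmapped;
}
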
index cbd9d8495690212fc689d3ce3a7caf0812d45523..88e1e6304a7193bca45a14500f7ade4632dbd335 100644 (file)
@@ -117,6 +117,7 @@ struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
 unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
                        resource_size_t hw_addr, resource_size_t size);
 int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
 resource_size_t logic_pio_to_hwaddr(unsigned long pio);
 unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
 
index d77d717c620cbe3362b5787086b546f08b938f46..3f38c30d2f13d1e3b86408a6edf1e0eb8ef52778 100644 (file)
@@ -215,8 +215,9 @@ enum node_stat_item {
        NR_INACTIVE_FILE,       /*  "     "     "   "       "         */
        NR_ACTIVE_FILE,         /*  "     "     "   "       "         */
        NR_UNEVICTABLE,         /*  "     "     "   "       "         */
-       NR_SLAB_RECLAIMABLE,
-       NR_SLAB_UNRECLAIMABLE,
+       NR_SLAB_RECLAIMABLE,    /* Please do not reorder this item */
+       NR_SLAB_UNRECLAIMABLE,  /* and this one without looking at
+                                * memcg_flush_percpu_vmstats() first. */
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
        WORKINGSET_NODES,
index 7a6871ac8784f56d1ba12f5530eb7232da9066ba..74c6f9241944d2fd6e70f59139d01c1099dcaf9d 100644 (file)
@@ -4,6 +4,9 @@
  * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
  */
 
+#ifndef _NF_CONNTRACK_H323_TYPES_H
+#define _NF_CONNTRACK_H323_TYPES_H
+
 typedef struct TransportAddress_ipAddress {    /* SEQUENCE */
        int options;            /* No use */
        unsigned int ip;
@@ -931,3 +934,5 @@ typedef struct RasMessage { /* CHOICE */
                InfoRequestResponse infoRequestResponse;
        };
 } RasMessage;
+
+#endif /* _NF_CONNTRACK_H323_TYPES_H */
index f9737dea9d1f945a574c25aea05d02144e185b13..16967390a3fe3b12c00c1a80f1fb088089b3838e 100644 (file)
@@ -61,10 +61,6 @@ static inline int of_mm_gpiochip_add(struct device_node *np,
 }
 extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
 
-extern int of_gpio_simple_xlate(struct gpio_chip *gc,
-                               const struct of_phandle_args *gpiospec,
-                               u32 *flags);
-
 #else /* CONFIG_OF_GPIO */
 
 /* Drivers may not strictly depend on the GPIO support, so let them link. */
@@ -77,13 +73,6 @@ static inline int of_get_named_gpio_flags(struct device_node *np,
        return -ENOSYS;
 }
 
-static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
-                                      const struct of_phandle_args *gpiospec,
-                                      u32 *flags)
-{
-       return -ENOSYS;
-}
-
 #endif /* CONFIG_OF_GPIO */
 
 /**
index 153bf25b4df366d01f0ce16bd6e9b75f9ae9eaae..2c32ca09df02504447222d744d5459eaa2a3e9cb 100644 (file)
 #ifndef _OMAP_IOMMU_H_
 #define _OMAP_IOMMU_H_
 
+struct iommu_domain;
+
 #ifdef CONFIG_OMAP_IOMMU
 extern void omap_iommu_save_ctx(struct device *dev);
 extern void omap_iommu_restore_ctx(struct device *dev);
+
+int omap_iommu_domain_deactivate(struct iommu_domain *domain);
+int omap_iommu_domain_activate(struct iommu_domain *domain);
 #else
 static inline void omap_iommu_save_ctx(struct device *dev) {}
 static inline void omap_iommu_restore_ctx(struct device *dev) {}
+
+static inline int omap_iommu_domain_deactivate(struct iommu_domain *domain)
+{
+       return -ENODEV;
+}
+
+static inline int omap_iommu_domain_activate(struct iommu_domain *domain)
+{
+       return -ENODEV;
+}
 #endif
 
 #endif
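
The new omap_iommu_domain_activate()/omap_iommu_domain_deactivate() hooks let a client driver park and restore its MMU around power transitions; the !CONFIG_OMAP_IOMMU stubs return -ENODEV so callers can degrade gracefully. A hedged sketch of a client suspend hook; the function name and the use of iommu_get_domain_for_dev() are assumptions, not taken from this diff:

#include <linux/iommu.h>
#include <linux/omap-iommu.h>

/* Hypothetical client suspend path: deactivate the attached MMU, treating
 * -ENODEV (OMAP IOMMU support not built in) as "nothing to do". */
static int client_mmu_suspend(struct device *dev)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        int ret;

        if (!domain)
                return 0;

        ret = omap_iommu_domain_deactivate(domain);
        return ret == -ENODEV ? 0 : ret;
}
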
index c842735a4f45a41d4ce305d236d08a8b2d0ef3b6..4b97f427cc929972de59bcd4b1de5531ea56e16a 100644 (file)
 #define PCI_DEVICE_ID_AMD_17H_DF_F3    0x1463
 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
 #define PCI_DEVICE_ID_AMD_CNB17H_F3    0x1703
 #define PCI_DEVICE_ID_AMD_LANCE                0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME   0x2001
index 462b90b73f939d475e800b7b8cec0cf96f6152d4..2fb9c8ffaf10a5b3ff654095a190c7c9df912fff 100644 (file)
@@ -1107,6 +1107,7 @@ int genphy_c45_an_disable_aneg(struct phy_device *phydev);
 int genphy_c45_read_mdix(struct phy_device *phydev);
 int genphy_c45_pma_read_abilities(struct phy_device *phydev);
 int genphy_c45_read_status(struct phy_device *phydev);
+int genphy_c45_config_aneg(struct phy_device *phydev);
 
 /* The gen10g_* functions are the old Clause 45 stub */
 int gen10g_config_aneg(struct phy_device *phydev);
index 1e5d86ebdaeb54cb5718ff10c8a8f74adc3a8efc..52bc8e487ef7d3442b562ffd9907027e53a310c4 100644 (file)
@@ -11,6 +11,7 @@ struct fixed_phy_status {
 };
 
 struct device_node;
+struct gpio_desc;
 
 #if IS_ENABLED(CONFIG_FIXED_PHY)
 extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
index 2a83e434db9d080667bd8f3465552c797e814c10..9645b1194c9812adc23c62b9518c305c3dcacf40 100644 (file)
@@ -72,6 +72,10 @@ extern struct pid init_struct_pid;
 
 extern const struct file_operations pidfd_fops;
 
+struct file;
+
+extern struct pid *pidfd_pid(const struct file *file);
+
 static inline struct pid *get_pid(struct pid *pid)
 {
        if (pid)
index 9a3e78082883f366922e318a0794baf393bd74bb..eaefba0b6465b48c5ddfadf0e0f7f16c0610ccfb 100644 (file)
@@ -50,7 +50,4 @@ struct htc_egpio_platform_data {
        int                   num_chips;
 };
 
-/* Determine the wakeup irq, to be called during early resume */
-extern int htc_egpio_get_wakeup_irq(struct device *dev);
-
 #endif
index 44d913a7580ce5c445e73e3f7c402cb3a94d2cd9..8474a0208b34ff8a99e93c8a215ef18575dca7cb 100644 (file)
@@ -13,4 +13,8 @@ struct iommu_platform_data {
        const char *reset_name;
        int (*assert_reset)(struct platform_device *pdev, const char *name);
        int (*deassert_reset)(struct platform_device *pdev, const char *name);
+       int (*device_enable)(struct platform_device *pdev);
+       int (*device_idle)(struct platform_device *pdev);
+       int (*set_pwrdm_constraint)(struct platform_device *pdev, bool request,
+                                   u8 *pwrst);
 };
index a8a15613c157ef48bfbc87810d9a04c8edba3e1b..e2bacc6fd2f2d93710cd65d9751a2d1db0b04b37 100644 (file)
@@ -15,8 +15,8 @@
 
 bool psci_tos_resident_on(int cpu);
 
-int psci_cpu_init_idle(unsigned int cpu);
-int psci_cpu_suspend_enter(unsigned long index);
+int psci_cpu_suspend_enter(u32 state);
+bool psci_power_state_is_valid(u32 state);
 
 enum psci_conduit {
        PSCI_CONDUIT_NONE,
index 1f7dced2bba66b8afe6f0824ee8dba615f0fb8cc..f189c927fdea56314ffbd69803da6c67ac44f2b4 100644 (file)
@@ -19,6 +19,7 @@ struct random_ready_callback {
 };
 
 extern void add_device_randomness(const void *, unsigned int);
+extern void add_bootloader_randomness(const void *, unsigned int);
 
 #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
 static inline void add_latent_entropy(void)
index 815983419375a28fb5db6d87843d7acc2d193501..337a463915278cc000cbdb0d47b40ea4e5645a7d 100644 (file)
@@ -281,6 +281,12 @@ void devm_regulator_unregister_notifier(struct regulator *regulator,
 void *regulator_get_drvdata(struct regulator *regulator);
 void regulator_set_drvdata(struct regulator *regulator, void *data);
 
+/* misc helpers */
+
+void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
+                                    const char *const *supply_names,
+                                    unsigned int num_supplies);
+
 #else
 
 /*
@@ -580,6 +586,13 @@ static inline int regulator_list_voltage(struct regulator *regulator, unsigned s
        return -EINVAL;
 }
 
+static inline void
+regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
+                               const char *const *supply_names,
+                               unsigned int num_supplies)
+{
+}
+
 #endif
 
 static inline int regulator_set_voltage_triplet(struct regulator *regulator,
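
The new regulator_bulk_set_supply_names() helper (stubbed out when the regulator framework is disabled) replaces the per-entry ->supply assignments consumers usually open-code before a bulk get. A hedged probe-time sketch with made-up supply names:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static const char *const demo_supply_names[] = { "vdda", "vddio" };

/* Illustrative only: fill the bulk table from the name array, then get them. */
static int demo_get_supplies(struct device *dev,
                             struct regulator_bulk_data *bulk)
{
        regulator_bulk_set_supply_names(bulk, demo_supply_names,
                                        ARRAY_SIZE(demo_supply_names));
        return devm_regulator_bulk_get(dev, ARRAY_SIZE(demo_supply_names), bulk);
}
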
diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h
new file mode 100644 (file)
index 0000000..1cc3049
--- /dev/null
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6358_H
+#define __LINUX_REGULATOR_MT6358_H
+
+enum {
+       MT6358_ID_VDRAM1 = 0,
+       MT6358_ID_VCORE,
+       MT6358_ID_VPA,
+       MT6358_ID_VPROC11,
+       MT6358_ID_VPROC12,
+       MT6358_ID_VGPU,
+       MT6358_ID_VS2,
+       MT6358_ID_VMODEM,
+       MT6358_ID_VS1,
+       MT6358_ID_VDRAM2 = 9,
+       MT6358_ID_VSIM1,
+       MT6358_ID_VIBR,
+       MT6358_ID_VRF12,
+       MT6358_ID_VIO18,
+       MT6358_ID_VUSB,
+       MT6358_ID_VCAMIO,
+       MT6358_ID_VCAMD,
+       MT6358_ID_VCN18,
+       MT6358_ID_VFE28,
+       MT6358_ID_VSRAM_PROC11,
+       MT6358_ID_VCN28,
+       MT6358_ID_VSRAM_OTHERS,
+       MT6358_ID_VSRAM_GPU,
+       MT6358_ID_VXO22,
+       MT6358_ID_VEFUSE,
+       MT6358_ID_VAUX18,
+       MT6358_ID_VMCH,
+       MT6358_ID_VBIF28,
+       MT6358_ID_VSRAM_PROC12,
+       MT6358_ID_VCAMA1,
+       MT6358_ID_VEMC,
+       MT6358_ID_VIO28,
+       MT6358_ID_VA12,
+       MT6358_ID_VRF18,
+       MT6358_ID_VCN33_BT,
+       MT6358_ID_VCN33_WIFI,
+       MT6358_ID_VCAMA2,
+       MT6358_ID_VMC,
+       MT6358_ID_VLDO28,
+       MT6358_ID_VAUD28,
+       MT6358_ID_VSIM2,
+       MT6358_ID_RG_MAX,
+};
+
+#define MT6358_MAX_REGULATOR   MT6358_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6358_H */
index baa3ecdb882f2428ca1dad0159f794bd71ecff3a..27536b961552c21217b9ccad96a0ecec71243342 100644 (file)
@@ -98,7 +98,6 @@ typedef void                  (*rpc_action)(struct rpc_task *);
 
 struct rpc_call_ops {
        void (*rpc_call_prepare)(struct rpc_task *, void *);
-       void (*rpc_call_prepare_transmit)(struct rpc_task *, void *);
        void (*rpc_call_done)(struct rpc_task *, void *);
        void (*rpc_count_stats)(struct rpc_task *, void *);
        void (*rpc_release)(void *);
index 361f62bb4a8e92efd26e3f0d91e65193085dcb3b..cde3dc18e21a2cdee5ef36183d180946e45be0ea 100644 (file)
@@ -46,13 +46,17 @@ enum dma_sync_target {
 
 extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                                          dma_addr_t tbl_dma_addr,
-                                         phys_addr_t phys, size_t size,
+                                         phys_addr_t phys,
+                                         size_t mapping_size,
+                                         size_t alloc_size,
                                          enum dma_data_direction dir,
                                          unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
                                     phys_addr_t tlb_addr,
-                                    size_t size, enum dma_data_direction dir,
+                                    size_t mapping_size,
+                                    size_t alloc_size,
+                                    enum dma_data_direction dir,
                                     unsigned long attrs);
 
 extern void swiotlb_tbl_sync_single(struct device *hwdev,
index 88145da7d14078ff4af7594271d7411d7c8eba61..f7c561c4dcdd5fa5d1376ac0d75fad959729c48e 100644 (file)
@@ -1402,4 +1402,23 @@ static inline unsigned int ksys_personality(unsigned int personality)
        return old;
 }
 
+/* for __ARCH_WANT_SYS_IPC */
+long ksys_semtimedop(int semid, struct sembuf __user *tsops,
+                    unsigned int nsops,
+                    const struct __kernel_timespec __user *timeout);
+long ksys_semget(key_t key, int nsems, int semflg);
+long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
+long ksys_msgget(key_t key, int msgflg);
+long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
+long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+                long msgtyp, int msgflg);
+long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+                int msgflg);
+long ksys_shmget(key_t key, size_t size, int shmflg);
+long ksys_shmdt(char __user *shmaddr);
+long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
+long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
+                           unsigned int nsops,
+                           const struct old_timespec32 __user *timeout);
+
 #endif
index 5150436783e8e44e920a62a045e181e70fb98887..30a8cdcfd4a4f2c72e89eefcbf71e2bb6764dcd2 100644 (file)
@@ -548,6 +548,7 @@ extern int trace_event_get_offsets(struct trace_event_call *call);
 
 #define is_signed_type(type)   (((type)(-1)) < (type)1)
 
+int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
 int trace_set_clr_event(const char *system, const char *event, int set);
 
 /*
index f37d12877754b05a3f534097d7fd7c957326c94e..adcc6a97db6123e7a19ed43e59659f4874b9b5a0 100644 (file)
@@ -308,6 +308,7 @@ do {                                                                             \
                                                                             \
   case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO):                          \
     R##_e = X##_e;                                                          \
+         /* Fall through */                                                 \
   case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL):                           \
   case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF):                              \
   case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO):                                     \
@@ -318,6 +319,7 @@ do {                                                                             \
                                                                             \
   case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL):                          \
     R##_e = Y##_e;                                                          \
+         /* Fall through */                                                 \
   case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN):                           \
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN):                              \
   case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN):                                     \
@@ -415,6 +417,7 @@ do {                                                        \
   case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF):         \
   case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO):                \
     R##_s = X##_s;                                     \
+       /* Fall through */                              \
                                                        \
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF):         \
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL):      \
@@ -428,6 +431,7 @@ do {                                                        \
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN):         \
   case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN):                \
     R##_s = Y##_s;                                     \
+       /* Fall through */                              \
                                                        \
   case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF):      \
   case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO):     \
@@ -493,6 +497,7 @@ do {                                                        \
                                                        \
   case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO):     \
     FP_SET_EXCEPTION(FP_EX_DIVZERO);                   \
+         /* Fall through */                            \
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO):                \
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL):      \
     R##_c = FP_CLS_INF;                                        \
index c61a1bf4e3de544dd41886e9126b7ff20eb829c3..3a1a72990fceb7744725b65dd65a0827ad1d9636 100644 (file)
@@ -15,6 +15,7 @@
 struct tcf_idrinfo {
        struct mutex    lock;
        struct idr      action_idr;
+       struct net      *net;
 };
 
 struct tc_action_ops;
@@ -108,7 +109,7 @@ struct tc_action_net {
 };
 
 static inline
-int tc_action_net_init(struct tc_action_net *tn,
+int tc_action_net_init(struct net *net, struct tc_action_net *tn,
                       const struct tc_action_ops *ops)
 {
        int err = 0;
@@ -117,6 +118,7 @@ int tc_action_net_init(struct tc_action_net *tn,
        if (!tn->idrinfo)
                return -ENOMEM;
        tn->ops = ops;
+       tn->idrinfo->net = net;
        mutex_init(&tn->idrinfo->lock);
        idr_init(&tn->idrinfo->action_idr);
        return err;
index becdad57685908a4b07bb3df890260483aa3e36a..3f62b347b04a7bfee96425a173922cbe6d390cb3 100644 (file)
@@ -206,7 +206,7 @@ static inline int ipv6_mc_may_pull(struct sk_buff *skb,
                                   unsigned int len)
 {
        if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
-               return -EINVAL;
+               return 0;
 
        return pskb_may_pull(skb, len);
 }
index 4c81846ccce8c2e16e3effd0ab4573e07887255c..ab1ca9e238d2726507596b8a5408bc97a9f5e3d0 100644 (file)
@@ -513,7 +513,7 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
                          struct netlink_callback *cb);
 
 int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
-                    unsigned char *flags, bool skip_oif);
+                    u8 rt_family, unsigned char *flags, bool skip_oif);
 int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
-                   int nh_weight);
+                   int nh_weight, u8 rt_family);
 #endif  /* _NET_FIB_H */
index cb668bc2692db22e4639fcd3e95b21ed55f00410..ab40d7afdc541703e4e314fea1cb33950d2201f6 100644 (file)
@@ -52,7 +52,7 @@ struct bpf_prog;
 #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
 
 struct net {
-       refcount_t              passive;        /* To decided when the network
+       refcount_t              passive;        /* To decide when the network
                                                 * namespace should be freed.
                                                 */
        refcount_t              count;          /* To decided when the network
index 25f1f9a8419b88eba011f1d14c6e1987146b2b26..331ebbc94fe7846ed3563b6050c1d24e0d9a150c 100644 (file)
@@ -141,12 +141,6 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh)
 
                nh_grp = rcu_dereference_rtnl(nh->nh_grp);
                rc = nh_grp->num_nh;
-       } else {
-               const struct nh_info *nhi;
-
-               nhi = rcu_dereference_rtnl(nh->nh_info);
-               if (nhi->reject_nh)
-                       rc = 0;
        }
 
        return rc;
@@ -167,7 +161,8 @@ struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel)
 }
 
 static inline
-int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh)
+int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
+                           u8 rt_family)
 {
        struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
        int i;
@@ -178,7 +173,7 @@ int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh)
                struct fib_nh_common *nhc = &nhi->fib_nhc;
                int weight = nhg->nh_entries[i].weight;
 
-               if (fib_add_nexthop(skb, nhc, weight) < 0)
+               if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
                        return -EMSGSIZE;
        }
 
index 37a4df2325b2eff3ad8d17df2c76fde184dd87c1..6b578ce69cd8a2b78d0fcaed3005f435380c4823 100644 (file)
@@ -11,6 +11,7 @@ struct psample_group {
        u32 group_num;
        u32 refcount;
        u32 seq;
+       struct rcu_head rcu;
 };
 
 struct psample_group *psample_group_get(struct net *net, u32 group_num);
index 630a0493f1f31e082ef29a26810946959ab65513..dfce19c9fa9618add7323eeee6e42e6d1a08ad84 100644 (file)
@@ -233,7 +233,7 @@ void rt_del_uncached_list(struct rtable *rt);
 
 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
                       u32 table_id, struct fib_info *fi,
-                      int *fa_index, int fa_start);
+                      int *fa_index, int fa_start, unsigned int flags);
 
 static inline void ip_rt_put(struct rtable *rt)
 {
index b22db30c3d887818fdd20657ef073760f902aa83..aa08a7a5f6ac5836524dd34115cd57e2675e574d 100644 (file)
@@ -983,7 +983,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
 
 struct xfrm_if_parms {
-       char name[IFNAMSIZ];    /* name of XFRM device */
        int link;               /* ifindex of underlying L2 interface */
        u32 if_id;              /* interface identifyer */
 };
@@ -991,7 +990,6 @@ struct xfrm_if_parms {
 struct xfrm_if {
        struct xfrm_if __rcu *next;     /* next interface in list */
        struct net_device *dev;         /* virtual device associated with interface */
-       struct net_device *phydev;      /* physical device */
        struct net *net;                /* netns for packet i/o */
        struct xfrm_if_parms p;         /* interface parms */
 
index 50f49e043668c0bbcac77e77645ddb99af171ff0..d1a93c73f0062f5305b39cba66264aaccb148358 100644 (file)
@@ -46,7 +46,9 @@ struct mcip_cmd {
 #define CMD_IDU_ENABLE                 0x71
 #define CMD_IDU_DISABLE                        0x72
 #define CMD_IDU_SET_MODE               0x74
+#define CMD_IDU_READ_MODE              0x75
 #define CMD_IDU_SET_DEST               0x76
+#define CMD_IDU_ACK_CIRQ               0x79
 #define CMD_IDU_SET_MASK               0x7C
 
 #define IDU_M_TRIG_LEVEL               0x0
@@ -119,4 +121,13 @@ static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
        __mcip_cmd(cmd, param);
 }
 
+/*
+ * Read MCIP register
+ */
+static inline unsigned int __mcip_cmd_read(unsigned int cmd, unsigned int param)
+{
+       __mcip_cmd(cmd, param);
+       return read_aux_reg(ARC_REG_MCIP_READBACK);
+}
+
 #endif
index 79b74ced9d9132ae032cc8768d2cd8fbc67a6f05..5a34b87d89e32fdc7bbea9a138fcc5f80c482f99 100644 (file)
@@ -20,11 +20,6 @@ struct mtk_smi_larb_iommu {
        unsigned int   mmu;
 };
 
-struct mtk_smi_iommu {
-       unsigned int larb_nr;
-       struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
-};
-
 /*
  * mtk_smi_larb_get: Enable the power domain and clocks for this local arbiter.
  *                   It also initialize some basic setting(like iommu).
diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h
new file mode 100644 (file)
index 0000000..54e61d4
--- /dev/null
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel IOMMU trace support
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+#ifdef CONFIG_INTEL_IOMMU
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM intel_iommu
+
+#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INTEL_IOMMU_H
+
+#include <linux/tracepoint.h>
+#include <linux/intel-iommu.h>
+
+DECLARE_EVENT_CLASS(dma_map,
+       TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
+                size_t size),
+
+       TP_ARGS(dev, dev_addr, phys_addr, size),
+
+       TP_STRUCT__entry(
+               __string(dev_name, dev_name(dev))
+               __field(dma_addr_t, dev_addr)
+               __field(phys_addr_t, phys_addr)
+               __field(size_t, size)
+       ),
+
+       TP_fast_assign(
+               __assign_str(dev_name, dev_name(dev));
+               __entry->dev_addr = dev_addr;
+               __entry->phys_addr = phys_addr;
+               __entry->size = size;
+       ),
+
+       TP_printk("dev=%s dev_addr=0x%llx phys_addr=0x%llx size=%zu",
+                 __get_str(dev_name),
+                 (unsigned long long)__entry->dev_addr,
+                 (unsigned long long)__entry->phys_addr,
+                 __entry->size)
+);
+
+DEFINE_EVENT(dma_map, map_single,
+       TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
+                size_t size),
+       TP_ARGS(dev, dev_addr, phys_addr, size)
+);
+
+DEFINE_EVENT(dma_map, map_sg,
+       TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
+                size_t size),
+       TP_ARGS(dev, dev_addr, phys_addr, size)
+);
+
+DEFINE_EVENT(dma_map, bounce_map_single,
+       TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
+                size_t size),
+       TP_ARGS(dev, dev_addr, phys_addr, size)
+);
+
+DECLARE_EVENT_CLASS(dma_unmap,
+       TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
+
+       TP_ARGS(dev, dev_addr, size),
+
+       TP_STRUCT__entry(
+               __string(dev_name, dev_name(dev))
+               __field(dma_addr_t, dev_addr)
+               __field(size_t, size)
+       ),
+
+       TP_fast_assign(
+               __assign_str(dev_name, dev_name(dev));
+               __entry->dev_addr = dev_addr;
+               __entry->size = size;
+       ),
+
+       TP_printk("dev=%s dev_addr=0x%llx size=%zu",
+                 __get_str(dev_name),
+                 (unsigned long long)__entry->dev_addr,
+                 __entry->size)
+);
+
+DEFINE_EVENT(dma_unmap, unmap_single,
+       TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
+       TP_ARGS(dev, dev_addr, size)
+);
+
+DEFINE_EVENT(dma_unmap, unmap_sg,
+       TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
+       TP_ARGS(dev, dev_addr, size)
+);
+
+DEFINE_EVENT(dma_unmap, bounce_unmap_single,
+       TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
+       TP_ARGS(dev, dev_addr, size)
+);
+
+#endif /* _TRACE_INTEL_IOMMU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+#endif /* CONFIG_INTEL_IOMMU */
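
The new trace header follows the usual tracepoint pattern: each DEFINE_EVENT() above expands into a trace_<name>() call that the driver fires at the matching DMA step, with exactly one translation unit defining CREATE_TRACE_POINTS before including the header. A hedged sketch of how the bounce-map event might be emitted:

#define CREATE_TRACE_POINTS
#include <linux/dma-mapping.h>
#include <trace/events/intel_iommu.h>

/* Illustrative call site: record a bounce-buffered single mapping. */
static void note_bounce_map(struct device *dev, dma_addr_t dev_addr,
                            phys_addr_t phys, size_t size)
{
        trace_bounce_map_single(dev, dev_addr, phys, size);
}
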
index fa06b528c73c51d14f757be9328470d987e1945f..a13a62db356553552d0f20871075715d854ada2c 100644 (file)
 #define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
 
 enum rxrpc_skb_trace {
-       rxrpc_skb_rx_cleaned,
-       rxrpc_skb_rx_freed,
-       rxrpc_skb_rx_got,
-       rxrpc_skb_rx_lost,
-       rxrpc_skb_rx_purged,
-       rxrpc_skb_rx_received,
-       rxrpc_skb_rx_rotated,
-       rxrpc_skb_rx_seen,
-       rxrpc_skb_tx_cleaned,
-       rxrpc_skb_tx_freed,
-       rxrpc_skb_tx_got,
-       rxrpc_skb_tx_new,
-       rxrpc_skb_tx_rotated,
-       rxrpc_skb_tx_seen,
+       rxrpc_skb_cleaned,
+       rxrpc_skb_freed,
+       rxrpc_skb_got,
+       rxrpc_skb_lost,
+       rxrpc_skb_new,
+       rxrpc_skb_purged,
+       rxrpc_skb_received,
+       rxrpc_skb_rotated,
+       rxrpc_skb_seen,
+       rxrpc_skb_unshared,
+       rxrpc_skb_unshared_nomem,
 };
 
 enum rxrpc_local_trace {
@@ -228,20 +225,17 @@ enum rxrpc_tx_point {
  * Declare tracing information enums and their string mappings for display.
  */
 #define rxrpc_skb_traces \
-       EM(rxrpc_skb_rx_cleaned,                "Rx CLN") \
-       EM(rxrpc_skb_rx_freed,                  "Rx FRE") \
-       EM(rxrpc_skb_rx_got,                    "Rx GOT") \
-       EM(rxrpc_skb_rx_lost,                   "Rx *L*") \
-       EM(rxrpc_skb_rx_purged,                 "Rx PUR") \
-       EM(rxrpc_skb_rx_received,               "Rx RCV") \
-       EM(rxrpc_skb_rx_rotated,                "Rx ROT") \
-       EM(rxrpc_skb_rx_seen,                   "Rx SEE") \
-       EM(rxrpc_skb_tx_cleaned,                "Tx CLN") \
-       EM(rxrpc_skb_tx_freed,                  "Tx FRE") \
-       EM(rxrpc_skb_tx_got,                    "Tx GOT") \
-       EM(rxrpc_skb_tx_new,                    "Tx NEW") \
-       EM(rxrpc_skb_tx_rotated,                "Tx ROT") \
-       E_(rxrpc_skb_tx_seen,                   "Tx SEE")
+       EM(rxrpc_skb_cleaned,                   "CLN") \
+       EM(rxrpc_skb_freed,                     "FRE") \
+       EM(rxrpc_skb_got,                       "GOT") \
+       EM(rxrpc_skb_lost,                      "*L*") \
+       EM(rxrpc_skb_new,                       "NEW") \
+       EM(rxrpc_skb_purged,                    "PUR") \
+       EM(rxrpc_skb_received,                  "RCV") \
+       EM(rxrpc_skb_rotated,                   "ROT") \
+       EM(rxrpc_skb_seen,                      "SEE") \
+       EM(rxrpc_skb_unshared,                  "UNS") \
+       E_(rxrpc_skb_unshared_nomem,            "US0")
 
 #define rxrpc_local_traces \
        EM(rxrpc_local_got,                     "GOT") \
@@ -643,13 +637,14 @@ TRACE_EVENT(rxrpc_call,
 
 TRACE_EVENT(rxrpc_skb,
            TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
-                    int usage, int mod_count, const void *where),
+                    int usage, int mod_count, u8 flags,    const void *where),
 
-           TP_ARGS(skb, op, usage, mod_count, where),
+           TP_ARGS(skb, op, usage, mod_count, flags, where),
 
            TP_STRUCT__entry(
                    __field(struct sk_buff *,           skb             )
                    __field(enum rxrpc_skb_trace,       op              )
+                   __field(u8,                         flags           )
                    __field(int,                        usage           )
                    __field(int,                        mod_count       )
                    __field(const void *,               where           )
@@ -657,14 +652,16 @@ TRACE_EVENT(rxrpc_skb,
 
            TP_fast_assign(
                    __entry->skb = skb;
+                   __entry->flags = flags;
                    __entry->op = op;
                    __entry->usage = usage;
                    __entry->mod_count = mod_count;
                    __entry->where = where;
                           ),
 
-           TP_printk("s=%p %s u=%d m=%d p=%pSR",
+           TP_printk("s=%p %cx %s u=%d m=%d p=%pSR",
                      __entry->skb,
+                     __entry->flags & RXRPC_SKB_TX_BUFFER ? 'T' : 'R',
                      __print_symbolic(__entry->op, rxrpc_skb_traces),
                      __entry->usage,
                      __entry->mod_count,
index 1be0e798e36218c1d1bbbf9b46b42b7deb9c9e57..1fc8faa6e97306dfa95335ecba91b3777a843aa9 100644 (file)
@@ -569,7 +569,7 @@ __SYSCALL(__NR_semget, sys_semget)
 __SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
 #if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_semtimedop 192
-__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
+__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
 #endif
 #define __NR_semop 193
 __SYSCALL(__NR_semop, sys_semop)
index 4941628a4fb9b919720d3fd9a949828f2c397fc0..5ec88e7548a9f55e19c1b90f990f2f617ca3c723 100644 (file)
@@ -16,6 +16,7 @@
 #define CAPI_MSG_BASELEN               8
 #define CAPI_DATA_B3_REQ_LEN           (CAPI_MSG_BASELEN+4+4+2+2+2)
 #define CAPI_DATA_B3_RESP_LEN          (CAPI_MSG_BASELEN+4+2)
+#define CAPI_DISCONNECT_B3_RESP_LEN    (CAPI_MSG_BASELEN+4)
 
 /*----- CAPI commands -----*/
 #define CAPI_ALERT                 0x01
index 5c8a4d760ee3443c0292112f18852a807b037184..b5123ab8d54a8c7fd4e75d6ee00fad938c1ded81 100644 (file)
@@ -11,4 +11,9 @@ struct xt_nfacct_match_info {
        struct nf_acct  *nfacct;
 };
 
+struct xt_nfacct_match_info_v1 {
+       char            name[NFACCT_NAME_MAX];
+       struct nf_acct  *nfacct __attribute__((aligned(8)));
+};
+
 #endif /* _XT_NFACCT_MATCH_H */
index 094bb03b9cc2821e79cfd0e606350e9d3e221a02..2e927b3e9d6c717fcdb9bcadf736b7ede0c58f16 100644 (file)
@@ -229,4 +229,9 @@ struct prctl_mm_map {
 # define PR_PAC_APDBKEY                        (1UL << 3)
 # define PR_PAC_APGAKEY                        (1UL << 4)
 
+/* Tagged user address controls for arm64 */
+#define PR_SET_TAGGED_ADDR_CTRL                55
+#define PR_GET_TAGGED_ADDR_CTRL                56
+# define PR_TAGGED_ADDR_ENABLE         (1UL << 0)
+
 #endif /* _LINUX_PRCTL_H */
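
PR_SET_TAGGED_ADDR_CTRL / PR_GET_TAGGED_ADDR_CTRL back the new arm64 tagged address ABI: a task must explicitly opt in before it may pass tagged pointers to syscalls. From user space the opt-in looks roughly like the sketch below; the fallback defines are only for toolchains whose headers predate this uapi change:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
#endif

int main(void)
{
        /* Opt this task in to passing tagged pointers to syscalls (arm64). */
        if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
                perror("PR_SET_TAGGED_ADDR_CTRL");

        printf("tagged addr ctrl: %ld\n",
               (long)prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
        return 0;
}
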
index fd6b5f66e2c55432b05ad939477393a29659a5ac..cba368e558634ff7e7ea125487b95ac3afda2d28 100644 (file)
@@ -250,6 +250,7 @@ struct rds_info_rdma_connection {
        __u32           rdma_mr_max;
        __u32           rdma_mr_size;
        __u8            tos;
+       __u8            sl;
        __u32           cache_allocs;
 };
 
@@ -265,6 +266,7 @@ struct rds6_info_rdma_connection {
        __u32           rdma_mr_max;
        __u32           rdma_mr_size;
        __u8            tos;
+       __u8            sl;
        __u32           cache_allocs;
 };
 
index ac49a220cf2ad8ee3fa1bfa2b72c9b232c1cf0d6..85b809fc9f118414a33eb730c21b62c35a91dd12 100644 (file)
@@ -17,6 +17,7 @@
 #define P_ALL          0
 #define P_PID          1
 #define P_PGID         2
+#define P_PIDFD                3
 
 
 #endif /* _UAPI_LINUX_WAIT_H */
index bd7d650d4a996c7c10d2349a340f52c1f99c1e49..d96127ebc44e08526f0be1586098d2ecd52e7104 100644 (file)
@@ -30,6 +30,9 @@ config CC_CAN_LINK
 config CC_HAS_ASM_GOTO
        def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
 
+config TOOLS_SUPPORT_RELR
+       def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh)
+
 config CC_HAS_WARN_MAYBE_UNINITIALIZED
        def_bool $(cc-option,-Wmaybe-uninitialized)
        help
index 0fcf8e719b7647fe7e59141c153b2485c95c4399..5766c61aed0e538f2b255480c28e8b41333736af 100644 (file)
@@ -276,29 +276,7 @@ static inline int compat_ipc_parse_version(int *cmd)
        *cmd &= ~IPC_64;
        return version;
 }
-#endif
 
-/* for __ARCH_WANT_SYS_IPC */
-long ksys_semtimedop(int semid, struct sembuf __user *tsops,
-                    unsigned int nsops,
-                    const struct __kernel_timespec __user *timeout);
-long ksys_semget(key_t key, int nsems, int semflg);
-long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
-long ksys_msgget(key_t key, int msgflg);
-long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
-long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
-                long msgtyp, int msgflg);
-long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
-                int msgflg);
-long ksys_shmget(key_t key, size_t size, int shmflg);
-long ksys_shmdt(char __user *shmaddr);
-long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
-
-/* for CONFIG_ARCH_WANT_OLD_COMPAT_IPC */
-long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
-                           unsigned int nsops,
-                           const struct old_timespec32 __user *timeout);
-#ifdef CONFIG_COMPAT
 long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg);
 long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr);
 long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz,
@@ -306,6 +284,7 @@ long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz,
 long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp,
                       compat_ssize_t msgsz, int msgflg);
 long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr);
-#endif /* CONFIG_COMPAT */
+
+#endif
 
 #endif
index 8191a7db27776d6eb7eade68a0458927c2b82c96..66088a9e9b9e220b9b823999d4cc9b022e203c5f 100644 (file)
@@ -890,7 +890,8 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog,
 
 static int bpf_jit_blind_insn(const struct bpf_insn *from,
                              const struct bpf_insn *aux,
-                             struct bpf_insn *to_buff)
+                             struct bpf_insn *to_buff,
+                             bool emit_zext)
 {
        struct bpf_insn *to = to_buff;
        u32 imm_rnd = get_random_int();
@@ -1005,6 +1006,8 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
        case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+               if (emit_zext)
+                       *to++ = BPF_ZEXT_REG(BPF_REG_AX);
                *to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
                break;
 
@@ -1088,7 +1091,8 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
                    insn[1].code == 0)
                        memcpy(aux, insn, sizeof(aux));
 
-               rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
+               rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
+                                               clone->aux->verifier_zext);
                if (!rewritten)
                        continue;
 
index 5d141f16f6fa95dd21fbacdc6000521d26a394c0..272071e9112f3bc7ccfb111fc734238f9561ffde 100644 (file)
@@ -1707,20 +1707,26 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
        if (err)
                goto free_used_maps;
 
-       err = bpf_prog_new_fd(prog);
-       if (err < 0) {
-               /* failed to allocate fd.
-                * bpf_prog_put() is needed because the above
-                * bpf_prog_alloc_id() has published the prog
-                * to the userspace and the userspace may
-                * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
-                */
-               bpf_prog_put(prog);
-               return err;
-       }
-
+       /* Upon success of bpf_prog_alloc_id(), the BPF prog is
+        * effectively publicly exposed. However, retrieving via
+        * bpf_prog_get_fd_by_id() will take another reference,
+        * therefore it cannot be gone underneath us.
+        *
+        * Only for the time /after/ successful bpf_prog_new_fd()
+        * and before returning to userspace, we might just hold
+        * one reference and any parallel close on that fd could
+        * rip everything out. Hence, below notifications must
+        * happen before bpf_prog_new_fd().
+        *
+        * Also, any failure handling from this point onwards must
+        * be using bpf_prog_put() given the program is exposed.
+        */
        bpf_prog_kallsyms_add(prog);
        perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
+
+       err = bpf_prog_new_fd(prog);
+       if (err < 0)
+               bpf_prog_put(prog);
        return err;
 
 free_used_maps:
index c84d83f86141f82913bd540be1fcd87bd04613f6..c36a719fee6d34a18af09ac4d1e7d0b8c58d377f 100644 (file)
@@ -985,9 +985,6 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
        reg->smax_value = S64_MAX;
        reg->umin_value = 0;
        reg->umax_value = U64_MAX;
-
-       /* constant backtracking is enabled for root only for now */
-       reg->precise = capable(CAP_SYS_ADMIN) ? false : true;
 }
 
 /* Mark a register as having a completely unknown (scalar) value. */
@@ -1014,7 +1011,11 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
                        __mark_reg_not_init(regs + regno);
                return;
        }
-       __mark_reg_unknown(regs + regno);
+       regs += regno;
+       __mark_reg_unknown(regs);
+       /* constant backtracking is enabled for root without bpf2bpf calls */
+       regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
+                       true : false;
 }
 
 static void __mark_reg_not_init(struct bpf_reg_state *reg)
@@ -1771,16 +1772,21 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
                bitmap_from_u64(mask, stack_mask);
                for_each_set_bit(i, mask, 64) {
                        if (i >= func->allocated_stack / BPF_REG_SIZE) {
-                               /* This can happen if backtracking
-                                * is propagating stack precision where
-                                * caller has larger stack frame
-                                * than callee, but backtrack_insn() should
-                                * have returned -ENOTSUPP.
+                               /* the sequence of instructions:
+                                * 2: (bf) r3 = r10
+                                * 3: (7b) *(u64 *)(r3 -8) = r0
+                                * 4: (79) r4 = *(u64 *)(r10 -8)
+                                * doesn't contain jmps. It's backtracked
+                                * as a single block.
+                                * During backtracking insn 3 is not recognized as
+                                * stack access, so at the end of backtracking
+                                * stack slot fp-8 is still marked in stack_mask.
+                                * However the parent state may not have accessed
+                                * fp-8 and it's "unallocated" stack space.
+                                * In such case fallback to conservative.
                                 */
-                               verbose(env, "BUG spi %d stack_size %d\n",
-                                       i, func->allocated_stack);
-                               WARN_ONCE(1, "verifier backtracking bug");
-                               return -EFAULT;
+                               mark_all_scalars_precise(env, st);
+                               return 0;
                        }
 
                        if (func->stack[i].slot_type[0] != STACK_SPILL) {
index 753afbca549fdaeba9c133c192c1927d1d9c6f3c..8be1da1ebd9a4f3d4ee3f6038a85e18e8d5fa685 100644 (file)
@@ -5255,8 +5255,16 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
         * if the parent has to be frozen, the child has too.
         */
        cgrp->freezer.e_freeze = parent->freezer.e_freeze;
-       if (cgrp->freezer.e_freeze)
+       if (cgrp->freezer.e_freeze) {
+               /*
+                * Set the CGRP_FREEZE flag, so when a process will be
+                * attached to the child cgroup, it will become frozen.
+                * At this point the new cgroup is unpopulated, so we can
+                * consider it frozen immediately.
+                */
+               set_bit(CGRP_FREEZE, &cgrp->flags);
                set_bit(CGRP_FROZEN, &cgrp->flags);
+       }
 
        spin_lock_irq(&css_set_lock);
        for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
index 706113c6bebc3d984d0e5abe4acf83519cbec13b..8402b29c280f560ae18420f9a99fc3f614c40293 100644 (file)
@@ -305,7 +305,7 @@ void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
 
        if (unlikely(is_swiotlb_buffer(phys)))
-               swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+               swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
 }
 EXPORT_SYMBOL(dma_direct_unmap_page);
 
index 9de232229063bcf78b1d0ce942fe6a38a9e78dba..796a44f8ef5a9027bd70c3c89e01c44f1c01dd62 100644 (file)
@@ -444,7 +444,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                                   dma_addr_t tbl_dma_addr,
-                                  phys_addr_t orig_addr, size_t size,
+                                  phys_addr_t orig_addr,
+                                  size_t mapping_size,
+                                  size_t alloc_size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
 {
@@ -464,6 +466,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                pr_warn_once("%s is active and system is using DMA bounce buffers\n",
                             sme_active() ? "SME" : "SEV");
 
+       if (mapping_size > alloc_size) {
+               dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
+                             mapping_size, alloc_size);
+               return (phys_addr_t)DMA_MAPPING_ERROR;
+       }
+
        mask = dma_get_seg_boundary(hwdev);
 
        tbl_dma_addr &= mask;
@@ -471,8 +479,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
        offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
        /*
-        * Carefully handle integer overflow which can occur when mask == ~0UL.
-        */
+        * Carefully handle integer overflow which can occur when mask == ~0UL.
+        */
        max_slots = mask + 1
                    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
                    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
@@ -481,8 +489,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
         * For mappings greater than or equal to a page, we limit the stride
         * (and hence alignment) to a page size.
         */
-       nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-       if (size >= PAGE_SIZE)
+       nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+       if (alloc_size >= PAGE_SIZE)
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;
@@ -547,7 +555,7 @@ not_found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
        if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
                dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
-                        size, io_tlb_nslabs, tmp_io_tlb_used);
+                        alloc_size, io_tlb_nslabs, tmp_io_tlb_used);
        return (phys_addr_t)DMA_MAPPING_ERROR;
 found:
        io_tlb_used += nslots;
@@ -562,7 +570,7 @@ found:
                io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-               swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
+               swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
 
        return tlb_addr;
 }
@@ -571,11 +579,11 @@ found:
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
 void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-                             size_t size, enum dma_data_direction dir,
-                             unsigned long attrs)
+                             size_t mapping_size, size_t alloc_size,
+                             enum dma_data_direction dir, unsigned long attrs)
 {
        unsigned long flags;
-       int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+       int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
@@ -585,7 +593,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
        if (orig_addr != INVALID_PHYS_ADDR &&
            !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-               swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
+               swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);
 
        /*
         * Return the buffer to the free list by setting the corresponding
@@ -665,14 +673,14 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
 
        /* Oh well, have to allocate and map a bounce buffer. */
        *phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
-                       *phys, size, dir, attrs);
+                       *phys, size, size, dir, attrs);
        if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
                return false;
 
        /* Ensure that the address returned is DMA'ble */
        *dma_addr = __phys_to_dma(dev, *phys);
        if (unlikely(!dma_capable(dev, *dma_addr, size))) {
-               swiotlb_tbl_unmap_single(dev, *phys, size, dir,
+               swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
                        attrs | DMA_ATTR_SKIP_CPU_SYNC);
                return false;
        }
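
After this change swiotlb_tbl_map_single()/swiotlb_tbl_unmap_single() distinguish how many bytes are actually bounced (mapping_size) from how many bytes of the bounce pool are reserved (alloc_size), and reject mapping_size > alloc_size; the existing dma-direct callers simply pass the same value twice. A hedged caller sketch that pads the allocation to a page while copying only the real payload; the helper name is illustrative:

#include <linux/dma-direct.h>
#include <linux/mm.h>
#include <linux/swiotlb.h>

/* Illustrative only: bounce "size" bytes but reserve a page-aligned slot so
 * the device cannot see unrelated data sharing the last partial page. */
static phys_addr_t bounce_map(struct device *dev, phys_addr_t phys,
                              size_t size, enum dma_data_direction dir,
                              unsigned long attrs)
{
        return swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
                                      phys, size, PAGE_ALIGN(size), dir, attrs);
}
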
index c5cd852fe86bb4e6eda1466330d7d6ea5f5a5fac..3cc8416ec844a46cb967becb21de903bc019824f 100644 (file)
@@ -413,7 +413,7 @@ static int hw_breakpoint_parse(struct perf_event *bp,
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
-       struct arch_hw_breakpoint hw;
+       struct arch_hw_breakpoint hw = { };
        int err;
 
        err = reserve_bp_slot(bp);
@@ -461,7 +461,7 @@ int
 modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
                                bool check)
 {
-       struct arch_hw_breakpoint hw;
+       struct arch_hw_breakpoint hw = { };
        int err;
 
        err = hw_breakpoint_parse(bp, attr, &hw);
index 5b4a5dcce8f8328dd173ead148f4dc6a5fa0d8c4..22ab6a4bdc513ce04c059b81bad920dd0308b2c9 100644 (file)
@@ -1554,6 +1554,23 @@ end:
        return retval;
 }
 
+static struct pid *pidfd_get_pid(unsigned int fd)
+{
+       struct fd f;
+       struct pid *pid;
+
+       f = fdget(fd);
+       if (!f.file)
+               return ERR_PTR(-EBADF);
+
+       pid = pidfd_pid(f.file);
+       if (!IS_ERR(pid))
+               get_pid(pid);
+
+       fdput(f);
+       return pid;
+}
+
 static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
                          int options, struct rusage *ru)
 {
@@ -1576,19 +1593,32 @@ static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
                type = PIDTYPE_PID;
                if (upid <= 0)
                        return -EINVAL;
+
+               pid = find_get_pid(upid);
                break;
        case P_PGID:
                type = PIDTYPE_PGID;
-               if (upid <= 0)
+               if (upid < 0)
+                       return -EINVAL;
+
+               if (upid)
+                       pid = find_get_pid(upid);
+               else
+                       pid = get_task_pid(current, PIDTYPE_PGID);
+               break;
+       case P_PIDFD:
+               type = PIDTYPE_PID;
+               if (upid < 0)
                        return -EINVAL;
+
+               pid = pidfd_get_pid(upid);
+               if (IS_ERR(pid))
+                       return PTR_ERR(pid);
                break;
        default:
                return -EINVAL;
        }
 
-       if (type < PIDTYPE_MAX)
-               pid = find_get_pid(upid);
-
        wo.wo_type      = type;
        wo.wo_pid       = pid;
        wo.wo_flags     = options;
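
With pidfd_pid()/pidfd_get_pid() wired into kernel_waitid(), user space can wait on a pidfd directly by passing the new P_PIDFD idtype (added to the uapi wait.h earlier in this diff) with the pidfd as the id argument. A hedged user-space sketch, guarding against libc headers that do not yet define P_PIDFD:

#include <sys/wait.h>

#ifndef P_PIDFD
#define P_PIDFD 3
#endif

/* Illustrative helper: block until the process behind "pidfd" exits and
 * return its exit status. */
static int wait_on_pidfd(int pidfd)
{
        siginfo_t info = { 0 };

        if (waitid((idtype_t)P_PIDFD, (id_t)pidfd, &info, WEXITED) < 0)
                return -1;

        return info.si_status;
}
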
index 2852d0e76ea3b905693a454f36e21524c14a8b54..0ad65a932936d62ea5dd82f2e48c74bc31b2ffc6 100644 (file)
@@ -1690,6 +1690,14 @@ static inline void rcu_copy_process(struct task_struct *p)
 #endif /* #ifdef CONFIG_TASKS_RCU */
 }
 
+struct pid *pidfd_pid(const struct file *file)
+{
+       if (file->f_op == &pidfd_fops)
+               return file->private_data;
+
+       return ERR_PTR(-EBADF);
+}
+
 static int pidfd_release(struct inode *inode, struct file *file)
 {
        struct pid *pid = file->private_data;
@@ -2338,6 +2346,8 @@ struct mm_struct *copy_init_mm(void)
  *
  * It copies the process, and if successful kick-starts
  * it and waits for it to finish using the VM if required.
+ *
+ * args->exit_signal is expected to be checked for sanity by the caller.
  */
 long _do_fork(struct kernel_clone_args *args)
 {
@@ -2562,6 +2572,14 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
        if (copy_from_user(&args, uargs, size))
                return -EFAULT;
 
+       /*
+        * Verify that higher 32bits of exit_signal are unset and that
+        * it is a valid signal
+        */
+       if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
+                    !valid_signal(args.exit_signal)))
+               return -EINVAL;
+
        *kargs = (struct kernel_clone_args){
                .flags          = args.flags,
                .pidfd          = u64_to_user_ptr(args.pidfd),
index 95414ad3506a919e21561d057e54ba27b8eb53c2..98c04ca5fa43d6a4515fff52a2d956c250777083 100644 (file)
@@ -36,6 +36,8 @@ static void resend_irqs(unsigned long arg)
                irq = find_first_bit(irqs_resend, nr_irqs);
                clear_bit(irq, irqs_resend);
                desc = irq_to_desc(irq);
+               if (!desc)
+                       continue;
                local_irq_disable();
                desc->handle_irq(desc);
                local_irq_enable();
index df3008419a1d0a34229580d86a2aec2f0e305d2a..cdb3ffab128b64a44f331d452501c0544a16389f 100644 (file)
@@ -407,7 +407,9 @@ static bool jump_label_can_update(struct jump_entry *entry, bool init)
                return false;
 
        if (!kernel_text_address(jump_entry_code(entry))) {
-               WARN_ONCE(1, "can't patch jump_label at %pS", (void *)jump_entry_code(entry));
+               WARN_ONCE(!jump_entry_is_init(entry),
+                         "can't patch jump_label at %pS",
+                         (void *)jump_entry_code(entry));
                return false;
        }
 
index 95a260f9214b97e274ce4fdfe3ca1446b441ebbc..136ce049c4ad2f34f2b811c0e04c668ed511dba1 100644 (file)
@@ -263,8 +263,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
 {
        char namebuf[KSYM_NAME_LEN];
 
-       if (is_ksym_addr(addr))
-               return !!get_symbol_pos(addr, symbolsize, offset);
+       if (is_ksym_addr(addr)) {
+               get_symbol_pos(addr, symbolsize, offset);
+               return 1;
+       }
        return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
               !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
 }
index 010d578118d62760a9fd388dd5c236dd3fb632fc..df9f1fe5689b04006907283f798b91959c0b9d51 100644 (file)
@@ -5105,37 +5105,40 @@ out_unlock:
        return retval;
 }
 
-static int sched_read_attr(struct sched_attr __user *uattr,
-                          struct sched_attr *attr,
-                          unsigned int usize)
+/*
+ * Copy the kernel-side sched_attr structure (which might be larger
+ * than what user-space knows about) to user-space.
+ *
+ * Note that all cases are valid: user-space buffer can be larger or
+ * smaller than the kernel-space buffer. The usual case is that both
+ * have the same size.
+ */
+static int
+sched_attr_copy_to_user(struct sched_attr __user *uattr,
+                       struct sched_attr *kattr,
+                       unsigned int usize)
 {
-       int ret;
+       unsigned int ksize = sizeof(*kattr);
 
        if (!access_ok(uattr, usize))
                return -EFAULT;
 
        /*
-        * If we're handed a smaller struct than we know of,
-        * ensure all the unknown bits are 0 - i.e. old
-        * user-space does not get uncomplete information.
+        * sched_getattr() ABI forwards and backwards compatibility:
+        *
+        * If usize == ksize then we just copy everything to user-space and all is good.
+        *
+        * If usize < ksize then we only copy as much as user-space has space for,
+        * this keeps ABI compatibility as well. We skip the rest.
+        *
+        * If usize > ksize then user-space is using a newer version of the ABI,
+        * part of which the kernel doesn't know about. Just ignore it - tooling can
+        * detect the kernel's knowledge of attributes from the attr->size value
+        * which is set to ksize in this case.
         */
-       if (usize < sizeof(*attr)) {
-               unsigned char *addr;
-               unsigned char *end;
+       kattr->size = min(usize, ksize);
 
-               addr = (void *)attr + usize;
-               end  = (void *)attr + sizeof(*attr);
-
-               for (; addr < end; addr++) {
-                       if (*addr)
-                               return -EFBIG;
-               }
-
-               attr->size = usize;
-       }
-
-       ret = copy_to_user(uattr, attr, attr->size);
-       if (ret)
+       if (copy_to_user(uattr, kattr, kattr->size))
                return -EFAULT;
 
        return 0;
@@ -5145,20 +5148,18 @@ static int sched_read_attr(struct sched_attr __user *uattr,
  * sys_sched_getattr - similar to sched_getparam, but with sched_attr
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
- * @size: sizeof(attr) for fwd/bwd comp.
+ * @usize: sizeof(attr) that user-space knows about, for forwards and backwards compatibility.
  * @flags: for future extension.
  */
 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
-               unsigned int, size, unsigned int, flags)
+               unsigned int, usize, unsigned int, flags)
 {
-       struct sched_attr attr = {
-               .size = sizeof(struct sched_attr),
-       };
+       struct sched_attr kattr = { };
        struct task_struct *p;
        int retval;
 
-       if (!uattr || pid < 0 || size > PAGE_SIZE ||
-           size < SCHED_ATTR_SIZE_VER0 || flags)
+       if (!uattr || pid < 0 || usize > PAGE_SIZE ||
+           usize < SCHED_ATTR_SIZE_VER0 || flags)
                return -EINVAL;
 
        rcu_read_lock();
@@ -5171,25 +5172,24 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
        if (retval)
                goto out_unlock;
 
-       attr.sched_policy = p->policy;
+       kattr.sched_policy = p->policy;
        if (p->sched_reset_on_fork)
-               attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+               kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
        if (task_has_dl_policy(p))
-               __getparam_dl(p, &attr);
+               __getparam_dl(p, &kattr);
        else if (task_has_rt_policy(p))
-               attr.sched_priority = p->rt_priority;
+               kattr.sched_priority = p->rt_priority;
        else
-               attr.sched_nice = task_nice(p);
+               kattr.sched_nice = task_nice(p);
 
 #ifdef CONFIG_UCLAMP_TASK
-       attr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
-       attr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
+       kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
+       kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
 #endif
 
        rcu_read_unlock();
 
-       retval = sched_read_attr(uattr, &attr, size);
-       return retval;
+       return sched_attr_copy_to_user(uattr, &kattr, usize);
 
 out_unlock:
        rcu_read_unlock();
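
The compatibility rule documented in sched_attr_copy_to_user() comes down to copying min(usize, ksize) bytes and reporting that size back to the caller. An illustrative stand-alone sketch of the same negotiation (struct layout and names here are made up for the example):

    #include <stddef.h>
    #include <string.h>

    struct attr_like {
            unsigned int size;     /* filled with the number of bytes copied */
            char payload[120];
    };

    /* Copy whichever of the two sizes is smaller, never overrunning either side. */
    static size_t copy_with_abi_compat(void *ubuf, size_t usize, struct attr_like *kattr)
    {
            size_t ksize = sizeof(*kattr);
            size_t n = usize < ksize ? usize : ksize;

            kattr->size = (unsigned int)n;   /* tell the caller how much it got */
            memcpy(ubuf, kattr, n);
            return n;
    }
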
index bc9cfeaac8bd26464c55a42e93b105967ea92c3b..500f5db0de0ba86a331586d4189e3b299cb6148e 100644 (file)
@@ -4470,6 +4470,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
        if (likely(cfs_rq->runtime_remaining > 0))
                return;
 
+       if (cfs_rq->throttled)
+               return;
        /*
         * if we're unable to extend our runtime we resched so that the active
         * hierarchy can be throttled
@@ -4673,6 +4675,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
                if (!cfs_rq_throttled(cfs_rq))
                        goto next;
 
+               /* By the above check, this should never be true */
+               SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
+
                runtime = -cfs_rq->runtime_remaining + 1;
                if (runtime > remaining)
                        runtime = remaining;
index 534fec266a334b299846034e878c271619fe5c71..c4da1ef56fdfcd7a522e403600c9786efc26bd94 100644 (file)
@@ -3678,8 +3678,11 @@ static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
 
 static struct pid *pidfd_to_pid(const struct file *file)
 {
-       if (file->f_op == &pidfd_fops)
-               return file->private_data;
+       struct pid *pid;
+
+       pid = pidfd_pid(file);
+       if (!IS_ERR(pid))
+               return pid;
 
        return tgid_pidfd_to_pid(file);
 }
index 2969304c29fe3628ca46a2db6ca824588c12f72b..ec48396b494375aa22dc8a3d5570cd477db4b429 100644 (file)
 #ifndef PAC_RESET_KEYS
 # define PAC_RESET_KEYS(a, b)  (-EINVAL)
 #endif
+#ifndef SET_TAGGED_ADDR_CTRL
+# define SET_TAGGED_ADDR_CTRL(a)       (-EINVAL)
+#endif
+#ifndef GET_TAGGED_ADDR_CTRL
+# define GET_TAGGED_ADDR_CTRL()                (-EINVAL)
+#endif
 
 /*
  * this is where the system-wide overflow UID and GID are defined, for
@@ -2492,6 +2498,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                        return -EINVAL;
                error = PAC_RESET_KEYS(me, arg2);
                break;
+       case PR_SET_TAGGED_ADDR_CTRL:
+               if (arg3 || arg4 || arg5)
+                       return -EINVAL;
+               error = SET_TAGGED_ADDR_CTRL(arg2);
+               break;
+       case PR_GET_TAGGED_ADDR_CTRL:
+               if (arg2 || arg3 || arg4 || arg5)
+                       return -EINVAL;
+               error = GET_TAGGED_ADDR_CTRL();
+               break;
        default:
                error = -EINVAL;
                break;
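
These prctl() options plumb the arm64 tagged address ABI switch through the generic syscall. A hedged user-space sketch of turning it on; the constants are assumed to match the updated <linux/prctl.h> and are defined locally only as a fallback:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_TAGGED_ADDR_CTRL
    # define PR_SET_TAGGED_ADDR_CTRL 55          /* assumed uapi values */
    # define PR_GET_TAGGED_ADDR_CTRL 56
    # define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
    #endif

    /* Opt the calling task in to tagged pointers and read the setting back. */
    static int enable_tagged_addr(void)
    {
            if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) {
                    perror("PR_SET_TAGGED_ADDR_CTRL");
                    return -1;
            }
            return prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
    }
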
index eca34503f178ece3f25a469ed3d73f186fd98c9e..f9821a3374e9dd4b81ed0f7cfe8c436d7a58b4c5 100644 (file)
@@ -3095,6 +3095,14 @@ t_probe_next(struct seq_file *m, loff_t *pos)
                hnd = &iter->probe_entry->hlist;
 
        hash = iter->probe->ops.func_hash->filter_hash;
+
+       /*
+        * A probe being registered may temporarily have an empty hash
+        * and it's at the end of the func_probes list.
+        */
+       if (!hash || hash == EMPTY_HASH)
+               return NULL;
+
        size = 1 << hash->size_bits;
 
  retry:
@@ -4320,12 +4328,21 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
 
        mutex_unlock(&ftrace_lock);
 
+       /*
+        * Note, there's a small window here where the func_hash->filter_hash
+        * may be NULL or empty. Need to be careful when reading the loop.
+        */
        mutex_lock(&probe->ops.func_hash->regex_lock);
 
        orig_hash = &probe->ops.func_hash->filter_hash;
        old_hash = *orig_hash;
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
 
+       if (!hash) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
        ret = ftrace_match_records(hash, glob, strlen(glob));
 
        /* Nothing found? */
index 525a97fbbc603fa476a27b23d49dff7b7932a112..563e80f9006a438442dcfb1736e846aca7959b63 100644 (file)
@@ -1567,9 +1567,9 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
 
 /**
  * update_max_tr_single - only copy one trace over, and reset the rest
- * @tr - tracer
- * @tsk - task with the latency
- * @cpu - the cpu of the buffer to copy.
+ * @tr: tracer
+ * @tsk: task with the latency
+ * @cpu: the cpu of the buffer to copy.
  *
  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
  */
@@ -1767,7 +1767,7 @@ static void __init apply_trace_boot_options(void);
 
 /**
  * register_tracer - register a tracer with the ftrace system.
- * @type - the plugin for the tracer
+ * @type: the plugin for the tracer
  *
  * Register a new plugin tracer.
  */
@@ -2230,9 +2230,9 @@ static bool tracing_record_taskinfo_skip(int flags)
 /**
  * tracing_record_taskinfo - record the task info of a task
  *
- * @task  - task to record
- * @flags - TRACE_RECORD_CMDLINE for recording comm
- *        - TRACE_RECORD_TGID for recording tgid
+ * @task:  task to record
+ * @flags: TRACE_RECORD_CMDLINE for recording comm
+ *         TRACE_RECORD_TGID for recording tgid
  */
 void tracing_record_taskinfo(struct task_struct *task, int flags)
 {
@@ -2258,10 +2258,10 @@ void tracing_record_taskinfo(struct task_struct *task, int flags)
 /**
  * tracing_record_taskinfo_sched_switch - record task info for sched_switch
  *
- * @prev - previous task during sched_switch
- * @next - next task during sched_switch
- * @flags - TRACE_RECORD_CMDLINE for recording comm
- *          TRACE_RECORD_TGID for recording tgid
+ * @prev: previous task during sched_switch
+ * @next: next task during sched_switch
+ * @flags: TRACE_RECORD_CMDLINE for recording comm
+ *         TRACE_RECORD_TGID for recording tgid
  */
 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
                                          struct task_struct *next, int flags)
@@ -3072,7 +3072,9 @@ static void trace_printk_start_stop_comm(int enabled)
 
 /**
  * trace_vbprintk - write binary msg to tracing buffer
- *
+ * @ip:    The address of the caller
+ * @fmt:   The string format to write to the buffer
+ * @args:  Arguments for @fmt
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
index c7506bc81b757f776ff690be99749ea1b8338e5f..648930823b571083c1a95000937962e565162eb2 100644 (file)
@@ -787,7 +787,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
        return ret;
 }
 
-static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
+int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
 {
        char *event = NULL, *sub = NULL, *match;
        int ret;
index dbef0d1350754c5404995811f31358bc39087400..fb6bfbc5bf862005e30f12b777c561e0bf09666a 100644 (file)
@@ -895,7 +895,8 @@ void trace_probe_cleanup(struct trace_probe *tp)
        for (i = 0; i < tp->nr_args; i++)
                traceprobe_free_probe_arg(&tp->args[i]);
 
-       kfree(call->class->system);
+       if (call->class)
+               kfree(call->class->system);
        kfree(call->name);
        kfree(call->print_fmt);
 }
index f33d66fc0e86d5741e42b352bcb9e3a94d944412..4e6b1c3e4c9814405922d47ee2d9ffe1eb3c5d3d 100644 (file)
@@ -631,6 +631,9 @@ config SBITMAP
 config PARMAN
        tristate "parman" if COMPILE_TEST
 
+config OBJAGG
+       tristate "objagg" if COMPILE_TEST
+
 config STRING_SELFTEST
        tristate "Test string functions"
 
@@ -653,6 +656,3 @@ config GENERIC_LIB_CMPDI2
 
 config GENERIC_LIB_UCMPDI2
        bool
-
-config OBJAGG
-       tristate "objagg" if COMPILE_TEST
index 117ad0e7fbf4913e8cd9d3f38d3a81bfe2420a65..70dab9ac78273f4cc50da6805dd9dfcf24ead42c 100644 (file)
@@ -68,7 +68,8 @@ int __kfifo_init(struct __kfifo *fifo, void *buffer,
 {
        size /= esize;
 
-       size = roundup_pow_of_two(size);
+       if (!is_power_of_2(size))
+               size = rounddown_pow_of_two(size);
 
        fifo->in = 0;
        fifo->out = 0;
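
Rounding a non-power-of-two size down instead of up keeps the fifo inside the buffer the caller actually allocated: for a 1000-element buffer, roundup_pow_of_two(1000) is 1024, past the end of the allocation, while rounddown_pow_of_two(1000) is 512. A tiny stand-alone illustration of that arithmetic (the helper below is a plain reimplementation for the example, not the kernel's):

    #include <stdio.h>

    /* Largest power of two that is <= n (for n > 0). */
    static unsigned int round_down_pow2(unsigned int n)
    {
            unsigned int p = 1;

            while (p * 2 <= n)
                    p *= 2;
            return p;
    }

    int main(void)
    {
            printf("%u\n", round_down_pow2(1000));   /* prints 512 */
            return 0;
    }
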
index feea48fd1a0dd6ae7913b6fad19cc52e20664785..905027574e5d8041d1394dc4e5582061ed084e2f 100644 (file)
@@ -35,7 +35,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
        struct logic_pio_hwaddr *range;
        resource_size_t start;
        resource_size_t end;
-       resource_size_t mmio_sz = 0;
+       resource_size_t mmio_end = 0;
        resource_size_t iio_sz = MMIO_UPPER_LIMIT;
        int ret = 0;
 
@@ -46,7 +46,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
        end = new_range->hw_start + new_range->size;
 
        mutex_lock(&io_range_mutex);
-       list_for_each_entry_rcu(range, &io_range_list, list) {
+       list_for_each_entry(range, &io_range_list, list) {
                if (range->fwnode == new_range->fwnode) {
                        /* range already there */
                        goto end_register;
@@ -56,7 +56,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
                        /* for MMIO ranges we need to check for overlap */
                        if (start >= range->hw_start + range->size ||
                            end < range->hw_start) {
-                               mmio_sz += range->size;
+                               mmio_end = range->io_start + range->size;
                        } else {
                                ret = -EFAULT;
                                goto end_register;
@@ -69,16 +69,16 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
 
        /* range not registered yet, check for available space */
        if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
-               if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) {
+               if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
                        /* if it's too big check if 64K space can be reserved */
-                       if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
+                       if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
                                ret = -E2BIG;
                                goto end_register;
                        }
                        new_range->size = SZ_64K;
                        pr_warn("Requested IO range too big, new size set to 64K\n");
                }
-               new_range->io_start = mmio_sz;
+               new_range->io_start = mmio_end;
        } else if (new_range->flags == LOGIC_PIO_INDIRECT) {
                if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
                        ret = -E2BIG;
@@ -98,6 +98,20 @@ end_register:
        return ret;
 }
 
+/**
+ * logic_pio_unregister_range - unregister a logical PIO range for a host
+ * @range: pointer to the IO range which has been already registered.
+ *
+ * Unregister a previously-registered IO range node.
+ */
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
+{
+       mutex_lock(&io_range_mutex);
+       list_del_rcu(&range->list);
+       mutex_unlock(&io_range_mutex);
+       synchronize_rcu();
+}
+
 /**
  * find_io_range_by_fwnode - find logical PIO range for given FW node
  * @fwnode: FW node handle associated with logical PIO range
@@ -108,26 +122,38 @@ end_register:
  */
 struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
 {
-       struct logic_pio_hwaddr *range;
+       struct logic_pio_hwaddr *range, *found_range = NULL;
 
+       rcu_read_lock();
        list_for_each_entry_rcu(range, &io_range_list, list) {
-               if (range->fwnode == fwnode)
-                       return range;
+               if (range->fwnode == fwnode) {
+                       found_range = range;
+                       break;
+               }
        }
-       return NULL;
+       rcu_read_unlock();
+
+       return found_range;
 }
 
 /* Return a registered range given an input PIO token */
 static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
 {
-       struct logic_pio_hwaddr *range;
+       struct logic_pio_hwaddr *range, *found_range = NULL;
 
+       rcu_read_lock();
        list_for_each_entry_rcu(range, &io_range_list, list) {
-               if (in_range(pio, range->io_start, range->size))
-                       return range;
+               if (in_range(pio, range->io_start, range->size)) {
+                       found_range = range;
+                       break;
+               }
        }
-       pr_err("PIO entry token %lx invalid\n", pio);
-       return NULL;
+       rcu_read_unlock();
+
+       if (!found_range)
+               pr_err("PIO entry token 0x%lx invalid\n", pio);
+
+       return found_range;
 }
 
 /**
@@ -180,14 +206,23 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
 {
        struct logic_pio_hwaddr *range;
 
+       rcu_read_lock();
        list_for_each_entry_rcu(range, &io_range_list, list) {
                if (range->flags != LOGIC_PIO_CPU_MMIO)
                        continue;
-               if (in_range(addr, range->hw_start, range->size))
-                       return addr - range->hw_start + range->io_start;
+               if (in_range(addr, range->hw_start, range->size)) {
+                       unsigned long cpuaddr;
+
+                       cpuaddr = addr - range->hw_start + range->io_start;
+
+                       rcu_read_unlock();
+                       return cpuaddr;
+               }
        }
-       pr_err("addr %llx not registered in io_range_list\n",
-              (unsigned long long) addr);
+       rcu_read_unlock();
+
+       pr_err("addr %pa not registered in io_range_list\n", &addr);
+
        return ~0UL;
 }
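
Because logic_pio_unregister_range() now removes entries with list_del_rcu() followed by synchronize_rcu(), every lockless walker of io_range_list has to run under rcu_read_lock(), as in the hunks above. A generic sketch of that reader/writer pairing, with illustrative types rather than the logic_pio ones:

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct item { struct list_head list; int key; };

    /* Reader: traverse without blocking writers, never touch freed memory. */
    static bool reader_has_key(struct list_head *head, int key)
    {
            struct item *it;
            bool found = false;

            rcu_read_lock();
            list_for_each_entry_rcu(it, head, list) {
                    if (it->key == key) {
                            found = true;
                            break;
                    }
            }
            rcu_read_unlock();
            return found;
    }

    /* Writer: unpublish under the lock, wait out readers, then free. */
    static void writer_remove(struct mutex *lock, struct item *it)
    {
            mutex_lock(lock);
            list_del_rcu(&it->list);
            mutex_unlock(lock);
            synchronize_rcu();
            kfree(it);
    }
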
 
index 798275a51887ca82b378ee45c86671894c4bdf74..26de020aae7b407c7ff5b1ae01eea92ad18d4e9e 100644 (file)
@@ -124,7 +124,8 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
 struct page *balloon_page_alloc(void)
 {
        struct page *page = alloc_page(balloon_mapping_gfp_mask() |
-                                      __GFP_NOMEMALLOC | __GFP_NORETRY);
+                                      __GFP_NOMEMALLOC | __GFP_NORETRY |
+                                      __GFP_NOWARN);
        return page;
 }
 EXPORT_SYMBOL_GPL(balloon_page_alloc);
index 26e2999af608d3776e609b213d522345fc793792..9ec5e12486a7805c8250f9a814be5036785c66b1 100644 (file)
@@ -752,15 +752,13 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
        /* Update memcg */
        __mod_memcg_state(memcg, idx, val);
 
+       /* Update lruvec */
+       __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
+
        x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
        if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
                struct mem_cgroup_per_node *pi;
 
-               /*
-                * Batch local counters to keep them in sync with
-                * the hierarchical ones.
-                */
-               __this_cpu_add(pn->lruvec_stat_local->count[idx], x);
                for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
                        atomic_long_add(x, &pi->lruvec_stat[idx]);
                x = 0;
@@ -3260,37 +3258,49 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
        }
 }
 
-static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
+static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only)
 {
        unsigned long stat[MEMCG_NR_STAT];
        struct mem_cgroup *mi;
        int node, cpu, i;
+       int min_idx, max_idx;
 
-       for (i = 0; i < MEMCG_NR_STAT; i++)
+       if (slab_only) {
+               min_idx = NR_SLAB_RECLAIMABLE;
+               max_idx = NR_SLAB_UNRECLAIMABLE;
+       } else {
+               min_idx = 0;
+               max_idx = MEMCG_NR_STAT;
+       }
+
+       for (i = min_idx; i < max_idx; i++)
                stat[i] = 0;
 
        for_each_online_cpu(cpu)
-               for (i = 0; i < MEMCG_NR_STAT; i++)
-                       stat[i] += raw_cpu_read(memcg->vmstats_percpu->stat[i]);
+               for (i = min_idx; i < max_idx; i++)
+                       stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
 
        for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
-               for (i = 0; i < MEMCG_NR_STAT; i++)
+               for (i = min_idx; i < max_idx; i++)
                        atomic_long_add(stat[i], &mi->vmstats[i]);
 
+       if (!slab_only)
+               max_idx = NR_VM_NODE_STAT_ITEMS;
+
        for_each_node(node) {
                struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
                struct mem_cgroup_per_node *pi;
 
-               for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+               for (i = min_idx; i < max_idx; i++)
                        stat[i] = 0;
 
                for_each_online_cpu(cpu)
-                       for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
-                               stat[i] += raw_cpu_read(
-                                       pn->lruvec_stat_cpu->count[i]);
+                       for (i = min_idx; i < max_idx; i++)
+                               stat[i] += per_cpu(
+                                       pn->lruvec_stat_cpu->count[i], cpu);
 
                for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
-                       for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+                       for (i = min_idx; i < max_idx; i++)
                                atomic_long_add(stat[i], &pi->lruvec_stat[i]);
        }
 }
@@ -3306,8 +3316,8 @@ static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
 
        for_each_online_cpu(cpu)
                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
-                       events[i] += raw_cpu_read(
-                               memcg->vmstats_percpu->events[i]);
+                       events[i] += per_cpu(memcg->vmstats_percpu->events[i],
+                                            cpu);
 
        for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
@@ -3363,7 +3373,14 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
        if (!parent)
                parent = root_mem_cgroup;
 
+       /*
+        * Deactivate and reparent kmem_caches. Then flush percpu
+        * slab statistics to have precise values at the parent and
+        * all ancestor levels. It's required to keep slab stats
+        * accurate after the reparenting of kmem_caches.
+        */
        memcg_deactivate_kmem_caches(memcg, parent);
+       memcg_flush_percpu_vmstats(memcg, true);
 
        kmemcg_id = memcg->kmemcg_id;
        BUG_ON(kmemcg_id < 0);
@@ -4740,7 +4757,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
         * Flush percpu vmstats and vmevents to guarantee the value correctness
         * on parent's and all ancestor levels.
         */
-       memcg_flush_percpu_vmstats(memcg);
+       memcg_flush_percpu_vmstats(memcg, false);
        memcg_flush_percpu_vmevents(memcg);
        for_each_node(node)
                free_mem_cgroup_per_node_info(memcg, node);
index c77d1e3761a7f191f5e274b281a9ca132b64fc6f..a6c5d0b28321c383037aab176bfbfca35755a320 100644 (file)
@@ -3220,6 +3220,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 
 #ifdef CONFIG_MEMCG
 
+/* Only used by soft limit reclaim. Do not reuse for anything else. */
 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
                                                gfp_t gfp_mask, bool noswap,
                                                pg_data_t *pgdat,
@@ -3235,7 +3236,8 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
        };
        unsigned long lru_pages;
 
-       set_task_reclaim_state(current, &sc.reclaim_state);
+       WARN_ON_ONCE(!current->reclaim_state);
+
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 
@@ -3253,7 +3255,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 
        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
-       set_task_reclaim_state(current, NULL);
        *nr_scanned = sc.nr_scanned;
 
        return sc.nr_reclaimed;
index e31cd9bd4ed561bcf4f6788da18f4759c7f61b96..75b7962439ff8b21b9cacb6a542d21ad8313af3b 100644 (file)
@@ -1406,6 +1406,7 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
                                 * should freak out.
                                 */
                                WARN(1, "Z3fold is experiencing kref problems\n");
+                               z3fold_page_unlock(zhdr);
                                return false;
                        }
                        z3fold_page_unlock(zhdr);
index 08def3a0d2007c3030384e4cd4d04f83b5ac7c74..e98bb6ab4f7e76c8b3f6d29c165b43fd6a6da964 100644 (file)
@@ -2412,7 +2412,9 @@ struct zs_pool *zs_create_pool(const char *name)
        if (!pool->name)
                goto err;
 
+#ifdef CONFIG_COMPACTION
        init_waitqueue_head(&pool->migration_wait);
+#endif
 
        if (create_cache(pool))
                goto err;
index 240ed70912d6a014c0a48280741989133034396c..d78938e3e0085d9cff138b805148a0e77de3f654 100644 (file)
@@ -277,17 +277,23 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
  * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
- * @tvlv_len: tvlv length of the previously considered OGM
+ * @ogm_packet: potential OGM in buffer
  *
  * Return: true if there is enough space for another OGM, false otherwise.
  */
-static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
-                                     __be16 tvlv_len)
+static bool
+batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+                         const struct batadv_ogm_packet *ogm_packet)
 {
        int next_buff_pos = 0;
 
-       next_buff_pos += buff_pos + BATADV_OGM_HLEN;
-       next_buff_pos += ntohs(tvlv_len);
+       /* check if there is enough space for the header */
+       next_buff_pos += buff_pos + sizeof(*ogm_packet);
+       if (next_buff_pos > packet_len)
+               return false;
+
+       /* check if there is enough space for the optional TVLV */
+       next_buff_pos += ntohs(ogm_packet->tvlv_len);
 
        return (next_buff_pos <= packet_len) &&
               (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -315,7 +321,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
 
        /* adjust all flags and log packets */
        while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
-                                        batadv_ogm_packet->tvlv_len)) {
+                                        batadv_ogm_packet)) {
                /* we might have aggregated direct link packets with an
                 * ordinary base packet
                 */
@@ -1704,7 +1710,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
 
        /* unpack the aggregated packets and process them one by one */
        while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
-                                        ogm_packet->tvlv_len)) {
+                                        ogm_packet)) {
                batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
 
                ogm_offset += BATADV_OGM_HLEN;
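
The rewritten helper checks lengths in two steps: first that a full OGM header still fits in the buffer, and only then that the header plus its TVLV payload does, so tvlv_len is never read from truncated data. A small stand-alone sketch of the pattern (the struct and names are placeholders, not batman-adv API; the real helper additionally caps against the maximum aggregation size):

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct record_hdr { uint16_t tvlv_len; /* big endian on the wire */ };

    /* 'h' is assumed to point at buf + pos inside a buffer of 'total' bytes. */
    static bool next_record_fits(size_t pos, size_t total, const struct record_hdr *h)
    {
            size_t next = pos + sizeof(*h);

            if (next > total)                 /* not even a whole header left */
                    return false;
            next += ntohs(h->tvlv_len);       /* header is in bounds, safe to read */
            return next <= total;
    }
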
index fad95ef64e01a9670ed66f9a81658718a14fc716..bc06e3cdfa84f63cc003867d4453a88249d3fb18 100644 (file)
@@ -631,17 +631,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
  * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
- * @tvlv_len: tvlv length of the previously considered OGM
+ * @ogm2_packet: potential OGM2 in buffer
  *
  * Return: true if there is enough space for another OGM, false otherwise.
  */
-static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
-                                    __be16 tvlv_len)
+static bool
+batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
+                        const struct batadv_ogm2_packet *ogm2_packet)
 {
        int next_buff_pos = 0;
 
-       next_buff_pos += buff_pos + BATADV_OGM2_HLEN;
-       next_buff_pos += ntohs(tvlv_len);
+       /* check if there is enough space for the header */
+       next_buff_pos += buff_pos + sizeof(*ogm2_packet);
+       if (next_buff_pos > packet_len)
+               return false;
+
+       /* check if there is enough space for the optional TVLV */
+       next_buff_pos += ntohs(ogm2_packet->tvlv_len);
 
        return (next_buff_pos <= packet_len) &&
               (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -818,7 +824,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
        ogm_packet = (struct batadv_ogm2_packet *)skb->data;
 
        while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
-                                       ogm_packet->tvlv_len)) {
+                                       ogm_packet)) {
                batadv_v_ogm_process(skb, ogm_offset, if_incoming);
 
                ogm_offset += BATADV_OGM2_HLEN;
index 6f08fd122a8ddea43600c856a6be171dc7625d9c..7e052d6f759b659dbee0edd7546c367cf14b8e9e 100644 (file)
@@ -164,7 +164,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
 {
        struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
 
-       return attr ? nla_get_u32(attr) : 0;
+       return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
 }
 
 /**
index cdb00c2ef242387c0d6824e0ec07c3a19b85eb8f..c1d3a303d97fbda890e546755535199af0420d2e 100644 (file)
@@ -5660,11 +5660,6 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
                return send_conn_param_neg_reply(hdev, handle,
                                                 HCI_ERROR_UNKNOWN_CONN_ID);
 
-       if (min < hcon->le_conn_min_interval ||
-           max > hcon->le_conn_max_interval)
-               return send_conn_param_neg_reply(hdev, handle,
-                                                HCI_ERROR_INVALID_LL_PARAMS);
-
        if (hci_check_conn_params(min, max, latency, timeout))
                return send_conn_param_neg_reply(hdev, handle,
                                                 HCI_ERROR_INVALID_LL_PARAMS);
index dfc1edb168b78b2ed43575cdd6087c45b75a0f82..da7fdbdf9c41af026969f6045513d3e7685434bc 100644 (file)
@@ -5305,14 +5305,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
 
        memset(&rsp, 0, sizeof(rsp));
 
-       if (min < hcon->le_conn_min_interval ||
-           max > hcon->le_conn_max_interval) {
-               BT_DBG("requested connection interval exceeds current bounds.");
-               err = -EINVAL;
-       } else {
-               err = hci_check_conn_params(min, max, latency, to_multiplier);
-       }
-
+       err = hci_check_conn_params(min, max, latency, to_multiplier);
        if (err)
                rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
        else
index bf6acd34234daa8a1da4aff928c169837bcf58c1..63f9c08625f055b7779dd471707f16cf6f7b4ba1 100644 (file)
@@ -437,7 +437,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
        struct nlmsghdr *nlh;
        struct nlattr *nest;
 
-       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
        if (!nlh)
                return -EMSGSIZE;
 
index d3f9592f4ff8d8435861cd95dd0d130e8fa22fbb..af7800103e51e5aee724ff314697d17ec583aeba 100644 (file)
@@ -496,6 +496,10 @@ static unsigned int br_nf_pre_routing(void *priv,
                if (!brnet->call_ip6tables &&
                    !br_opt_get(br, BROPT_NF_CALL_IP6TABLES))
                        return NF_ACCEPT;
+               if (!ipv6_mod_enabled()) {
+                       pr_warn_once("Module ipv6 is disabled, so call_ip6tables is not supported.");
+                       return NF_DROP;
+               }
 
                nf_bridge_pull_encap_header_rcsum(skb);
                return br_nf_pre_routing_ipv6(priv, skb, state);
index c8177a89f52c362e70e4bb617db78bb000613bd8..4096d8a74a2bd7474da1e5ee980d840126f90c83 100644 (file)
@@ -221,7 +221,7 @@ unsigned int ebt_do_table(struct sk_buff *skb,
                        return NF_DROP;
                }
 
-               ADD_COUNTER(*(counter_base + i), 1, skb->len);
+               ADD_COUNTER(*(counter_base + i), skb->len, 1);
 
                /* these should only watch: not modify, nor tell us
                 * what to do with the packet
@@ -959,8 +959,8 @@ static void get_counters(const struct ebt_counter *oldcounters,
                        continue;
                counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
                for (i = 0; i < nentries; i++)
-                       ADD_COUNTER(counters[i], counter_base[i].pcnt,
-                                   counter_base[i].bcnt);
+                       ADD_COUNTER(counters[i], counter_base[i].bcnt,
+                                   counter_base[i].pcnt);
        }
 }
 
@@ -1280,7 +1280,7 @@ static int do_update_counters(struct net *net, const char *name,
 
        /* we add to the counters of the first cpu */
        for (i = 0; i < num_counters; i++)
-               ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt);
+               ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
 
        write_unlock_bh(&t->lock);
        ret = 0;
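
The swapped arguments line ebtables up with the shared counter helper, whose second parameter is the byte count and third the packet count; previously bytes were being added to the packet counter and vice versa. Roughly, as a paraphrase rather than a verbatim copy of the x_tables header:

    /* Assumed shape of the shared helper the hunks above align with. */
    #define ADD_COUNTER(c, b, p) do { (c).bcnt += (b); (c).pcnt += (p); } while (0)

    /* Per matched frame the corrected call is therefore:            */
    /*   ADD_COUNTER(*(counter_base + i), skb->len, 1);   bytes first, then one packet */
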
index 1804e867f7151c77284f15b3681e72728c73cc5b..7c9e92b2f806c8979d603e0624b72df141cc2900 100644 (file)
@@ -53,7 +53,7 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
                        goto err;
 
                br_vlan_get_proto(br_dev, &p_proto);
-               nft_reg_store16(dest, p_proto);
+               nft_reg_store16(dest, htons(p_proto));
                return;
        }
        default:
index 5d6724cee38f90d1cbafcd6e66eb731394dad43c..4f75df40fb121f3c955f916fac5a3bb7f64d7f40 100644 (file)
@@ -136,8 +136,10 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
        if (key) {
                kfree(key->key);
                key->key = NULL;
-               crypto_free_sync_skcipher(key->tfm);
-               key->tfm = NULL;
+               if (key->tfm) {
+                       crypto_free_sync_skcipher(key->tfm);
+                       key->tfm = NULL;
+               }
        }
 }
 
index 0891f499c1bb7cdb85490a681b21895f2206bcba..5156c0edebe8097403c6b78f711df5a373fd2e6d 100644 (file)
@@ -8758,6 +8758,8 @@ int register_netdevice(struct net_device *dev)
        ret = notifier_to_errno(ret);
        if (ret) {
                rollback_registered(dev);
+               rcu_barrier();
+
                dev->reg_state = NETREG_UNREGISTERED;
        }
        /*
index 7878f918b8c057b7b90ca0afcf2d5773cfb55e15..4c6a252d42122d711f975f50f5c912e6d3fbcaff 100644 (file)
@@ -8757,13 +8757,13 @@ sk_reuseport_is_valid_access(int off, int size,
                return size == size_default;
 
        /* Fields that allow narrowing */
-       case offsetof(struct sk_reuseport_md, eth_protocol):
+       case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
                if (size < FIELD_SIZEOF(struct sk_buff, protocol))
                        return false;
                /* fall through */
-       case offsetof(struct sk_reuseport_md, ip_protocol):
-       case offsetof(struct sk_reuseport_md, bind_inany):
-       case offsetof(struct sk_reuseport_md, len):
+       case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
+       case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
+       case bpf_ctx_range(struct sk_reuseport_md, len):
                bpf_ctx_record_field_size(info, size_default);
                return bpf_ctx_narrow_access_ok(off, size, size_default);
 
index 3e6fedb57bc100e8ce002deddbade59371a73065..2470b4b404e62e625b17714f27fa7aba58eef9c5 100644 (file)
@@ -142,8 +142,8 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
                mutex_unlock(&flow_dissector_mutex);
                return -ENOENT;
        }
-       bpf_prog_put(attached);
        RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
+       bpf_prog_put(attached);
        mutex_unlock(&flow_dissector_mutex);
        return 0;
 }
index 2cf27da1baebf48e6ced1a29ed5fc112a3def9a6..849380a622ef9d7fc2e8970c342245e15997fbda 100644 (file)
@@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work)
                txq = netdev_get_tx_queue(dev, q_index);
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (netif_xmit_frozen_or_stopped(txq) ||
-                   netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
+                   !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
                        skb_queue_head(&npinfo->txq, skb);
                        HARD_TX_UNLOCK(dev, txq);
                        local_irq_restore(flags);
@@ -335,7 +335,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 
                                HARD_TX_UNLOCK(dev, txq);
 
-                               if (status == NETDEV_TX_OK)
+                               if (dev_xmit_complete(status))
                                        break;
 
                        }
@@ -352,7 +352,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 
        }
 
-       if (status != NETDEV_TX_OK) {
+       if (!dev_xmit_complete(status)) {
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work,0);
        }
index 0338820ee0ec67ce5b399b629984a0d05033e8c5..982d8d12830e411a2f262da3dffbc407ab40ff64 100644 (file)
@@ -3664,6 +3664,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
        int pos;
        int dummy;
 
+       if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
+           (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
+               /* gso_size is untrusted, and we have a frag_list with a linear
+                * non head_frag head.
+                *
+                * (we assume checking the first list_skb member suffices;
+                * i.e. if any of the list_skb members has a non head_frag
+                * head, then the first one does too).
+                *
+                * If head_skb's headlen does not fit requested gso_size, it
+                * means that the frag_list members do NOT terminate on exact
+                * gso_size boundaries. Hence we cannot perform skb_frag_t page
+                * sharing. Therefore we must fallback to copying the frag_list
+                * skbs; we do so by disabling SG.
+                */
+               if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
+                       features &= ~NETIF_F_SG;
+       }
+
        __skb_push(head_skb, doffset);
        proto = skb_network_protocol(head_skb, &dummy);
        if (unlikely(!proto))
index 6d08553f885cdb44ed3e53c2231abb7f48bf309c..545fac19a711f261fc6cdbdb54a3f08cb9705987 100644 (file)
@@ -3287,16 +3287,17 @@ static __init int net_inuse_init(void)
 
 core_initcall(net_inuse_init);
 
-static void assign_proto_idx(struct proto *prot)
+static int assign_proto_idx(struct proto *prot)
 {
        prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
 
        if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
                pr_err("PROTO_INUSE_NR exhausted\n");
-               return;
+               return -ENOSPC;
        }
 
        set_bit(prot->inuse_idx, proto_inuse_idx);
+       return 0;
 }
 
 static void release_proto_idx(struct proto *prot)
@@ -3305,8 +3306,9 @@ static void release_proto_idx(struct proto *prot)
                clear_bit(prot->inuse_idx, proto_inuse_idx);
 }
 #else
-static inline void assign_proto_idx(struct proto *prot)
+static inline int assign_proto_idx(struct proto *prot)
 {
+       return 0;
 }
 
 static inline void release_proto_idx(struct proto *prot)
@@ -3355,6 +3357,8 @@ static int req_prot_init(const struct proto *prot)
 
 int proto_register(struct proto *prot, int alloc_slab)
 {
+       int ret = -ENOBUFS;
+
        if (alloc_slab) {
                prot->slab = kmem_cache_create_usercopy(prot->name,
                                        prot->obj_size, 0,
@@ -3391,20 +3395,27 @@ int proto_register(struct proto *prot, int alloc_slab)
        }
 
        mutex_lock(&proto_list_mutex);
+       ret = assign_proto_idx(prot);
+       if (ret) {
+               mutex_unlock(&proto_list_mutex);
+               goto out_free_timewait_sock_slab_name;
+       }
        list_add(&prot->node, &proto_list);
-       assign_proto_idx(prot);
        mutex_unlock(&proto_list_mutex);
-       return 0;
+       return ret;
 
 out_free_timewait_sock_slab_name:
-       kfree(prot->twsk_prot->twsk_slab_name);
+       if (alloc_slab && prot->twsk_prot)
+               kfree(prot->twsk_prot->twsk_slab_name);
 out_free_request_sock_slab:
-       req_prot_cleanup(prot->rsk_prot);
+       if (alloc_slab) {
+               req_prot_cleanup(prot->rsk_prot);
 
-       kmem_cache_destroy(prot->slab);
-       prot->slab = NULL;
+               kmem_cache_destroy(prot->slab);
+               prot->slab = NULL;
+       }
 out:
-       return -ENOBUFS;
+       return ret;
 }
 EXPORT_SYMBOL(proto_register);
 
index 1330a7442e5b1e54d80d0b675f7356742bcdfbec..50916f9bc4f2e5b7814943e7528f46d5df44b502 100644 (file)
@@ -656,6 +656,7 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
                                   struct sock *sk, u64 flags)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+       struct inet_connection_sock *icsk = inet_csk(sk);
        u32 key_size = map->key_size, hash;
        struct bpf_htab_elem *elem, *elem_new;
        struct bpf_htab_bucket *bucket;
@@ -666,6 +667,8 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
        WARN_ON_ONCE(!rcu_read_lock_held());
        if (unlikely(flags > BPF_EXIST))
                return -EINVAL;
+       if (unlikely(icsk->icsk_ulp_data))
+               return -EINVAL;
 
        link = sk_psock_init_link();
        if (!link)
index e94bb02a56295ec2db34ab423a8c7c890df0a696..4f1d4aa5fb38d989a9c81f32dfce3f31bbc1fa47 100644 (file)
@@ -120,7 +120,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
        int err = 0;
        long vm_wait = 0;
        long current_timeo = *timeo_p;
-       bool noblock = (*timeo_p ? false : true);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
        if (sk_stream_memory_free(sk))
@@ -133,11 +132,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 
                if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                        goto do_error;
-               if (!*timeo_p) {
-                       if (noblock)
-                               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-                       goto do_nonblock;
-               }
+               if (!*timeo_p)
+                       goto do_eagain;
                if (signal_pending(current))
                        goto do_interrupted;
                sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -169,7 +165,13 @@ out:
 do_error:
        err = -EPIPE;
        goto out;
-do_nonblock:
+do_eagain:
+       /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
+        * be generated later.
+        * When TCP receives ACK packets that make room, tcp_check_space()
+        * only calls tcp_new_space() if SOCK_NOSPACE is set.
+        */
+       set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
        err = -EAGAIN;
        goto out;
 do_interrupted:
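
The new comment is about edge-triggered writers: if -EAGAIN is returned without setting SOCK_NOSPACE, tcp_check_space() never calls tcp_new_space(), no EPOLLOUT edge fires when the window opens, and an EPOLLET sender can stall. A sketch of the user-space pattern that depends on this behaviour (illustrative only; 'fd' is assumed to be registered on 'epfd' with EPOLLOUT | EPOLLET):

    #include <errno.h>
    #include <stddef.h>
    #include <sys/epoll.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    static int send_all_edge_triggered(int epfd, int fd, const char *buf, size_t len)
    {
            size_t off = 0;

            while (off < len) {
                    ssize_t n = send(fd, buf + off, len - off, MSG_DONTWAIT);

                    if (n > 0) {
                            off += (size_t)n;
                            continue;
                    }
                    if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
                            struct epoll_event ev;

                            /* Relies on the kernel raising EPOLLOUT later;
                             * that is exactly what arming SOCK_NOSPACE buys.
                             */
                            if (epoll_wait(epfd, &ev, 1, -1) < 0)
                                    return -1;
                            continue;
                    }
                    return -1;
            }
            return 0;
    }
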
index 6ebbd799c4eb61648f2466dc3d7498c7b1b954a4..67a1bc635a7b4342d0a7e935272776edf1295f7a 100644 (file)
@@ -28,6 +28,7 @@
  *
  * RSV - VID[9]:
  *     To be used for further expansion of SWITCH_ID or for other purposes.
+ *     Must be transmitted as zero and ignored on receive.
  *
  * SWITCH_ID - VID[8:6]:
  *     Index of switch within DSA tree. Must be between 0 and
@@ -35,6 +36,7 @@
  *
  * RSV - VID[5:4]:
  *     To be used for further expansion of PORT or for other purposes.
+ *     Must be transmitted as zero and ignored on receive.
  *
  * PORT - VID[3:0]:
  *     Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1.
index dacbd58e1799225b389afddca14e5923164202de..badc5cfe4dc63e1660696443e7f01e5707e81206 100644 (file)
@@ -1092,7 +1092,7 @@ static struct packet_type ieee802154_packet_type = {
 
 static int __init af_ieee802154_init(void)
 {
-       int rc = -EINVAL;
+       int rc;
 
        rc = proto_register(&ieee802154_raw_prot, 1);
        if (rc)
index 2db089e10ba0ee524ce50cec26c19137002a9992..0913a090b2bf4232d82b6294051d4924052f66b1 100644 (file)
@@ -1582,7 +1582,7 @@ failure:
 }
 
 int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
-                    unsigned char *flags, bool skip_oif)
+                    u8 rt_family, unsigned char *flags, bool skip_oif)
 {
        if (nhc->nhc_flags & RTNH_F_DEAD)
                *flags |= RTNH_F_DEAD;
@@ -1613,7 +1613,7 @@ int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
                /* if gateway family does not match nexthop family
                 * gateway is encoded as RTA_VIA
                 */
-               if (nhc->nhc_gw_family != nhc->nhc_family) {
+               if (rt_family != nhc->nhc_gw_family) {
                        int alen = sizeof(struct in6_addr);
                        struct nlattr *nla;
                        struct rtvia *via;
@@ -1654,7 +1654,7 @@ EXPORT_SYMBOL_GPL(fib_nexthop_info);
 
 #if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
 int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
-                   int nh_weight)
+                   int nh_weight, u8 rt_family)
 {
        const struct net_device *dev = nhc->nhc_dev;
        struct rtnexthop *rtnh;
@@ -1667,7 +1667,7 @@ int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
        rtnh->rtnh_hops = nh_weight - 1;
        rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;
 
-       if (fib_nexthop_info(skb, nhc, &flags, true) < 0)
+       if (fib_nexthop_info(skb, nhc, rt_family, &flags, true) < 0)
                goto nla_put_failure;
 
        rtnh->rtnh_flags = flags;
@@ -1693,13 +1693,14 @@ static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
                goto nla_put_failure;
 
        if (unlikely(fi->nh)) {
-               if (nexthop_mpath_fill_node(skb, fi->nh) < 0)
+               if (nexthop_mpath_fill_node(skb, fi->nh, AF_INET) < 0)
                        goto nla_put_failure;
                goto mp_end;
        }
 
        for_nexthops(fi) {
-               if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight) < 0)
+               if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
+                                   AF_INET) < 0)
                        goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
                if (nh->nh_tclassid &&
@@ -1775,7 +1776,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
                unsigned char flags = 0;
 
-               if (fib_nexthop_info(skb, nhc, &flags, false) < 0)
+               if (fib_nexthop_info(skb, nhc, AF_INET, &flags, false) < 0)
                        goto nla_put_failure;
 
                rtm->rtm_flags = flags;
index 2b2b3d291ab061a916ce9aa0371a865cf7eb8107..1ab2fb6bb37d7d8de416d5c7cf84efe21dc8faf3 100644 (file)
@@ -2145,7 +2145,7 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
 
                if (filter->dump_exceptions) {
                        err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
-                                                &i_fa, s_fa);
+                                                &i_fa, s_fa, flags);
                        if (err < 0)
                                goto stop;
                }
index 1510e951f4511a351ba0afa7330731bb16331fc9..4298aae74e0ee6266286fa49e9cfda602b2a9cd0 100644 (file)
@@ -582,7 +582,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
 
        if (!rt)
                goto out;
-       net = dev_net(rt->dst.dev);
+
+       if (rt->dst.dev)
+               net = dev_net(rt->dst.dev);
+       else if (skb_in->dev)
+               net = dev_net(skb_in->dev);
+       else
+               goto out;
 
        /*
         *      Find the original header. It is expected to be valid, of course.
@@ -902,7 +908,7 @@ static bool icmp_redirect(struct sk_buff *skb)
                return false;
        }
 
-       icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway);
+       icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
        return true;
 }
 
index 180f6896b98baade47bf9e2a4b208043fbc6cca3..480d0b22db1a6d0cb36688fff01966fdb7375d1f 100644 (file)
@@ -1475,7 +1475,7 @@ EXPORT_SYMBOL(__ip_mc_inc_group);
 
 void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 {
-       __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE);
+       __ip_mc_inc_group(in_dev, addr, GFP_KERNEL);
 }
 EXPORT_SYMBOL(ip_mc_inc_group);
 
@@ -2197,7 +2197,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
        iml->sflist = NULL;
        iml->sfmode = mode;
        rcu_assign_pointer(inet->mc_list, iml);
-       __ip_mc_inc_group(in_dev, addr, mode);
+       ____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
        err = 0;
 done:
        return err;
index 517300d587a7df0110430a85c08917104626f858..b6a6f18c3dd144d2feb4b1841845bca5535092fc 100644 (file)
@@ -2728,7 +2728,8 @@ EXPORT_SYMBOL_GPL(ip_route_output_flow);
 /* called with rcu_read_lock held */
 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
                        struct rtable *rt, u32 table_id, struct flowi4 *fl4,
-                       struct sk_buff *skb, u32 portid, u32 seq)
+                       struct sk_buff *skb, u32 portid, u32 seq,
+                       unsigned int flags)
 {
        struct rtmsg *r;
        struct nlmsghdr *nlh;
@@ -2736,7 +2737,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
        u32 error;
        u32 metrics[RTAX_MAX];
 
-       nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
+       nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
        if (!nlh)
                return -EMSGSIZE;
 
@@ -2860,7 +2861,7 @@ nla_put_failure:
 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
                            struct netlink_callback *cb, u32 table_id,
                            struct fnhe_hash_bucket *bucket, int genid,
-                           int *fa_index, int fa_start)
+                           int *fa_index, int fa_start, unsigned int flags)
 {
        int i;
 
@@ -2891,7 +2892,7 @@ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
                        err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
                                           table_id, NULL, skb,
                                           NETLINK_CB(cb->skb).portid,
-                                          cb->nlh->nlmsg_seq);
+                                          cb->nlh->nlmsg_seq, flags);
                        if (err)
                                return err;
 next:
@@ -2904,7 +2905,7 @@ next:
 
 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
                       u32 table_id, struct fib_info *fi,
-                      int *fa_index, int fa_start)
+                      int *fa_index, int fa_start, unsigned int flags)
 {
        struct net *net = sock_net(cb->skb->sk);
        int nhsel, genid = fnhe_genid(net);
@@ -2922,7 +2923,8 @@ int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
                err = 0;
                if (bucket)
                        err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
-                                              genid, fa_index, fa_start);
+                                              genid, fa_index, fa_start,
+                                              flags);
                rcu_read_unlock();
                if (err)
                        return err;
@@ -3183,7 +3185,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                                    fl4.flowi4_tos, res.fi, 0);
        } else {
                err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
-                                  NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
+                                  NETLINK_CB(in_skb).portid,
+                                  nlh->nlmsg_seq, 0);
        }
        if (err < 0)
                goto errout_rcu;
index 77b485d60b9d0e00edc4e2f0d6c5bb3a9460b23b..61082065b26a068975c411b74eb46739ab0632ca 100644 (file)
@@ -935,6 +935,22 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
        return mss_now;
 }
 
+/* In some cases, both sendpage() and sendmsg() could have added
+ * an skb to the write queue, but failed adding payload on it.
+ * We need to remove it to consume less memory, but more
+ * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
+ * users.
+ */
+static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
+{
+       if (skb && !skb->len) {
+               tcp_unlink_write_queue(skb, sk);
+               if (tcp_write_queue_empty(sk))
+                       tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
+               sk_wmem_free_skb(sk, skb);
+       }
+}
+
 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
                         size_t size, int flags)
 {
@@ -1064,6 +1080,7 @@ out:
        return copied;
 
 do_error:
+       tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
        if (copied)
                goto out;
 out_err:
@@ -1388,18 +1405,11 @@ out_nopush:
        sock_zerocopy_put(uarg);
        return copied + copied_syn;
 
+do_error:
+       skb = tcp_write_queue_tail(sk);
 do_fault:
-       if (!skb->len) {
-               tcp_unlink_write_queue(skb, sk);
-               /* It is the one place in all of TCP, except connection
-                * reset, where we can be unlinking the send_head.
-                */
-               if (tcp_write_queue_empty(sk))
-                       tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
-               sk_wmem_free_skb(sk, skb);
-       }
+       tcp_remove_empty_skb(sk, skb);
 
-do_error:
        if (copied + copied_syn)
                goto out;
 out_err:
index c21e8a22fb3bb39d06eb3ee7eb4cfae5066b6f48..8a1cd93dbb09d2f06ff6e2f39b496ca28341093f 100644 (file)
@@ -266,7 +266,7 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
 
 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
 {
-       tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+       tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 }
 
 static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
index 979520e46e33c16ba2237cb36ef7b4dab94f6546..8a645f304e6c0a581e0ad26d7d4083491fab0578 100644 (file)
@@ -2053,7 +2053,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
                if (len <= skb->len)
                        break;
 
-               if (unlikely(TCP_SKB_CB(skb)->eor))
+               if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
                        return false;
 
                len -= skb->len;
@@ -2170,6 +2170,7 @@ static int tcp_mtu_probe(struct sock *sk)
                         * we need to propagate it to the new skb.
                         */
                        TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
+                       tcp_skb_collapse_tstamp(nskb, skb);
                        tcp_unlink_write_queue(skb, sk);
                        sk_wmem_free_skb(sk, skb);
                } else {
index dc73888c7859a2d0d499fabd14aa8b71e12b0e77..6a576ff92c399fbb686c839c525e700f2579b1a9 100644 (file)
@@ -478,7 +478,7 @@ static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
        if (!idev) {
                idev = ipv6_add_dev(dev);
                if (IS_ERR(idev))
-                       return NULL;
+                       return idev;
        }
 
        if (dev->flags&IFF_UP)
@@ -1045,7 +1045,8 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
        int err = 0;
 
        if (addr_type == IPV6_ADDR_ANY ||
-           addr_type & IPV6_ADDR_MULTICAST ||
+           (addr_type & IPV6_ADDR_MULTICAST &&
+            !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
            (!(idev->dev->flags & IFF_LOOPBACK) &&
             !netif_is_l3_master(idev->dev) &&
             addr_type & IPV6_ADDR_LOOPBACK))
@@ -2465,8 +2466,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
        ASSERT_RTNL();
 
        idev = ipv6_find_idev(dev);
-       if (!idev)
-               return ERR_PTR(-ENOBUFS);
+       if (IS_ERR(idev))
+               return idev;
 
        if (idev->cnf.disable_ipv6)
                return ERR_PTR(-EACCES);
@@ -3158,7 +3159,7 @@ static void init_loopback(struct net_device *dev)
        ASSERT_RTNL();
 
        idev = ipv6_find_idev(dev);
-       if (!idev) {
+       if (IS_ERR(idev)) {
                pr_debug("%s: add_dev failed\n", __func__);
                return;
        }
@@ -3373,7 +3374,7 @@ static void addrconf_sit_config(struct net_device *dev)
         */
 
        idev = ipv6_find_idev(dev);
-       if (!idev) {
+       if (IS_ERR(idev)) {
                pr_debug("%s: add_dev failed\n", __func__);
                return;
        }
@@ -3398,7 +3399,7 @@ static void addrconf_gre_config(struct net_device *dev)
        ASSERT_RTNL();
 
        idev = ipv6_find_idev(dev);
-       if (!idev) {
+       if (IS_ERR(idev)) {
                pr_debug("%s: add_dev failed\n", __func__);
                return;
        }
@@ -4772,8 +4773,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
                         IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
 
        idev = ipv6_find_idev(dev);
-       if (!idev)
-               return -ENOBUFS;
+       if (IS_ERR(idev))
+               return PTR_ERR(idev);
 
        if (!ipv6_allow_optimistic_dad(net, idev))
                cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
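
The addrconf.c hunks above switch ipv6_find_idev() and its callers from a bare NULL check to the kernel's ERR_PTR convention, so the actual errno coming out of ipv6_add_dev() is propagated instead of being collapsed into -ENOBUFS at every call site. A minimal userspace model of that convention, using the simplified macro definitions below (the real ones live in include/linux/err.h):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model of the kernel's ERR_PTR convention: pointer values in
 * the last page of the address space encode negative errno codes.
 */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical lookup modelled on ipv6_find_idev(): on failure it now
 * returns the encoded error instead of collapsing it to NULL.
 */
static void *find_or_add(int fail)
{
        static char dev[] = "device-private-data";

        if (fail)
                return ERR_PTR(-ENOBUFS);
        return dev;
}

int main(void)
{
        void *idev = find_or_add(1);

        if (IS_ERR(idev))
                printf("lookup failed: %ld\n", PTR_ERR(idev));
        return 0;
}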
index 7f3f13c3791636f92c5c46aa6235c69682dea151..eaa4c2cc2fbb2e784b5641135d28dd38a0616421 100644 (file)
@@ -787,14 +787,15 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
        if (pmc) {
                im->idev = pmc->idev;
                if (im->mca_sfmode == MCAST_INCLUDE) {
-                       im->mca_tomb = pmc->mca_tomb;
-                       im->mca_sources = pmc->mca_sources;
+                       swap(im->mca_tomb, pmc->mca_tomb);
+                       swap(im->mca_sources, pmc->mca_sources);
                        for (psf = im->mca_sources; psf; psf = psf->sf_next)
                                psf->sf_crcount = idev->mc_qrv;
                } else {
                        im->mca_crcount = idev->mc_qrv;
                }
                in6_dev_put(pmc->idev);
+               ip6_mc_clear_src(pmc);
                kfree(pmc);
        }
        spin_unlock_bh(&im->mca_lock);
index 87d2d8c1db7cc86940d6216b50d3bdadfb588937..98ac32b49d8c9a4fe6158d8c9e7b759373483709 100644 (file)
@@ -223,7 +223,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net)
        return 0;
 }
 
-static void __net_init ping_v6_proc_exit_net(struct net *net)
+static void __net_exit ping_v6_proc_exit_net(struct net *net)
 {
        remove_proc_entry("icmp6", net->proc_net);
 }
index fd059e08785abb0d06320da870ffe8dd9499f8f9..546088e50815158a81ebe953a3a1ec820243c367 100644 (file)
@@ -4388,13 +4388,14 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
        struct fib6_config cfg = {
                .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
                .fc_ifindex = idev->dev->ifindex,
-               .fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP,
+               .fc_flags = RTF_UP | RTF_NONEXTHOP,
                .fc_dst = *addr,
                .fc_dst_len = 128,
                .fc_protocol = RTPROT_KERNEL,
                .fc_nlinfo.nl_net = net,
                .fc_ignore_dev_down = true,
        };
+       struct fib6_info *f6i;
 
        if (anycast) {
                cfg.fc_type = RTN_ANYCAST;
@@ -4404,7 +4405,10 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
                cfg.fc_flags |= RTF_LOCAL;
        }
 
-       return ip6_route_info_create(&cfg, gfp_flags, NULL);
+       f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
+       if (!IS_ERR(f6i))
+               f6i->dst_nocount = true;
+       return f6i;
 }
 
 /* remove deleted ip from prefsrc entries */
@@ -5325,11 +5329,11 @@ static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
        if (nexthop_is_multipath(nh)) {
                struct nlattr *mp;
 
-               mp = nla_nest_start(skb, RTA_MULTIPATH);
+               mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
                if (!mp)
                        goto nla_put_failure;
 
-               if (nexthop_mpath_fill_node(skb, nh))
+               if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
                        goto nla_put_failure;
 
                nla_nest_end(skb, mp);
@@ -5337,7 +5341,7 @@ static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
                struct fib6_nh *fib6_nh;
 
                fib6_nh = nexthop_fib6_nh(nh);
-               if (fib_nexthop_info(skb, &fib6_nh->nh_common,
+               if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
                                     flags, false) < 0)
                        goto nla_put_failure;
        }
@@ -5466,13 +5470,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                        goto nla_put_failure;
 
                if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
-                                   rt->fib6_nh->fib_nh_weight) < 0)
+                                   rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
                        goto nla_put_failure;
 
                list_for_each_entry_safe(sibling, next_sibling,
                                         &rt->fib6_siblings, fib6_siblings) {
                        if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
-                                           sibling->fib6_nh->fib_nh_weight) < 0)
+                                           sibling->fib6_nh->fib_nh_weight,
+                                           AF_INET6) < 0)
                                goto nla_put_failure;
                }
 
@@ -5489,7 +5494,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
 
                rtm->rtm_flags |= nh_flags;
        } else {
-               if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common,
+               if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
                                     &nh_flags, false) < 0)
                        goto nla_put_failure;
 
index 4d458067d80d17d774ef1cda0a31a28045781841..4105c97c7ba1f4bbae8bd66c977047992a648a4c 100644 (file)
@@ -1529,7 +1529,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
        struct sta_info *sta;
        struct ieee80211_sub_if_data *sdata;
        int err;
-       int layer2_update;
 
        if (params->vlan) {
                sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
@@ -1546,6 +1545,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
        if (is_multicast_ether_addr(mac))
                return -EINVAL;
 
+       if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
+           sdata->vif.type == NL80211_IFTYPE_STATION &&
+           !sdata->u.mgd.associated)
+               return -EINVAL;
+
        sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
        if (!sta)
                return -ENOMEM;
@@ -1553,10 +1557,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
        if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
                sta->sta.tdls = true;
 
-       if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
-           !sdata->u.mgd.associated)
-               return -EINVAL;
-
        err = sta_apply_parameters(local, sta, params);
        if (err) {
                sta_info_free(local, sta);
@@ -1572,18 +1572,12 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
            test_sta_flag(sta, WLAN_STA_ASSOC))
                rate_control_rate_init(sta);
 
-       layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
-               sdata->vif.type == NL80211_IFTYPE_AP;
-
        err = sta_info_insert_rcu(sta);
        if (err) {
                rcu_read_unlock();
                return err;
        }
 
-       if (layer2_update)
-               cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
-
        rcu_read_unlock();
 
        return 0;
@@ -1681,10 +1675,11 @@ static int ieee80211_change_station(struct wiphy *wiphy,
                sta->sdata = vlansdata;
                ieee80211_check_fast_xmit(sta);
 
-               if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+               if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
                        ieee80211_vif_inc_num_mcast(sta->sdata);
-
-               cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
+                       cfg80211_send_layer2_update(sta->sdata->dev,
+                                                   sta->sta.addr);
+               }
        }
 
        err = sta_apply_parameters(local, sta, params);
index 3c1ab870fefe5b147a82680749ab99275108c367..768d14c9a7161c56c78d11253bb6e152c999ecdb 100644 (file)
@@ -2447,11 +2447,13 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
                      skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
                     sdata->control_port_over_nl80211)) {
                struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-               bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
+               bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
 
                cfg80211_rx_control_port(dev, skb, noencrypt);
                dev_kfree_skb(skb);
        } else {
+               memset(skb->cb, 0, sizeof(skb->cb));
+
                /* deliver to local stack */
                if (rx->napi)
                        napi_gro_receive(rx->napi, skb);
@@ -2546,8 +2548,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
 
        if (skb) {
                skb->protocol = eth_type_trans(skb, dev);
-               memset(skb->cb, 0, sizeof(skb->cb));
-
                ieee80211_deliver_skb_to_local_stack(skb, rx);
        }
 
index 95eb8220e2e47def69ad381539330ec1d47bd599..5fb368cc26338dc048212367c6233a6018f5b091 100644 (file)
@@ -1979,6 +1979,10 @@ int sta_info_move_state(struct sta_info *sta,
                        ieee80211_check_fast_xmit(sta);
                        ieee80211_check_fast_rx(sta);
                }
+               if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+                   sta->sdata->vif.type == NL80211_IFTYPE_AP)
+                       cfg80211_send_layer2_update(sta->sdata->dev,
+                                                   sta->sta.addr);
                break;
        default:
                break;
index d25e91d7bdc18526301c6228f5943aec7f9becc8..44b675016393a450e7ec7c73e2dfb514ade4e5f9 100644 (file)
@@ -133,12 +133,12 @@ static int mpls_xmit(struct sk_buff *skb)
        mpls_stats_inc_outucastpkts(out_dev, skb);
 
        if (rt) {
-               if (rt->rt_gw_family == AF_INET)
-                       err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
-                                        skb);
-               else if (rt->rt_gw_family == AF_INET6)
+               if (rt->rt_gw_family == AF_INET6)
                        err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6,
                                         skb);
+               else
+                       err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
+                                        skb);
        } else if (rt6) {
                if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
                        /* 6PE (RFC 4798) */
index 5c3fad8cba575376df16ff60609e054b0b041255..0187e65176c05c1ac93a18d77a12323e6a692576 100644 (file)
@@ -54,7 +54,7 @@ static void ncsi_cmd_build_header(struct ncsi_pkt_hdr *h,
        checksum = ncsi_calculate_checksum((unsigned char *)h,
                                           sizeof(*h) + nca->payload);
        pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) +
-                   nca->payload);
+                   ALIGN(nca->payload, 4));
        *pchecksum = htonl(checksum);
 }
 
@@ -309,14 +309,21 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
 
 int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca)
 {
+       struct ncsi_cmd_handler *nch = NULL;
        struct ncsi_request *nr;
+       unsigned char type;
        struct ethhdr *eh;
-       struct ncsi_cmd_handler *nch = NULL;
        int i, ret;
 
+       /* Use OEM generic handler for Netlink request */
+       if (nca->req_flags == NCSI_REQ_FLAG_NETLINK_DRIVEN)
+               type = NCSI_PKT_CMD_OEM;
+       else
+               type = nca->type;
+
        /* Search for the handler */
        for (i = 0; i < ARRAY_SIZE(ncsi_cmd_handlers); i++) {
-               if (ncsi_cmd_handlers[i].type == nca->type) {
+               if (ncsi_cmd_handlers[i].type == type) {
                        if (ncsi_cmd_handlers[i].handler)
                                nch = &ncsi_cmd_handlers[i];
                        else
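
The checksum change above (and the matching one in the response-validation file that follows) locates the trailing 32-bit NC-SI checksum after the payload padding, at sizeof(header) + ALIGN(payload, 4), rather than directly after the unpadded payload length. A small standalone illustration of that offset arithmetic (the header size and payload lengths below are made-up values, not taken from the NC-SI specification):

#include <stdio.h>

/* Round x up to the next multiple of a (a power of two), as the kernel's
 * ALIGN() macro does.
 */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int hdr_len = 16;                 /* illustrative header size */
        unsigned int payloads[] = { 4, 5, 7, 8 };
        unsigned int i;

        /* The checksum sits after the payload padding, so its offset is
         * header + ALIGN(payload, 4), not header + payload.
         */
        for (i = 0; i < sizeof(payloads) / sizeof(payloads[0]); i++)
                printf("payload %u -> checksum at offset %u\n",
                       payloads[i], hdr_len + ALIGN(payloads[i], 4));
        return 0;
}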
index 7581bf9198858864fb4854a5e5c8a845e8093387..d876bd55f356159aa8862d8369d056151dd668e6 100644 (file)
@@ -47,7 +47,8 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
        if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED ||
            ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) {
                netdev_dbg(nr->ndp->ndev.dev,
-                          "NCSI: non zero response/reason code\n");
+                          "NCSI: non zero response/reason code %04xh, %04xh\n",
+                           ntohs(h->code), ntohs(h->reason));
                return -EPERM;
        }
 
@@ -55,7 +56,7 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
         * sender doesn't support checksum according to NCSI
         * specification.
         */
-       pchecksum = (__be32 *)((void *)(h + 1) + payload - 4);
+       pchecksum = (__be32 *)((void *)(h + 1) + ALIGN(payload, 4) - 4);
        if (ntohl(*pchecksum) == 0)
                return 0;
 
@@ -63,7 +64,9 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
                                           sizeof(*h) + payload - 4);
 
        if (*pchecksum != htonl(checksum)) {
-               netdev_dbg(nr->ndp->ndev.dev, "NCSI: checksum mismatched\n");
+               netdev_dbg(nr->ndp->ndev.dev,
+                          "NCSI: checksum mismatched; recd: %08x calc: %08x\n",
+                          *pchecksum, htonl(checksum));
                return -EINVAL;
        }
 
index 0ecb3e289ef25860211ce046e173c8665aed37d9..8d96738b7dfd6bb0527cdb20e50fb2658a25ff8a 100644 (file)
@@ -322,7 +322,7 @@ static int find_pattern(const char *data, size_t dlen,
                i++;
        }
 
-       pr_debug("Skipped up to `%c'!\n", skip);
+       pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
 
        *numoff = i;
        *numlen = getnum(data + i, dlen - i, cmd, term, numoff);
index 6aa01eb6fe99ce8cdb62dec08c2b37a3701a6218..e2d13cd1887562c80361042c7fe72ad50b8034d5 100644 (file)
@@ -553,10 +553,8 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                goto nla_put_failure;
 
        if (ctnetlink_dump_status(skb, ct) < 0 ||
-           ctnetlink_dump_timeout(skb, ct) < 0 ||
            ctnetlink_dump_acct(skb, ct, type) < 0 ||
            ctnetlink_dump_timestamp(skb, ct) < 0 ||
-           ctnetlink_dump_protoinfo(skb, ct) < 0 ||
            ctnetlink_dump_helpinfo(skb, ct) < 0 ||
            ctnetlink_dump_mark(skb, ct) < 0 ||
            ctnetlink_dump_secctx(skb, ct) < 0 ||
@@ -568,6 +566,11 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
            ctnetlink_dump_ct_synproxy(skb, ct) < 0)
                goto nla_put_failure;
 
+       if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
+           (ctnetlink_dump_timeout(skb, ct) < 0 ||
+            ctnetlink_dump_protoinfo(skb, ct) < 0))
+               goto nla_put_failure;
+
        nlmsg_end(skb, nlh);
        return skb->len;
 
index e0d392cb3075ab8ee82f69f6fd03f002f9901316..0006503d2da9793ff858cfe3d7218f9709f11c45 100644 (file)
@@ -1037,8 +1037,13 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
        table[NF_SYSCTL_CT_COUNT].data = &net->ct.count;
        table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
        table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
+       table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
+       table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper;
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
        table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+       table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp;
 #endif
        table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout;
        table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
index 80a8f9ae4c93118d651f6b27f97f1f89bda77cfe..a0b4bf654de2d75765bc27b4ea61c130f32a14a1 100644 (file)
@@ -217,7 +217,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
                return err;
        }
 
-       flow->timeout = (u32)jiffies;
+       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
        return 0;
 }
 EXPORT_SYMBOL_GPL(flow_offload_add);
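
The one-line change above arms a newly added flow-table entry with its full NF_FLOW_TIMEOUT rather than a bare jiffies value, which would make the entry look expired the moment the garbage collector compares timestamps. A self-contained sketch of that wraparound-safe "deadline = now + timeout" arithmetic, mirroring the kernel's time_after_eq() trick (the tick rate and timeout below are invented for the example):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "has the deadline passed?" check on a 32-bit tick counter. */
static int deadline_passed(uint32_t now, uint32_t deadline)
{
        return (int32_t)(now - deadline) >= 0;
}

int main(void)
{
        uint32_t now = 0xfffffff0u;           /* pretend tick counter, near wrap */
        uint32_t timeout_ticks = 30 * 100;    /* e.g. 30s at HZ=100 (illustrative) */

        uint32_t bare  = now;                 /* old behaviour: deadline == now   */
        uint32_t armed = now + timeout_ticks; /* new behaviour: now + timeout     */

        printf("bare deadline already expired:  %d\n", deadline_passed(now, bare));
        printf("armed deadline already expired: %d\n", deadline_passed(now, armed));
        return 0;
}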
index d68c801dd614bc3e40ec8741981a4d9b6cc1b997..b9e7dd6e60ce2b1327709fb38a0eab5e7e2d1af6 100644 (file)
@@ -228,7 +228,6 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
 {
        skb_orphan(skb);
        skb_dst_set_noref(skb, dst);
-       skb->tstamp = 0;
        dst_output(state->net, state->sk, skb);
        return NF_STOLEN;
 }
@@ -284,6 +283,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
        flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
        iph = ip_hdr(skb);
        ip_decrease_ttl(iph);
+       skb->tstamp = 0;
 
        if (unlikely(dst_xfrm(&rt->dst))) {
                memset(skb->cb, 0, sizeof(struct inet_skb_parm));
@@ -512,6 +512,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
        flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
        ip6h = ipv6_hdr(skb);
        ip6h->hop_limit--;
+       skb->tstamp = 0;
 
        if (unlikely(dst_xfrm(&rt->dst))) {
                memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
index 2cf3f32fe6d2543fd338b0a545aca3fd975ec04f..a2e726ae7f07737b8dc5d420cacbf00885a90ba0 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables_core.h>
 #include <net/netfilter/nf_tables.h>
+#include <net/ipv6.h>
 
 #include <net/netfilter/nft_fib.h>
 
@@ -34,6 +35,8 @@ static void nft_fib_netdev_eval(const struct nft_expr *expr,
                }
                break;
        case ETH_P_IPV6:
+               if (!ipv6_mod_enabled())
+                       break;
                switch (priv->result) {
                case NFT_FIB_RESULT_OIF:
                case NFT_FIB_RESULT_OIFNAME:
index 060a4ed46d5e6fca0196b3c874c9758999d84e0a..01705ad74a9aa5cff33d5615021d6f9a13ccf347 100644 (file)
@@ -149,6 +149,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
        return nft_chain_validate_hooks(ctx->chain, hook_mask);
 }
 
+static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
+       [NFTA_FLOW_TABLE_NAME]  = { .type = NLA_STRING,
+                                   .len = NFT_NAME_MAXLEN - 1 },
+};
+
 static int nft_flow_offload_init(const struct nft_ctx *ctx,
                                 const struct nft_expr *expr,
                                 const struct nlattr * const tb[])
@@ -207,6 +212,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = {
 static struct nft_expr_type nft_flow_offload_type __read_mostly = {
        .name           = "flow_offload",
        .ops            = &nft_flow_offload_ops,
+       .policy         = nft_flow_offload_policy,
        .maxattr        = NFTA_FLOW_MAX,
        .owner          = THIS_MODULE,
 };
index d7f3776dfd719d402ae178890107db8ebd1627f2..637ce3e8c575ce1b95dfcd340347176772cdff2d 100644 (file)
@@ -47,9 +47,6 @@ static void nft_socket_eval(const struct nft_expr *expr,
                return;
        }
 
-       /* So that subsequent socket matching not to require other lookups. */
-       skb->sk = sk;
-
        switch(priv->key) {
        case NFT_SOCKET_TRANSPARENT:
                nft_reg_store8(dest, inet_sk_transparent(sk));
@@ -66,6 +63,9 @@ static void nft_socket_eval(const struct nft_expr *expr,
                WARN_ON(1);
                regs->verdict.code = NFT_BREAK;
        }
+
+       if (sk != skb->sk)
+               sock_gen_put(sk);
 }
 
 static const struct nla_policy nft_socket_policy[NFTA_SOCKET_MAX + 1] = {
index d0ab1adf5bff81a71fd489b24ce856e66016f781..5aab6df74e0f253012ea67a229547ebca5a60724 100644 (file)
@@ -54,25 +54,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par)
        nfnl_acct_put(info->nfacct);
 }
 
-static struct xt_match nfacct_mt_reg __read_mostly = {
-       .name       = "nfacct",
-       .family     = NFPROTO_UNSPEC,
-       .checkentry = nfacct_mt_checkentry,
-       .match      = nfacct_mt,
-       .destroy    = nfacct_mt_destroy,
-       .matchsize  = sizeof(struct xt_nfacct_match_info),
-       .usersize   = offsetof(struct xt_nfacct_match_info, nfacct),
-       .me         = THIS_MODULE,
+static struct xt_match nfacct_mt_reg[] __read_mostly = {
+       {
+               .name       = "nfacct",
+               .revision   = 0,
+               .family     = NFPROTO_UNSPEC,
+               .checkentry = nfacct_mt_checkentry,
+               .match      = nfacct_mt,
+               .destroy    = nfacct_mt_destroy,
+               .matchsize  = sizeof(struct xt_nfacct_match_info),
+               .usersize   = offsetof(struct xt_nfacct_match_info, nfacct),
+               .me         = THIS_MODULE,
+       },
+       {
+               .name       = "nfacct",
+               .revision   = 1,
+               .family     = NFPROTO_UNSPEC,
+               .checkentry = nfacct_mt_checkentry,
+               .match      = nfacct_mt,
+               .destroy    = nfacct_mt_destroy,
+               .matchsize  = sizeof(struct xt_nfacct_match_info_v1),
+               .usersize   = offsetof(struct xt_nfacct_match_info_v1, nfacct),
+               .me         = THIS_MODULE,
+       },
 };
 
 static int __init nfacct_mt_init(void)
 {
-       return xt_register_match(&nfacct_mt_reg);
+       return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
 }
 
 static void __exit nfacct_mt_exit(void)
 {
-       xt_unregister_match(&nfacct_mt_reg);
+       xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
 }
 
 module_init(nfacct_mt_init);
index ead7c60222086579cc9f21ea2aa8b37747b35db1..b92b22ce8abd3f41b90901c4cae2e75918cc0645 100644 (file)
@@ -101,11 +101,9 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
        if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) &&
            (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
             info->invert & XT_PHYSDEV_OP_BRIDGED) &&
-           par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
-           (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
+           par->hook_mask & (1 << NF_INET_LOCAL_OUT)) {
                pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
-               if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
-                       return -EINVAL;
+               return -EINVAL;
        }
 
        if (!brnf_probed) {
index 848c6eb550644feaf5e9a785123861fdc4cb1c15..05249eb4508231186ba15b12a2330068b1d26b0d 100644 (file)
@@ -67,6 +67,7 @@ struct ovs_conntrack_info {
        struct md_mark mark;
        struct md_labels labels;
        char timeout[CTNL_TIMEOUT_NAME_MAX];
+       struct nf_ct_timeout *nf_ct_timeout;
 #if IS_ENABLED(CONFIG_NF_NAT)
        struct nf_nat_range2 range;  /* Only present for SRC NAT and DST NAT. */
 #endif
@@ -524,6 +525,11 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
                return -EPFNOSUPPORT;
        }
 
+       /* The key extracted from the fragment that completed this datagram
+        * likely didn't have an L4 header, so regenerate it.
+        */
+       ovs_flow_key_update_l3l4(skb, key);
+
        key->ip.frag = OVS_FRAG_TYPE_NONE;
        skb_clear_hash(skb);
        skb->ignore_df = 1;
@@ -697,6 +703,14 @@ static bool skb_nfct_cached(struct net *net,
                if (help && rcu_access_pointer(help->helper) != info->helper)
                        return false;
        }
+       if (info->nf_ct_timeout) {
+               struct nf_conn_timeout *timeout_ext;
+
+               timeout_ext = nf_ct_timeout_find(ct);
+               if (!timeout_ext || info->nf_ct_timeout !=
+                   rcu_dereference(timeout_ext->timeout))
+                       return false;
+       }
        /* Force conntrack entry direction to the current packet? */
        if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
                /* Delete the conntrack entry if confirmed, else just release
@@ -1565,7 +1579,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
                case OVS_CT_ATTR_TIMEOUT:
                        memcpy(info->timeout, nla_data(a), nla_len(a));
                        if (!memchr(info->timeout, '\0', nla_len(a))) {
-                               OVS_NLERR(log, "Invalid conntrack helper");
+                               OVS_NLERR(log, "Invalid conntrack timeout");
                                return -EINVAL;
                        }
                        break;
@@ -1657,6 +1671,10 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
                                      ct_info.timeout))
                        pr_info_ratelimited("Failed to associated timeout "
                                            "policy `%s'\n", ct_info.timeout);
+               else
+                       ct_info.nf_ct_timeout = rcu_dereference(
+                               nf_ct_timeout_find(ct_info.ct)->timeout);
+
        }
 
        if (helper) {
index bc89e16e05050ce7038a1b67823e2f410a60efce..9d81d2c7bf8212e5d50063634e8ab064b2005040 100644 (file)
@@ -523,78 +523,15 @@ static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
 }
 
 /**
- * key_extract - extracts a flow key from an Ethernet frame.
+ * key_extract_l3l4 - extracts L3/L4 header information.
  * @skb: sk_buff that contains the frame, with skb->data pointing to the
- * Ethernet header
+ *       L3 header
  * @key: output flow key
  *
- * The caller must ensure that skb->len >= ETH_HLEN.
- *
- * Returns 0 if successful, otherwise a negative errno value.
- *
- * Initializes @skb header fields as follows:
- *
- *    - skb->mac_header: the L2 header.
- *
- *    - skb->network_header: just past the L2 header, or just past the
- *      VLAN header, to the first byte of the L2 payload.
- *
- *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
- *      on output, then just past the IP header, if one is present and
- *      of a correct length, otherwise the same as skb->network_header.
- *      For other key->eth.type values it is left untouched.
- *
- *    - skb->protocol: the type of the data starting at skb->network_header.
- *      Equals to key->eth.type.
  */
-static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
+static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
 {
        int error;
-       struct ethhdr *eth;
-
-       /* Flags are always used as part of stats */
-       key->tp.flags = 0;
-
-       skb_reset_mac_header(skb);
-
-       /* Link layer. */
-       clear_vlan(key);
-       if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
-               if (unlikely(eth_type_vlan(skb->protocol)))
-                       return -EINVAL;
-
-               skb_reset_network_header(skb);
-               key->eth.type = skb->protocol;
-       } else {
-               eth = eth_hdr(skb);
-               ether_addr_copy(key->eth.src, eth->h_source);
-               ether_addr_copy(key->eth.dst, eth->h_dest);
-
-               __skb_pull(skb, 2 * ETH_ALEN);
-               /* We are going to push all headers that we pull, so no need to
-               * update skb->csum here.
-               */
-
-               if (unlikely(parse_vlan(skb, key)))
-                       return -ENOMEM;
-
-               key->eth.type = parse_ethertype(skb);
-               if (unlikely(key->eth.type == htons(0)))
-                       return -ENOMEM;
-
-               /* Multiple tagged packets need to retain TPID to satisfy
-                * skb_vlan_pop(), which will later shift the ethertype into
-                * skb->protocol.
-                */
-               if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
-                       skb->protocol = key->eth.cvlan.tpid;
-               else
-                       skb->protocol = key->eth.type;
-
-               skb_reset_network_header(skb);
-               __skb_push(skb, skb->data - skb_mac_header(skb));
-       }
-       skb_reset_mac_len(skb);
 
        /* Network layer. */
        if (key->eth.type == htons(ETH_P_IP)) {
@@ -623,6 +560,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
                offset = nh->frag_off & htons(IP_OFFSET);
                if (offset) {
                        key->ip.frag = OVS_FRAG_TYPE_LATER;
+                       memset(&key->tp, 0, sizeof(key->tp));
                        return 0;
                }
                if (nh->frag_off & htons(IP_MF) ||
@@ -740,8 +678,10 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
                        return error;
                }
 
-               if (key->ip.frag == OVS_FRAG_TYPE_LATER)
+               if (key->ip.frag == OVS_FRAG_TYPE_LATER) {
+                       memset(&key->tp, 0, sizeof(key->tp));
                        return 0;
+               }
                if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;
 
@@ -788,6 +728,92 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
        return 0;
 }
 
+/**
+ * key_extract - extracts a flow key from an Ethernet frame.
+ * @skb: sk_buff that contains the frame, with skb->data pointing to the
+ * Ethernet header
+ * @key: output flow key
+ *
+ * The caller must ensure that skb->len >= ETH_HLEN.
+ *
+ * Returns 0 if successful, otherwise a negative errno value.
+ *
+ * Initializes @skb header fields as follows:
+ *
+ *    - skb->mac_header: the L2 header.
+ *
+ *    - skb->network_header: just past the L2 header, or just past the
+ *      VLAN header, to the first byte of the L2 payload.
+ *
+ *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
+ *      on output, then just past the IP header, if one is present and
+ *      of a correct length, otherwise the same as skb->network_header.
+ *      For other key->eth.type values it is left untouched.
+ *
+ *    - skb->protocol: the type of the data starting at skb->network_header.
+ *      Equals to key->eth.type.
+ */
+static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
+{
+       struct ethhdr *eth;
+
+       /* Flags are always used as part of stats */
+       key->tp.flags = 0;
+
+       skb_reset_mac_header(skb);
+
+       /* Link layer. */
+       clear_vlan(key);
+       if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
+               if (unlikely(eth_type_vlan(skb->protocol)))
+                       return -EINVAL;
+
+               skb_reset_network_header(skb);
+               key->eth.type = skb->protocol;
+       } else {
+               eth = eth_hdr(skb);
+               ether_addr_copy(key->eth.src, eth->h_source);
+               ether_addr_copy(key->eth.dst, eth->h_dest);
+
+               __skb_pull(skb, 2 * ETH_ALEN);
+               /* We are going to push all headers that we pull, so no need to
+                * update skb->csum here.
+                */
+
+               if (unlikely(parse_vlan(skb, key)))
+                       return -ENOMEM;
+
+               key->eth.type = parse_ethertype(skb);
+               if (unlikely(key->eth.type == htons(0)))
+                       return -ENOMEM;
+
+               /* Multiple tagged packets need to retain TPID to satisfy
+                * skb_vlan_pop(), which will later shift the ethertype into
+                * skb->protocol.
+                */
+               if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
+                       skb->protocol = key->eth.cvlan.tpid;
+               else
+                       skb->protocol = key->eth.type;
+
+               skb_reset_network_header(skb);
+               __skb_push(skb, skb->data - skb_mac_header(skb));
+       }
+
+       skb_reset_mac_len(skb);
+
+       /* Fill out L3/L4 key info, if any */
+       return key_extract_l3l4(skb, key);
+}
+
+/* The conntrack fragment-handling code expects L3 headers, so add a
+ * helper for re-extracting the L3/L4 parts of the key.
+ */
+int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
+{
+       return key_extract_l3l4(skb, key);
+}
+
 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
 {
        int res;
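
The openvswitch hunks above split key_extract() into an L2 stage plus a key_extract_l3l4() stage and export the latter as ovs_flow_key_update_l3l4(), so the conntrack defragmentation path can refresh only the L3/L4 part of an already-built key. A bare-bones structural sketch of that two-stage split (no real parsing is done here; every type, field and function name is a placeholder):

#include <stdio.h>
#include <string.h>

struct flow_key {
        unsigned char  src_mac[6], dst_mac[6];
        unsigned int   src_ip, dst_ip;
        unsigned short src_port, dst_port;
};

struct packet {
        const unsigned char *l2;   /* start of the Ethernet header */
        const unsigned char *l3;   /* start of the IP header       */
};

/* Second stage: fill in only the L3/L4 half of the key. */
static int extract_l3l4(const struct packet *pkt, struct flow_key *key)
{
        (void)pkt;                 /* real code would parse pkt->l3 here */
        key->src_ip = key->dst_ip = 0;
        key->src_port = key->dst_port = 0;
        return 0;
}

/* First stage: fill in the L2 half, then hand off to the shared stage. */
static int extract(const struct packet *pkt, struct flow_key *key)
{
        (void)pkt;                 /* real code would parse pkt->l2 here */
        memset(key->src_mac, 0, sizeof(key->src_mac));
        memset(key->dst_mac, 0, sizeof(key->dst_mac));
        return extract_l3l4(pkt, key);
}

/* After defragmentation the L2 part of the key is still valid, but the
 * reassembled packet now carries a complete L4 header, so only the second
 * stage needs to be re-run (the role ovs_flow_key_update_l3l4() plays).
 */
int main(void)
{
        struct packet pkt = { NULL, NULL };
        struct flow_key key;

        extract(&pkt, &key);       /* initial extraction           */
        extract_l3l4(&pkt, &key);  /* refresh after reassembly     */
        printf("key rebuilt\n");
        return 0;
}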
index a5506e2d4b7ae3b42c9e652e041cf34501c78a00..b830d5ff7af4de82d7ccddcd851e892f4311f6d4 100644 (file)
@@ -270,6 +270,7 @@ void ovs_flow_stats_clear(struct sw_flow *);
 u64 ovs_flow_used_time(unsigned long flow_jiffies);
 
 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
+int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key);
 int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
                         struct sk_buff *skb,
                         struct sw_flow_key *key);
index 841f198ea1a8853c57e6b9f7309d02bbf11d3ba5..66e4b61a350d5472776286e415192fba140d8a36 100644 (file)
@@ -154,7 +154,7 @@ static void psample_group_destroy(struct psample_group *group)
 {
        psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
        list_del(&group->list);
-       kfree(group);
+       kfree_rcu(group, rcu);
 }
 
 static struct psample_group *
index ccff1e544c2183ba709d488c80015b8932370fbc..e35869e81766ea6660176c94fa910dbe2614bf61 100644 (file)
@@ -84,11 +84,14 @@ static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (!kbuf)
                return -ENOMEM;
 
-       if (!copy_from_iter_full(kbuf, len, from))
+       if (!copy_from_iter_full(kbuf, len, from)) {
+               kfree(kbuf);
                return -EFAULT;
+       }
 
        ret = qrtr_endpoint_post(&tun->ep, kbuf, len);
 
+       kfree(kbuf);
        return ret < 0 ? ret : len;
 }
 
index 0f4398e7f2a7add7c20b6fdd333c40af4e719c92..05464fd7c17afcc708eec3e4305ca85d039048d3 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -239,34 +239,30 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
        }
 
-       sock_set_flag(sk, SOCK_RCU_FREE);
-       ret = rds_add_bound(rs, binding_addr, &port, scope_id);
-       if (ret)
-               goto out;
-
-       if (rs->rs_transport) { /* previously bound */
+       /* The transport can be set using SO_RDS_TRANSPORT option before the
+        * socket is bound.
+        */
+       if (rs->rs_transport) {
                trans = rs->rs_transport;
                if (trans->laddr_check(sock_net(sock->sk),
                                       binding_addr, scope_id) != 0) {
                        ret = -ENOPROTOOPT;
-                       rds_remove_bound(rs);
-               } else {
-                       ret = 0;
+                       goto out;
                }
-               goto out;
-       }
-       trans = rds_trans_get_preferred(sock_net(sock->sk), binding_addr,
-                                       scope_id);
-       if (!trans) {
-               ret = -EADDRNOTAVAIL;
-               rds_remove_bound(rs);
-               pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
-                                   __func__, binding_addr);
-               goto out;
+       } else {
+               trans = rds_trans_get_preferred(sock_net(sock->sk),
+                                               binding_addr, scope_id);
+               if (!trans) {
+                       ret = -EADDRNOTAVAIL;
+                       pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
+                                           __func__, binding_addr);
+                       goto out;
+               }
+               rs->rs_transport = trans;
        }
 
-       rs->rs_transport = trans;
-       ret = 0;
+       sock_set_flag(sk, SOCK_RCU_FREE);
+       ret = rds_add_bound(rs, binding_addr, &port, scope_id);
 
 out:
        release_sock(sk);
index ec05d91aa9a2b39b0b904a275b0731ba540ebb3c..45acab2de0cf3e09b295d2faf7db3b3c78528fc5 100644 (file)
@@ -291,7 +291,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
                                    void *buffer)
 {
        struct rds_info_rdma_connection *iinfo = buffer;
-       struct rds_ib_connection *ic;
+       struct rds_ib_connection *ic = conn->c_transport_data;
 
        /* We will only ever look at IB transports */
        if (conn->c_trans != &rds_ib_transport)
@@ -301,15 +301,16 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
 
        iinfo->src_addr = conn->c_laddr.s6_addr32[3];
        iinfo->dst_addr = conn->c_faddr.s6_addr32[3];
-       iinfo->tos = conn->c_tos;
+       if (ic) {
+               iinfo->tos = conn->c_tos;
+               iinfo->sl = ic->i_sl;
+       }
 
        memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
        memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
        if (rds_conn_state(conn) == RDS_CONN_UP) {
                struct rds_ib_device *rds_ibdev;
 
-               ic = conn->c_transport_data;
-
                rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
                               (union ib_gid *)&iinfo->dst_gid);
 
@@ -329,7 +330,7 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
                                     void *buffer)
 {
        struct rds6_info_rdma_connection *iinfo6 = buffer;
-       struct rds_ib_connection *ic;
+       struct rds_ib_connection *ic = conn->c_transport_data;
 
        /* We will only ever look at IB transports */
        if (conn->c_trans != &rds_ib_transport)
@@ -337,6 +338,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
 
        iinfo6->src_addr = conn->c_laddr;
        iinfo6->dst_addr = conn->c_faddr;
+       if (ic) {
+               iinfo6->tos = conn->c_tos;
+               iinfo6->sl = ic->i_sl;
+       }
 
        memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid));
        memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid));
@@ -344,7 +349,6 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
        if (rds_conn_state(conn) == RDS_CONN_UP) {
                struct rds_ib_device *rds_ibdev;
 
-               ic = conn->c_transport_data;
                rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
                               (union ib_gid *)&iinfo6->dst_gid);
                rds_ibdev = ic->rds_ibdev;
index 303c6ee8bdb7b0835448068e64dc992fda0380a1..f2b558e8b5eaf738cd6b1a46dc98c20205d5ab7b 100644 (file)
@@ -220,6 +220,7 @@ struct rds_ib_connection {
        /* Send/Recv vectors */
        int                     i_scq_vector;
        int                     i_rcq_vector;
+       u8                      i_sl;
 };
 
 /* This assumes that atomic_t is at least 32 bits */
index fddaa09f7b0dbd68d1d19cef054dccaa6369e45c..233f1368162b4f1be8596ba2d4803fdced10b41a 100644 (file)
@@ -152,6 +152,9 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
                  RDS_PROTOCOL_MINOR(conn->c_version),
                  ic->i_flowctl ? ", flow control" : "");
 
+       /* receive sl from the peer */
+       ic->i_sl = ic->i_cm_id->route.path_rec->sl;
+
        atomic_set(&ic->i_cq_quiesce, 0);
 
        /* Init rings and fill recv. this needs to wait until protocol
index 9986d6065c4d1f357f021700351866d0ebe4f8a2..5f741e51b4baa46f825b7a676969cde3159b2c21 100644 (file)
@@ -43,6 +43,9 @@ static struct rdma_cm_id *rds_rdma_listen_id;
 static struct rdma_cm_id *rds6_rdma_listen_id;
 #endif
 
+/* Per IB specification 7.7.3, service level is a 4-bit field. */
+#define TOS_TO_SL(tos)         ((tos) & 0xF)
+
 static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
                                         struct rdma_cm_event *event,
                                         bool isv6)
@@ -97,10 +100,13 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
                        struct rds_ib_connection *ibic;
 
                        ibic = conn->c_transport_data;
-                       if (ibic && ibic->i_cm_id == cm_id)
+                       if (ibic && ibic->i_cm_id == cm_id) {
+                               cm_id->route.path_rec[0].sl =
+                                       TOS_TO_SL(conn->c_tos);
                                ret = trans->cm_initiate_connect(cm_id, isv6);
-                       else
+                       } else {
                                rds_conn_drop(conn);
+                       }
                }
                break;
 
index 853de48760880603bf60142700cd668100ca289e..a42ba7fa06d5d0a04e9829ffdf5561c3e648353a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -811,6 +811,7 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
 
        minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
        minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
+       minfo6.tos = inc->i_conn->c_tos;
 
        if (flip) {
                minfo6.laddr = *daddr;
@@ -824,6 +825,8 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
                minfo6.fport = inc->i_hdr.h_dport;
        }
 
+       minfo6.flags = 0;
+
        rds_info_copy(iter, &minfo6, sizeof(minfo6));
 }
 #endif
index 0dbbfd1b648759a7d9e729188521861d8accfe8f..d72ddb67bb742a0f6db6db765dd0035c94f636e8 100644 (file)
@@ -862,7 +862,6 @@ static void rxrpc_sock_destructor(struct sock *sk)
 static int rxrpc_release_sock(struct sock *sk)
 {
        struct rxrpc_sock *rx = rxrpc_sk(sk);
-       struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 
        _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
 
@@ -898,8 +897,6 @@ static int rxrpc_release_sock(struct sock *sk)
        rxrpc_release_calls_on_socket(rx);
        flush_workqueue(rxrpc_workqueue);
        rxrpc_purge_queue(&sk->sk_receive_queue);
-       rxrpc_queue_work(&rxnet->service_conn_reaper);
-       rxrpc_queue_work(&rxnet->client_conn_reaper);
 
        rxrpc_unuse_local(rx->local);
        rx->local = NULL;
index 145335611af66b1e6fcd261cc532be0a72aaa6ae..8051dfdcf26df1a506043ab887de93dd5c24aadc 100644 (file)
@@ -185,11 +185,17 @@ struct rxrpc_host_header {
  * - max 48 bytes (struct sk_buff::cb)
  */
 struct rxrpc_skb_priv {
-       union {
-               u8              nr_jumbo;       /* Number of jumbo subpackets */
-       };
+       atomic_t        nr_ring_pins;           /* Number of rxtx ring pins */
+       u8              nr_subpackets;          /* Number of subpackets */
+       u8              rx_flags;               /* Received packet flags */
+#define RXRPC_SKB_INCL_LAST    0x01            /* - Includes last packet */
+#define RXRPC_SKB_TX_BUFFER    0x02            /* - Is transmit buffer */
        union {
                int             remain;         /* amount of space remaining for next write */
+
+               /* List of requested ACKs on subpackets */
+               unsigned long   rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) /
+                                          BITS_PER_LONG];
        };
 
        struct rxrpc_host_header hdr;           /* RxRPC packet header from this packet */
@@ -613,8 +619,7 @@ struct rxrpc_call {
 #define RXRPC_TX_ANNO_LAST     0x04
 #define RXRPC_TX_ANNO_RESENT   0x08
 
-#define RXRPC_RX_ANNO_JUMBO    0x3f            /* Jumbo subpacket number + 1 if not zero */
-#define RXRPC_RX_ANNO_JLAST    0x40            /* Set if last element of a jumbo packet */
+#define RXRPC_RX_ANNO_SUBPACKET        0x3f            /* Subpacket number in jumbogram */
 #define RXRPC_RX_ANNO_VERIFIED 0x80            /* Set if verified and decrypted */
        rxrpc_seq_t             tx_hard_ack;    /* Dead slot in buffer; the first transmitted but
                                                 * not hard-ACK'd packet follows this.
@@ -905,6 +910,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *);
 void rxrpc_put_client_conn(struct rxrpc_connection *);
 void rxrpc_discard_expired_client_conns(struct work_struct *);
 void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
+void rxrpc_clean_up_local_conns(struct rxrpc_local *);
 
 /*
  * conn_event.c
@@ -1105,6 +1111,7 @@ void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
 void rxrpc_packet_destructor(struct sk_buff *);
 void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
 void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
+void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
 void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
 void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
 void rxrpc_purge_queue(struct sk_buff_head *);
index c767679bfa5db842c3ad94c33f011a4d5854bff2..cedbbb3a7c2ea27ddef918ca0e3bcc690ddd8f3d 100644 (file)
@@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
                        continue;
 
                skb = call->rxtx_buffer[ix];
-               rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
+               rxrpc_see_skb(skb, rxrpc_skb_seen);
 
                if (anno_type == RXRPC_TX_ANNO_UNACK) {
                        if (ktime_after(skb->tstamp, max_age)) {
@@ -255,18 +255,18 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
                        continue;
 
                skb = call->rxtx_buffer[ix];
-               rxrpc_get_skb(skb, rxrpc_skb_tx_got);
+               rxrpc_get_skb(skb, rxrpc_skb_got);
                spin_unlock_bh(&call->lock);
 
                if (rxrpc_send_data_packet(call, skb, true) < 0) {
-                       rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+                       rxrpc_free_skb(skb, rxrpc_skb_freed);
                        return;
                }
 
                if (rxrpc_is_client_call(call))
                        rxrpc_expose_client_call(call);
 
-               rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+               rxrpc_free_skb(skb, rxrpc_skb_freed);
                spin_lock_bh(&call->lock);
 
                /* We need to clear the retransmit state, but there are two
index 217b12be9e089d7aea3c774c50314c6fd2bdf83d..014548c259ceb9fa82e241a2bcae64ac76b77eb3 100644 (file)
@@ -421,6 +421,19 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
        trace_rxrpc_call(call, op, n, here, NULL);
 }
 
+/*
+ * Clean up the RxTx skb ring.
+ */
+static void rxrpc_cleanup_ring(struct rxrpc_call *call)
+{
+       int i;
+
+       for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
+               rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
+               call->rxtx_buffer[i] = NULL;
+       }
+}
+
 /*
  * Detach a call from its owning socket.
  */
@@ -429,7 +442,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
        const void *here = __builtin_return_address(0);
        struct rxrpc_connection *conn = call->conn;
        bool put = false;
-       int i;
 
        _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
 
@@ -479,13 +491,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
        if (conn)
                rxrpc_disconnect_call(call);
 
-       for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
-               rxrpc_free_skb(call->rxtx_buffer[i],
-                              (call->tx_phase ? rxrpc_skb_tx_cleaned :
-                               rxrpc_skb_rx_cleaned));
-               call->rxtx_buffer[i] = NULL;
-       }
-
+       rxrpc_cleanup_ring(call);
        _leave("");
 }
 
@@ -568,8 +574,6 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
  */
 void rxrpc_cleanup_call(struct rxrpc_call *call)
 {
-       int i;
-
        _net("DESTROY CALL %d", call->debug_id);
 
        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
@@ -580,13 +584,8 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
        ASSERTCMP(call->conn, ==, NULL);
 
-       /* Clean up the Rx/Tx buffer */
-       for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
-               rxrpc_free_skb(call->rxtx_buffer[i],
-                              (call->tx_phase ? rxrpc_skb_tx_cleaned :
-                               rxrpc_skb_rx_cleaned));
-
-       rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);
+       rxrpc_cleanup_ring(call);
+       rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
 
        call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
 }
index aea82f909c60801d347c8f02eeb5d9b9790bff91..3f1da1b49f690d0e3ccf6d93607cd2b0583d293b 100644 (file)
@@ -1162,3 +1162,47 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
 
        _leave("");
 }
+
+/*
+ * Clean up the client connections on a local endpoint.
+ */
+void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
+{
+       struct rxrpc_connection *conn, *tmp;
+       struct rxrpc_net *rxnet = local->rxnet;
+       unsigned int nr_active;
+       LIST_HEAD(graveyard);
+
+       _enter("");
+
+       spin_lock(&rxnet->client_conn_cache_lock);
+       nr_active = rxnet->nr_active_client_conns;
+
+       list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
+                                cache_link) {
+               if (conn->params.local == local) {
+                       ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE);
+
+                       trace_rxrpc_client(conn, -1, rxrpc_client_discard);
+                       if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
+                               BUG();
+                       conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
+                       list_move(&conn->cache_link, &graveyard);
+                       nr_active--;
+               }
+       }
+
+       rxnet->nr_active_client_conns = nr_active;
+       spin_unlock(&rxnet->client_conn_cache_lock);
+       ASSERTCMP(nr_active, >=, 0);
+
+       while (!list_empty(&graveyard)) {
+               conn = list_entry(graveyard.next,
+                                 struct rxrpc_connection, cache_link);
+               list_del_init(&conn->cache_link);
+
+               rxrpc_put_connection(conn);
+       }
+
+       _leave(" [culled]");
+}
index df6624c140be9be2fc248d5f533a9245c4b07e9c..a1ceef4f5cd07754fff2fa5e1472ff60a50a272e 100644 (file)
@@ -472,7 +472,7 @@ void rxrpc_process_connection(struct work_struct *work)
        /* go through the conn-level event packets, releasing the ref on this
         * connection that each one has when we've finished with it */
        while ((skb = skb_dequeue(&conn->rx_queue))) {
-               rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
+               rxrpc_see_skb(skb, rxrpc_skb_seen);
                ret = rxrpc_process_event(conn, skb, &abort_code);
                switch (ret) {
                case -EPROTO:
@@ -484,7 +484,7 @@ void rxrpc_process_connection(struct work_struct *work)
                        goto requeue_and_leave;
                case -ECONNABORTED:
                default:
-                       rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+                       rxrpc_free_skb(skb, rxrpc_skb_freed);
                        break;
                }
        }
@@ -501,6 +501,6 @@ requeue_and_leave:
 protocol_error:
        if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
                goto requeue_and_leave;
-       rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+       rxrpc_free_skb(skb, rxrpc_skb_freed);
        goto out;
 }
index 434ef392212bcdc0b71c7961effadc9b5f4c0b6d..ed05b692213226e7494c3908caa4dcf44f39bf27 100644 (file)
@@ -398,7 +398,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
                if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
                        continue;
 
-               if (rxnet->live) {
+               if (rxnet->live && !conn->params.local->dead) {
                        idle_timestamp = READ_ONCE(conn->idle_timestamp);
                        expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
                        if (conn->params.local->service_closed)
index dd47d465d1d3e7de3bcf72dc76611621bb2c0e6a..157be1ff8697be438a7ee6636e37671169fff3df 100644 (file)
@@ -233,7 +233,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
                ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
                skb = call->rxtx_buffer[ix];
                annotation = call->rxtx_annotations[ix];
-               rxrpc_see_skb(skb, rxrpc_skb_tx_rotated);
+               rxrpc_see_skb(skb, rxrpc_skb_rotated);
                call->rxtx_buffer[ix] = NULL;
                call->rxtx_annotations[ix] = 0;
                skb->next = list;
@@ -258,7 +258,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
                skb = list;
                list = skb->next;
                skb_mark_not_on_list(skb);
-               rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+               rxrpc_free_skb(skb, rxrpc_skb_freed);
        }
 
        return rot_last;
@@ -347,7 +347,7 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
 }
 
 /*
- * Scan a jumbo packet to validate its structure and to work out how many
+ * Scan a data packet to validate its structure and to work out how many
  * subpackets it contains.
  *
  * A jumbo packet is a collection of consecutive packets glued together with
@@ -358,16 +358,21 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
  * the last are RXRPC_JUMBO_DATALEN in size.  The last subpacket may be of any
  * size.
  */
-static bool rxrpc_validate_jumbo(struct sk_buff *skb)
+static bool rxrpc_validate_data(struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned int offset = sizeof(struct rxrpc_wire_header);
        unsigned int len = skb->len;
-       int nr_jumbo = 1;
        u8 flags = sp->hdr.flags;
 
-       do {
-               nr_jumbo++;
+       for (;;) {
+               if (flags & RXRPC_REQUEST_ACK)
+                       __set_bit(sp->nr_subpackets, sp->rx_req_ack);
+               sp->nr_subpackets++;
+
+               if (!(flags & RXRPC_JUMBO_PACKET))
+                       break;
+
                if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
                        goto protocol_error;
                if (flags & RXRPC_LAST_PACKET)
@@ -376,9 +381,10 @@ static bool rxrpc_validate_jumbo(struct sk_buff *skb)
                if (skb_copy_bits(skb, offset, &flags, 1) < 0)
                        goto protocol_error;
                offset += sizeof(struct rxrpc_jumbo_header);
-       } while (flags & RXRPC_JUMBO_PACKET);
+       }
 
-       sp->nr_jumbo = nr_jumbo;
+       if (flags & RXRPC_LAST_PACKET)
+               sp->rx_flags |= RXRPC_SKB_INCL_LAST;
        return true;
 
 protocol_error:
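
The comment and loop above describe the jumbo layout that rxrpc_validate_data() now walks: fixed-size non-terminal subpackets, each followed by a small jumbo header, with a variable-size terminal subpacket at the end. A minimal userspace sketch of that walk, assuming a 28-byte wire header and the 1412/1416 constants defined in the protocol header later in this diff (everything else is invented for illustration; the kernel follows the RXRPC_JUMBO_PACKET flag in each secondary header and only length-checks each step):

#include <stdio.h>

#define WIRE_HDR_LEN    28      /* assumed sizeof(struct rxrpc_wire_header) */
#define JUMBO_DATALEN   1412    /* non-terminal subpacket data length */
#define JUMBO_HDR_LEN   4       /* RXRPC_JUMBO_SUBPKTLEN - RXRPC_JUMBO_DATALEN */
#define JUMBO_SUBPKTLEN (JUMBO_DATALEN + JUMBO_HDR_LEN)

int main(void)
{
        unsigned int skb_len = WIRE_HDR_LEN + 3 * JUMBO_SUBPKTLEN + 200;
        unsigned int offset = WIRE_HDR_LEN, n = 0;

        /* Non-terminal subpackets have a fixed stride; whatever remains is
         * the terminal subpacket, which may be of any size. */
        while (skb_len - offset > JUMBO_SUBPKTLEN) {
                printf("subpacket %u: data at %u, len %u\n",
                       n++, offset, JUMBO_DATALEN);
                offset += JUMBO_SUBPKTLEN;
        }
        printf("subpacket %u (terminal): data at %u, len %u\n",
               n, offset, skb_len - offset);
        return 0;
}
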
@@ -399,10 +405,10 @@ protocol_error:
  * (that information is encoded in the ACK packet).
  */
 static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
-                                u8 annotation, bool *_jumbo_bad)
+                                bool is_jumbo, bool *_jumbo_bad)
 {
        /* Discard normal packets that are duplicates. */
-       if (annotation == 0)
+       if (is_jumbo)
                return;
 
        /* Skip jumbo subpackets that are duplicates.  When we've had three or
@@ -416,29 +422,30 @@ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
 }
 
 /*
- * Process a DATA packet, adding the packet to the Rx ring.
+ * Process a DATA packet, adding the packet to the Rx ring.  The caller's
+ * packet ref must be passed on or discarded.
  */
 static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        enum rxrpc_call_state state;
-       unsigned int offset = sizeof(struct rxrpc_wire_header);
-       unsigned int ix;
+       unsigned int j;
        rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
-       rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
-       bool immediate_ack = false, jumbo_bad = false, queued;
-       u16 len;
-       u8 ack = 0, flags, annotation = 0;
+       rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack;
+       bool immediate_ack = false, jumbo_bad = false;
+       u8 ack = 0;
 
        _enter("{%u,%u},{%u,%u}",
-              call->rx_hard_ack, call->rx_top, skb->len, seq);
+              call->rx_hard_ack, call->rx_top, skb->len, seq0);
 
-       _proto("Rx DATA %%%u { #%u f=%02x }",
-              sp->hdr.serial, seq, sp->hdr.flags);
+       _proto("Rx DATA %%%u { #%u f=%02x n=%u }",
+              sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets);
 
        state = READ_ONCE(call->state);
-       if (state >= RXRPC_CALL_COMPLETE)
+       if (state >= RXRPC_CALL_COMPLETE) {
+               rxrpc_free_skb(skb, rxrpc_skb_freed);
                return;
+       }
 
        if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
                unsigned long timo = READ_ONCE(call->next_req_timo);
@@ -463,137 +470,137 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
            !rxrpc_receiving_reply(call))
                goto unlock;
 
-       call->ackr_prev_seq = seq;
-
+       call->ackr_prev_seq = seq0;
        hard_ack = READ_ONCE(call->rx_hard_ack);
-       if (after(seq, hard_ack + call->rx_winsize)) {
-               ack = RXRPC_ACK_EXCEEDS_WINDOW;
-               ack_serial = serial;
-               goto ack;
-       }
 
-       flags = sp->hdr.flags;
-       if (flags & RXRPC_JUMBO_PACKET) {
+       if (sp->nr_subpackets > 1) {
                if (call->nr_jumbo_bad > 3) {
                        ack = RXRPC_ACK_NOSPACE;
                        ack_serial = serial;
                        goto ack;
                }
-               annotation = 1;
        }
 
-next_subpacket:
-       queued = false;
-       ix = seq & RXRPC_RXTX_BUFF_MASK;
-       len = skb->len;
-       if (flags & RXRPC_JUMBO_PACKET)
-               len = RXRPC_JUMBO_DATALEN;
-
-       if (flags & RXRPC_LAST_PACKET) {
-               if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
-                   seq != call->rx_top) {
-                       rxrpc_proto_abort("LSN", call, seq);
-                       goto unlock;
-               }
-       } else {
-               if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
-                   after_eq(seq, call->rx_top)) {
-                       rxrpc_proto_abort("LSA", call, seq);
-                       goto unlock;
+       for (j = 0; j < sp->nr_subpackets; j++) {
+               rxrpc_serial_t serial = sp->hdr.serial + j;
+               rxrpc_seq_t seq = seq0 + j;
+               unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK;
+               bool terminal = (j == sp->nr_subpackets - 1);
+               bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST);
+               u8 flags, annotation = j;
+
+               _proto("Rx DATA+%u %%%u { #%x t=%u l=%u }",
+                    j, serial, seq, terminal, last);
+
+               if (last) {
+                       if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+                           seq != call->rx_top) {
+                               rxrpc_proto_abort("LSN", call, seq);
+                               goto unlock;
+                       }
+               } else {
+                       if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+                           after_eq(seq, call->rx_top)) {
+                               rxrpc_proto_abort("LSA", call, seq);
+                               goto unlock;
+                       }
                }
-       }
 
-       trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
-       if (before_eq(seq, hard_ack)) {
-               ack = RXRPC_ACK_DUPLICATE;
-               ack_serial = serial;
-               goto skip;
-       }
+               flags = 0;
+               if (last)
+                       flags |= RXRPC_LAST_PACKET;
+               if (!terminal)
+                       flags |= RXRPC_JUMBO_PACKET;
+               if (test_bit(j, sp->rx_req_ack))
+                       flags |= RXRPC_REQUEST_ACK;
+               trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
 
-       if (flags & RXRPC_REQUEST_ACK && !ack) {
-               ack = RXRPC_ACK_REQUESTED;
-               ack_serial = serial;
-       }
-
-       if (call->rxtx_buffer[ix]) {
-               rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
-               if (ack != RXRPC_ACK_DUPLICATE) {
+               if (before_eq(seq, hard_ack)) {
                        ack = RXRPC_ACK_DUPLICATE;
                        ack_serial = serial;
+                       continue;
                }
-               immediate_ack = true;
-               goto skip;
-       }
 
-       /* Queue the packet.  We use a couple of memory barriers here as need
-        * to make sure that rx_top is perceived to be set after the buffer
-        * pointer and that the buffer pointer is set after the annotation and
-        * the skb data.
-        *
-        * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
-        * and also rxrpc_fill_out_ack().
-        */
-       rxrpc_get_skb(skb, rxrpc_skb_rx_got);
-       call->rxtx_annotations[ix] = annotation;
-       smp_wmb();
-       call->rxtx_buffer[ix] = skb;
-       if (after(seq, call->rx_top)) {
-               smp_store_release(&call->rx_top, seq);
-       } else if (before(seq, call->rx_top)) {
-               /* Send an immediate ACK if we fill in a hole */
-               if (!ack) {
-                       ack = RXRPC_ACK_DELAY;
-                       ack_serial = serial;
+               if (call->rxtx_buffer[ix]) {
+                       rxrpc_input_dup_data(call, seq, sp->nr_subpackets > 1,
+                                            &jumbo_bad);
+                       if (ack != RXRPC_ACK_DUPLICATE) {
+                               ack = RXRPC_ACK_DUPLICATE;
+                               ack_serial = serial;
+                       }
+                       immediate_ack = true;
+                       continue;
                }
-               immediate_ack = true;
-       }
-       if (flags & RXRPC_LAST_PACKET) {
-               set_bit(RXRPC_CALL_RX_LAST, &call->flags);
-               trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
-       } else {
-               trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
-       }
-       queued = true;
 
-       if (after_eq(seq, call->rx_expect_next)) {
-               if (after(seq, call->rx_expect_next)) {
-                       _net("OOS %u > %u", seq, call->rx_expect_next);
-                       ack = RXRPC_ACK_OUT_OF_SEQUENCE;
-                       ack_serial = serial;
-               }
-               call->rx_expect_next = seq + 1;
-       }
-
-skip:
-       offset += len;
-       if (flags & RXRPC_JUMBO_PACKET) {
-               if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
-                       rxrpc_proto_abort("XJF", call, seq);
-                       goto unlock;
-               }
-               offset += sizeof(struct rxrpc_jumbo_header);
-               seq++;
-               serial++;
-               annotation++;
-               if (flags & RXRPC_JUMBO_PACKET)
-                       annotation |= RXRPC_RX_ANNO_JLAST;
                if (after(seq, hard_ack + call->rx_winsize)) {
                        ack = RXRPC_ACK_EXCEEDS_WINDOW;
                        ack_serial = serial;
-                       if (!jumbo_bad) {
-                               call->nr_jumbo_bad++;
-                               jumbo_bad = true;
+                       if (flags & RXRPC_JUMBO_PACKET) {
+                               if (!jumbo_bad) {
+                                       call->nr_jumbo_bad++;
+                                       jumbo_bad = true;
+                               }
                        }
+
                        goto ack;
                }
 
-               _proto("Rx DATA Jumbo %%%u", serial);
-               goto next_subpacket;
-       }
+               if (flags & RXRPC_REQUEST_ACK && !ack) {
+                       ack = RXRPC_ACK_REQUESTED;
+                       ack_serial = serial;
+               }
+
+               /* Queue the packet.  We use a couple of memory barriers here as need
+                * to make sure that rx_top is perceived to be set after the buffer
+                * pointer and that the buffer pointer is set after the annotation and
+                * the skb data.
+                *
+                * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
+                * and also rxrpc_fill_out_ack().
+                */
+               if (!terminal)
+                       rxrpc_get_skb(skb, rxrpc_skb_got);
+               call->rxtx_annotations[ix] = annotation;
+               smp_wmb();
+               call->rxtx_buffer[ix] = skb;
+               if (after(seq, call->rx_top)) {
+                       smp_store_release(&call->rx_top, seq);
+               } else if (before(seq, call->rx_top)) {
+                       /* Send an immediate ACK if we fill in a hole */
+                       if (!ack) {
+                               ack = RXRPC_ACK_DELAY;
+                               ack_serial = serial;
+                       }
+                       immediate_ack = true;
+               }
+
+               if (terminal) {
+                       /* From this point on, we're not allowed to touch the
+                        * packet any longer as its ref now belongs to the Rx
+                        * ring.
+                        */
+                       skb = NULL;
+               }
 
-       if (queued && flags & RXRPC_LAST_PACKET && !ack) {
-               ack = RXRPC_ACK_DELAY;
-               ack_serial = serial;
+               if (last) {
+                       set_bit(RXRPC_CALL_RX_LAST, &call->flags);
+                       if (!ack) {
+                               ack = RXRPC_ACK_DELAY;
+                               ack_serial = serial;
+                       }
+                       trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
+               } else {
+                       trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
+               }
+
+               if (after_eq(seq, call->rx_expect_next)) {
+                       if (after(seq, call->rx_expect_next)) {
+                               _net("OOS %u > %u", seq, call->rx_expect_next);
+                               ack = RXRPC_ACK_OUT_OF_SEQUENCE;
+                               ack_serial = serial;
+                       }
+                       call->rx_expect_next = seq + 1;
+               }
        }
 
 ack:
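
The barrier comment in the hunk above is the standard publish pattern: fill in the slot, make the buffer pointer visible, then advance rx_top, so a reader that observes the new rx_top also sees the pointer and the data behind it. A standalone sketch of the same release/acquire idea in userspace C (names invented; the kernel uses smp_wmb() and smp_store_release() rather than C11 atomics):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct slot {
        int annotation;
        const char *data;
};

static struct slot ring_slot;                /* stands in for annotation + skb data */
static _Atomic(struct slot *) buffer;        /* stands in for rxtx_buffer[ix] */
static atomic_uint top;                      /* stands in for call->rx_top */

static void *producer(void *arg)
{
        (void)arg;
        ring_slot.annotation = 1;
        ring_slot.data = "payload";
        /* Publish the pointer only after the slot contents are written... */
        atomic_store_explicit(&buffer, &ring_slot, memory_order_release);
        /* ...and advance top only after the pointer is published. */
        atomic_store_explicit(&top, 1, memory_order_release);
        return NULL;
}

static void *consumer(void *arg)
{
        (void)arg;
        while (atomic_load_explicit(&top, memory_order_acquire) == 0)
                ;                            /* spin until published */
        struct slot *s = atomic_load_explicit(&buffer, memory_order_relaxed);
        printf("annotation=%d data=%s\n", s->annotation, s->data);
        return NULL;
}

int main(void)
{
        pthread_t p, c;

        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
}
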
@@ -606,13 +613,14 @@ ack:
                                  false, true,
                                  rxrpc_propose_ack_input_data);
 
-       if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) {
+       if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) {
                trace_rxrpc_notify_socket(call->debug_id, serial);
                rxrpc_notify_socket(call);
        }
 
 unlock:
        spin_unlock(&call->input_lock);
+       rxrpc_free_skb(skb, rxrpc_skb_freed);
        _leave(" [queued]");
 }
 
@@ -1021,7 +1029,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_DATA:
                rxrpc_input_data(call, skb);
-               break;
+               goto no_free;
 
        case RXRPC_PACKET_TYPE_ACK:
                rxrpc_input_ack(call, skb);
@@ -1048,6 +1056,8 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
                break;
        }
 
+       rxrpc_free_skb(skb, rxrpc_skb_freed);
+no_free:
        _leave("");
 }
 
@@ -1109,7 +1119,7 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
                skb_queue_tail(&local->event_queue, skb);
                rxrpc_queue_local(local);
        } else {
-               rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+               rxrpc_free_skb(skb, rxrpc_skb_freed);
        }
 }
 
@@ -1124,7 +1134,7 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
                skb_queue_tail(&local->reject_queue, skb);
                rxrpc_queue_local(local);
        } else {
-               rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+               rxrpc_free_skb(skb, rxrpc_skb_freed);
        }
 }
 
@@ -1188,7 +1198,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
        if (skb->tstamp == 0)
                skb->tstamp = ktime_get_real();
 
-       rxrpc_new_skb(skb, rxrpc_skb_rx_received);
+       rxrpc_new_skb(skb, rxrpc_skb_received);
 
        skb_pull(skb, sizeof(struct udphdr));
 
@@ -1205,7 +1215,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
                static int lose;
                if ((lose++ & 7) == 7) {
                        trace_rxrpc_rx_lose(sp);
-                       rxrpc_free_skb(skb, rxrpc_skb_rx_lost);
+                       rxrpc_free_skb(skb, rxrpc_skb_lost);
                        return 0;
                }
        }
@@ -1237,9 +1247,26 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
                if (sp->hdr.callNumber == 0 ||
                    sp->hdr.seq == 0)
                        goto bad_message;
-               if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
-                   !rxrpc_validate_jumbo(skb))
+               if (!rxrpc_validate_data(skb))
                        goto bad_message;
+
+               /* Unshare the packet so that it can be modified for in-place
+                * decryption.
+                */
+               if (sp->hdr.securityIndex != 0) {
+                       struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
+                       if (!nskb) {
+                               rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
+                               goto out;
+                       }
+
+                       if (nskb != skb) {
+                               rxrpc_eaten_skb(skb, rxrpc_skb_received);
+                               skb = nskb;
+                               rxrpc_new_skb(skb, rxrpc_skb_unshared);
+                               sp = rxrpc_skb(skb);
+                       }
+               }
                break;
 
        case RXRPC_PACKET_TYPE_CHALLENGE:
@@ -1373,11 +1400,14 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
                mutex_unlock(&call->user_mutex);
        }
 
+       /* Process a call packet; this either discards or passes on the ref
+        * elsewhere.
+        */
        rxrpc_input_call_packet(call, skb);
-       goto discard;
+       goto out;
 
 discard:
-       rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+       rxrpc_free_skb(skb, rxrpc_skb_freed);
 out:
        trace_rxrpc_rx_done(0, 0);
        return 0;
index e93a78f7c05e274ae279dddabcd8bfc8d957dff2..3ce6d628cd759d58c036e2ad3be257b4af9bb5fe 100644 (file)
@@ -90,7 +90,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
        if (skb) {
                struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 
-               rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
+               rxrpc_see_skb(skb, rxrpc_skb_seen);
                _debug("{%d},{%u}", local->debug_id, sp->hdr.type);
 
                switch (sp->hdr.type) {
@@ -108,7 +108,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
                        break;
                }
 
-               rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+               rxrpc_free_skb(skb, rxrpc_skb_freed);
        }
 
        _leave("");
index 72a6e12a9304f8dbade52afcbca1019f5e28b1b5..36587260cabd99b280372b9fe0fda70e300bae44 100644 (file)
@@ -426,11 +426,14 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 
        _enter("%d", local->debug_id);
 
+       local->dead = true;
+
        mutex_lock(&rxnet->local_mutex);
        list_del_init(&local->link);
        mutex_unlock(&rxnet->local_mutex);
 
-       ASSERT(RB_EMPTY_ROOT(&local->client_conns));
+       rxrpc_clean_up_local_conns(local);
+       rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
        ASSERT(!local->service);
 
        if (socket) {
index 369e516c4bdfdf3656a3f38b783ddcc70c92a5d3..935bb60fff56a7899374bf47db09edfb27ba05aa 100644 (file)
@@ -565,7 +565,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        memset(&whdr, 0, sizeof(whdr));
 
        while ((skb = skb_dequeue(&local->reject_queue))) {
-               rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
+               rxrpc_see_skb(skb, rxrpc_skb_seen);
                sp = rxrpc_skb(skb);
 
                switch (skb->mark) {
@@ -581,7 +581,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
                        ioc = 2;
                        break;
                default:
-                       rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+                       rxrpc_free_skb(skb, rxrpc_skb_freed);
                        continue;
                }
 
@@ -606,7 +606,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
                                                      rxrpc_tx_point_reject);
                }
 
-               rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+               rxrpc_free_skb(skb, rxrpc_skb_freed);
        }
 
        _leave("");
index 7666ec72d37e5e5e8971ca288749738ba324c64a..c97ebdc043e44525eaecdd54bc447c1895bdca74 100644 (file)
@@ -163,11 +163,11 @@ void rxrpc_error_report(struct sock *sk)
                _leave("UDP socket errqueue empty");
                return;
        }
-       rxrpc_new_skb(skb, rxrpc_skb_rx_received);
+       rxrpc_new_skb(skb, rxrpc_skb_received);
        serr = SKB_EXT_ERR(skb);
        if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
                _leave("UDP empty message");
-               rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+               rxrpc_free_skb(skb, rxrpc_skb_freed);
                return;
        }
 
@@ -177,7 +177,7 @@ void rxrpc_error_report(struct sock *sk)
                peer = NULL;
        if (!peer) {
                rcu_read_unlock();
-               rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+               rxrpc_free_skb(skb, rxrpc_skb_freed);
                _leave(" [no peer]");
                return;
        }
@@ -189,7 +189,7 @@ void rxrpc_error_report(struct sock *sk)
             serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
                rxrpc_adjust_mtu(peer, serr);
                rcu_read_unlock();
-               rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+               rxrpc_free_skb(skb, rxrpc_skb_freed);
                rxrpc_put_peer(peer);
                _leave(" [MTU update]");
                return;
@@ -197,7 +197,7 @@ void rxrpc_error_report(struct sock *sk)
 
        rxrpc_store_error(peer, serr);
        rcu_read_unlock();
-       rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+       rxrpc_free_skb(skb, rxrpc_skb_freed);
        rxrpc_put_peer(peer);
 
        _leave("");
index 99ce322d7caa0369ecd8905a0de53aac6b4a8909..49bb972539aa1c7534430f5d5362277ea203ddc2 100644 (file)
@@ -89,6 +89,15 @@ struct rxrpc_jumbo_header {
 #define RXRPC_JUMBO_DATALEN    1412    /* non-terminal jumbo packet data length */
 #define RXRPC_JUMBO_SUBPKTLEN  (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
 
+/*
+ * The maximum number of subpackets that can possibly fit in a UDP packet is:
+ *
+ *     ((max_IP - IP_hdr - UDP_hdr) / RXRPC_JUMBO_SUBPKTLEN) + 1
+ *     = ((65535 - 28 - 28) / 1416) + 1
+ *     = 46 non-terminal packets and 1 terminal packet.
+ */
+#define RXRPC_MAX_NR_JUMBO     47
+
 /*****************************************************************************/
 /*
  * on-the-wire Rx ACK packet data payload
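
A quick reproduction of the arithmetic in the comment above, using only the numbers it quotes:

#include <stdio.h>

#define RXRPC_JUMBO_SUBPKTLEN 1416   /* 1412 bytes of data + 4-byte jumbo header */

int main(void)
{
        unsigned int budget = 65535 - 28 - 28;   /* max_IP - IP_hdr - UDP_hdr */
        unsigned int non_terminal = budget / RXRPC_JUMBO_SUBPKTLEN;

        /* Prints "46 non-terminal + 1 terminal = 47", matching RXRPC_MAX_NR_JUMBO. */
        printf("%u non-terminal + 1 terminal = %u\n",
               non_terminal, non_terminal + 1);
        return 0;
}
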
index 9a7e1bc9791d7913e64e664ed08e4c17267b4331..3b0becb1204156eff6753f2399ec09c40fd233b0 100644 (file)
@@ -177,7 +177,8 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
        struct sk_buff *skb;
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top;
-       u8 flags;
+       bool last = false;
+       u8 subpacket;
        int ix;
 
        _enter("%d", call->debug_id);
@@ -189,23 +190,25 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
        hard_ack++;
        ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
        skb = call->rxtx_buffer[ix];
-       rxrpc_see_skb(skb, rxrpc_skb_rx_rotated);
+       rxrpc_see_skb(skb, rxrpc_skb_rotated);
        sp = rxrpc_skb(skb);
-       flags = sp->hdr.flags;
-       serial = sp->hdr.serial;
-       if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO)
-               serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1;
+
+       subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
+       serial = sp->hdr.serial + subpacket;
+
+       if (subpacket == sp->nr_subpackets - 1 &&
+           sp->rx_flags & RXRPC_SKB_INCL_LAST)
+               last = true;
 
        call->rxtx_buffer[ix] = NULL;
        call->rxtx_annotations[ix] = 0;
        /* Barrier against rxrpc_input_data(). */
        smp_store_release(&call->rx_hard_ack, hard_ack);
 
-       rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+       rxrpc_free_skb(skb, rxrpc_skb_freed);
 
-       _debug("%u,%u,%02x", hard_ack, top, flags);
        trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
-       if (flags & RXRPC_LAST_PACKET) {
+       if (last) {
                rxrpc_end_rx_phase(call, serial);
        } else {
                /* Check to see if there's an ACK that needs sending. */
@@ -233,18 +236,19 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        rxrpc_seq_t seq = sp->hdr.seq;
        u16 cksum = sp->hdr.cksum;
+       u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
 
        _enter("");
 
        /* For all but the head jumbo subpacket, the security checksum is in a
         * jumbo header immediately prior to the data.
         */
-       if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) {
+       if (subpacket > 0) {
                __be16 tmp;
                if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
                        BUG();
                cksum = ntohs(tmp);
-               seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1;
+               seq += subpacket;
        }
 
        return call->conn->security->verify_packet(call, skb, offset, len,
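
With the ring annotation reduced to a subpacket index, the per-subpacket sequence number, serial number, data offset and checksum location all follow by arithmetic from the wire header, as the hunk above does for the checksum. A small sketch of that derivation (the 28-byte wire header size is an assumption; the sample values are invented):

#include <stdio.h>

#define WIRE_HDR_LEN    28       /* assumed sizeof(struct rxrpc_wire_header) */
#define JUMBO_SUBPKTLEN 1416     /* stride between subpackets */

int main(void)
{
        unsigned int hdr_seq = 100, hdr_serial = 5000;   /* from the wire header */
        unsigned int subpacket = 2;    /* annotation & RXRPC_RX_ANNO_SUBPACKET */

        unsigned int seq    = hdr_seq + subpacket;
        unsigned int serial = hdr_serial + subpacket;
        unsigned int offset = WIRE_HDR_LEN + subpacket * JUMBO_SUBPKTLEN;

        /* For subpackets after the first, the security checksum sits in the
         * jumbo header immediately before the data, i.e. two bytes back. */
        printf("seq=%u serial=%u data@%u cksum@%u\n",
               seq, serial, offset, offset - 2);
        return 0;
}
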
@@ -265,19 +269,18 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
                             u8 *_annotation,
                             unsigned int *_offset, unsigned int *_len)
 {
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned int offset = sizeof(struct rxrpc_wire_header);
        unsigned int len;
        int ret;
        u8 annotation = *_annotation;
+       u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
 
        /* Locate the subpacket */
+       offset += subpacket * RXRPC_JUMBO_SUBPKTLEN;
        len = skb->len - offset;
-       if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) {
-               offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) *
-                          RXRPC_JUMBO_SUBPKTLEN);
-               len = (annotation & RXRPC_RX_ANNO_JLAST) ?
-                       skb->len - offset : RXRPC_JUMBO_SUBPKTLEN;
-       }
+       if (subpacket < sp->nr_subpackets - 1)
+               len = RXRPC_JUMBO_DATALEN;
 
        if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
                ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
@@ -303,6 +306,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 {
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
+       rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top, seq;
        size_t remain;
        bool last;
@@ -336,12 +340,15 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                        break;
                }
                smp_rmb();
-               rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
+               rxrpc_see_skb(skb, rxrpc_skb_seen);
                sp = rxrpc_skb(skb);
 
-               if (!(flags & MSG_PEEK))
+               if (!(flags & MSG_PEEK)) {
+                       serial = sp->hdr.serial;
+                       serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
                        trace_rxrpc_receive(call, rxrpc_receive_front,
-                                           sp->hdr.serial, seq);
+                                           serial, seq);
+               }
 
                if (msg)
                        sock_recv_timestamp(msg, sock->sk, skb);
index ae8cd8926456b51763e029ba541984ed9f663d06..c60c520fde7c0a57850d53886e0fce9fc622f58c 100644 (file)
@@ -187,10 +187,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
        struct rxrpc_skb_priv *sp;
        struct rxrpc_crypt iv;
        struct scatterlist sg[16];
-       struct sk_buff *trailer;
        unsigned int len;
        u16 check;
-       int nsg;
        int err;
 
        sp = rxrpc_skb(skb);
@@ -214,15 +212,14 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
        crypto_skcipher_encrypt(req);
 
        /* we want to encrypt the skbuff in-place */
-       nsg = skb_cow_data(skb, 0, &trailer);
-       err = -ENOMEM;
-       if (nsg < 0 || nsg > 16)
+       err = -EMSGSIZE;
+       if (skb_shinfo(skb)->nr_frags > 16)
                goto out;
 
        len = data_size + call->conn->size_align - 1;
        len &= ~(call->conn->size_align - 1);
 
-       sg_init_table(sg, nsg);
+       sg_init_table(sg, ARRAY_SIZE(sg));
        err = skb_to_sgvec(skb, sg, 0, len);
        if (unlikely(err < 0))
                goto out;
@@ -319,11 +316,10 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
        struct rxkad_level1_hdr sechdr;
        struct rxrpc_crypt iv;
        struct scatterlist sg[16];
-       struct sk_buff *trailer;
        bool aborted;
        u32 data_size, buf;
        u16 check;
-       int nsg, ret;
+       int ret;
 
        _enter("");
 
@@ -336,11 +332,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
        /* Decrypt the skbuff in-place.  TODO: We really want to decrypt
         * directly into the target buffer.
         */
-       nsg = skb_cow_data(skb, 0, &trailer);
-       if (nsg < 0 || nsg > 16)
-               goto nomem;
-
-       sg_init_table(sg, nsg);
+       sg_init_table(sg, ARRAY_SIZE(sg));
        ret = skb_to_sgvec(skb, sg, offset, 8);
        if (unlikely(ret < 0))
                return ret;
@@ -388,10 +380,6 @@ protocol_error:
        if (aborted)
                rxrpc_send_abort_packet(call);
        return -EPROTO;
-
-nomem:
-       _leave(" = -ENOMEM");
-       return -ENOMEM;
 }
 
 /*
@@ -406,7 +394,6 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
        struct rxkad_level2_hdr sechdr;
        struct rxrpc_crypt iv;
        struct scatterlist _sg[4], *sg;
-       struct sk_buff *trailer;
        bool aborted;
        u32 data_size, buf;
        u16 check;
@@ -423,12 +410,11 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
        /* Decrypt the skbuff in-place.  TODO: We really want to decrypt
         * directly into the target buffer.
         */
-       nsg = skb_cow_data(skb, 0, &trailer);
-       if (nsg < 0)
-               goto nomem;
-
        sg = _sg;
-       if (unlikely(nsg > 4)) {
+       nsg = skb_shinfo(skb)->nr_frags;
+       if (nsg <= 4) {
+               nsg = 4;
+       } else {
                sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
                if (!sg)
                        goto nomem;
index bae14438f86918a8ccf1e2df82071d3a9dba7f18..6a1547b270fef519fc5c334ced74b728f6484d28 100644 (file)
@@ -176,7 +176,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
        skb->tstamp = ktime_get_real();
 
        ix = seq & RXRPC_RXTX_BUFF_MASK;
-       rxrpc_get_skb(skb, rxrpc_skb_tx_got);
+       rxrpc_get_skb(skb, rxrpc_skb_got);
        call->rxtx_annotations[ix] = annotation;
        smp_wmb();
        call->rxtx_buffer[ix] = skb;
@@ -248,7 +248,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
        }
 
 out:
-       rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+       rxrpc_free_skb(skb, rxrpc_skb_freed);
        _leave(" = %d", ret);
        return ret;
 }
@@ -289,7 +289,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 
        skb = call->tx_pending;
        call->tx_pending = NULL;
-       rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
+       rxrpc_see_skb(skb, rxrpc_skb_seen);
 
        copied = 0;
        do {
@@ -336,7 +336,9 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
                        if (!skb)
                                goto maybe_error;
 
-                       rxrpc_new_skb(skb, rxrpc_skb_tx_new);
+                       sp = rxrpc_skb(skb);
+                       sp->rx_flags |= RXRPC_SKB_TX_BUFFER;
+                       rxrpc_new_skb(skb, rxrpc_skb_new);
 
                        _debug("ALLOC SEND %p", skb);
 
@@ -346,7 +348,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
                        skb_reserve(skb, call->conn->security_size);
                        skb->len += call->conn->security_size;
 
-                       sp = rxrpc_skb(skb);
                        sp->remain = chunk;
                        if (sp->remain > skb_tailroom(skb))
                                sp->remain = skb_tailroom(skb);
@@ -439,7 +440,7 @@ out:
        return ret;
 
 call_terminated:
-       rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+       rxrpc_free_skb(skb, rxrpc_skb_freed);
        _leave(" = %d", call->error);
        return call->error;
 
index 9ad5045b7c2f34e2fea3d5541e53c3401e5ab3ad..0348d2bf6f7d8e85ea075c992a5b5368ca0362a0 100644 (file)
@@ -14,7 +14,8 @@
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-#define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
+#define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER)
+#define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
 
 /*
  * Note the allocation or reception of a socket buffer.
@@ -22,8 +23,9 @@
 void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 {
        const void *here = __builtin_return_address(0);
-       int n = atomic_inc_return(select_skb_count(op));
-       trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
+       int n = atomic_inc_return(select_skb_count(skb));
+       trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
+                       rxrpc_skb(skb)->rx_flags, here);
 }
 
 /*
@@ -33,8 +35,9 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 {
        const void *here = __builtin_return_address(0);
        if (skb) {
-               int n = atomic_read(select_skb_count(op));
-               trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
+               int n = atomic_read(select_skb_count(skb));
+               trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
+                               rxrpc_skb(skb)->rx_flags, here);
        }
 }
 
@@ -44,11 +47,22 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 {
        const void *here = __builtin_return_address(0);
-       int n = atomic_inc_return(select_skb_count(op));
-       trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
+       int n = atomic_inc_return(select_skb_count(skb));
+       trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
+                       rxrpc_skb(skb)->rx_flags, here);
        skb_get(skb);
 }
 
+/*
+ * Note the dropping of a ref on a socket buffer by the core.
+ */
+void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
+{
+       const void *here = __builtin_return_address(0);
+       int n = atomic_inc_return(&rxrpc_n_rx_skbs);
+       trace_rxrpc_skb(skb, op, 0, n, 0, here);
+}
+
 /*
  * Note the destruction of a socket buffer.
  */
@@ -58,8 +72,9 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
        if (skb) {
                int n;
                CHECK_SLAB_OKAY(&skb->users);
-               n = atomic_dec_return(select_skb_count(op));
-               trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
+               n = atomic_dec_return(select_skb_count(skb));
+               trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
+                               rxrpc_skb(skb)->rx_flags, here);
                kfree_skb(skb);
        }
 }
@@ -72,9 +87,10 @@ void rxrpc_purge_queue(struct sk_buff_head *list)
        const void *here = __builtin_return_address(0);
        struct sk_buff *skb;
        while ((skb = skb_dequeue((list))) != NULL) {
-               int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
-               trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
-                               refcount_read(&skb->users), n, here);
+               int n = atomic_dec_return(select_skb_count(skb));
+               trace_rxrpc_skb(skb, rxrpc_skb_purged,
+                               refcount_read(&skb->users), n,
+                               rxrpc_skb(skb)->rx_flags, here);
                kfree_skb(skb);
        }
 }
index fd1f7e799e23e04337eaee35aef5dfb03d43563e..04b7bd4ec7513c2388275ee5ed1af18b8585a32b 100644 (file)
@@ -422,7 +422,7 @@ static __net_init int bpf_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, bpf_net_id);
 
-       return tc_action_net_init(tn, &act_bpf_ops);
+       return tc_action_net_init(net, tn, &act_bpf_ops);
 }
 
 static void __net_exit bpf_exit_net(struct list_head *net_list)
index 32ac04d77a455a845e61d89ec9d646d3e886b155..2b43cacf82af4aeaefdc41bf97651d832b1e31f1 100644 (file)
@@ -231,7 +231,7 @@ static __net_init int connmark_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, connmark_net_id);
 
-       return tc_action_net_init(tn, &act_connmark_ops);
+       return tc_action_net_init(net, tn, &act_connmark_ops);
 }
 
 static void __net_exit connmark_exit_net(struct list_head *net_list)
index 9b9288267a545627474a9909018f6a3e9f625930..d3cfad88dc3a93368449f2aa179f94c64b7a903c 100644 (file)
@@ -714,7 +714,7 @@ static __net_init int csum_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, csum_net_id);
 
-       return tc_action_net_init(tn, &act_csum_ops);
+       return tc_action_net_init(net, tn, &act_csum_ops);
 }
 
 static void __net_exit csum_exit_net(struct list_head *net_list)
index 33a1a7406e87feb2735f9acde1b9a76f7304bec5..cdd6f3818097842f247393e1a88626b6bef95be5 100644 (file)
@@ -939,7 +939,7 @@ static __net_init int ct_init_net(struct net *net)
                tn->labels = true;
        }
 
-       return tc_action_net_init(&tn->tn, &act_ct_ops);
+       return tc_action_net_init(net, &tn->tn, &act_ct_ops);
 }
 
 static void __net_exit ct_exit_net(struct list_head *net_list)
index 06ef74b74911cea631af5f12d8dba6fb9c13cf46..0dbcfd1dca7bf354a51892adeed5a5fc3b4889fd 100644 (file)
@@ -376,7 +376,7 @@ static __net_init int ctinfo_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
 
-       return tc_action_net_init(tn, &act_ctinfo_ops);
+       return tc_action_net_init(net, tn, &act_ctinfo_ops);
 }
 
 static void __net_exit ctinfo_exit_net(struct list_head *net_list)
index 8f0140c6ca58b6b0c6645f48b4f093c21c471dd6..324f1d1f6d477973596fe334ce682587610a30a7 100644 (file)
@@ -278,7 +278,7 @@ static __net_init int gact_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, gact_net_id);
 
-       return tc_action_net_init(tn, &act_gact_ops);
+       return tc_action_net_init(net, tn, &act_gact_ops);
 }
 
 static void __net_exit gact_exit_net(struct list_head *net_list)
index 92ee853d43e6c5f71d4ed635cc0043eb761358a9..3a31e241c6472b42d37414f68ba18c16bd1a9f19 100644 (file)
@@ -890,7 +890,7 @@ static __net_init int ife_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, ife_net_id);
 
-       return tc_action_net_init(tn, &act_ife_ops);
+       return tc_action_net_init(net, tn, &act_ife_ops);
 }
 
 static void __net_exit ife_exit_net(struct list_head *net_list)
index ce2c30a591d23572c947be16e6445168d230daa8..214a03d405cf9bb546903a12262a03a7d1843d0c 100644 (file)
@@ -61,12 +61,13 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
        return 0;
 }
 
-static void ipt_destroy_target(struct xt_entry_target *t)
+static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
 {
        struct xt_tgdtor_param par = {
                .target   = t->u.kernel.target,
                .targinfo = t->data,
                .family   = NFPROTO_IPV4,
+               .net      = net,
        };
        if (par.target->destroy != NULL)
                par.target->destroy(&par);
@@ -78,7 +79,7 @@ static void tcf_ipt_release(struct tc_action *a)
        struct tcf_ipt *ipt = to_ipt(a);
 
        if (ipt->tcfi_t) {
-               ipt_destroy_target(ipt->tcfi_t);
+               ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
                kfree(ipt->tcfi_t);
        }
        kfree(ipt->tcfi_tname);
@@ -180,7 +181,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
 
        spin_lock_bh(&ipt->tcf_lock);
        if (ret != ACT_P_CREATED) {
-               ipt_destroy_target(ipt->tcfi_t);
+               ipt_destroy_target(ipt->tcfi_t, net);
                kfree(ipt->tcfi_tname);
                kfree(ipt->tcfi_t);
        }
@@ -350,7 +351,7 @@ static __net_init int ipt_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, ipt_net_id);
 
-       return tc_action_net_init(tn, &act_ipt_ops);
+       return tc_action_net_init(net, tn, &act_ipt_ops);
 }
 
 static void __net_exit ipt_exit_net(struct list_head *net_list)
@@ -399,7 +400,7 @@ static __net_init int xt_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, xt_net_id);
 
-       return tc_action_net_init(tn, &act_xt_ops);
+       return tc_action_net_init(net, tn, &act_xt_ops);
 }
 
 static void __net_exit xt_exit_net(struct list_head *net_list)
index be3f88dfc37eb735a7f76728c525fad88d22c83a..9d1bf508075a8c789658ba7a08ac1c0a20cd63eb 100644 (file)
@@ -453,7 +453,7 @@ static __net_init int mirred_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, mirred_net_id);
 
-       return tc_action_net_init(tn, &act_mirred_ops);
+       return tc_action_net_init(net, tn, &act_mirred_ops);
 }
 
 static void __net_exit mirred_exit_net(struct list_head *net_list)
index 0f299e3b618cbae1f4794cd8e7230437d974ead9..e168df0e008acef81e579900602138b5da683111 100644 (file)
@@ -375,7 +375,7 @@ static __net_init int mpls_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, mpls_net_id);
 
-       return tc_action_net_init(tn, &act_mpls_ops);
+       return tc_action_net_init(net, tn, &act_mpls_ops);
 }
 
 static void __net_exit mpls_exit_net(struct list_head *net_list)
index 7b858c11b1b5ffd34f57706ee5631229b45f65a3..ea4c5359e7dfb5668dfc23ac4dfb95672f61304e 100644 (file)
@@ -327,7 +327,7 @@ static __net_init int nat_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, nat_net_id);
 
-       return tc_action_net_init(tn, &act_nat_ops);
+       return tc_action_net_init(net, tn, &act_nat_ops);
 }
 
 static void __net_exit nat_exit_net(struct list_head *net_list)
index 17360c6faeaac184a59ef82fa2e909885b83fa84..cdfaa79382a22c63761cdb7507296946aac3cb12 100644 (file)
@@ -498,7 +498,7 @@ static __net_init int pedit_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, pedit_net_id);
 
-       return tc_action_net_init(tn, &act_pedit_ops);
+       return tc_action_net_init(net, tn, &act_pedit_ops);
 }
 
 static void __net_exit pedit_exit_net(struct list_head *net_list)
index 49cec3e64a4d5b15b781151715347cccff1833ca..6315e0f8d26e3abfb96101bec5f9048db9619c21 100644 (file)
@@ -371,7 +371,7 @@ static __net_init int police_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, police_net_id);
 
-       return tc_action_net_init(tn, &act_police_ops);
+       return tc_action_net_init(net, tn, &act_police_ops);
 }
 
 static void __net_exit police_exit_net(struct list_head *net_list)
index 595308d60133d4428d385d34429d692af3f8c747..10229124a9924efda7745e5917a37fd5c6bff057 100644 (file)
@@ -102,13 +102,17 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        s->rate = rate;
        s->psample_group_num = psample_group_num;
-       RCU_INIT_POINTER(s->psample_group, psample_group);
+       rcu_swap_protected(s->psample_group, psample_group,
+                          lockdep_is_held(&s->tcf_lock));
 
        if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
                s->truncate = true;
                s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
        }
        spin_unlock_bh(&s->tcf_lock);
+
+       if (psample_group)
+               psample_group_put(psample_group);
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
 
@@ -265,7 +269,7 @@ static __net_init int sample_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, sample_net_id);
 
-       return tc_action_net_init(tn, &act_sample_ops);
+       return tc_action_net_init(net, tn, &act_sample_ops);
 }
 
 static void __net_exit sample_exit_net(struct list_head *net_list)
index 33aefa25b545e4032c0058b633aaaf74a88cf70e..6120e56117ca14d2abeb855e20beb33ae7cd9896 100644 (file)
@@ -232,7 +232,7 @@ static __net_init int simp_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, simp_net_id);
 
-       return tc_action_net_init(tn, &act_simp_ops);
+       return tc_action_net_init(net, tn, &act_simp_ops);
 }
 
 static void __net_exit simp_exit_net(struct list_head *net_list)
index 37dced00b63d16de7797c34ff0c10ed4812fe0b4..6a8d3337c577493dc9dcc2b70e7946abbc165cfa 100644 (file)
@@ -336,7 +336,7 @@ static __net_init int skbedit_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);
 
-       return tc_action_net_init(tn, &act_skbedit_ops);
+       return tc_action_net_init(net, tn, &act_skbedit_ops);
 }
 
 static void __net_exit skbedit_exit_net(struct list_head *net_list)
index 7da3518e18efb709f81d9435d6fe66411b8a270a..888437f97ba62087992a6b19de615e859fd63563 100644 (file)
@@ -287,7 +287,7 @@ static __net_init int skbmod_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, skbmod_net_id);
 
-       return tc_action_net_init(tn, &act_skbmod_ops);
+       return tc_action_net_init(net, tn, &act_skbmod_ops);
 }
 
 static void __net_exit skbmod_exit_net(struct list_head *net_list)
index 6d0debdc9b972631ee2f53a1745e8fe0d8d5d126..2f83a79f76aae656fcea6f2d4a35d45c22e2a501 100644 (file)
@@ -600,7 +600,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
 
-       return tc_action_net_init(tn, &act_tunnel_key_ops);
+       return tc_action_net_init(net, tn, &act_tunnel_key_ops);
 }
 
 static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
index a3c9eea1ee8ac720311a8c88994a08a4a01d2bcb..287a30bf8930e130745c2ccca42f2949975f225d 100644 (file)
@@ -334,7 +334,7 @@ static __net_init int vlan_init_net(struct net *net)
 {
        struct tc_action_net *tn = net_generic(net, vlan_net_id);
 
-       return tc_action_net_init(tn, &act_vlan_ops);
+       return tc_action_net_init(net, tn, &act_vlan_ops);
 }
 
 static void __net_exit vlan_exit_net(struct list_head *net_list)
index 04faee7ccbce64354618365b1c304d7c76f6bb06..1047825d9f48d546fe1339a25f3f86979e7ac801 100644 (file)
@@ -1920,6 +1920,8 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
        cl = cops->find(q, portid);
        if (!cl)
                return;
+       if (!cops->tcf_block)
+               return;
        block = cops->tcf_block(q, cl, NULL);
        if (!block)
                return;
index 732e109c305557f4dc365cd65feaacbbb72852eb..810645b5c086101622393d2a5ae44613ff14f1d6 100644 (file)
@@ -181,11 +181,6 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
        s64 credits;
        int len;
 
-       if (atomic64_read(&q->port_rate) == -1) {
-               WARN_ONCE(1, "cbs: dequeue() called with unknown port rate.");
-               return NULL;
-       }
-
        if (q->credits < 0) {
                credits = timediff_to_credits(now - q->last, q->idleslope);
 
@@ -303,11 +298,19 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
 static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
 {
        struct ethtool_link_ksettings ecmd;
+       int speed = SPEED_10;
        int port_rate = -1;
+       int err;
+
+       err = __ethtool_get_link_ksettings(dev, &ecmd);
+       if (err < 0)
+               goto skip;
+
+       if (ecmd.base.speed != SPEED_UNKNOWN)
+               speed = ecmd.base.speed;
 
-       if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
-           ecmd.base.speed != SPEED_UNKNOWN)
-               port_rate = ecmd.base.speed * 1000 * BYTES_PER_KBIT;
+skip:
+       port_rate = speed * 1000 * BYTES_PER_KBIT;
 
        atomic64_set(&q->port_rate, port_rate);
        netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
index 11c03cf4aa74b44663c74e0e3284140b0c75d9c4..ac28f6a5d70e0b38ae8ce7858f08e9d15778c22f 100644 (file)
@@ -46,6 +46,8 @@ EXPORT_SYMBOL(default_qdisc_ops);
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
+#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
+
 static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
 {
        const struct netdev_queue *txq = q->dev_queue;
@@ -71,7 +73,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
                                q->q.qlen--;
                        }
                } else {
-                       skb = NULL;
+                       skb = SKB_XOFF_MAGIC;
                }
        }
 
@@ -253,8 +255,11 @@ validate:
                return skb;
 
        skb = qdisc_dequeue_skb_bad_txq(q);
-       if (unlikely(skb))
+       if (unlikely(skb)) {
+               if (skb == SKB_XOFF_MAGIC)
+                       return NULL;
                goto bulk;
+       }
        skb = q->dequeue(q);
        if (skb) {
 bulk:
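
SKB_XOFF_MAGIC is a sentinel pointer: it lets the bad-txq dequeue path report "stop, the queue is frozen" distinctly from the NULL that means "nothing queued", so the caller above can return early rather than fall through to the normal dequeue. A toy sketch of the idea (all names invented):

#include <stdio.h>

struct item { int v; };

#define XOFF_MAGIC ((struct item *)1UL)   /* never dereferenced, only compared */

static struct item *try_dequeue(int frozen, struct item *head)
{
        if (frozen)
                return XOFF_MAGIC;   /* "stop", not "empty" */
        return head;                 /* may be NULL if the queue is empty */
}

int main(void)
{
        struct item it = { .v = 42 };
        struct item *p = try_dequeue(1, &it);

        if (p == XOFF_MAGIC)
                printf("queue frozen, stop dequeueing\n");
        else if (p)
                printf("got %d\n", p->v);
        else
                printf("empty\n");
        return 0;
}
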
@@ -624,8 +629,12 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 
        err = skb_array_produce(q, skb);
 
-       if (unlikely(err))
-               return qdisc_drop_cpu(skb, qdisc, to_free);
+       if (unlikely(err)) {
+               if (qdisc_is_percpu_stats(qdisc))
+                       return qdisc_drop_cpu(skb, qdisc, to_free);
+               else
+                       return qdisc_drop(skb, qdisc, to_free);
+       }
 
        qdisc_update_stats_at_enqueue(qdisc, pkt_len);
        return NET_XMIT_SUCCESS;
@@ -688,11 +697,14 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
                        kfree_skb(skb);
        }
 
-       for_each_possible_cpu(i) {
-               struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
+       if (qdisc_is_percpu_stats(qdisc)) {
+               for_each_possible_cpu(i) {
+                       struct gnet_stats_queue *q;
 
-               q->backlog = 0;
-               q->qlen = 0;
+                       q = per_cpu_ptr(qdisc->cpu_qstats, i);
+                       q->backlog = 0;
+                       q->qlen = 0;
+               }
        }
 }
 
index cee6971c1c8249a68dd6b953948fa1fef21fce04..23cd1c873a2cfc24b907a8177e71dd68b0939701 100644 (file)
@@ -531,7 +531,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
                new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
 
        non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
-       if (non_hh_quantum > INT_MAX)
+       if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
                return -EINVAL;
 
        sch_tree_lock(sch);
index e25d414ae12fdd9a7c4b8ea1903293e36e4ae12a..8d8bc2ec5cd6281d811fd5d8a5c5211ebb0edd73 100644 (file)
@@ -477,11 +477,6 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
        u32 gate_mask;
        int i;
 
-       if (atomic64_read(&q->picos_per_byte) == -1) {
-               WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
-               return NULL;
-       }
-
        rcu_read_lock();
        entry = rcu_dereference(q->current_entry);
        /* if there's no entry, it means that the schedule didn't
@@ -958,12 +953,20 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
                                      struct taprio_sched *q)
 {
        struct ethtool_link_ksettings ecmd;
-       int picos_per_byte = -1;
+       int speed = SPEED_10;
+       int picos_per_byte;
+       int err;
 
-       if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
-           ecmd.base.speed != SPEED_UNKNOWN)
-               picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
-                                          ecmd.base.speed * 1000 * 1000);
+       err = __ethtool_get_link_ksettings(dev, &ecmd);
+       if (err < 0)
+               goto skip;
+
+       if (ecmd.base.speed != SPEED_UNKNOWN)
+               speed = ecmd.base.speed;
+
+skip:
+       picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
+                                  speed * 1000 * 1000);
 
        atomic64_set(&q->picos_per_byte, picos_per_byte);
        netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
@@ -1249,6 +1252,10 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
         */
        q->clockid = -1;
 
+       spin_lock(&taprio_list_lock);
+       list_add(&q->taprio_list, &taprio_list);
+       spin_unlock(&taprio_list_lock);
+
        if (sch->parent != TC_H_ROOT)
                return -EOPNOTSUPP;
 
@@ -1266,10 +1273,6 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
        if (!opt)
                return -EINVAL;
 
-       spin_lock(&taprio_list_lock);
-       list_add(&q->taprio_list, &taprio_list);
-       spin_unlock(&taprio_list_lock);
-
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *dev_queue;
                struct Qdisc *qdisc;
index 2d47adcb4cbe002f655a937ab89ffc0c603d3015..53746ffeeca355026dfb0e7712d2ec36c47811f1 100644 (file)
@@ -1336,7 +1336,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net)
        return status;
 }
 
-static void __net_init sctp_ctrlsock_exit(struct net *net)
+static void __net_exit sctp_ctrlsock_exit(struct net *net)
 {
        /* Free the control endpoint.  */
        inet_ctl_sock_destroy(net->sctp.ctl_sock);
index 1cf5bb5b73c412cadf1b3d7240bb8c200edb77a5..e52b2128e43b8706ddbec3d6b77388e24402c1c9 100644 (file)
@@ -547,7 +547,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
        if (net->sctp.pf_enable &&
           (transport->state == SCTP_ACTIVE) &&
           (transport->error_count < transport->pathmaxrxt) &&
-          (transport->error_count > asoc->pf_retrans)) {
+          (transport->error_count > transport->pf_retrans)) {
 
                sctp_assoc_control_transport(asoc, transport,
                                             SCTP_TRANSPORT_PF,
index 9d1f83b10c0a6c5ab0ca2736c8a9f8491e44c310..b083d4e66230b1e365fc983a0d82eb6f76d0bedd 100644 (file)
@@ -309,7 +309,7 @@ static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
        return retval;
 }
 
-static long sctp_get_port_local(struct sock *, union sctp_addr *);
+static int sctp_get_port_local(struct sock *, union sctp_addr *);
 
 /* Verify this is a valid sockaddr. */
 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
@@ -399,9 +399,8 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
         * detection.
         */
        addr->v4.sin_port = htons(snum);
-       if ((ret = sctp_get_port_local(sk, addr))) {
+       if (sctp_get_port_local(sk, addr))
                return -EADDRINUSE;
-       }
 
        /* Refresh ephemeral port.  */
        if (!bp->port)
@@ -413,11 +412,13 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
        ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
                                 SCTP_ADDR_SRC, GFP_ATOMIC);
 
-       /* Copy back into socket for getsockname() use. */
-       if (!ret) {
-               inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
-               sp->pf->to_sk_saddr(addr, sk);
+       if (ret) {
+               sctp_put_port(sk);
+               return ret;
        }
+       /* Copy back into socket for getsockname() use. */
+       inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
+       sp->pf->to_sk_saddr(addr, sk);
 
        return ret;
 }
@@ -7173,7 +7174,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
                val.spt_pathmaxrxt = trans->pathmaxrxt;
                val.spt_pathpfthld = trans->pf_retrans;
 
-               return 0;
+               goto out;
        }
 
        asoc = sctp_id2assoc(sk, val.spt_assoc_id);
@@ -7191,6 +7192,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
                val.spt_pathmaxrxt = sp->pathmaxrxt;
        }
 
+out:
        if (put_user(len, optlen) || copy_to_user(optval, &val, len))
                return -EFAULT;
 
@@ -7998,7 +8000,7 @@ static void sctp_unhash(struct sock *sk)
 static struct sctp_bind_bucket *sctp_bucket_create(
        struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
 
-static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
+static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 {
        struct sctp_sock *sp = sctp_sk(sk);
        bool reuse = (sk->sk_reuse || sp->reuse);
@@ -8108,7 +8110,7 @@ pp_found:
 
                        if (sctp_bind_addr_conflict(&ep2->base.bind_addr,
                                                    addr, sp2, sp)) {
-                               ret = (long)sk2;
+                               ret = 1;
                                goto fail_unlock;
                        }
                }
@@ -8180,7 +8182,7 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
        addr.v4.sin_port = htons(snum);
 
        /* Note: sk->sk_num gets filled in if ephemeral port request. */
-       return !!sctp_get_port_local(sk, &addr);
+       return sctp_get_port_local(sk, &addr);
 }
 
 /*
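
The sctp_do_bind() rework shown above narrows sctp_get_port_local() to an int result and, more importantly, releases the port reservation with sctp_put_port() when sctp_add_bind_addr() fails, so a half-completed bind no longer leaks the local port. A minimal user-space sketch of that reserve / commit / rollback ordering; reserve_port(), record_addr() and release_port() are invented stand-ins, not the kernel helpers:

#include <stdio.h>

/* Hypothetical stand-ins for sctp_get_port_local(), sctp_add_bind_addr()
 * and sctp_put_port(); they only model success/failure. */
static int reserve_port(int port)  { return port > 0 ? 0 : -1; }
static int record_addr(int port)   { return port != 7 ? 0 : -1; } /* pretend port 7 fails */
static void release_port(int port) { printf("released port %d\n", port); }

static int do_bind(int port)
{
        if (reserve_port(port))
                return -1;              /* -EADDRINUSE in the real code */

        if (record_addr(port)) {
                /* rollback: don't keep the port if the address list update failed */
                release_port(port);
                return -1;
        }

        /* commit: only now publish the bound address (the getsockname() view) */
        printf("bound to port %d\n", port);
        return 0;
}

int main(void)
{
        do_bind(80);    /* succeeds */
        do_bind(7);     /* record_addr() fails, the port is rolled back */
        return 0;
}

The shape is simply that every resource acquired on the way in is undone on the failure path before the error is returned.
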
index f0de323d15d60557255cf5572677dc9828a8953e..6c8f09c1ce512b4740ba384c01f2d5cf530cf844 100644 (file)
@@ -76,13 +76,11 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct smc_connection *conn = &smc->conn;
        struct sock *sk = &smc->sk;
-       bool noblock;
        long timeo;
        int rc = 0;
 
        /* similar to sk_stream_wait_memory */
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-       noblock = timeo ? false : true;
        add_wait_queue(sk_sleep(sk), &wait);
        while (1) {
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -97,8 +95,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
                        break;
                }
                if (!timeo) {
-                       if (noblock)
-                               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+                       /* ensure EPOLLOUT is subsequently generated */
+                       set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                        rc = -EAGAIN;
                        break;
                }
index d8679b6027e933a2130fafcecd12e57b9975c8db..a07b516e503a0a8252d0f3876f38df65b812da0d 100644 (file)
@@ -1970,6 +1970,7 @@ call_bind(struct rpc_task *task)
 static void
 call_bind_status(struct rpc_task *task)
 {
+       struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
        int status = -EIO;
 
        if (rpc_task_transmitted(task)) {
@@ -1977,14 +1978,15 @@ call_bind_status(struct rpc_task *task)
                return;
        }
 
-       if (task->tk_status >= 0) {
-               dprint_status(task);
+       dprint_status(task);
+       trace_rpc_bind_status(task);
+       if (task->tk_status >= 0)
+               goto out_next;
+       if (xprt_bound(xprt)) {
                task->tk_status = 0;
-               task->tk_action = call_connect;
-               return;
+               goto out_next;
        }
 
-       trace_rpc_bind_status(task);
        switch (task->tk_status) {
        case -ENOMEM:
                dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
@@ -2003,6 +2005,9 @@ call_bind_status(struct rpc_task *task)
                task->tk_rebind_retry--;
                rpc_delay(task, 3*HZ);
                goto retry_timeout;
+       case -ENOBUFS:
+               rpc_delay(task, HZ >> 2);
+               goto retry_timeout;
        case -EAGAIN:
                goto retry_timeout;
        case -ETIMEDOUT:
@@ -2026,7 +2031,6 @@ call_bind_status(struct rpc_task *task)
        case -ENETDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
-       case -ENOBUFS:
        case -EPIPE:
                dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
                                task->tk_pid, task->tk_status);
@@ -2043,7 +2047,9 @@ call_bind_status(struct rpc_task *task)
 
        rpc_call_rpcerror(task, status);
        return;
-
+out_next:
+       task->tk_action = call_connect;
+       return;
 retry_timeout:
        task->tk_status = 0;
        task->tk_action = call_bind;
@@ -2090,6 +2096,7 @@ call_connect(struct rpc_task *task)
 static void
 call_connect_status(struct rpc_task *task)
 {
+       struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
        struct rpc_clnt *clnt = task->tk_client;
        int status = task->tk_status;
 
@@ -2099,8 +2106,17 @@ call_connect_status(struct rpc_task *task)
        }
 
        dprint_status(task);
-
        trace_rpc_connect_status(task);
+
+       if (task->tk_status == 0) {
+               clnt->cl_stats->netreconn++;
+               goto out_next;
+       }
+       if (xprt_connected(xprt)) {
+               task->tk_status = 0;
+               goto out_next;
+       }
+
        task->tk_status = 0;
        switch (status) {
        case -ECONNREFUSED:
@@ -2117,8 +2133,6 @@ call_connect_status(struct rpc_task *task)
        case -ENETDOWN:
        case -ENETUNREACH:
        case -EHOSTUNREACH:
-       case -EADDRINUSE:
-       case -ENOBUFS:
        case -EPIPE:
                xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
                                            task->tk_rqstp->rq_connect_cookie);
@@ -2127,17 +2141,20 @@ call_connect_status(struct rpc_task *task)
                /* retry with existing socket, after a delay */
                rpc_delay(task, 3*HZ);
                /* fall through */
+       case -EADDRINUSE:
        case -ENOTCONN:
        case -EAGAIN:
        case -ETIMEDOUT:
                goto out_retry;
-       case 0:
-               clnt->cl_stats->netreconn++;
-               task->tk_action = call_transmit;
-               return;
+       case -ENOBUFS:
+               rpc_delay(task, HZ >> 2);
+               goto out_retry;
        }
        rpc_call_rpcerror(task, status);
        return;
+out_next:
+       task->tk_action = call_transmit;
+       return;
 out_retry:
        /* Check for timeouts before looping back to call_bind */
        task->tk_action = call_bind;
@@ -2365,7 +2382,7 @@ call_status(struct rpc_task *task)
        case -ECONNABORTED:
        case -ENOTCONN:
                rpc_force_rebind(clnt);
-               /* fall through */
+               break;
        case -EADDRINUSE:
                rpc_delay(task, 3*HZ);
                /* fall through */
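
Taken together, the call_bind_status() and call_connect_status() hunks above route their success paths through a common out_next label and pull -ENOBUFS out of the hard-failure groups, retrying it after rpc_delay(task, HZ >> 2), roughly a quarter of a second, on the assumption that buffer exhaustion is transient. A rough user-space sketch of that retry-with-short-delay shape, with made-up rpc_step() and rpc_delay() helpers:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-ins for one RPC state-machine step and rpc_delay(). */
static int attempts;
static int rpc_step(void)              { return ++attempts < 3 ? -ENOBUFS : 0; }
static void rpc_delay(unsigned int ms) { usleep(ms * 1000); }

int main(void)
{
        for (;;) {
                int status = rpc_step();

                if (status == 0) {
                        printf("bound/connected after %d attempts\n", attempts);
                        return 0;       /* out_next: advance to the next state */
                }
                switch (status) {
                case -ENOBUFS:          /* transient: back off briefly and retry */
                case -EAGAIN:
                        rpc_delay(250); /* ~HZ >> 2 */
                        continue;
                default:                /* hard error: give up */
                        fprintf(stderr, "rpc error %d\n", status);
                        return 1;
                }
        }
}
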
index 783748dc5e6fbf59959380e024e99e42039d7956..2e71f5455c6cc95d9182b6a3f36b458f5d20a951 100644 (file)
@@ -1408,13 +1408,6 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
                        status = -EBADMSG;
                        goto out_dequeue;
                }
-               if (task->tk_ops->rpc_call_prepare_transmit) {
-                       task->tk_ops->rpc_call_prepare_transmit(task,
-                                       task->tk_calldata);
-                       status = task->tk_status;
-                       if (status < 0)
-                               goto out_dequeue;
-               }
                if (RPC_SIGNALLED(task)) {
                        status = -ERESTARTSYS;
                        goto out_dequeue;
index 44abc8e9c99024df9932d151fdc32ce4d4aa2820..241ed22744739dc5d2167ce7c4f108c2e1200b01 100644 (file)
@@ -223,7 +223,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
                       publ->key);
        }
 
-       kfree_rcu(p, rcu);
+       if (p)
+               kfree_rcu(p, rcu);
 }
 
 /**
index 4831ad745f912dd0e82e0a87b49414a175a28c68..327479ce69f53d5ea2dfe062fd83f25d69fa70b1 100644 (file)
@@ -2788,7 +2788,7 @@ static void reg_process_pending_hints(void)
 
        /* When last_request->processed becomes true this will be rescheduled */
        if (lr && !lr->processed) {
-               reg_process_hint(lr);
+               pr_debug("Pending regulatory request, waiting for it to be processed...\n");
                return;
        }
 
index d0e35b7b9e3502790cb6812f12824ff3826e7610..e74837824cea14070440c9cc673ecd0349144594 100644 (file)
@@ -233,25 +233,30 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
 
        switch (params->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
+               /* Extended Key ID can only be used with CCMP/GCMP ciphers */
+               if ((pairwise && key_idx) ||
+                   params->mode != NL80211_KEY_RX_TX)
+                       return -EINVAL;
+               break;
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_CCMP_256:
        case WLAN_CIPHER_SUITE_GCMP:
        case WLAN_CIPHER_SUITE_GCMP_256:
-               /* IEEE802.11-2016 allows only 0 and - when using Extended Key
-                * ID - 1 as index for pairwise keys.
+               /* IEEE802.11-2016 allows only 0 and - when supporting
+                * Extended Key ID - 1 as index for pairwise keys.
                 * @NL80211_KEY_NO_TX is only allowed for pairwise keys when
                 * the driver supports Extended Key ID.
                 * @NL80211_KEY_SET_TX can't be set when installing and
                 * validating a key.
                 */
-               if (params->mode == NL80211_KEY_NO_TX) {
-                       if (!wiphy_ext_feature_isset(&rdev->wiphy,
-                                                    NL80211_EXT_FEATURE_EXT_KEY_ID))
-                               return -EINVAL;
-                       else if (!pairwise || key_idx < 0 || key_idx > 1)
+               if ((params->mode == NL80211_KEY_NO_TX && !pairwise) ||
+                   params->mode == NL80211_KEY_SET_TX)
+                       return -EINVAL;
+               if (wiphy_ext_feature_isset(&rdev->wiphy,
+                                           NL80211_EXT_FEATURE_EXT_KEY_ID)) {
+                       if (pairwise && (key_idx < 0 || key_idx > 1))
                                return -EINVAL;
-               } else if ((pairwise && key_idx) ||
-                          params->mode == NL80211_KEY_SET_TX) {
+               } else if (pairwise && key_idx) {
                        return -EINVAL;
                }
                break;
index 83de74ca729a39bc22cd83ac3f1cb2e7b184db0f..688aac7a6943a91751e551ea6d09b479a4d30938 100644 (file)
@@ -365,7 +365,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
        umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
        if (!umem->pages) {
                err = -ENOMEM;
-               goto out_account;
+               goto out_pin;
        }
 
        for (i = 0; i < umem->npgs; i++)
@@ -373,6 +373,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 
        return 0;
 
+out_pin:
+       xdp_umem_unpin_pages(umem);
 out_account:
        xdp_umem_unaccount_pages(umem);
        return err;
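
The xdp_umem_reg() hunk above fixes an error-unwind ordering bug: when allocating umem->pages fails, the pages pinned just before must be unpinned ahead of undoing the memlock accounting, hence the new out_pin label in front of out_account. A small stand-alone illustration of the unwind-in-reverse (goto ladder) pattern; the step functions are invented for the example and the last one is forced to fail:

#include <stdio.h>

static int account(void)      { printf("account\n");   return 0; }
static void unaccount(void)   { printf("unaccount\n"); }
static int pin_pages(void)    { printf("pin\n");       return 0; }
static void unpin_pages(void) { printf("unpin\n"); }
static int alloc_table(void)  { printf("alloc\n");     return -1; } /* forced failure */

static int setup(void)
{
        int err;

        err = account();
        if (err)
                goto out;
        err = pin_pages();
        if (err)
                goto out_account;
        err = alloc_table();
        if (err)
                goto out_pin;   /* undo everything done so far, newest first */
        return 0;

out_pin:
        unpin_pages();
out_account:
        unaccount();
out:
        return err;
}

int main(void)
{
        return setup() ? 1 : 0;
}
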
index 74868f9d81fb2b2c0fdd98522544eabfad3689fe..2ab4859df55ac08fec69b81a2a8f1cac6585195f 100644 (file)
@@ -145,8 +145,6 @@ static int xfrmi_create(struct net_device *dev)
        if (err < 0)
                goto out;
 
-       strcpy(xi->p.name, dev->name);
-
        dev_hold(dev);
        xfrmi_link(xfrmn, xi);
 
@@ -177,7 +175,6 @@ static void xfrmi_dev_uninit(struct net_device *dev)
        struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);
 
        xfrmi_unlink(xfrmn, xi);
-       dev_put(xi->phydev);
        dev_put(dev);
 }
 
@@ -294,7 +291,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        if (tdev == dev) {
                stats->collisions++;
                net_warn_ratelimited("%s: Local routing loop detected!\n",
-                                    xi->p.name);
+                                    dev->name);
                goto tx_err_dst_release;
        }
 
@@ -364,7 +361,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
                goto tx_err;
        }
 
-       fl.flowi_oif = xi->phydev->ifindex;
+       fl.flowi_oif = xi->p.link;
 
        ret = xfrmi_xmit2(skb, dev, &fl);
        if (ret < 0)
@@ -505,7 +502,7 @@ static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
 
 static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
 {
-       struct net *net = dev_net(xi->dev);
+       struct net *net = xi->net;
        struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
        int err;
 
@@ -550,7 +547,7 @@ static int xfrmi_get_iflink(const struct net_device *dev)
 {
        struct xfrm_if *xi = netdev_priv(dev);
 
-       return xi->phydev->ifindex;
+       return xi->p.link;
 }
 
 
@@ -576,12 +573,14 @@ static void xfrmi_dev_setup(struct net_device *dev)
        dev->needs_free_netdev  = true;
        dev->priv_destructor    = xfrmi_dev_free;
        netif_keep_dst(dev);
+
+       eth_broadcast_addr(dev->broadcast);
 }
 
 static int xfrmi_dev_init(struct net_device *dev)
 {
        struct xfrm_if *xi = netdev_priv(dev);
-       struct net_device *phydev = xi->phydev;
+       struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
        int err;
 
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -596,13 +595,19 @@ static int xfrmi_dev_init(struct net_device *dev)
 
        dev->features |= NETIF_F_LLTX;
 
-       dev->needed_headroom = phydev->needed_headroom;
-       dev->needed_tailroom = phydev->needed_tailroom;
+       if (phydev) {
+               dev->needed_headroom = phydev->needed_headroom;
+               dev->needed_tailroom = phydev->needed_tailroom;
 
-       if (is_zero_ether_addr(dev->dev_addr))
-               eth_hw_addr_inherit(dev, phydev);
-       if (is_zero_ether_addr(dev->broadcast))
-               memcpy(dev->broadcast, phydev->broadcast, dev->addr_len);
+               if (is_zero_ether_addr(dev->dev_addr))
+                       eth_hw_addr_inherit(dev, phydev);
+               if (is_zero_ether_addr(dev->broadcast))
+                       memcpy(dev->broadcast, phydev->broadcast,
+                              dev->addr_len);
+       } else {
+               eth_hw_addr_random(dev);
+               eth_broadcast_addr(dev->broadcast);
+       }
 
        return 0;
 }
@@ -638,12 +643,6 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
        int err;
 
        xfrmi_netlink_parms(data, &p);
-
-       if (!tb[IFLA_IFNAME])
-               return -EINVAL;
-
-       nla_strlcpy(p.name, tb[IFLA_IFNAME], IFNAMSIZ);
-
        xi = xfrmi_locate(net, &p);
        if (xi)
                return -EEXIST;
@@ -652,13 +651,8 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
        xi->p = p;
        xi->net = net;
        xi->dev = dev;
-       xi->phydev = dev_get_by_index(net, p.link);
-       if (!xi->phydev)
-               return -ENODEV;
 
        err = xfrmi_create(dev);
-       if (err < 0)
-               dev_put(xi->phydev);
        return err;
 }
 
@@ -672,11 +666,11 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
                           struct netlink_ext_ack *extack)
 {
        struct xfrm_if *xi = netdev_priv(dev);
-       struct net *net = dev_net(dev);
-
-       xfrmi_netlink_parms(data, &xi->p);
+       struct net *net = xi->net;
+       struct xfrm_if_parms p;
 
-       xi = xfrmi_locate(net, &xi->p);
+       xfrmi_netlink_parms(data, &p);
+       xi = xfrmi_locate(net, &p);
        if (!xi) {
                xi = netdev_priv(dev);
        } else {
@@ -684,7 +678,7 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
                        return -EEXIST;
        }
 
-       return xfrmi_update(xi, &xi->p);
+       return xfrmi_update(xi, &p);
 }
 
 static size_t xfrmi_get_size(const struct net_device *dev)
@@ -715,7 +709,7 @@ static struct net *xfrmi_get_link_net(const struct net_device *dev)
 {
        struct xfrm_if *xi = netdev_priv(dev);
 
-       return dev_net(xi->phydev);
+       return xi->net;
 }
 
 static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
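
The xfrm_interface changes above stop caching an xi->phydev pointer (and its dev_hold()/dev_put() pairing) and instead resolve the lower device from its ifindex, xi->p.link, at the points where it is actually needed, which also lets the interface come up before the lower device exists. A tiny sketch of that store-an-index, resolve-on-use idea against an invented device table:

#include <stdio.h>

struct net_dev { int ifindex; char name[16]; };

static struct net_dev devtable[] = { { 2, "eth0" }, { 5, "wlan0" } };

/* Resolve an ifindex each time it is used, rather than caching a pointer. */
static struct net_dev *dev_by_index(int ifindex)
{
        for (size_t i = 0; i < sizeof(devtable) / sizeof(devtable[0]); i++)
                if (devtable[i].ifindex == ifindex)
                        return &devtable[i];
        return NULL;    /* the lower device may not exist yet */
}

int main(void)
{
        int link = 5;                           /* plays the role of xi->p.link */
        struct net_dev *phy = dev_by_index(link);

        printf("link %d -> %s\n", link, phy ? phy->name : "(absent)");
        return 0;
}
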
index 8ca637a72697491863bf2f6f606c7a669932847f..21e939235b3908ba3c363e2a570f41acd316173e 100644 (file)
@@ -912,6 +912,7 @@ restart:
                } else if (delta > 0) {
                        p = &parent->rb_right;
                } else {
+                       bool same_prefixlen = node->prefixlen == n->prefixlen;
                        struct xfrm_policy *tmp;
 
                        hlist_for_each_entry(tmp, &n->hhead, bydst) {
@@ -919,9 +920,11 @@ restart:
                                hlist_del_rcu(&tmp->bydst);
                        }
 
+                       node->prefixlen = prefixlen;
+
                        xfrm_policy_inexact_list_reinsert(net, node, family);
 
-                       if (node->prefixlen == n->prefixlen) {
+                       if (same_prefixlen) {
                                kfree_rcu(n, rcu);
                                return;
                        }
@@ -929,7 +932,6 @@ restart:
                        rb_erase(*p, new);
                        kfree_rcu(n, rcu);
                        n = node;
-                       n->prefixlen = prefixlen;
                        goto restart;
                }
        }
@@ -3269,7 +3271,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
        struct flowi4 *fl4 = &fl->u.ip4;
        int oif = 0;
 
-       if (skb_dst(skb))
+       if (skb_dst(skb) && skb_dst(skb)->dev)
                oif = skb_dst(skb)->dev->ifindex;
 
        memset(fl4, 0, sizeof(struct flowi4));
@@ -3387,7 +3389,7 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
 
        nexthdr = nh[nhoff];
 
-       if (skb_dst(skb))
+       if (skb_dst(skb) && skb_dst(skb)->dev)
                oif = skb_dst(skb)->dev->ifindex;
 
        memset(fl6, 0, sizeof(struct flowi6));
index 6410bd22fe387dc2ea358c3cffed046985066a46..03757cc60e06c83e5dae59d5ebcce6c912e62c73 100644 (file)
@@ -1,4 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
+ifdef CONFIG_KASAN
+CFLAGS_KASAN_NOSANITIZE := -fno-builtin
+KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
+endif
+
 ifdef CONFIG_KASAN_GENERIC
 
 ifdef CONFIG_KASAN_INLINE
@@ -7,8 +12,6 @@ else
        call_threshold := 0
 endif
 
-KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
-
 CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
 
 cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
@@ -45,7 +48,3 @@ CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
                $(instrumentation_flags)
 
 endif # CONFIG_KASAN_SW_TAGS
-
-ifdef CONFIG_KASAN
-CFLAGS_KASAN_NOSANITIZE := -fno-builtin
-endif
diff --git a/scripts/tools-support-relr.sh b/scripts/tools-support-relr.sh
new file mode 100755 (executable)
index 0000000..97a2c84
--- /dev/null
@@ -0,0 +1,16 @@
+#!/bin/sh -eu
+# SPDX-License-Identifier: GPL-2.0
+
+tmp_file=$(mktemp)
+trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
+
+cat << "END" | "$CC" -c -x c - -o $tmp_file.o >/dev/null 2>&1
+void *p = &p;
+END
+"$LD" $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
+
+# Despite printing an error message, GNU nm still exits with exit code 0 if it
+# sees a relr section. So we need to check that nothing is printed to stderr.
+test -z "$("$NM" $tmp_file 2>&1 >/dev/null)"
+
+"$OBJCOPY" -O binary $tmp_file $tmp_file.bin
index 7325f382dbf43b5660d0f3938e0ce18b737bed6e..957b9e3e14924dff2c3ad5d8e647d77f9eff6860 100644 (file)
@@ -595,7 +595,7 @@ struct key *request_key_and_link(struct key_type *type,
 
        key = check_cached_key(&ctx);
        if (key)
-               return key;
+               goto error_free;
 
        /* search all the process keyrings for a key */
        rcu_read_lock();
index e73ec040e2509494d3d123ad8c47419b747a33f7..ecba39c93fd91f887026c3f9c8f6bc3270fc9225 100644 (file)
@@ -66,6 +66,9 @@ static void request_key_auth_describe(const struct key *key,
 {
        struct request_key_auth *rka = dereference_key_rcu(key);
 
+       if (!rka)
+               return;
+
        seq_puts(m, "key:");
        seq_puts(m, key->description);
        if (key_is_positive(key))
@@ -83,6 +86,9 @@ static long request_key_auth_read(const struct key *key,
        size_t datalen;
        long ret;
 
+       if (!rka)
+               return -EKEYREVOKED;
+
        datalen = rka->callout_len;
        ret = datalen;
 
index 7737b2670064dcd1b8c13af1b8b69a0ab5cd7997..6d9592f0ae1d53c42b3d7d59bfdc462f39a34f77 100644 (file)
@@ -1835,8 +1835,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
        if (cptr->type == USER_CLIENT) {
                info->input_pool = cptr->data.user.fifo_pool_size;
                info->input_free = info->input_pool;
-               if (cptr->data.user.fifo)
-                       info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
+               info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
        } else {
                info->input_pool = 0;
                info->input_free = 0;
index ea69261f269aa4be262cffe736d876c6136b2d78..eaaa8b5830bb6471616c2c4e0458ab469e6c4c85 100644 (file)
@@ -263,3 +263,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
 
        return 0;
 }
+
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
+{
+       unsigned long flags;
+       int cells;
+
+       if (!f)
+               return 0;
+
+       snd_use_lock_use(&f->use_lock);
+       spin_lock_irqsave(&f->lock, flags);
+       cells = snd_seq_unused_cells(f->pool);
+       spin_unlock_irqrestore(&f->lock, flags);
+       snd_use_lock_free(&f->use_lock);
+       return cells;
+}
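
snd_seq_fifo_unused_cells(), added above, tolerates a NULL fifo and samples the pool's unused-cell count only while holding the fifo's use lock and spinlock, so the caller in snd_seq_ioctl_get_client_pool() no longer peeks at the pool unprotected. A loosely analogous user-space sketch using a pthread mutex; the struct and counter are invented for illustration:

#include <pthread.h>
#include <stdio.h>

struct fifo {
        pthread_mutex_t lock;
        int unused_cells;
};

/* Read a counter that other threads may update: tolerate a NULL fifo and
 * hold the lock only for the read itself. */
static int fifo_unused_cells(struct fifo *f)
{
        int cells;

        if (!f)
                return 0;

        pthread_mutex_lock(&f->lock);
        cells = f->unused_cells;
        pthread_mutex_unlock(&f->lock);
        return cells;
}

int main(void)
{
        struct fifo f = { PTHREAD_MUTEX_INITIALIZER, 42 };

        printf("%d\n", fifo_unused_cells(&f));
        printf("%d\n", fifo_unused_cells(NULL));
        return 0;
}
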
index edc68743943d7249bfc46c2a874c5d4602362644..b56a7b897c9c313f9b91be1069c1a84da2866132 100644 (file)
@@ -53,5 +53,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
 /* resize pool in fifo */
 int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
 
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
 
 #endif
index 9ea39348cdf5623db8c1a8a73fd9a26ecadee095..7c6d1c277d4d9e52a107c8079ff9f5a3254b4ac9 100644 (file)
@@ -248,7 +248,7 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
                unsigned int channels = params_channels(hw_params);
 
                mutex_lock(&oxfw->mutex);
-               err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream,
+               err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream,
                                                     rate, channels);
                if (err >= 0)
                        ++oxfw->substreams_count;
index 92390d457567effa041306600d1bc6c5962cad05..18e6546b44675c7f7deee917f3066b828896cb88 100644 (file)
@@ -824,6 +824,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
        while (id >= 0) {
                const struct hda_fixup *fix = codec->fixup_list + id;
 
+               if (++depth > 10)
+                       break;
                if (fix->chained_before)
                        apply_fixup(codec, fix->chain_id, action, depth + 1);
 
@@ -863,8 +865,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
                }
                if (!fix->chained || fix->chained_before)
                        break;
-               if (++depth > 10)
-                       break;
                id = fix->chain_id;
        }
 }
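
The apply_fixup() hunks above move the ++depth > 10 check to the top of the loop, so every step of a fixup chain, including entries applied via chained_before, counts toward the recursion limit and a malformed or circular chain cannot spin forever. A small sketch of the same depth-capped chain walk over an invented chain table; the cycle below is cut off after ten steps:

#include <stdio.h>

#define MAX_DEPTH 10

/* Each entry may chain to another; follow the chain but bail out once the
 * depth limit is hit, checking at the top so every step is counted. */
static void apply_chain(const int *chain_ids, int id, int depth)
{
        while (id >= 0) {
                if (++depth > MAX_DEPTH)
                        break;
                printf("apply fixup %d (depth %d)\n", id, depth);
                id = chain_ids[id];
        }
}

int main(void)
{
        /* fixup 0 -> 1 -> 2 -> 0: a cycle the depth limit cuts off */
        const int chain_ids[] = { 1, 2, 0 };

        apply_chain(chain_ids, 0, 0);
        return 0;
}
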
index 5bf24fb819d28f12cb00012dd73aa65db65c60cc..10d502328b76eee5e480a777f691e29fd2884d23 100644 (file)
@@ -6009,7 +6009,8 @@ int snd_hda_gen_init(struct hda_codec *codec)
        if (spec->init_hook)
                spec->init_hook(codec);
 
-       snd_hda_apply_verbs(codec);
+       if (!spec->skip_verbs)
+               snd_hda_apply_verbs(codec);
 
        init_multi_out(codec);
        init_extra_out(codec);
index 5f199dcb0d188eb62ff84fac843e6a94bb027e57..fb9f1a90238bfb67d2a8079bf11b3dc012f86294 100644 (file)
@@ -243,6 +243,7 @@ struct hda_gen_spec {
        unsigned int indep_hp_enabled:1; /* independent HP enabled */
        unsigned int have_aamix_ctl:1;
        unsigned int hp_mic_jack_modes:1;
+       unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */
 
        /* additional mute flags (only effective with auto_mute_via_amp=1) */
        u64 mute_bits;
index 0d51823d7270eaf2629eabb15290e14d4839dec2..6d1fb7c11f17ed93b5ca4f2f12e295c3970b6ec4 100644 (file)
@@ -1175,6 +1175,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
        SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
        SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
+       SND_PCI_QUIRK(0x1102, 0x0027, "Sound Blaster Z", QUIRK_SBZ),
        SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ),
        SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
        SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
index 14298ef45b21bdf71db7dc864542ea8a7213331f..968d3caab6ac3e157f5eb4bee9f0990f30840d16 100644 (file)
@@ -611,18 +611,20 @@ static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
 
 /* update LED status via GPIO */
 static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask,
-                               bool enabled)
+                               bool led_on)
 {
        struct conexant_spec *spec = codec->spec;
        unsigned int oldval = spec->gpio_led;
 
        if (spec->mute_led_polarity)
-               enabled = !enabled;
+               led_on = !led_on;
 
-       if (enabled)
-               spec->gpio_led &= ~mask;
-       else
+       if (led_on)
                spec->gpio_led |= mask;
+       else
+               spec->gpio_led &= ~mask;
+       codec_dbg(codec, "mask:%d enabled:%d gpio_led:%d\n",
+                       mask, led_on, spec->gpio_led);
        if (spec->gpio_led != oldval)
                snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
                                    spec->gpio_led);
@@ -633,8 +635,8 @@ static void cxt_fixup_gpio_mute_hook(void *private_data, int enabled)
 {
        struct hda_codec *codec = private_data;
        struct conexant_spec *spec = codec->spec;
-
-       cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled);
+       /* muted -> LED on */
+       cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, !enabled);
 }
 
 /* turn on/off mic-mute LED via GPIO per capture hook */
@@ -656,7 +658,6 @@ static void cxt_fixup_mute_led_gpio(struct hda_codec *codec,
                { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 },
                {}
        };
-       codec_info(codec, "action: %d gpio_led: %d\n", action, spec->gpio_led);
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
                spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook;
index e333b3e30e316034420b1dd4e24e031d18812b3d..c1ddfd2fac522c8c5f663495ad3ad750f77b8120 100644 (file)
@@ -837,9 +837,11 @@ static int alc_init(struct hda_codec *codec)
        if (spec->init_hook)
                spec->init_hook(codec);
 
+       spec->gen.skip_verbs = 1; /* applied in below */
        snd_hda_gen_init(codec);
        alc_fix_pll(codec);
        alc_auto_init_amp(codec, spec->init_amp);
+       snd_hda_apply_verbs(codec); /* apply verbs here after own init */
 
        snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
 
@@ -5797,6 +5799,7 @@ enum {
        ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
        ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
        ALC299_FIXUP_PREDATOR_SPK,
+       ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6837,6 +6840,16 @@ static const struct hda_fixup alc269_fixups[] = {
                        { }
                }
        },
+       [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x14, 0x411111f0 }, /* disable confusing internal speaker */
+                       { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6979,6 +6992,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6995,6 +7009,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+       SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
        SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -7072,6 +7087,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8946,6 +8962,7 @@ static int patch_alc680(struct hda_codec *codec)
 static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
index 54ac2fd419259abd636fa14d3b0937a7d080918f..67f06c95eec5311b13902530b10e08b28cf333d6 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/acpi.h>
 #include <linux/device.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
index 33eb72545be6206a2362fe3e3ceca3ea03904c4e..05db311b579edf05c78bab6a71d12d14d7a1808e 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 #include <linux/dmi.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
index 4977b5a65eb89a5e9a45b1cb9176106fa8c0429e..9d657421730a7d1fd4354ff8e13338f0bce2803f 100644 (file)
@@ -8,6 +8,7 @@
  *          Mengdong Lin <mengdong.lin@intel.com>
  */
 
+#include <linux/gpio/consumer.h>
 #include <linux/input.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
index 2c03e0f6bf72d661226b73ddee90c087db8f4b6f..f70211e6b174fdc18553a222630c58643a7fed34 100644 (file)
@@ -550,6 +550,15 @@ int line6_init_pcm(struct usb_line6 *line6,
        line6pcm->volume_monitor = 255;
        line6pcm->line6 = line6;
 
+       spin_lock_init(&line6pcm->out.lock);
+       spin_lock_init(&line6pcm->in.lock);
+       line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
+
+       line6->line6pcm = line6pcm;
+
+       pcm->private_data = line6pcm;
+       pcm->private_free = line6_cleanup_pcm;
+
        line6pcm->max_packet_size_in =
                usb_maxpacket(line6->usbdev,
                        usb_rcvisocpipe(line6->usbdev, ep_read), 0);
@@ -562,15 +571,6 @@ int line6_init_pcm(struct usb_line6 *line6,
                return -EINVAL;
        }
 
-       spin_lock_init(&line6pcm->out.lock);
-       spin_lock_init(&line6pcm->in.lock);
-       line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
-
-       line6->line6pcm = line6pcm;
-
-       pcm->private_data = line6pcm;
-       pcm->private_free = line6_cleanup_pcm;
-
        err = line6_create_audio_out_urbs(line6pcm);
        if (err < 0)
                return err;
index b5927c3d5bc0b0657360ef8aa923c0aa344422bb..eceab19766dbc745e20c9cb09a82320c43c8a72a 100644 (file)
@@ -739,7 +739,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
                                       struct uac_mixer_unit_descriptor *desc)
 {
        int mu_channels;
-       void *c;
 
        if (desc->bLength < sizeof(*desc))
                return -EINVAL;
@@ -762,13 +761,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
                break;
        }
 
-       if (!mu_channels)
-               return 0;
-
-       c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
-       if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
-               return 0; /* no bmControls -> skip */
-
        return mu_channels;
 }
 
@@ -2009,6 +2001,31 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
  * Mixer Unit
  */
 
+/* check whether the given in/out overflows bmMixerControls matrix */
+static bool mixer_bitmap_overflow(struct uac_mixer_unit_descriptor *desc,
+                                 int protocol, int num_ins, int num_outs)
+{
+       u8 *hdr = (u8 *)desc;
+       u8 *c = uac_mixer_unit_bmControls(desc, protocol);
+       size_t rest; /* remaining bytes after bmMixerControls */
+
+       switch (protocol) {
+       case UAC_VERSION_1:
+       default:
+               rest = 1; /* iMixer */
+               break;
+       case UAC_VERSION_2:
+               rest = 2; /* bmControls + iMixer */
+               break;
+       case UAC_VERSION_3:
+               rest = 6; /* bmControls + wMixerDescrStr */
+               break;
+       }
+
+       /* overflow? */
+       return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0];
+}
+
 /*
  * build a mixer unit control
  *
@@ -2137,6 +2154,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
                if (err < 0)
                        return err;
                num_ins += iterm.channels;
+               if (mixer_bitmap_overflow(desc, state->mixer->protocol,
+                                         num_ins, num_outs))
+                       break;
                for (; ich < num_ins; ich++) {
                        int och, ich_has_controls = 0;
 
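
mixer_bitmap_overflow(), added a few hunks above, verifies that the bmMixerControls bitmap, one bit per (input channel, output channel) pair rounded up to whole bytes, plus the protocol-dependent trailing bytes (iMixer for UAC1, bmControls and iMixer for UAC2, bmControls and wMixerDescrStr for UAC3), still fits within the descriptor's declared bLength. The same arithmetic checked in isolation; the offsets and sizes below are made up for illustration:

#include <stdio.h>

/* Simplified: does a num_ins x num_outs control bitmap starting at byte
 * offset bm_off, followed by `rest` trailing bytes, fit in a descriptor of
 * blength bytes? Mirrors the (num_ins * num_outs + 7) / 8 term in the patch. */
static int bitmap_overflows(int blength, int bm_off,
                            int num_ins, int num_outs, int rest)
{
        int bitmap_bytes = (num_ins * num_outs + 7) / 8;

        return bm_off + bitmap_bytes + rest > blength;
}

int main(void)
{
        /* e.g. 4 inputs x 2 outputs -> 8 bits -> 1 byte, plus 1 trailing byte */
        printf("fits: %d\n", !bitmap_overflows(13, 10, 4, 2, 1));  /* 10+1+1 <= 13 */
        printf("fits: %d\n", !bitmap_overflows(12, 10, 9, 2, 1));  /* 10+3+1 >  12 */
        return 0;
}
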
index 199fa157a411e4731fa56e477f0953b2c8fd68ea..27dcb3743690f7c29764fb978823f0f37ad9c977 100644 (file)
@@ -1155,17 +1155,17 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
 {
        struct usb_mixer_interface *mixer;
        struct usb_mixer_elem_info *cval;
-       int unitid = 12; /* SamleRate ExtensionUnit ID */
+       int unitid = 12; /* SampleRate ExtensionUnit ID */
 
        list_for_each_entry(mixer, &chip->mixer_list, list) {
-               cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
-               if (cval) {
+               if (mixer->id_elems[unitid]) {
+                       cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
                        snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
                                                    cval->control << 8,
                                                    samplerate_id);
                        snd_usb_mixer_notify_id(mixer, unitid);
+                       break;
                }
-               break;
        }
 }
 
index 75b96929f76c5ee73cfb159a7f0ef557326f43fd..e4bbf79de956e1d8958d02ead1c907c5bd7ea2bc 100644 (file)
@@ -339,6 +339,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
                ep = 0x81;
                ifnum = 2;
                goto add_sync_ep_from_ifnum;
+       case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
        case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
                ep = 0x81;
                ifnum = 1;
index 66f04a4846a587e8e48bf3a599082ee48c452c19..43fdbbfe41bb24eef44e91c984c4dd30f41a6798 100644 (file)
@@ -363,7 +363,9 @@ static int do_show(int argc, char **argv)
                if (fd < 0)
                        return -1;
 
-               return show_prog(fd);
+               err = show_prog(fd);
+               close(fd);
+               return err;
        }
 
        if (argc)
index 045f5f7d68ab5d9836a3abe5170ab14ad172b379..13f1e8b9ac525ddf44793541551acde4f8432e1c 100644 (file)
@@ -9,9 +9,10 @@ ifeq ("$(origin O)", "command line")
 endif
 
 turbostat : turbostat.c
-override CFLAGS +=     -Wall -I../../../include
+override CFLAGS +=     -O2 -Wall -I../../../include
 override CFLAGS +=     -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
 override CFLAGS +=     -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
+override CFLAGS +=     -D_FORTIFY_SOURCE=2
 
 %: %.c
        @mkdir -p $(BUILD_OUTPUT)
index 75fc4fb9901cd20ec71c61537441565c08d6b1c3..b2a86438f074ac25ec14f7cb57ff3d4f45f416aa 100644 (file)
@@ -39,7 +39,6 @@ FILE *outf;
 int *fd_percpu;
 struct timeval interval_tv = {5, 0};
 struct timespec interval_ts = {5, 0};
-struct timespec one_msec = {0, 1000000};
 unsigned int num_iterations;
 unsigned int debug;
 unsigned int quiet;
@@ -60,6 +59,7 @@ unsigned int do_irtl_hsw;
 unsigned int units = 1000000;  /* MHz etc */
 unsigned int genuine_intel;
 unsigned int authentic_amd;
+unsigned int hygon_genuine;
 unsigned int max_level, max_extended_level;
 unsigned int has_invariant_tsc;
 unsigned int do_nhm_platform_info;
@@ -100,6 +100,7 @@ unsigned int has_hwp_epp;           /* IA32_HWP_REQUEST[bits 31:24] */
 unsigned int has_hwp_pkg;              /* IA32_HWP_REQUEST_PKG */
 unsigned int has_misc_feature_control;
 unsigned int first_counter_read = 1;
+int ignore_stdin;
 
 #define RAPL_PKG               (1 << 0)
                                        /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -166,6 +167,7 @@ size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size;
 struct thread_data {
        struct timeval tv_begin;
        struct timeval tv_end;
+       struct timeval tv_delta;
        unsigned long long tsc;
        unsigned long long aperf;
        unsigned long long mperf;
@@ -506,6 +508,7 @@ unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAU
 unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
 
 #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
+#define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME)
 #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
 #define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
 #define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
@@ -849,7 +852,6 @@ int dump_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
                outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
                outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
-               outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
                outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi);
                outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi);
                outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
@@ -911,7 +913,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (DO_BIC(BIC_TOD))
                outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec);
 
-       interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
+       interval_float = t->tv_delta.tv_sec + t->tv_delta.tv_usec/1000000.0;
 
        tsc = t->tsc * tsc_tweak;
 
@@ -1287,6 +1289,14 @@ delta_core(struct core_data *new, struct core_data *old)
        }
 }
 
+int soft_c1_residency_display(int bic)
+{
+       if (!DO_BIC(BIC_CPU_c1) || use_c1_residency_msr)
+               return 0;
+
+       return DO_BIC_READ(bic);
+}
+
 /*
  * old = new - old
  */
@@ -1309,6 +1319,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
         * over-write old w/ new so we can print end of interval values
         */
 
+       timersub(&new->tv_begin, &old->tv_begin, &old->tv_delta);
        old->tv_begin = new->tv_begin;
        old->tv_end = new->tv_end;
 
@@ -1322,7 +1333,8 @@ delta_thread(struct thread_data *new, struct thread_data *old,
 
        old->c1 = new->c1 - old->c1;
 
-       if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) {
+       if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
+           soft_c1_residency_display(BIC_Avg_MHz)) {
                if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
                        old->aperf = new->aperf - old->aperf;
                        old->mperf = new->mperf - old->mperf;
@@ -1404,6 +1416,8 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
        t->tv_begin.tv_usec = 0;
        t->tv_end.tv_sec = 0;
        t->tv_end.tv_usec = 0;
+       t->tv_delta.tv_sec = 0;
+       t->tv_delta.tv_usec = 0;
 
        t->tsc = 0;
        t->aperf = 0;
@@ -1573,6 +1587,9 @@ void compute_average(struct thread_data *t, struct core_data *c,
 
        for_all_cpus(sum_counters, t, c, p);
 
+       /* Use the global time delta for the average. */
+       average.threads.tv_delta = tv_delta;
+
        average.threads.tsc /= topo.num_cpus;
        average.threads.aperf /= topo.num_cpus;
        average.threads.mperf /= topo.num_cpus;
@@ -1714,7 +1731,7 @@ void get_apic_id(struct thread_data *t)
        if (!DO_BIC(BIC_X2APIC))
                return;
 
-       if (authentic_amd) {
+       if (authentic_amd || hygon_genuine) {
                unsigned int topology_extensions;
 
                if (max_extended_level < 0x8000001e)
@@ -1762,19 +1779,20 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        struct msr_counter *mp;
        int i;
 
-       gettimeofday(&t->tv_begin, (struct timezone *)NULL);
-
        if (cpu_migrate(cpu)) {
                fprintf(outf, "Could not migrate to CPU %d\n", cpu);
                return -1;
        }
 
+       gettimeofday(&t->tv_begin, (struct timezone *)NULL);
+
        if (first_counter_read)
                get_apic_id(t);
 retry:
        t->tsc = rdtsc();       /* we are running on local CPU of interest */
 
-       if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) {
+       if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
+           soft_c1_residency_display(BIC_Avg_MHz)) {
                unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
 
                /*
@@ -1851,20 +1869,20 @@ retry:
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (DO_BIC(BIC_CPU_c3)) {
+       if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) {
                if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
                        return -6;
        }
 
-       if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) {
+       if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
                if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
                        return -7;
-       } else if (do_knl_cstates) {
+       } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
                if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
                        return -7;
        }
 
-       if (DO_BIC(BIC_CPU_c7))
+       if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7))
                if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
                        return -8;
 
@@ -2912,6 +2930,7 @@ int snapshot_cpu_lpi_us(void)
        if (retval != 1) {
                fprintf(stderr, "Disabling Low Power Idle CPU output\n");
                BIC_NOT_PRESENT(BIC_CPU_LPI);
+               fclose(fp);
                return -1;
        }
 
@@ -2938,6 +2957,7 @@ int snapshot_sys_lpi_us(void)
        if (retval != 1) {
                fprintf(stderr, "Disabling Low Power Idle System output\n");
                BIC_NOT_PRESENT(BIC_SYS_LPI);
+               fclose(fp);
                return -1;
        }
        fclose(fp);
@@ -2985,8 +3005,6 @@ static void signal_handler (int signal)
                        fprintf(stderr, "SIGUSR1\n");
                break;
        }
-       /* make sure this manually-invoked interval is at least 1ms long */
-       nanosleep(&one_msec, NULL);
 }
 
 void setup_signal_handler(void)
@@ -3005,29 +3023,38 @@ void setup_signal_handler(void)
 
 void do_sleep(void)
 {
-       struct timeval select_timeout;
+       struct timeval tout;
+       struct timespec rest;
        fd_set readfds;
        int retval;
 
        FD_ZERO(&readfds);
        FD_SET(0, &readfds);
 
-       if (!isatty(fileno(stdin))) {
+       if (ignore_stdin) {
                nanosleep(&interval_ts, NULL);
                return;
        }
 
-       select_timeout = interval_tv;
-       retval = select(1, &readfds, NULL, NULL, &select_timeout);
+       tout = interval_tv;
+       retval = select(1, &readfds, NULL, NULL, &tout);
 
        if (retval == 1) {
                switch (getc(stdin)) {
                case 'q':
                        exit_requested = 1;
                        break;
+               case EOF:
+                       /*
+                        * 'stdin' is a pipe closed on the other end. There
+                        * won't be any further input.
+                        */
+                       ignore_stdin = 1;
+                       /* Sleep the rest of the time */
+                       rest.tv_sec = (tout.tv_sec + tout.tv_usec / 1000000);
+                       rest.tv_nsec = (tout.tv_usec % 1000000) * 1000;
+                       nanosleep(&rest, NULL);
                }
-               /* make sure this manually-invoked interval is at least 1ms long */
-               nanosleep(&one_msec, NULL);
        }
 }
 
@@ -3209,6 +3236,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
                break;
        case INTEL_FAM6_HASWELL_CORE:   /* HSW */
        case INTEL_FAM6_HASWELL_X:      /* HSX */
+       case INTEL_FAM6_HASWELL_ULT:    /* HSW */
        case INTEL_FAM6_HASWELL_GT3E:   /* HSW */
        case INTEL_FAM6_BROADWELL_CORE: /* BDW */
        case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3405,6 +3433,7 @@ int has_config_tdp(unsigned int family, unsigned int model)
        case INTEL_FAM6_IVYBRIDGE:      /* IVB */
        case INTEL_FAM6_HASWELL_CORE:   /* HSW */
        case INTEL_FAM6_HASWELL_X:      /* HSX */
+       case INTEL_FAM6_HASWELL_ULT:    /* HSW */
        case INTEL_FAM6_HASWELL_GT3E:   /* HSW */
        case INTEL_FAM6_BROADWELL_CORE: /* BDW */
        case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3803,6 +3832,7 @@ double get_tdp_amd(unsigned int family)
 {
        switch (family) {
        case 0x17:
+       case 0x18:
        default:
                /* This is the max stock TDP of HEDT/Server Fam17h chips */
                return 250.0;
@@ -3841,6 +3871,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model)
        case INTEL_FAM6_SANDYBRIDGE:
        case INTEL_FAM6_IVYBRIDGE:
        case INTEL_FAM6_HASWELL_CORE:   /* HSW */
+       case INTEL_FAM6_HASWELL_ULT:    /* HSW */
        case INTEL_FAM6_HASWELL_GT3E:   /* HSW */
        case INTEL_FAM6_BROADWELL_CORE: /* BDW */
        case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3982,6 +4013,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model)
 
        switch (family) {
        case 0x17: /* Zen, Zen+ */
+       case 0x18: /* Hygon Dhyana */
                do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
                if (rapl_joules) {
                        BIC_PRESENT(BIC_Pkg_J);
@@ -4002,7 +4034,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model)
        rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
        rapl_power_units = ldexp(1.0, -(msr & 0xf));
 
-       tdp = get_tdp_amd(model);
+       tdp = get_tdp_amd(family);
 
        rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
        if (!quiet)
@@ -4018,7 +4050,7 @@ void rapl_probe(unsigned int family, unsigned int model)
 {
        if (genuine_intel)
                rapl_probe_intel(family, model);
-       if (authentic_amd)
+       if (authentic_amd || hygon_genuine)
                rapl_probe_amd(family, model);
 }
 
@@ -4032,6 +4064,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
 
        switch (model) {
        case INTEL_FAM6_HASWELL_CORE:   /* HSW */
+       case INTEL_FAM6_HASWELL_ULT:    /* HSW */
        case INTEL_FAM6_HASWELL_GT3E:   /* HSW */
                do_gfx_perf_limit_reasons = 1;
        case INTEL_FAM6_HASWELL_X:      /* HSX */
@@ -4251,6 +4284,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
        case INTEL_FAM6_IVYBRIDGE_X:    /* IVB Xeon */
        case INTEL_FAM6_HASWELL_CORE:   /* HSW */
        case INTEL_FAM6_HASWELL_X:      /* HSW */
+       case INTEL_FAM6_HASWELL_ULT:    /* HSW */
        case INTEL_FAM6_HASWELL_GT3E:   /* HSW */
        case INTEL_FAM6_BROADWELL_CORE: /* BDW */
        case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -4267,7 +4301,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
 }
 
 /*
- * HSW adds support for additional MSRs:
+ * HSW ULT added support for C8/C9/C10 MSRs:
  *
  * MSR_PKG_C8_RESIDENCY                0x00000630
  * MSR_PKG_C9_RESIDENCY                0x00000631
@@ -4278,13 +4312,13 @@ int has_snb_msrs(unsigned int family, unsigned int model)
  * MSR_PKGC10_IRTL             0x00000635
  *
  */
-int has_hsw_msrs(unsigned int family, unsigned int model)
+int has_c8910_msrs(unsigned int family, unsigned int model)
 {
        if (!genuine_intel)
                return 0;
 
        switch (model) {
-       case INTEL_FAM6_HASWELL_CORE:
+       case INTEL_FAM6_HASWELL_ULT:    /* HSW */
        case INTEL_FAM6_BROADWELL_CORE: /* BDW */
        case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
        case INTEL_FAM6_CANNONLAKE_MOBILE:      /* CNL */
@@ -4568,9 +4602,6 @@ unsigned int intel_model_duplicates(unsigned int model)
        case INTEL_FAM6_XEON_PHI_KNM:
                return INTEL_FAM6_XEON_PHI_KNL;
 
-       case INTEL_FAM6_HASWELL_ULT:
-               return INTEL_FAM6_HASWELL_CORE;
-
        case INTEL_FAM6_BROADWELL_X:
        case INTEL_FAM6_BROADWELL_XEON_D:       /* BDX-DE */
                return INTEL_FAM6_BROADWELL_X;
@@ -4582,7 +4613,11 @@ unsigned int intel_model_duplicates(unsigned int model)
                return INTEL_FAM6_SKYLAKE_MOBILE;
 
        case INTEL_FAM6_ICELAKE_MOBILE:
+       case INTEL_FAM6_ICELAKE_NNPI:
                return INTEL_FAM6_CANNONLAKE_MOBILE;
+
+       case INTEL_FAM6_ATOM_TREMONT_X:
+               return INTEL_FAM6_ATOM_GOLDMONT_X;
        }
        return model;
 }
@@ -4600,6 +4635,8 @@ void process_cpuid()
                genuine_intel = 1;
        else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
                authentic_amd = 1;
+       else if (ebx == 0x6f677948 && ecx == 0x656e6975 && edx == 0x6e65476e)
+               hygon_genuine = 1;
 
        if (!quiet)
                fprintf(outf, "CPUID(0): %.4s%.4s%.4s ",
@@ -4820,12 +4857,12 @@ void process_cpuid()
                BIC_NOT_PRESENT(BIC_CPU_c7);
                BIC_NOT_PRESENT(BIC_Pkgpc7);
        }
-       if (has_hsw_msrs(family, model)) {
+       if (has_c8910_msrs(family, model)) {
                BIC_PRESENT(BIC_Pkgpc8);
                BIC_PRESENT(BIC_Pkgpc9);
                BIC_PRESENT(BIC_Pkgpc10);
        }
-       do_irtl_hsw = has_hsw_msrs(family, model);
+       do_irtl_hsw = has_c8910_msrs(family, model);
        if (has_skl_msrs(family, model)) {
                BIC_PRESENT(BIC_Totl_c0);
                BIC_PRESENT(BIC_Any_c0);
@@ -5123,7 +5160,7 @@ int initialize_counters(int cpu_id)
 
 void allocate_output_buffer()
 {
-       output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
+       output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
        outp = output_buffer;
        if (outp == NULL)
                err(-1, "calloc output buffer");
@@ -5269,7 +5306,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 19.03.20"
+       fprintf(outf, "turbostat version 19.08.31"
                " - Len Brown <lenb@kernel.org>\n");
 }
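
Among the turbostat changes above, do_sleep() now copes with stdin being a closed pipe: when getc() returns EOF it sets ignore_stdin, sleeps out the remainder of the interval, and skips select() on later iterations, while the old unconditional 1 ms nanosleep after a manually triggered interval is gone. A cut-down user-space sketch of that select-with-timeout loop; note that relying on select() to report the unslept time in its timeout argument is Linux-specific behaviour:

#include <stdio.h>
#include <sys/select.h>
#include <time.h>

static int ignore_stdin;

/* Sleep for `interval`, but wake early if input arrives on stdin. If stdin
 * turns out to be a closed pipe (EOF), remember that and sleep the rest. */
static void do_sleep(struct timeval interval)
{
        struct timeval tout = interval;
        struct timespec rest;
        fd_set readfds;

        if (ignore_stdin) {
                rest.tv_sec = interval.tv_sec;
                rest.tv_nsec = interval.tv_usec * 1000;
                nanosleep(&rest, NULL);
                return;
        }

        FD_ZERO(&readfds);
        FD_SET(0, &readfds);

        if (select(1, &readfds, NULL, NULL, &tout) == 1 && getc(stdin) == EOF) {
                ignore_stdin = 1;
                /* on Linux, select() leaves the unslept time in tout */
                rest.tv_sec = tout.tv_sec + tout.tv_usec / 1000000;
                rest.tv_nsec = (tout.tv_usec % 1000000) * 1000;
                nanosleep(&rest, NULL);
        }
}

int main(void)
{
        struct timeval five = { 5, 0 };

        do_sleep(five);
        puts("interval elapsed (or input arrived)");
        return 0;
}
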
 
index 1fdeef864e7c1b43a97ad5b3d8a47523f98a5027..666b325a62a2299aba039fcbdd3ffae6ba26dbfc 100644 (file)
@@ -9,8 +9,9 @@ ifeq ("$(origin O)", "command line")
 endif
 
 x86_energy_perf_policy : x86_energy_perf_policy.c
-override CFLAGS +=     -Wall -I../../../include
+override CFLAGS +=     -O2 -Wall -I../../../include
 override CFLAGS +=     -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
+override CFLAGS +=     -D_FORTIFY_SOURCE=2
 
 %: %.c
        @mkdir -p $(BUILD_OUTPUT)
index 17db1c3af4d0bb3901482bbfed9e2cff8bc8cb6f..78c6361898b189a57414d3e10f05de23e1da6f81 100644 (file)
@@ -40,7 +40,7 @@ in the same processor package.
 Hardware P-States (HWP) are effectively an expansion of hardware
 P-state control from the opportunistic turbo-mode P-state range
 to include the entire range of available P-states.
-On Broadwell Xeon, the initial HWP implementation, EBP influenced HWP.
+On Broadwell Xeon, the initial HWP implementation, EPB influenced HWP.
 That influence was removed in subsequent generations,
 where it was moved to the
 Energy_Performance_Preference (EPP) field in
index 34a796b303fe2bfecd731d5b1f513e257637e663..3fe1eed900d414169fb99d00787e1be710c7270b 100644 (file)
@@ -545,7 +545,7 @@ void cmdline(int argc, char **argv)
 
        progname = argv[0];
 
-       while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw",
+       while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:",
                                long_options, &option_index)) != -1) {
                switch (opt) {
                case 'a':
@@ -1259,6 +1259,15 @@ void probe_dev_msr(void)
                if (system("/sbin/modprobe msr > /dev/null 2>&1"))
                        err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
 }
+
+static void get_cpuid_or_exit(unsigned int leaf,
+                            unsigned int *eax, unsigned int *ebx,
+                            unsigned int *ecx, unsigned int *edx)
+{
+       if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
+               errx(1, "Processor not supported\n");
+}
+
 /*
  * early_cpuid()
  * initialize turbo_is_enabled, has_hwp, has_epb
@@ -1266,15 +1275,10 @@ void probe_dev_msr(void)
  */
 void early_cpuid(void)
 {
-       unsigned int eax, ebx, ecx, edx, max_level;
+       unsigned int eax, ebx, ecx, edx;
        unsigned int fms, family, model;
 
-       __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
-
-       if (max_level < 6)
-               errx(1, "Processor not supported\n");
-
-       __get_cpuid(1, &fms, &ebx, &ecx, &edx);
+       get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
        family = (fms >> 8) & 0xf;
        model = (fms >> 4) & 0xf;
        if (family == 6 || family == 0xf)
@@ -1288,7 +1292,7 @@ void early_cpuid(void)
                bdx_highest_ratio = msr & 0xFF;
        }
 
-       __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+       get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
        turbo_is_enabled = (eax >> 1) & 1;
        has_hwp = (eax >> 7) & 1;
        has_epb = (ecx >> 3) & 1;
@@ -1306,7 +1310,7 @@ void parse_cpuid(void)
 
        eax = ebx = ecx = edx = 0;
 
-       __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
+       get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx);
 
        if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
                genuine_intel = 1;
@@ -1315,7 +1319,7 @@ void parse_cpuid(void)
                fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
                        (char *)&ebx, (char *)&edx, (char *)&ecx);
 
-       __get_cpuid(1, &fms, &ebx, &ecx, &edx);
+       get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
        family = (fms >> 8) & 0xf;
        model = (fms >> 4) & 0xf;
        stepping = fms & 0xf;
@@ -1340,7 +1344,7 @@ void parse_cpuid(void)
                errx(1, "CPUID: no MSR");
 
 
-       __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+       get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
        /* turbo_is_enabled already set */
        /* has_hwp already set */
        has_hwp_notify = eax & (1 << 8);
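
The new get_cpuid_or_exit() wrapper can drop the separate max_level probe because __get_cpuid() itself returns 0 when the requested leaf is above the processor's maximum, so a single check per call is enough. A standalone sketch of the same pattern, using the leaf-0x6 HWP bit checked above (the function and message names here are illustrative):

    #include <cpuid.h>
    #include <err.h>

    static void cpuid_or_die(unsigned int leaf, unsigned int *eax,
                             unsigned int *ebx, unsigned int *ecx,
                             unsigned int *edx)
    {
            /* __get_cpuid() returns 0 if the leaf is unsupported. */
            if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
                    errx(1, "CPUID leaf 0x%x not supported", leaf);
    }

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            cpuid_or_die(0x6, &eax, &ebx, &ecx, &edx);
            /* CPUID.06H:EAX bit 7 advertises HWP, as checked above. */
            return (eax >> 7) & 1 ? 0 : 1;
    }
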
diff --git a/tools/testing/selftests/arm64/.gitignore b/tools/testing/selftests/arm64/.gitignore
new file mode 100644 (file)
index 0000000..e8fae8d
--- /dev/null
@@ -0,0 +1 @@
+tags_test
diff --git a/tools/testing/selftests/arm64/Makefile b/tools/testing/selftests/arm64/Makefile
new file mode 100644 (file)
index 0000000..a61b2e7
--- /dev/null
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# ARCH can be overridden by the user for cross compiling
+ARCH ?= $(shell uname -m 2>/dev/null || echo not)
+
+ifneq (,$(filter $(ARCH),aarch64 arm64))
+TEST_GEN_PROGS := tags_test
+TEST_PROGS := run_tags_test.sh
+endif
+
+include ../lib.mk
diff --git a/tools/testing/selftests/arm64/run_tags_test.sh b/tools/testing/selftests/arm64/run_tags_test.sh
new file mode 100755 (executable)
index 0000000..745f113
--- /dev/null
@@ -0,0 +1,12 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+echo "--------------------"
+echo "running tags test"
+echo "--------------------"
+./tags_test
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+else
+       echo "[PASS]"
+fi
diff --git a/tools/testing/selftests/arm64/tags_test.c b/tools/testing/selftests/arm64/tags_test.c
new file mode 100644 (file)
index 0000000..5701163
--- /dev/null
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <sys/prctl.h>
+#include <sys/utsname.h>
+
+#define SHIFT_TAG(tag)         ((uint64_t)(tag) << 56)
+#define SET_TAG(ptr, tag)      (((uint64_t)(ptr) & ~SHIFT_TAG(0xff)) | \
+                                       SHIFT_TAG(tag))
+
+int main(void)
+{
+       static int tbi_enabled = 0;
+       unsigned long tag = 0;
+       struct utsname *ptr;
+       int err;
+
+       if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0)
+               tbi_enabled = 1;
+       ptr = (struct utsname *)malloc(sizeof(*ptr));
+       if (tbi_enabled)
+               tag = 0x42;
+       ptr = (struct utsname *)SET_TAG(ptr, tag);
+       err = uname(ptr);
+       free(ptr);
+
+       return err;
+}
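
tags_test.c exercises the new arm64 tagged address ABI: once prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, ...) succeeds, a pointer carrying a non-zero tag in bits 63:56 may be passed to a syscall such as uname(). The sketch below only reuses the tagging macros to show the bit manipulation; it never dereferences the tagged value, so it runs on any architecture:

    #include <stdint.h>
    #include <stdio.h>

    #define SHIFT_TAG(tag)          ((uint64_t)(tag) << 56)
    #define SET_TAG(ptr, tag)       (((uint64_t)(ptr) & ~SHIFT_TAG(0xff)) | \
                                            SHIFT_TAG(tag))

    int main(void)
    {
            int x = 0;
            uint64_t tagged = SET_TAG(&x, 0x42);

            printf("untagged %p, tagged 0x%llx\n",
                   (void *)&x, (unsigned long long)tagged);
            return 0;
    }
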
index c085964e1d05a2d85e589be8565b025e9dda4665..96752ebd938fa7b941d167009c8d92d48f769df1 100644 (file)
@@ -34,6 +34,9 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
 BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
 TEST_GEN_FILES = $(BPF_OBJ_FILES)
 
+BTF_C_FILES = $(wildcard progs/btf_dump_test_case_*.c)
+TEST_FILES = $(BTF_C_FILES)
+
 # Also test sub-register code-gen if LLVM has eBPF v3 processor support which
 # contains both ALU32 and JMP32 instructions.
 SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
@@ -68,7 +71,8 @@ TEST_PROGS := test_kmod.sh \
 TEST_PROGS_EXTENDED := with_addr.sh \
        with_tunnels.sh \
        tcp_client.py \
-       tcp_server.py
+       tcp_server.py \
+       test_xdp_vlan.sh
 
 # Compile but not part of 'make run_tests'
 TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
index f7a0744db31e1bb3211e0b17ca73b7e57941e5b7..5dc109f4c0970fb492b955ec3a1edebbcc6d0932 100644 (file)
@@ -34,3 +34,4 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_IPV6_SIT=m
+CONFIG_BPF_JIT=y
index 8f850823d35f8362d0c6245724e059517618d2a5..6e75dd3cb14f97f033ff253e9a5f41af5f2c6d1c 100644 (file)
@@ -97,6 +97,13 @@ int test_btf_dump_case(int n, struct btf_dump_test_case *test_case)
        }
 
        snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name);
+       if (access(test_file, R_OK) == -1)
+               /*
+                * When the test is run with O=, kselftest copies TEST_FILES
+                * without preserving the directory structure.
+                */
+               snprintf(test_file, sizeof(test_file), "%s.c",
+                       test_case->name);
        /*
         * Diff test output and expected test output, contained between
         * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case.
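
The fallback exists because a kselftest build with O= copies TEST_FILES flat into the output directory, without the progs/ prefix, so the runner has to try both locations. A standalone sketch of the same try-then-fall-back lookup (the helper and file names here are illustrative):

    #include <stdio.h>
    #include <unistd.h>

    /* Prefer the in-tree path, fall back to the flat copy made for O= builds. */
    static void resolve_test_file(char *buf, size_t len, const char *name)
    {
            snprintf(buf, len, "progs/%s.c", name);
            if (access(buf, R_OK) == -1)
                    snprintf(buf, len, "%s.c", name);
    }

    int main(void)
    {
            char path[256];

            resolve_test_file(path, sizeof(path), "btf_dump_test_case_syntax");
            printf("using %s\n", path);
            return 0;
    }
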
index 2fc4625c1a1502b5926614874253fd86715242c7..65572900439118dc17522ae971f5daf91a84db6b 100644 (file)
@@ -20,9 +20,9 @@ int main(int argc, char **argv)
                BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                             BPF_FUNC_get_local_storage),
-               BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+               BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
                BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
-               BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+               BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
 
                BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
                BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
@@ -30,7 +30,7 @@ int main(int argc, char **argv)
                             BPF_FUNC_get_local_storage),
                BPF_MOV64_IMM(BPF_REG_1, 1),
                BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
-               BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+               BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
                BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
                BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
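
The widening from BPF_W to BPF_DW matters because the cgroup local storage value here is an 8-byte counter: a 32-bit load or store touches only half of it, and on big-endian machines the wrong half, while the existing BPF_STX_XADD already operated on the full 64 bits. A small standalone illustration of that hazard:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint64_t counter = 1;
            uint32_t half;

            /* The first four bytes are the low half on little-endian hosts
             * and the high half on big-endian ones. */
            memcpy(&half, &counter, sizeof(half));
            printf("64-bit value %llu seen through 32 bits: %u\n",
                   (unsigned long long)counter, half);
            return 0;
    }
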
index fb679ac3d4b07a4a68822369f9476ab0d06f7fd2..0e6652733462377cfe7bdc0fa57b5548c39f2e0f 100644 (file)
@@ -13,6 +13,7 @@
 #include <bpf/bpf.h>
 
 #include "cgroup_helpers.h"
+#include "bpf_endian.h"
 #include "bpf_rlimit.h"
 #include "bpf_util.h"
 
@@ -232,7 +233,8 @@ static struct sock_test tests[] = {
                        /* if (ip == expected && port == expected) */
                        BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                                    offsetof(struct bpf_sock, src_ip6[3])),
-                       BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+                                   __bpf_constant_ntohl(0x00000001), 4),
                        BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                                    offsetof(struct bpf_sock, src_port)),
                        BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
@@ -261,7 +263,8 @@ static struct sock_test tests[] = {
                        /* if (ip == expected && port == expected) */
                        BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                                    offsetof(struct bpf_sock, src_ip4)),
-                       BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+                                   __bpf_constant_ntohl(0x7F000001), 4),
                        BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                                    offsetof(struct bpf_sock, src_port)),
                        BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
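
bpf_sock.src_ip4 and src_ip6 are stored in network byte order, so the old literals (0x0100007F for 127.0.0.1) encoded the little-endian view of those bytes and broke on big-endian hosts; writing them as __bpf_constant_ntohl(0x7F000001) yields the right comparison value on either endianness. A standalone sketch of the two representations of 127.0.0.1:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t host = 0x7F000001;     /* 127.0.0.1 written the usual way */
            uint32_t net = htonl(host);     /* what a raw 32-bit load of the
                                             * network-order bytes yields here */

            printf("host order 0x%08x, as loaded from the wire 0x%08x\n",
                   host, net);
            return 0;
    }
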
index 8219a30853d283fa65baa1be4f16036d7cf542a8..0fc1b6d4b0f9c4db9084158fd22b755e7b53482f 100644 (file)
@@ -447,6 +447,59 @@ cleanup:
        return ret;
 }
 
+/*
+ * The test creates a cgroup and freezes it. Then it creates a child cgroup
+ * and populates it with a task. After that it checks that the child cgroup
+ * is frozen and the parent cgroup remains frozen too.
+ */
+static int test_cgfreezer_mkdir(const char *root)
+{
+       int ret = KSFT_FAIL;
+       char *parent, *child = NULL;
+       int pid;
+
+       parent = cg_name(root, "cg_test_mkdir_A");
+       if (!parent)
+               goto cleanup;
+
+       child = cg_name(parent, "cg_test_mkdir_B");
+       if (!child)
+               goto cleanup;
+
+       if (cg_create(parent))
+               goto cleanup;
+
+       if (cg_freeze_wait(parent, true))
+               goto cleanup;
+
+       if (cg_create(child))
+               goto cleanup;
+
+       pid = cg_run_nowait(child, child_fn, NULL);
+       if (pid < 0)
+               goto cleanup;
+
+       if (cg_wait_for_proc_count(child, 1))
+               goto cleanup;
+
+       if (cg_check_frozen(child, true))
+               goto cleanup;
+
+       if (cg_check_frozen(parent, true))
+               goto cleanup;
+
+       ret = KSFT_PASS;
+
+cleanup:
+       if (child)
+               cg_destroy(child);
+       free(child);
+       if (parent)
+               cg_destroy(parent);
+       free(parent);
+       return ret;
+}
+
 /*
  * The test creates two nested cgroups, freezes the parent
  * and removes the child. Then it checks that the parent cgroup
@@ -815,6 +868,7 @@ struct cgfreezer_test {
        T(test_cgfreezer_simple),
        T(test_cgfreezer_tree),
        T(test_cgfreezer_forkbomb),
+       T(test_cgfreezer_mkdir),
        T(test_cgfreezer_rmdir),
        T(test_cgfreezer_migrate),
        T(test_cgfreezer_ptrace),
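
test_cgfreezer_mkdir() checks freezer inheritance in cgroup v2: a child created under an already frozen parent must itself come up frozen. The cg_*() helpers belong to the cgroup selftest harness; underneath, the effective freezer state is reported by the "frozen" field of cgroup.events. A minimal sketch of that check, assuming a cgroup v2 mount at /sys/fs/cgroup (the path and group name are illustrative):

    #include <stdio.h>

    static int cgroup_is_frozen(const char *cg_path)
    {
            char path[4096], line[256];
            int frozen = 0;
            FILE *f;

            snprintf(path, sizeof(path), "%s/cgroup.events", cg_path);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            while (fgets(line, sizeof(line), f))
                    if (sscanf(line, "frozen %d", &frozen) == 1)
                            break;
            fclose(f);
            return frozen;
    }

    int main(void)
    {
            printf("frozen: %d\n",
                   cgroup_is_frozen("/sys/fs/cgroup/cg_test_mkdir_A"));
            return 0;
    }
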
index c5c93d5fb3ade4fbd24d668d227281712ae9894b..f9ebeac1e6f20e8fcafba752a916c87d61dfb7ca 100755 (executable)
@@ -212,6 +212,8 @@ check_output()
                        printf "        ${out}\n"
                        printf "    Expected:\n"
                        printf "        ${expected}\n\n"
+               else
+                       echo "      WARNING: Unexpected route entry"
                fi
        fi
 
@@ -274,7 +276,7 @@ ipv6_fcnal()
 
        run_cmd "$IP nexthop get id 52"
        log_test $? 0 "Get nexthop by id"
-       check_nexthop "id 52" "id 52 via 2001:db8:91::2 dev veth1"
+       check_nexthop "id 52" "id 52 via 2001:db8:91::2 dev veth1 scope link"
 
        run_cmd "$IP nexthop del id 52"
        log_test $? 0 "Delete nexthop by id"
@@ -479,12 +481,12 @@ ipv6_fcnal_runtime()
        run_cmd "$IP -6 nexthop add id 85 dev veth1"
        run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 85"
        log_test $? 0 "IPv6 route with device only nexthop"
-       check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 dev veth1"
+       check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 dev veth1 metric 1024 pref medium"
 
        run_cmd "$IP nexthop add id 123 group 81/85"
        run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 123"
        log_test $? 0 "IPv6 multipath route with nexthop mix - dev only + gw"
-       check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 nexthop via 2001:db8:91::2 dev veth1 nexthop dev veth1"
+       check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 123 metric 1024 nexthop via 2001:db8:91::2 dev veth1 weight 1 nexthop dev veth1 weight 1 pref medium"
 
        #
        # IPv6 route with v4 nexthop - not allowed
@@ -538,7 +540,7 @@ ipv4_fcnal()
 
        run_cmd "$IP nexthop get id 12"
        log_test $? 0 "Get nexthop by id"
-       check_nexthop "id 12" "id 12 via 172.16.1.2 src 172.16.1.1 dev veth1 scope link"
+       check_nexthop "id 12" "id 12 via 172.16.1.2 dev veth1 scope link"
 
        run_cmd "$IP nexthop del id 12"
        log_test $? 0 "Delete nexthop by id"
@@ -685,7 +687,7 @@ ipv4_withv6_fcnal()
        set +e
        run_cmd "$IP ro add 172.16.101.1/32 nhid 11"
        log_test $? 0 "IPv6 nexthop with IPv4 route"
-       check_route "172.16.101.1" "172.16.101.1 nhid 11 via ${lladdr} dev veth1"
+       check_route "172.16.101.1" "172.16.101.1 nhid 11 via inet6 ${lladdr} dev veth1"
 
        set -e
        run_cmd "$IP nexthop add id 12 via 172.16.1.2 dev veth1"
@@ -694,11 +696,11 @@ ipv4_withv6_fcnal()
        run_cmd "$IP ro replace 172.16.101.1/32 nhid 101"
        log_test $? 0 "IPv6 nexthop with IPv4 route"
 
-       check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
+       check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via inet6 ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
 
        run_cmd "$IP ro replace 172.16.101.1/32 via inet6 ${lladdr} dev veth1"
        log_test $? 0 "IPv4 route with IPv6 gateway"
-       check_route "172.16.101.1" "172.16.101.1 via ${lladdr} dev veth1"
+       check_route "172.16.101.1" "172.16.101.1 via inet6 ${lladdr} dev veth1"
 
        run_cmd "$IP ro replace 172.16.101.1/32 via inet6 2001:db8:50::1 dev veth1"
        log_test $? 2 "IPv4 route with invalid IPv6 gateway"
@@ -785,10 +787,10 @@ ipv4_fcnal_runtime()
        log_test $? 0 "IPv4 route with device only nexthop"
        check_route "172.16.101.1" "172.16.101.1 nhid 85 dev veth1"
 
-       run_cmd "$IP nexthop add id 122 group 21/85"
-       run_cmd "$IP ro replace 172.16.101.1/32 nhid 122"
+       run_cmd "$IP nexthop add id 123 group 21/85"
+       run_cmd "$IP ro replace 172.16.101.1/32 nhid 123"
        log_test $? 0 "IPv4 multipath route with nexthop mix - dev only + gw"
-       check_route "172.16.101.1" "172.16.101.1 nhid 85 nexthop via 172.16.1.2 dev veth1 nexthop dev veth1"
+       check_route "172.16.101.1" "172.16.101.1 nhid 123 nexthop via 172.16.1.2 dev veth1 weight 1 nexthop dev veth1 weight 1"
 
        #
        # IPv4 with IPv6
@@ -820,7 +822,7 @@ ipv4_fcnal_runtime()
        run_cmd "$IP ro replace 172.16.101.1/32 nhid 101"
        log_test $? 0 "IPv4 route with mixed v4-v6 multipath route"
 
-       check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
+       check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via inet6 ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
 
        run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
        log_test $? 0 "IPv6 nexthop with IPv4 route"
index 5445943bf07f222acaba83a253668ec06db032e5..7a1bf94c5bd38bc9837b87911a082a1ef287eee8 100755 (executable)
@@ -106,6 +106,13 @@ do_overlap()
     #
     # 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23.
     ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block
+
+    # similar to above: add policies (with partially random address), with shrinking prefixes.
+    for p in 29 28 27;do
+      for k in $(seq 1 32); do
+       ip -net $ns xfrm policy add src 10.253.1.$((RANDOM%255))/$p dst 10.254.1.$((RANDOM%255))/$p dir fwd priority $((200+k)) action block 2>/dev/null
+      done
+    done
 }
 
 do_esp_policy_get_check() {
index 16d84d117bc04c3f2ebf962c7cb684d84f691e6f..8d069490e17be4d84d1ff1178099d5b9de095dc0 100644 (file)
@@ -1,2 +1,4 @@
 pidfd_open_test
+pidfd_poll_test
 pidfd_test
+pidfd_wait
index 720b2d884b3c52560d86078c704b1696a89c5a9d..464c9b76148f1538643fd512e35461f327ea1249 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 CFLAGS += -g -I../../../../usr/include/ -lpthread
 
-TEST_GEN_PROGS := pidfd_test pidfd_open_test
+TEST_GEN_PROGS := pidfd_test pidfd_open_test pidfd_poll_test pidfd_wait
 
 include ../lib.mk
 
index 8452e910463f692282e040c6e29682678ac287e7..c6bc68329f4b66e5da2f6720c1f8fcd33d80ce48 100644 (file)
 
 #include "../kselftest.h"
 
+#ifndef P_PIDFD
+#define P_PIDFD 3
+#endif
+
+#ifndef CLONE_PIDFD
+#define CLONE_PIDFD 0x00001000
+#endif
+
+#ifndef __NR_pidfd_open
+#define __NR_pidfd_open -1
+#endif
+
+#ifndef __NR_pidfd_send_signal
+#define __NR_pidfd_send_signal -1
+#endif
+
+#ifndef __NR_clone3
+#define __NR_clone3 -1
+#endif
+
 /*
  * The kernel reserves 300 pids via RESERVED_PIDS in kernel/pid.c
  * That means, when it wraps around any pid < 300 will be skipped.
@@ -53,5 +73,15 @@ again:
        return WEXITSTATUS(status);
 }
 
+static inline int sys_pidfd_open(pid_t pid, unsigned int flags)
+{
+       return syscall(__NR_pidfd_open, pid, flags);
+}
+
+static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
+                                       unsigned int flags)
+{
+       return syscall(__NR_pidfd_send_signal, pidfd, sig, info, flags);
+}
 
 #endif /* __PIDFD_H */
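
Centralising sys_pidfd_open() and sys_pidfd_send_signal() in pidfd.h lets every pidfd selftest share them; the __NR_* fallbacks of -1 simply turn a missing syscall number into a runtime ENOSYS rather than a build failure. A standalone usage sketch, assuming headers that define the real syscall numbers (sending signal 0 only probes that the target process still exists):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int sys_pidfd_open(pid_t pid, unsigned int flags)
    {
            return syscall(__NR_pidfd_open, pid, flags);
    }

    static int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
                                     unsigned int flags)
    {
            return syscall(__NR_pidfd_send_signal, pidfd, sig, info, flags);
    }

    int main(void)
    {
            int pidfd = sys_pidfd_open(getpid(), 0);

            if (pidfd < 0) {
                    fprintf(stderr, "pidfd_open: %s\n", strerror(errno));
                    return 1;
            }
            if (sys_pidfd_send_signal(pidfd, 0, NULL, 0))
                    fprintf(stderr, "pidfd_send_signal: %s\n", strerror(errno));
            close(pidfd);
            return 0;
    }
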
index 0377133dd6dceab6748b206a6c6cfa55cfa32472..b9fe75fc3e517b75d9a70c9ac3f1d60f492392a0 100644 (file)
 #include "pidfd.h"
 #include "../kselftest.h"
 
-static inline int sys_pidfd_open(pid_t pid, unsigned int flags)
-{
-       return syscall(__NR_pidfd_open, pid, flags);
-}
-
 static int safe_int(const char *numstr, int *converted)
 {
        char *err = NULL;
diff --git a/tools/testing/selftests/pidfd/pidfd_poll_test.c b/tools/testing/selftests/pidfd/pidfd_poll_test.c
new file mode 100644 (file)
index 0000000..4b11544
--- /dev/null
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "pidfd.h"
+#include "../kselftest.h"
+
+static bool timeout;
+
+static void handle_alarm(int sig)
+{
+       timeout = true;
+}
+
+int main(int argc, char **argv)
+{
+       struct pollfd fds;
+       int iter, nevents;
+       int nr_iterations = 10000;
+
+       fds.events = POLLIN;
+
+       if (argc > 2)
+               ksft_exit_fail_msg("Unexpected command line argument\n");
+
+       if (argc == 2) {
+               nr_iterations = atoi(argv[1]);
+               if (nr_iterations <= 0)
+                       ksft_exit_fail_msg("invalid input parameter %s\n",
+                                       argv[1]);
+       }
+
+       ksft_print_msg("running pidfd poll test for %d iterations\n",
+               nr_iterations);
+
+       for (iter = 0; iter < nr_iterations; iter++) {
+               int pidfd;
+               int child_pid = fork();
+
+               if (child_pid < 0) {
+                       if (errno == EAGAIN) {
+                               iter--;
+                               continue;
+                       }
+                       ksft_exit_fail_msg(
+                               "%s - failed to fork a child process\n",
+                               strerror(errno));
+               }
+
+               if (child_pid == 0) {
+                       /* Child process just sleeps for a min and exits */
+                       sleep(60);
+                       exit(EXIT_SUCCESS);
+               }
+
+               /* Parent kills the child and waits for its death */
+               pidfd = sys_pidfd_open(child_pid, 0);
+               if (pidfd < 0)
+                       ksft_exit_fail_msg("%s - pidfd_open failed\n",
+                                       strerror(errno));
+
+               /* Setup 3 sec alarm - plenty of time */
+               if (signal(SIGALRM, handle_alarm) == SIG_ERR)
+                       ksft_exit_fail_msg("%s - signal failed\n",
+                                       strerror(errno));
+               alarm(3);
+
+               /* Send SIGKILL to the child */
+               if (sys_pidfd_send_signal(pidfd, SIGKILL, NULL, 0))
+                       ksft_exit_fail_msg("%s - pidfd_send_signal failed\n",
+                                       strerror(errno));
+
+               /* Wait for the death notification */
+               fds.fd = pidfd;
+               nevents = poll(&fds, 1, -1);
+
+               /* Check for error conditions */
+               if (nevents < 0)
+                       ksft_exit_fail_msg("%s - poll failed\n",
+                                       strerror(errno));
+
+               if (nevents != 1)
+                       ksft_exit_fail_msg("unexpected poll result: %d\n",
+                                       nevents);
+
+               if (!(fds.revents & POLLIN))
+                       ksft_exit_fail_msg(
+                               "unexpected event type received: 0x%x\n",
+                               fds.revents);
+
+               if (timeout)
+                       ksft_exit_fail_msg(
+                               "death notification wait timeout\n");
+
+               close(pidfd);
+               /* Wait for child to prevent zombies */
+               if (waitpid(child_pid, NULL, 0) < 0)
+                       ksft_exit_fail_msg("%s - waitpid failed\n",
+                                       strerror(errno));
+
+       }
+
+       ksft_test_result_pass("pidfd poll test: pass\n");
+       return ksft_exit_pass();
+}
index b632965e60eb0758a1197faedd44f0b3e7d5c829..7aff2d3b42c078217e4f85b392645a308237fb9b 100644 (file)
 #include "pidfd.h"
 #include "../kselftest.h"
 
-#ifndef __NR_pidfd_send_signal
-#define __NR_pidfd_send_signal -1
-#endif
-
 #define str(s) _str(s)
 #define _str(s) #s
 #define CHILD_THREAD_MIN_WAIT 3 /* seconds */
 
 #define MAX_EVENTS 5
 
-#ifndef CLONE_PIDFD
-#define CLONE_PIDFD 0x00001000
-#endif
-
 static pid_t pidfd_clone(int flags, int *pidfd, int (*fn)(void *))
 {
        size_t stack_size = 1024;
@@ -47,12 +39,6 @@ static pid_t pidfd_clone(int flags, int *pidfd, int (*fn)(void *))
 #endif
 }
 
-static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
-                                       unsigned int flags)
-{
-       return syscall(__NR_pidfd_send_signal, pidfd, sig, info, flags);
-}
-
 static int signal_received;
 
 static void set_signal_received_on_sigusr1(int sig)
diff --git a/tools/testing/selftests/pidfd/pidfd_wait.c b/tools/testing/selftests/pidfd/pidfd_wait.c
new file mode 100644 (file)
index 0000000..7079f8e
--- /dev/null
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sched.h>
+#include <string.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "pidfd.h"
+#include "../kselftest.h"
+
+#define ptr_to_u64(ptr) ((__u64)((uintptr_t)(ptr)))
+
+static pid_t sys_clone3(struct clone_args *args)
+{
+       return syscall(__NR_clone3, args, sizeof(struct clone_args));
+}
+
+static int sys_waitid(int which, pid_t pid, siginfo_t *info, int options,
+                     struct rusage *ru)
+{
+       return syscall(__NR_waitid, which, pid, info, options, ru);
+}
+
+static int test_pidfd_wait_simple(void)
+{
+       const char *test_name = "pidfd wait simple";
+       int pidfd = -1, status = 0;
+       pid_t parent_tid = -1;
+       struct clone_args args = {
+               .parent_tid = ptr_to_u64(&parent_tid),
+               .pidfd = ptr_to_u64(&pidfd),
+               .flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
+               .exit_signal = SIGCHLD,
+       };
+       int ret;
+       pid_t pid;
+       siginfo_t info = {
+               .si_signo = 0,
+       };
+
+       pidfd = open("/proc/self", O_DIRECTORY | O_RDONLY | O_CLOEXEC);
+       if (pidfd < 0)
+               ksft_exit_fail_msg("%s test: failed to open /proc/self %s\n",
+                                  test_name, strerror(errno));
+
+       pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
+       if (pid == 0)
+               ksft_exit_fail_msg(
+                       "%s test: succeeded to wait on invalid pidfd %s\n",
+                       test_name, strerror(errno));
+       close(pidfd);
+       pidfd = -1;
+
+       pidfd = open("/dev/null", O_RDONLY | O_CLOEXEC);
+       if (pidfd < 0)
+               ksft_exit_fail_msg("%s test: failed to open /dev/null %s\n",
+                                  test_name, strerror(errno));
+
+       pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
+       if (pid == 0)
+               ksft_exit_fail_msg(
+                       "%s test: succeeded to wait on invalid pidfd %s\n",
+                       test_name, strerror(errno));
+       close(pidfd);
+       pidfd = -1;
+
+       pid = sys_clone3(&args);
+       if (pid < 0)
+               ksft_exit_fail_msg("%s test: failed to create new process %s\n",
+                                  test_name, strerror(errno));
+
+       if (pid == 0)
+               exit(EXIT_SUCCESS);
+
+       pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
+       if (pid < 0)
+               ksft_exit_fail_msg(
+                       "%s test: failed to wait on process with pid %d and pidfd %d: %s\n",
+                       test_name, parent_tid, pidfd, strerror(errno));
+
+       if (!WIFEXITED(info.si_status) || WEXITSTATUS(info.si_status))
+               ksft_exit_fail_msg(
+                       "%s test: unexpected status received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, parent_tid, pidfd, strerror(errno));
+       close(pidfd);
+
+       if (info.si_signo != SIGCHLD)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_signo value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_signo, parent_tid, pidfd,
+                       strerror(errno));
+
+       if (info.si_code != CLD_EXITED)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_code value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_code, parent_tid, pidfd,
+                       strerror(errno));
+
+       if (info.si_pid != parent_tid)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_pid value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_pid, parent_tid, pidfd,
+                       strerror(errno));
+
+       ksft_test_result_pass("%s test: Passed\n", test_name);
+       return 0;
+}
+
+static int test_pidfd_wait_states(void)
+{
+       const char *test_name = "pidfd wait states";
+       int pidfd = -1, status = 0;
+       pid_t parent_tid = -1;
+       struct clone_args args = {
+               .parent_tid = ptr_to_u64(&parent_tid),
+               .pidfd = ptr_to_u64(&pidfd),
+               .flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
+               .exit_signal = SIGCHLD,
+       };
+       int ret;
+       pid_t pid;
+       siginfo_t info = {
+               .si_signo = 0,
+       };
+
+       pid = sys_clone3(&args);
+       if (pid < 0)
+               ksft_exit_fail_msg("%s test: failed to create new process %s\n",
+                                  test_name, strerror(errno));
+
+       if (pid == 0) {
+               kill(getpid(), SIGSTOP);
+               kill(getpid(), SIGSTOP);
+               exit(EXIT_SUCCESS);
+       }
+
+       ret = sys_waitid(P_PIDFD, pidfd, &info, WSTOPPED, NULL);
+       if (ret < 0)
+               ksft_exit_fail_msg(
+                       "%s test: failed to wait on WSTOPPED process with pid %d and pidfd %d: %s\n",
+                       test_name, parent_tid, pidfd, strerror(errno));
+
+       if (info.si_signo != SIGCHLD)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_signo value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_signo, parent_tid, pidfd,
+                       strerror(errno));
+
+       if (info.si_code != CLD_STOPPED)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_code value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_code, parent_tid, pidfd,
+                       strerror(errno));
+
+       if (info.si_pid != parent_tid)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_pid value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_pid, parent_tid, pidfd,
+                       strerror(errno));
+
+       ret = sys_pidfd_send_signal(pidfd, SIGCONT, NULL, 0);
+       if (ret < 0)
+               ksft_exit_fail_msg(
+                       "%s test: failed to send signal to process with pid %d and pidfd %d: %s\n",
+                       test_name, parent_tid, pidfd, strerror(errno));
+
+       ret = sys_waitid(P_PIDFD, pidfd, &info, WCONTINUED, NULL);
+       if (ret < 0)
+               ksft_exit_fail_msg(
+                       "%s test: failed to wait WCONTINUED on process with pid %d and pidfd %d: %s\n",
+                       test_name, parent_tid, pidfd, strerror(errno));
+
+       if (info.si_signo != SIGCHLD)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_signo value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_signo, parent_tid, pidfd,
+                       strerror(errno));
+
+       if (info.si_code != CLD_CONTINUED)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_code value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_code, parent_tid, pidfd,
+                       strerror(errno));
+
+       if (info.si_pid != parent_tid)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_pid value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_pid, parent_tid, pidfd,
+                       strerror(errno));
+
+       ret = sys_waitid(P_PIDFD, pidfd, &info, WUNTRACED, NULL);
+       if (ret < 0)
+               ksft_exit_fail_msg(
+                       "%s test: failed to wait on WUNTRACED process with pid %d and pidfd %d: %s\n",
+                       test_name, parent_tid, pidfd, strerror(errno));
+
+       if (info.si_signo != SIGCHLD)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_signo value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_signo, parent_tid, pidfd,
+                       strerror(errno));
+
+       if (info.si_code != CLD_STOPPED)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_code value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_code, parent_tid, pidfd,
+                       strerror(errno));
+
+       if (info.si_pid != parent_tid)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_pid value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_pid, parent_tid, pidfd,
+                       strerror(errno));
+
+       ret = sys_pidfd_send_signal(pidfd, SIGKILL, NULL, 0);
+       if (ret < 0)
+               ksft_exit_fail_msg(
+                       "%s test: failed to send SIGKILL to process with pid %d and pidfd %d: %s\n",
+                       test_name, parent_tid, pidfd, strerror(errno));
+
+       ret = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
+       if (ret < 0)
+               ksft_exit_fail_msg(
+                       "%s test: failed to wait on WEXITED process with pid %d and pidfd %d: %s\n",
+                       test_name, parent_tid, pidfd, strerror(errno));
+
+       if (info.si_signo != SIGCHLD)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_signo value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_signo, parent_tid, pidfd,
+                       strerror(errno));
+
+       if (info.si_code != CLD_KILLED)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_code value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_code, parent_tid, pidfd,
+                       strerror(errno));
+
+       if (info.si_pid != parent_tid)
+               ksft_exit_fail_msg(
+                       "%s test: unexpected si_pid value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+                       test_name, info.si_pid, parent_tid, pidfd,
+                       strerror(errno));
+
+       close(pidfd);
+
+       ksft_test_result_pass("%s test: Passed\n", test_name);
+       return 0;
+}
+
+int main(int argc, char **argv)
+{
+       ksft_print_header();
+       ksft_set_plan(2);
+
+       test_pidfd_wait_simple();
+       test_pidfd_wait_states();
+
+       return ksft_exit_pass();
+}
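
pidfd_wait.c exercises the new P_PIDFD idtype for waitid(), invoked through a raw syscall because the C library of the day did not expose P_PIDFD or the trailing rusage argument. A minimal standalone sketch, assuming a kernel with P_PIDFD support and headers that define __NR_pidfd_open:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <sys/wait.h>
    #include <unistd.h>

    #ifndef P_PIDFD
    #define P_PIDFD 3
    #endif

    int main(void)
    {
            siginfo_t info = { 0 };
            pid_t pid = fork();
            int pidfd;

            if (pid < 0)
                    return 1;
            if (pid == 0)
                    _exit(42);              /* child */

            pidfd = syscall(__NR_pidfd_open, pid, 0);
            if (pidfd < 0) {
                    fprintf(stderr, "pidfd_open: %s\n", strerror(errno));
                    return 1;
            }
            /* Reap the child through its pidfd rather than its pid. */
            if (syscall(__NR_waitid, P_PIDFD, pidfd, &info, WEXITED, NULL) < 0) {
                    fprintf(stderr, "waitid: %s\n", strerror(errno));
                    return 1;
            }
            printf("child exited with status %d\n", info.si_status);
            close(pidfd);
            return 0;
    }
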
index affa7f2d96702c9e399caccc5d497f4cb144eca2..9539cffa9e5e3556c53f9aaf9f1b1e25ddd6c649 100644 (file)
@@ -64,7 +64,7 @@ class SubPlugin(TdcPlugin):
             cmdlist.insert(0, self.args.NAMES['NS'])
             cmdlist.insert(0, 'exec')
             cmdlist.insert(0, 'netns')
-            cmdlist.insert(0, 'ip')
+            cmdlist.insert(0, self.args.NAMES['IP'])
         else:
             pass
 
@@ -78,16 +78,16 @@ class SubPlugin(TdcPlugin):
         return command
 
     def _ports_create(self):
-        cmd = 'ip link add $DEV0 type veth peer name $DEV1'
+        cmd = '$IP link add $DEV0 type veth peer name $DEV1'
         self._exec_cmd('pre', cmd)
-        cmd = 'ip link set $DEV0 up'
+        cmd = '$IP link set $DEV0 up'
         self._exec_cmd('pre', cmd)
         if not self.args.namespace:
-            cmd = 'ip link set $DEV1 up'
+            cmd = '$IP link set $DEV1 up'
             self._exec_cmd('pre', cmd)
 
     def _ports_destroy(self):
-        cmd = 'ip link del $DEV0'
+        cmd = '$IP link del $DEV0'
         self._exec_cmd('post', cmd)
 
     def _ns_create(self):
@@ -97,16 +97,16 @@ class SubPlugin(TdcPlugin):
         '''
         self._ports_create()
         if self.args.namespace:
-            cmd = 'ip netns add {}'.format(self.args.NAMES['NS'])
+            cmd = '$IP netns add {}'.format(self.args.NAMES['NS'])
             self._exec_cmd('pre', cmd)
-            cmd = 'ip link set $DEV1 netns {}'.format(self.args.NAMES['NS'])
+            cmd = '$IP link set $DEV1 netns {}'.format(self.args.NAMES['NS'])
             self._exec_cmd('pre', cmd)
-            cmd = 'ip -n {} link set $DEV1 up'.format(self.args.NAMES['NS'])
+            cmd = '$IP -n {} link set $DEV1 up'.format(self.args.NAMES['NS'])
             self._exec_cmd('pre', cmd)
             if self.args.device:
-                cmd = 'ip link set $DEV2 netns {}'.format(self.args.NAMES['NS'])
+                cmd = '$IP link set $DEV2 netns {}'.format(self.args.NAMES['NS'])
                 self._exec_cmd('pre', cmd)
-                cmd = 'ip -n {} link set $DEV2 up'.format(self.args.NAMES['NS'])
+                cmd = '$IP -n {} link set $DEV2 up'.format(self.args.NAMES['NS'])
                 self._exec_cmd('pre', cmd)
 
     def _ns_destroy(self):
@@ -115,7 +115,7 @@ class SubPlugin(TdcPlugin):
         devices as well)
         '''
         if self.args.namespace:
-            cmd = 'ip netns delete {}'.format(self.args.NAMES['NS'])
+            cmd = '$IP netns delete {}'.format(self.args.NAMES['NS'])
             self._exec_cmd('post', cmd)
 
     def _exec_cmd(self, stage, command):
index 44efc2ff863f4b903075d55476e4374862c619dd..0d090482720d2f79ee0af937672f969de728a275 100644 (file)
@@ -211,6 +211,12 @@ static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
        vgic_irq_set_phys_active(irq, true);
 }
 
+static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
+{
+       return (vgic_irq_is_sgi(irq->intid) &&
+               vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
+}
+
 void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
@@ -223,6 +229,12 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+               /* GICD_ISPENDR0 SGI bits are WI */
+               if (is_vgic_v2_sgi(vcpu, irq)) {
+                       vgic_put_irq(vcpu->kvm, irq);
+                       continue;
+               }
+
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw)
                        vgic_hw_irq_spending(vcpu, irq, is_uaccess);
@@ -270,6 +282,12 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+               /* GICD_ICPENDR0 SGI bits are WI */
+               if (is_vgic_v2_sgi(vcpu, irq)) {
+                       vgic_put_irq(vcpu->kvm, irq);
+                       continue;
+               }
+
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
                if (irq->hw)
index 96aab77d04718a00164abd251566aaa8e2167b48..b00aa304c260eb412210cf26aa2b011dc51dfa88 100644 (file)
@@ -184,7 +184,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
                if (vgic_irq_is_sgi(irq->intid)) {
                        u32 src = ffs(irq->source);
 
-                       BUG_ON(!src);
+                       if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+                                          irq->intid))
+                               return;
+
                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));
                        if (irq->source) {
index 0c653a1e5215280f4076db92fdb1db61b7b01386..a4ad431c92a912d2b3a9330db301059858ebc62b 100644 (file)
@@ -167,7 +167,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
                    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                        u32 src = ffs(irq->source);
 
-                       BUG_ON(!src);
+                       if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+                                          irq->intid))
+                               return;
+
                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));
                        if (irq->source) {
index 13d4b38a94ec4b81af13cd739497ae6db9640a55..e7bde65ba67c0802582883c195052b96cc427caf 100644 (file)
@@ -254,6 +254,13 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
        bool penda, pendb;
        int ret;
 
+       /*
+        * list_sort may call this function with the same element when
+        * the list is fairly long.
+        */
+       if (unlikely(irqa == irqb))
+               return 0;
+
        raw_spin_lock(&irqa->irq_lock);
        raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);