git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
Merge tag 'staging-5.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh...
author: Linus Torvalds <torvalds@linux-foundation.org>
Mon, 26 Apr 2021 18:14:21 +0000 (11:14 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Mon, 26 Apr 2021 18:14:21 +0000 (11:14 -0700)
Pull staging/IIO driver updates from Greg KH:
 "Here is the big set of staging and IIO driver updates for 5.13-rc1.

  Lots of little churn in here, and some larger churn as well. Major
  things are:

   - removal of wimax drivers, no one has this hardware anymore for this
     failed "experiment".

   - removal of the Google gasket driver, turns out no one wanted to
     maintain it or cares about it anymore, so they asked for it to be
     removed.

   - comedi finally moves out of the staging directory into drivers/comedi

     This is one of the oldest kernel subsystems around, being created
     in the 2.0 kernel days, and was one of the first things added to
     drivers/staging/ when that was created over 15 years ago.

     It should have been moved out of staging a long time ago, it's well
     maintained and used by loads of different devices in the real world
     every day. Nice to see this finally happen.

   - so many tiny coding style cleanups it's not funny.

     Perfect storm of at least 2 different intern project application
     deadlines combined to provide a huge number of new contributions in
     this area from people learning how to do kernel development. Great
     job to everyone involved here.

  There's also the normal updates for IIO drivers with new IIO drivers
  and updates all over that subsystem.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'staging-5.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (907 commits)
  staging: octeon: Use 'for_each_child_of_node'
  Staging: rtl8723bs: rtw_xmit: fixed tabbing issue
  staging: rtl8188eu: remove unused function parameters
  staging: rtl8188eu: cmdThread is a task_struct
  staging: rtl8188eu: remove constant variable and dead code
  staging: rtl8188eu: change bLeisurePs' type to bool
  staging: rtl8723bs: remove empty #ifdef block
  staging: rtl8723bs: remove unused DBG_871X_LEVEL macro declarations
  staging: rtl8723bs: split too long line
  staging: rtl8723bs: fix indentation in if block
  staging: rtl8723bs: fix code indent issue
  staging: rtl8723bs: replace DBG_871X_LEVEL logs with netdev_*()
  staging: rtl8192e: indent statement properly
  staging: rtl8723bs: Remove led_blink_hdl() and everything related
  staging: comedi: move out of staging directory
  staging: rtl8723bs: remove sdio_drv_priv structure
  staging: rtl8723bs: remove unused argument in function
  staging: rtl8723bs: remove DBG_871X_SEL_NL macro declaration
  staging: rtl8723bs: replace DBG_871X_SEL_NL with netdev_dbg()
  staging: rtl8723bs: fix indentation issue introduced by long line split
  ...

1632 files changed:
.mailmap
Documentation/ABI/testing/debugfs-driver-habanalabs
Documentation/ABI/testing/debugfs-moxtet
Documentation/ABI/testing/debugfs-turris-mox-rwtm
Documentation/ABI/testing/sysfs-bus-moxtet-devices
Documentation/ABI/testing/sysfs-bus-pci-devices-pvpanic
Documentation/ABI/testing/sysfs-class-led-driver-turris-omnia
Documentation/ABI/testing/sysfs-driver-xdata [new file with mode: 0644]
Documentation/ABI/testing/sysfs-firmware-sgi_uv
Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
Documentation/ABI/testing/sysfs-platform-intel-pmc [new file with mode: 0644]
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/laptops/thinkpad-acpi.rst
Documentation/admin-guide/perf/hisi-pmu.rst
Documentation/arm64/booting.rst
Documentation/arm64/pointer-authentication.rst
Documentation/arm64/tagged-address-abi.rst
Documentation/dev-tools/kasan.rst
Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml
Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.txt [deleted file]
Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/fpga/fpga-region.txt
Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt
Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
Documentation/devicetree/bindings/i2c/i2c-gpio.yaml
Documentation/devicetree/bindings/i2c/i2c-imx.yaml
Documentation/devicetree/bindings/iio/adc/ingenic,adc.yaml
Documentation/devicetree/bindings/input/adc-joystick.yaml
Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt
Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/interrupt-controller/idt,32434-pic.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/interrupt-controller/nuvoton,wpcm450-aic.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml
Documentation/devicetree/bindings/mfd/ab8500.txt
Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
Documentation/devicetree/bindings/net/ethernet-controller.yaml
Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
Documentation/devicetree/bindings/nvmem/brcm,nvram.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
Documentation/devicetree/bindings/phy/bcm-ns-usb2-phy.txt [deleted file]
Documentation/devicetree/bindings/phy/bcm-ns-usb2-phy.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.txt [deleted file]
Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml
Documentation/devicetree/bindings/phy/marvell,armada-3700-utmi-phy.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/phy/marvell,armada-cp110-utmi-phy.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml
Documentation/devicetree/bindings/phy/mediatek,hdmi-phy.yaml
Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml
Documentation/devicetree/bindings/phy/microchip,sparx5-serdes.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml
Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt [deleted file]
Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml
Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml
Documentation/devicetree/bindings/soundwire/qcom,sdw.txt
Documentation/devicetree/bindings/timer/ingenic,tcu.yaml
Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt
Documentation/devicetree/bindings/timer/renesas,cmt.yaml
Documentation/devicetree/bindings/timer/renesas,tmu.yaml
Documentation/driver-api/surface_aggregator/client.rst
Documentation/driver-api/surface_aggregator/clients/dtx.rst [new file with mode: 0644]
Documentation/driver-api/surface_aggregator/clients/index.rst
Documentation/fpga/dfl.rst
Documentation/misc-devices/dw-xdata-pcie.rst [new file with mode: 0644]
Documentation/misc-devices/index.rst
Documentation/networking/ethtool-netlink.rst
Documentation/networking/ip-sysctl.rst
Documentation/networking/seg6-sysctl.rst
Documentation/security/keys/trusted-encrypted.rst
Documentation/userspace-api/ioctl/ioctl-number.rst
Documentation/x86/sgx.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/arc/boot/dts/haps_hs.dts
arch/arc/kernel/signal.c
arch/arc/kernel/unwind.c
arch/arm/Kconfig
arch/arm/boot/dts/armada-385-turris-omnia.dts
arch/arm/boot/dts/bcm2711.dtsi
arch/arm/boot/dts/dra7-l4.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap44xx-clocks.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/crypto/aes-cipher-core.S
arch/arm/crypto/blake2b-neon-glue.c
arch/arm/crypto/blake2s-core.S
arch/arm/crypto/chacha-scalar-core.S
arch/arm/crypto/curve25519-core.S
arch/arm/crypto/poly1305-glue.c
arch/arm/include/asm/paravirt.h
arch/arm/include/asm/xen/swiotlb-xen.h [new file with mode: 0644]
arch/arm/kernel/paravirt.c
arch/arm/mach-footbridge/cats-pci.c
arch/arm/mach-footbridge/ebsa285-pci.c
arch/arm/mach-footbridge/netwinder-pci.c
arch/arm/mach-footbridge/personal-pci.c
arch/arm/mach-keystone/keystone.c
arch/arm/mach-omap1/ams-delta-fiq-handler.S
arch/arm/mach-omap2/board-generic.c
arch/arm/mach-omap2/omap-secure.c
arch/arm/mach-omap2/omap-secure.h
arch/arm/mach-omap2/pmic-cpcap.c
arch/arm/mach-omap2/sr_device.c
arch/arm/mach-pxa/mainstone.c
arch/arm/mach-pxa/pxa_cplds_irqs.c
arch/arm/mm/mmu.c
arch/arm/mm/pmsa-v7.c
arch/arm/mm/pmsa-v8.c
arch/arm/probes/uprobes/core.c
arch/arm/xen/mm.c
arch/arm64/Kconfig
arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
arch/arm64/boot/dts/nvidia/tegra186.dtsi
arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi
arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
arch/arm64/configs/defconfig
arch/arm64/crypto/aes-modes.S
arch/arm64/crypto/poly1305-glue.c
arch/arm64/crypto/sha1-ce-core.S
arch/arm64/crypto/sha2-ce-core.S
arch/arm64/crypto/sha3-ce-core.S
arch/arm64/crypto/sha512-ce-core.S
arch/arm64/include/asm/alternative-macros.h
arch/arm64/include/asm/arch_gicv3.h
arch/arm64/include/asm/arch_timer.h
arch/arm64/include/asm/asm_pointer_auth.h
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/barrier.h
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/daifflags.h
arch/arm64/include/asm/el2_setup.h
arch/arm64/include/asm/fpsimd.h
arch/arm64/include/asm/irq.h
arch/arm64/include/asm/irq_work.h
arch/arm64/include/asm/irqflags.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/mte-kasan.h
arch/arm64/include/asm/mte.h
arch/arm64/include/asm/paravirt.h
arch/arm64/include/asm/pgalloc.h
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/pointer_auth.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/ptdump.h
arch/arm64/include/asm/smp.h
arch/arm64/include/asm/stacktrace.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/uaccess.h
arch/arm64/include/asm/vdso/gettimeofday.h
arch/arm64/include/asm/word-at-a-time.h
arch/arm64/include/asm/xen/swiotlb-xen.h [new file with mode: 0644]
arch/arm64/kernel/Makefile
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry-common.c
arch/arm64/kernel/entry-fpsimd.S
arch/arm64/kernel/entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/head.S
arch/arm64/kernel/hyp-stub.S
arch/arm64/kernel/idreg-override.c
arch/arm64/kernel/irq.c
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/module.c
arch/arm64/kernel/mte.c
arch/arm64/kernel/paravirt.c
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/pointer_auth.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/process.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/sleep.S
arch/arm64/kernel/smp.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/suspend.c
arch/arm64/kernel/syscall.c
arch/arm64/kernel/vdso.c
arch/arm64/kvm/vgic/vgic-mmio-v3.c
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/fault.c
arch/arm64/mm/kasan_init.c
arch/arm64/mm/mmu.c
arch/arm64/mm/proc.S
arch/arm64/mm/ptdump.c
arch/arm64/mm/ptdump_debugfs.c
arch/csky/Kconfig
arch/csky/include/asm/page.h
arch/ia64/configs/generic_defconfig
arch/ia64/include/asm/ptrace.h
arch/ia64/mm/discontig.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/fpsp040/Makefile
arch/m68k/ifpsp060/Makefile
arch/m68k/include/asm/mvme147hw.h
arch/m68k/include/asm/page_mm.h
arch/m68k/include/asm/sun3xflop.h
arch/m68k/kernel/sys_m68k.c
arch/m68k/kernel/syscalls/Makefile
arch/m68k/kernel/syscalls/syscallhdr.sh [deleted file]
arch/m68k/kernel/syscalls/syscalltbl.sh [deleted file]
arch/m68k/kernel/syscalltable.S
arch/m68k/mvme147/config.c
arch/m68k/mvme16x/config.c
arch/mips/crypto/poly1305-glue.c
arch/mips/netlogic/common/irq.c
arch/nds32/mm/cacheflush.c
arch/parisc/include/asm/cmpxchg.h
arch/parisc/include/asm/processor.h
arch/parisc/math-emu/fpu.h
arch/powerpc/crypto/sha1-spe-glue.c
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/ptrace/Makefile
arch/powerpc/kernel/ptrace/ptrace-decl.h
arch/powerpc/kernel/ptrace/ptrace-fpu.c
arch/powerpc/kernel/ptrace/ptrace-novsx.c
arch/powerpc/kernel/ptrace/ptrace-view.c
arch/powerpc/kernel/signal_32.c
arch/riscv/Kconfig
arch/riscv/kernel/entry.S
arch/riscv/kernel/probes/ftrace.c
arch/riscv/kernel/traps.c
arch/riscv/mm/fault.c
arch/s390/include/asm/stacktrace.h
arch/s390/kernel/cpcmd.c
arch/s390/kernel/dumpstack.c
arch/s390/kernel/entry.S
arch/s390/kernel/irq.c
arch/s390/kernel/setup.c
arch/s390/kernel/stacktrace.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/efi_thunk_64.S
arch/x86/boot/compressed/head_64.S
arch/x86/boot/compressed/idt_64.c
arch/x86/boot/compressed/kaslr.c
arch/x86/boot/compressed/mem_encrypt.S
arch/x86/boot/compressed/misc.c
arch/x86/boot/compressed/misc.h
arch/x86/boot/compressed/sev-es.c
arch/x86/crypto/crc32-pclmul_glue.c
arch/x86/crypto/curve25519-x86_64.c
arch/x86/crypto/poly1305_glue.c
arch/x86/crypto/twofish-x86_64-asm_64-3way.S
arch/x86/crypto/twofish_glue_3way.c
arch/x86/entry/common.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/vdso/vdso2c.c
arch/x86/entry/vdso/vdso2c.h
arch/x86/entry/vdso/vdso32/system_call.S
arch/x86/entry/vdso/vma.c
arch/x86/entry/vdso/vsgx.S
arch/x86/events/amd/core.c
arch/x86/events/amd/iommu.h
arch/x86/events/core.c
arch/x86/events/intel/bts.c
arch/x86/events/intel/core.c
arch/x86/events/intel/ds.c
arch/x86/events/intel/lbr.c
arch/x86/events/intel/p4.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/events/zhaoxin/core.c
arch/x86/hyperv/hv_apic.c
arch/x86/hyperv/hv_init.c
arch/x86/hyperv/hv_proc.c
arch/x86/hyperv/hv_spinlock.c
arch/x86/hyperv/irqdomain.c
arch/x86/hyperv/mmu.c
arch/x86/hyperv/nested.c
arch/x86/include/asm/agp.h
arch/x86/include/asm/alternative-asm.h [deleted file]
arch/x86/include/asm/alternative.h
arch/x86/include/asm/cmpxchg.h
arch/x86/include/asm/cpu.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/elf.h
arch/x86/include/asm/entry-common.h
arch/x86/include/asm/hyperv-tlfs.h
arch/x86/include/asm/idtentry.h
arch/x86/include/asm/intel_pconfig.h
arch/x86/include/asm/intel_pt.h
arch/x86/include/asm/io.h
arch/x86/include/asm/irq_stack.h
arch/x86/include/asm/irqflags.h
arch/x86/include/asm/kfence.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/proto.h
arch/x86/include/asm/set_memory.h
arch/x86/include/asm/setup.h
arch/x86/include/asm/sgx.h [new file with mode: 0644]
arch/x86/include/asm/smap.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/switch_to.h
arch/x86/include/asm/syscall_wrapper.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/uv/uv_geo.h
arch/x86/include/asm/uv/uv_hub.h
arch/x86/include/uapi/asm/bootparam.h
arch/x86/include/uapi/asm/debugreg.h
arch/x86/include/uapi/asm/msgbuf.h
arch/x86/include/uapi/asm/sgx.h
arch/x86/include/uapi/asm/shmbuf.h
arch/x86/include/uapi/asm/sigcontext.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/acpi/wakeup_64.S
arch/x86/kernel/alternative.c
arch/x86/kernel/amd_nb.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/cpu/cacheinfo.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpuid-deps.c
arch/x86/kernel/cpu/cyrix.c
arch/x86/kernel/cpu/feat_ctl.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/mce/inject.c
arch/x86/kernel/cpu/mce/severity.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/mtrr/cleanup.c
arch/x86/kernel/cpu/mtrr/mtrr.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/cpu/resctrl/monitor.c
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/cpu/sgx/Makefile
arch/x86/kernel/cpu/sgx/arch.h [deleted file]
arch/x86/kernel/cpu/sgx/driver.c
arch/x86/kernel/cpu/sgx/encl.c
arch/x86/kernel/cpu/sgx/encl.h
arch/x86/kernel/cpu/sgx/encls.h
arch/x86/kernel/cpu/sgx/ioctl.c
arch/x86/kernel/cpu/sgx/main.c
arch/x86/kernel/cpu/sgx/sgx.h
arch/x86/kernel/cpu/sgx/virt.c [new file with mode: 0644]
arch/x86/kernel/cpu/topology.c
arch/x86/kernel/cpu/vmware.c
arch/x86/kernel/crash.c
arch/x86/kernel/e820.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/head64.c
arch/x86/kernel/idt.c
arch/x86/kernel/irq.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/kprobes/ftrace.c
arch/x86/kernel/kvm.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/paravirt-spinlocks.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/paravirt_patch.c [deleted file]
arch/x86/kernel/process.c
arch/x86/kernel/pvclock.c
arch/x86/kernel/relocate_kernel_32.S
arch/x86/kernel/relocate_kernel_64.S
arch/x86/kernel/setup.c
arch/x86/kernel/sev-es-shared.c
arch/x86/kernel/sev-es.c
arch/x86/kernel/signal.c
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/stacktrace.c
arch/x86/kernel/sysfb_efi.c
arch/x86/kernel/tboot.c
arch/x86/kernel/topology.c
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kernel/tsc_sync.c
arch/x86/kernel/umip.c
arch/x86/kvm/Kconfig
arch/x86/kvm/cpuid.c
arch/x86/kvm/emulate.c
arch/x86/kvm/irq_comm.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/mmu_internal.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/posted_intr.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/atomic64_386_32.S
arch/x86/lib/atomic64_cx8_32.S
arch/x86/lib/copy_page_64.S
arch/x86/lib/copy_user_64.S
arch/x86/lib/insn-eval.c
arch/x86/lib/memcpy_64.S
arch/x86/lib/memmove_64.S
arch/x86/lib/memset_64.S
arch/x86/lib/mmx_32.c
arch/x86/lib/msr-smp.c
arch/x86/lib/msr.c
arch/x86/lib/retpoline.S
arch/x86/math-emu/fpu_trig.c
arch/x86/math-emu/reg_ld_str.c
arch/x86/math-emu/reg_round.S
arch/x86/mm/fault.c
arch/x86/mm/init.c
arch/x86/mm/init_64.c
arch/x86/mm/kaslr.c
arch/x86/mm/kmmio.c
arch/x86/mm/mem_encrypt.c
arch/x86/mm/mem_encrypt_boot.S
arch/x86/mm/mem_encrypt_identity.c
arch/x86/mm/pat/memtype.c
arch/x86/mm/pat/set_memory.c
arch/x86/mm/pkeys.c
arch/x86/mm/pti.c
arch/x86/mm/tlb.c
arch/x86/net/bpf_jit_comp.c
arch/x86/net/bpf_jit_comp32.c
arch/x86/pci/fixup.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/quirks.c
arch/x86/platform/intel-quark/imr.c
arch/x86/platform/intel-quark/imr_selftest.c
arch/x86/platform/intel/iosf_mbi.c
arch/x86/platform/olpc/olpc-xo15-sci.c
arch/x86/platform/olpc/olpc_dt.c
arch/x86/platform/pvh/head.S
arch/x86/platform/uv/uv_nmi.c
arch/x86/power/cpu.c
arch/x86/realmode/init.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/time.c
block/ioctl.c
certs/.gitignore
certs/Kconfig
certs/Makefile
certs/blacklist.c
certs/blacklist.h
certs/common.c [new file with mode: 0644]
certs/common.h [new file with mode: 0644]
certs/revocation_certificates.S [new file with mode: 0644]
certs/system_keyring.c
crypto/Kconfig
crypto/Makefile
crypto/aegis.h
crypto/aegis128-core.c
crypto/aegis128-neon.c
crypto/af_alg.c
crypto/api.c
crypto/asymmetric_keys/public_key.c
crypto/asymmetric_keys/x509_cert_parser.c
crypto/asymmetric_keys/x509_public_key.c
crypto/crc32_generic.c
crypto/ecc.c
crypto/ecc.h
crypto/ecc_curve_defs.h
crypto/ecdh.c
crypto/ecdh_helper.c
crypto/ecdsa.c [new file with mode: 0644]
crypto/ecdsasignature.asn1 [new file with mode: 0644]
crypto/fcrypt.c
crypto/jitterentropy.c
crypto/keywrap.c
crypto/rng.c
crypto/serpent_generic.c
crypto/testmgr.c
crypto/testmgr.h
drivers/accessibility/speakup/i18n.c
drivers/acpi/processor_idle.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/android/binder_alloc.h
drivers/android/binder_internal.h
drivers/atm/eni.c
drivers/base/attribute_container.c
drivers/base/auxiliary.c
drivers/base/base.h
drivers/base/component.c
drivers/base/core.c
drivers/base/cpu.c
drivers/base/dd.c
drivers/base/devcoredump.c
drivers/base/devres.c
drivers/base/devtmpfs.c
drivers/base/node.c
drivers/base/platform-msi.c
drivers/base/platform.c
drivers/base/power/wakeup_stats.c
drivers/base/swnode.c
drivers/base/test/Kconfig
drivers/base/test/Makefile
drivers/base/test/property-entry-test.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
drivers/bluetooth/btusb.c
drivers/bus/mhi/core/boot.c
drivers/bus/mhi/core/debugfs.c
drivers/bus/mhi/core/init.c
drivers/bus/mhi/core/internal.h
drivers/bus/mhi/core/main.c
drivers/bus/mhi/core/pm.c
drivers/bus/mhi/pci_generic.c
drivers/bus/moxtet.c
drivers/bus/mvebu-mbus.c
drivers/char/agp/Kconfig
drivers/char/applicom.c
drivers/char/hw_random/ba431-rng.c
drivers/char/hw_random/bcm2835-rng.c
drivers/char/hw_random/cctrng.c
drivers/char/hw_random/core.c
drivers/char/hw_random/intel-rng.c
drivers/char/hw_random/omap-rng.c
drivers/char/hw_random/pic32-rng.c
drivers/char/hw_random/xiphera-trng.c
drivers/char/lp.c
drivers/char/mwave/tp3780i.c
drivers/char/mwave/tp3780i.h
drivers/char/random.c
drivers/char/tpm/eventlog/acpi.c
drivers/char/tpm/eventlog/common.c
drivers/char/tpm/eventlog/efi.c
drivers/char/tpm/tpm_tis_i2c_cr50.c
drivers/char/virtio_console.c
drivers/clk/clk-fixed-factor.c
drivers/clk/clk.c
drivers/clk/qcom/camcc-sc7180.c
drivers/clk/socfpga/clk-gate.c
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/clksrc-dbx500-prcmu.c
drivers/clocksource/dw_apb_timer_of.c
drivers/clocksource/hyperv_timer.c
drivers/clocksource/ingenic-ost.c
drivers/clocksource/ingenic-timer.c
drivers/clocksource/sh_cmt.c
drivers/clocksource/timer-atmel-tcb.c
drivers/clocksource/timer-fsl-ftm.c
drivers/clocksource/timer-microchip-pit64b.c
drivers/clocksource/timer-npcm7xx.c
drivers/clocksource/timer-of.c
drivers/clocksource/timer-pistachio.c
drivers/clocksource/timer-ti-dm-systimer.c
drivers/clocksource/timer-vf-pit.c
drivers/crypto/allwinner/Kconfig
drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
drivers/crypto/amcc/crypto4xx_alg.c
drivers/crypto/amcc/crypto4xx_core.c
drivers/crypto/amcc/crypto4xx_core.h
drivers/crypto/amcc/crypto4xx_reg_def.h
drivers/crypto/amcc/crypto4xx_sa.h
drivers/crypto/amcc/crypto4xx_trng.h
drivers/crypto/amlogic/amlogic-gxl-cipher.c
drivers/crypto/amlogic/amlogic-gxl-core.c
drivers/crypto/atmel-ecc.c
drivers/crypto/atmel-i2c.c
drivers/crypto/atmel-sha.c
drivers/crypto/atmel-tdes.c
drivers/crypto/bcm/cipher.c
drivers/crypto/bcm/spu.c
drivers/crypto/bcm/spu2.c
drivers/crypto/bcm/util.c
drivers/crypto/caam/caamalg_qi2.c
drivers/crypto/caam/caampkc.c
drivers/crypto/cavium/cpt/cptpf_main.c
drivers/crypto/cavium/nitrox/nitrox_isr.c
drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
drivers/crypto/cavium/zip/common.h
drivers/crypto/ccp/ccp-crypto-main.c
drivers/crypto/ccp/ccp-dev.c
drivers/crypto/ccp/ccp-ops.c
drivers/crypto/ccp/sev-dev.c
drivers/crypto/ccp/sp-dev.c
drivers/crypto/ccp/sp-dev.h
drivers/crypto/ccp/sp-pci.c
drivers/crypto/ccp/tee-dev.c
drivers/crypto/ccp/tee-dev.h
drivers/crypto/ccree/cc_driver.c
drivers/crypto/chelsio/chcr_algo.c
drivers/crypto/chelsio/chcr_core.c
drivers/crypto/chelsio/chcr_core.h
drivers/crypto/geode-aes.c
drivers/crypto/hisilicon/Kconfig
drivers/crypto/hisilicon/hpre/hpre.h
drivers/crypto/hisilicon/hpre/hpre_crypto.c
drivers/crypto/hisilicon/hpre/hpre_main.c
drivers/crypto/hisilicon/qm.c
drivers/crypto/hisilicon/qm.h
drivers/crypto/hisilicon/sec/sec_algs.c
drivers/crypto/hisilicon/sec/sec_drv.c
drivers/crypto/hisilicon/sec/sec_drv.h
drivers/crypto/hisilicon/sec2/sec.h
drivers/crypto/hisilicon/sec2/sec_crypto.c
drivers/crypto/hisilicon/sec2/sec_crypto.h
drivers/crypto/hisilicon/sec2/sec_main.c
drivers/crypto/hisilicon/sgl.c
drivers/crypto/hisilicon/trng/trng.c
drivers/crypto/hisilicon/zip/zip.h
drivers/crypto/hisilicon/zip/zip_crypto.c
drivers/crypto/hisilicon/zip/zip_main.c
drivers/crypto/img-hash.c
drivers/crypto/inside-secure/safexcel.c
drivers/crypto/ixp4xx_crypto.c
drivers/crypto/keembay/keembay-ocs-aes-core.c
drivers/crypto/keembay/keembay-ocs-hcu-core.c
drivers/crypto/keembay/ocs-hcu.c
drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
drivers/crypto/marvell/octeontx2/otx2_cptlf.c
drivers/crypto/marvell/octeontx2/otx2_cptlf.h
drivers/crypto/marvell/octeontx2/otx2_cptpf.h
drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
drivers/crypto/nx/nx-aes-cbc.c
drivers/crypto/nx/nx-aes-ccm.c
drivers/crypto/nx/nx-aes-ctr.c
drivers/crypto/nx/nx-aes-ecb.c
drivers/crypto/nx/nx-aes-gcm.c
drivers/crypto/nx/nx-aes-xcbc.c
drivers/crypto/nx/nx-common-powernv.c
drivers/crypto/nx/nx-sha256.c
drivers/crypto/nx/nx-sha512.c
drivers/crypto/nx/nx.c
drivers/crypto/nx/nx_debugfs.c
drivers/crypto/omap-aes.c
drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
drivers/crypto/qat/qat_c62xvf/adf_drv.c
drivers/crypto/qat/qat_common/adf_accel_devices.h
drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
drivers/crypto/qat/qat_common/adf_init.c
drivers/crypto/qat/qat_common/adf_isr.c
drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
drivers/crypto/qat/qat_common/adf_transport.c
drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
drivers/crypto/qat/qat_common/adf_vf_isr.c
drivers/crypto/qat/qat_common/qat_algs.c
drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
drivers/crypto/qce/cipher.h
drivers/crypto/qce/common.c
drivers/crypto/qce/common.h
drivers/crypto/qce/sha.c
drivers/crypto/qce/skcipher.c
drivers/crypto/rockchip/rk3288_crypto_ahash.c
drivers/crypto/s5p-sss.c
drivers/crypto/sa2ul.c
drivers/crypto/sa2ul.h
drivers/crypto/stm32/stm32-cryp.c
drivers/crypto/stm32/stm32-hash.c
drivers/crypto/ux500/cryp/cryp.c
drivers/crypto/ux500/cryp/cryp.h
drivers/crypto/ux500/cryp/cryp_core.c
drivers/crypto/ux500/cryp/cryp_irq.c
drivers/crypto/ux500/cryp/cryp_irq.h
drivers/crypto/ux500/cryp/cryp_irqp.h
drivers/crypto/ux500/cryp/cryp_p.h
drivers/crypto/ux500/hash/hash_core.c
drivers/crypto/vmx/aes.c
drivers/crypto/vmx/aes_cbc.c
drivers/crypto/vmx/aes_ctr.c
drivers/crypto/vmx/aes_xts.c
drivers/crypto/vmx/ghash.c
drivers/crypto/vmx/vmx.c
drivers/cxl/mem.c
drivers/dax/bus.c
drivers/dma/dmaengine.c
drivers/dma/dw/Kconfig
drivers/dma/idxd/device.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/init.c
drivers/dma/idxd/irq.c
drivers/dma/idxd/sysfs.c
drivers/dma/plx_dma.c
drivers/dma/tegra20-apb-dma.c
drivers/dma/xilinx/xilinx_dpdma.c
drivers/extcon/extcon-gpio.c
drivers/extcon/extcon-intel-int3496.c
drivers/extcon/extcon-max8997.c
drivers/extcon/extcon-palmas.c
drivers/extcon/extcon-qcom-spmi-misc.c
drivers/extcon/extcon-sm5502.c
drivers/firewire/ohci.c
drivers/firmware/Kconfig
drivers/firmware/efi/libstub/Makefile
drivers/firmware/google/gsmi.c
drivers/firmware/turris-mox-rwtm.c
drivers/fpga/Kconfig
drivers/fpga/dfl-afu-error.c
drivers/fpga/dfl-afu-main.c
drivers/fpga/dfl-afu.h
drivers/fpga/dfl-pci.c
drivers/fpga/xilinx-pr-decoupler.c
drivers/fpga/xilinx-spi.c
drivers/gpio/gpio-moxtet.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpiolib-sysfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/i915/display/intel_acpi.c
drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/display/vlv_dsi.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/i915_gem.h
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/panel/panel-dsi-cm.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/gpu/drm/xen/xen_drm_front_conn.h
drivers/greybus/es2.c
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
drivers/hid/hid-alps.c
drivers/hid/hid-asus.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-ids.h
drivers/hid/wacom_wac.c
drivers/hv/Kconfig
drivers/hv/channel.c
drivers/hv/channel_mgmt.c
drivers/hv/connection.c
drivers/hv/hv.c
drivers/hv/hv_balloon.c
drivers/hv/hv_trace.h
drivers/hv/ring_buffer.c
drivers/hv/vmbus_drv.c
drivers/hwmon/raspberrypi-hwmon.c
drivers/hwtracing/coresight/coresight-core.c
drivers/hwtracing/coresight/coresight-etm-perf.c
drivers/hwtracing/coresight/coresight-etm4x-core.c
drivers/hwtracing/intel_th/core.c
drivers/hwtracing/intel_th/gth.c
drivers/hwtracing/intel_th/intel_th.h
drivers/hwtracing/intel_th/msu.c
drivers/hwtracing/intel_th/pci.c
drivers/hwtracing/intel_th/pti.c
drivers/hwtracing/stm/p_sys-t.c
drivers/hwtracing/stm/policy.c
drivers/i2c/busses/i2c-designware-master.c
drivers/i2c/busses/i2c-exynos5.c
drivers/i2c/busses/i2c-hix5hd2.c
drivers/i2c/busses/i2c-jz4780.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-stm32f4.c
drivers/i2c/i2c-core-base.c
drivers/infiniband/core/addr.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/hfi1/affinity.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/netdev_rx.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/ulp/rtrs/rtrs-clt.c
drivers/input/joystick/n64joy.c
drivers/input/keyboard/nspire-keypad.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/touchscreen/elants_i2c.c
drivers/input/touchscreen/s6sy761.c
drivers/interconnect/qcom/Kconfig
drivers/interconnect/qcom/Makefile
drivers/interconnect/qcom/icc-rpm.c
drivers/interconnect/qcom/sdm660.c [new file with mode: 0644]
drivers/interconnect/qcom/sm8350.c [new file with mode: 0644]
drivers/interconnect/qcom/sm8350.h [new file with mode: 0644]
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/irqchip/irq-aspeed-vic.c
drivers/irqchip/irq-bcm7120-l2.c
drivers/irqchip/irq-csky-apb-intc.c
drivers/irqchip/irq-gic-v2m.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3-mbi.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic-v4.c
drivers/irqchip/irq-hip04.c
drivers/irqchip/irq-idt3243x.c [new file with mode: 0644]
drivers/irqchip/irq-jcore-aic.c
drivers/irqchip/irq-loongson-pch-pic.c
drivers/irqchip/irq-mbigen.c
drivers/irqchip/irq-meson-gpio.c
drivers/irqchip/irq-mst-intc.c
drivers/irqchip/irq-mtk-cirq.c
drivers/irqchip/irq-mxs.c
drivers/irqchip/irq-sifive-plic.c
drivers/irqchip/irq-stm32-exti.c
drivers/irqchip/irq-sun4i.c
drivers/irqchip/irq-tb10x.c
drivers/irqchip/irq-ti-sci-inta.c
drivers/irqchip/irq-vic.c
drivers/irqchip/irq-wpcm450-aic.c [new file with mode: 0644]
drivers/irqchip/irq-xilinx-intc.c
drivers/leds/leds-turris-omnia.c
drivers/mailbox/armada-37xx-rwtm-mailbox.c
drivers/md/dm-verity-fec.c
drivers/md/dm-verity-fec.h
drivers/media/pci/intel/ipu3/cio2-bridge.c
drivers/mfd/intel_pmt.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/ad525x_dpot.c
drivers/misc/cxl/context.c
drivers/misc/cxl/fault.c
drivers/misc/dw-xdata-pcie.c [new file with mode: 0644]
drivers/misc/genwqe/card_ddcb.c
drivers/misc/habanalabs/common/command_buffer.c
drivers/misc/habanalabs/common/command_submission.c
drivers/misc/habanalabs/common/context.c
drivers/misc/habanalabs/common/debugfs.c
drivers/misc/habanalabs/common/device.c
drivers/misc/habanalabs/common/firmware_if.c
drivers/misc/habanalabs/common/habanalabs.h
drivers/misc/habanalabs/common/habanalabs_drv.c
drivers/misc/habanalabs/common/habanalabs_ioctl.c
drivers/misc/habanalabs/common/hw_queue.c
drivers/misc/habanalabs/common/irq.c
drivers/misc/habanalabs/common/memory.c
drivers/misc/habanalabs/common/mmu/mmu.c
drivers/misc/habanalabs/common/pci/pci.c
drivers/misc/habanalabs/common/sysfs.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/misc/habanalabs/gaudi/gaudiP.h
drivers/misc/habanalabs/gaudi/gaudi_security.c
drivers/misc/habanalabs/goya/goya.c
drivers/misc/habanalabs/goya/goyaP.h
drivers/misc/habanalabs/include/common/cpucp_if.h
drivers/misc/habanalabs/include/common/hl_boot_if.h
drivers/misc/habanalabs/include/gaudi/gaudi.h
drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
drivers/misc/habanalabs/include/goya/goya.h
drivers/misc/habanalabs/include/goya/goya_async_events.h
drivers/misc/habanalabs/include/goya/goya_fw_if.h
drivers/misc/kgdbts.c
drivers/misc/lis3lv02d/lis3lv02d.c
drivers/misc/lkdtm/bugs.c
drivers/misc/lkdtm/core.c
drivers/misc/lkdtm/lkdtm.h
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/pci-me.c
drivers/misc/pvpanic.c [deleted file]
drivers/misc/pvpanic/Kconfig [new file with mode: 0644]
drivers/misc/pvpanic/Makefile [new file with mode: 0644]
drivers/misc/pvpanic/pvpanic-mmio.c [new file with mode: 0644]
drivers/misc/pvpanic/pvpanic-pci.c [new file with mode: 0644]
drivers/misc/pvpanic/pvpanic.c [new file with mode: 0644]
drivers/misc/pvpanic/pvpanic.h [new file with mode: 0644]
drivers/misc/sgi-xp/xp_main.c
drivers/misc/sgi-xp/xpc_main.c
drivers/misc/uacce/uacce.c
drivers/misc/vmw_balloon.c
drivers/misc/vmw_vmci/vmci_doorbell.c
drivers/misc/vmw_vmci/vmci_guest.c
drivers/misc/vmw_vmci/vmci_host.c
drivers/mmc/host/meson-gx-mmc.c
drivers/most/most_cdev.c
drivers/mtd/nand/raw/mtk_nand.c
drivers/mux/gpio.c
drivers/net/can/spi/mcp251x.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/broadcom/bcm4908_enet.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_controlq.h
drivers/net/ethernet/intel/ice/ice_dcb.c
drivers/net/ethernet/intel/ice/ice_dcb_nl.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/jme.h
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/metadata.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet.h
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/geneve.c
drivers/net/ieee802154/atusb.c
drivers/net/phy/bcm-phy-lib.c
drivers/net/phy/marvell.c
drivers/net/tun.c
drivers/net/usb/hso.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wan/hdlc_fr.c
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/mediatek/mt76/mt7921/regs.h
drivers/net/wireless/virt_wifi.c
drivers/net/xen-netback/xenbus.c
drivers/nvdimm/bus.c
drivers/nvdimm/pmem.c
drivers/nvdimm/region_devs.c
drivers/nvmem/Kconfig
drivers/nvmem/Makefile
drivers/nvmem/brcm_nvram.c [new file with mode: 0644]
drivers/nvmem/core.c
drivers/nvmem/qcom-spmi-sdam.c
drivers/nvmem/qfprom.c
drivers/nvmem/snvs_lpgpr.c
drivers/of/fdt.c
drivers/of/of_private.h
drivers/of/overlay.c
drivers/of/property.c
drivers/of/unittest.c
drivers/pci/controller/pci-hyperv.c
drivers/perf/arm-cci.c
drivers/perf/arm-ccn.c
drivers/perf/arm-cmn.c
drivers/perf/arm_dmc620_pmu.c
drivers/perf/arm_dsu_pmu.c
drivers/perf/arm_pmu_platform.c
drivers/perf/arm_smmuv3_pmu.c
drivers/perf/arm_spe_pmu.c
drivers/perf/fsl_imx8_ddr_perf.c
drivers/perf/hisilicon/Makefile
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
drivers/perf/hisilicon/hisi_uncore_pa_pmu.c [new file with mode: 0644]
drivers/perf/hisilicon/hisi_uncore_pmu.c
drivers/perf/hisilicon/hisi_uncore_pmu.h
drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c [new file with mode: 0644]
drivers/perf/qcom_l2_pmu.c
drivers/perf/qcom_l3_pmu.c
drivers/perf/thunderx2_pmu.c
drivers/perf/xgene_pmu.c
drivers/phy/Kconfig
drivers/phy/Makefile
drivers/phy/broadcom/Kconfig
drivers/phy/cadence/Kconfig
drivers/phy/cadence/phy-cadence-sierra.c
drivers/phy/cadence/phy-cadence-torrent.c
drivers/phy/hisilicon/phy-hi6220-usb.c
drivers/phy/hisilicon/phy-hix5hd2-sata.c
drivers/phy/ingenic/phy-ingenic-usb.c
drivers/phy/intel/phy-intel-lgm-combo.c
drivers/phy/marvell/Kconfig
drivers/phy/marvell/Makefile
drivers/phy/marvell/phy-mvebu-cp110-utmi.c [new file with mode: 0644]
drivers/phy/microchip/Kconfig [new file with mode: 0644]
drivers/phy/microchip/Makefile [new file with mode: 0644]
drivers/phy/microchip/sparx5_serdes.c [new file with mode: 0644]
drivers/phy/microchip/sparx5_serdes.h [new file with mode: 0644]
drivers/phy/microchip/sparx5_serdes_regs.h [new file with mode: 0644]
drivers/phy/phy-core.c
drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
drivers/phy/qualcomm/phy-qcom-qmp.c
drivers/phy/qualcomm/phy-qcom-qmp.h
drivers/phy/qualcomm/phy-qcom-usb-hs.c
drivers/phy/ralink/phy-mt7621-pci.c
drivers/phy/rockchip/phy-rockchip-typec.c
drivers/phy/st/Kconfig
drivers/phy/st/phy-stm32-usbphyc.c
drivers/phy/ti/phy-j721e-wiz.c
drivers/phy/ti/phy-tusb1210.c
drivers/phy/ti/phy-twl4030-usb.c
drivers/phy/xilinx/phy-zynqmp.c
drivers/pinctrl/core.c
drivers/pinctrl/intel/pinctrl-lewisburg.c
drivers/platform/mellanox/mlxbf-bootctl.c
drivers/platform/mellanox/mlxreg-hotplug.c
drivers/platform/surface/Kconfig
drivers/platform/surface/Makefile
drivers/platform/surface/aggregator/controller.c
drivers/platform/surface/surface_aggregator_registry.c [new file with mode: 0644]
drivers/platform/surface/surface_dtx.c [new file with mode: 0644]
drivers/platform/surface/surface_platform_profile.c [new file with mode: 0644]
drivers/platform/surface/surfacepro3_button.c
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/adv_swbutton.c [new file with mode: 0644]
drivers/platform/x86/asus-laptop.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/classmate-laptop.c
drivers/platform/x86/dell/alienware-wmi.c
drivers/platform/x86/dell/dell-smbios-base.c
drivers/platform/x86/dell/dell-smbios-wmi.c
drivers/platform/x86/dell/dell-wmi-descriptor.c
drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c
drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c
drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
drivers/platform/x86/dell/dell-wmi.c
drivers/platform/x86/gigabyte-wmi.c [new file with mode: 0644]
drivers/platform/x86/gpd-pocket-fan.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel-vbtn.c
drivers/platform/x86/intel-wmi-sbl-fw-update.c
drivers/platform/x86/intel-wmi-thunderbolt.c
drivers/platform/x86/intel_chtdc_ti_pwrbtn.c
drivers/platform/x86/intel_pmc_core.c
drivers/platform/x86/intel_pmc_core.h
drivers/platform/x86/intel_pmt_class.c
drivers/platform/x86/intel_pmt_class.h
drivers/platform/x86/intel_pmt_telemetry.c
drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
drivers/platform/x86/lg-laptop.c
drivers/platform/x86/panasonic-laptop.c
drivers/platform/x86/pmc_atom.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/touchscreen_dmi.c
drivers/platform/x86/wmi-bmof.c
drivers/platform/x86/wmi.c
drivers/platform/x86/xo15-ebook.c
drivers/power/supply/axp20x_usb_power.c
drivers/power/supply/bq24735-charger.c
drivers/power/supply/ltc2941-battery-gauge.c
drivers/power/supply/sbs-battery.c
drivers/pps/clients/pps-gpio.c
drivers/ras/cec.c
drivers/regulator/bd9571mwv-regulator.c
drivers/regulator/qcom_spmi-regulator.c
drivers/remoteproc/pru_rproc.c
drivers/remoteproc/qcom_pil_info.c
drivers/scsi/hpsa_cmd.h
drivers/scsi/libiscsi.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/scsi_transport_srp.c
drivers/scsi/ufs/ufshcd.c
drivers/sh/intc/core.c
drivers/soc/fsl/qbman/qman.c
drivers/soc/qcom/qcom-geni-se.c
drivers/soundwire/Makefile
drivers/soundwire/bus.c
drivers/soundwire/bus.h
drivers/soundwire/bus_type.c
drivers/soundwire/cadence_master.c
drivers/soundwire/dmi-quirks.c [new file with mode: 0644]
drivers/soundwire/generic_bandwidth_allocation.c
drivers/soundwire/intel.c
drivers/soundwire/intel_init.c
drivers/soundwire/qcom.c
drivers/soundwire/slave.c
drivers/soundwire/stream.c
drivers/target/iscsi/iscsi_target.c
drivers/thunderbolt/retimer.c
drivers/uio/Kconfig
drivers/uio/Makefile
drivers/uio/uio_dfl.c [new file with mode: 0644]
drivers/usb/cdns3/cdnsp-gadget.c
drivers/usb/usbip/stub_dev.c
drivers/usb/usbip/usbip_common.h
drivers/usb/usbip/usbip_event.c
drivers/usb/usbip/vhci_hcd.c
drivers/usb/usbip/vhci_sysfs.c
drivers/usb/usbip/vudc_dev.c
drivers/usb/usbip/vudc_sysfs.c
drivers/vdpa/mlx5/core/mlx5_vdpa.h
drivers/vdpa/mlx5/core/mr.c
drivers/vdpa/mlx5/core/resources.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vfio/pci/vfio_pci.c
drivers/vhost/vdpa.c
drivers/video/fbdev/core/fbcmap.c
drivers/video/fbdev/hyperv_fb.c
drivers/virt/acrn/vm.c
drivers/w1/slaves/w1_ds2780.c
drivers/w1/slaves/w1_ds2781.c
drivers/w1/slaves/w1_ds2805.c
drivers/w1/slaves/w1_ds28e17.c
drivers/w1/slaves/w1_therm.c
drivers/watchdog/armada_37xx_wdt.c
drivers/watchdog/retu_wdt.c
drivers/xen/Kconfig
drivers/xen/Makefile
drivers/xen/events/events_base.c
drivers/xen/pcpu.c
drivers/xen/time.c
drivers/xen/xen-acpi-cpuhotplug.c [deleted file]
drivers/xen/xen-acpi-memhotplug.c [deleted file]
drivers/xen/xen-pciback/pci_stub.c
drivers/xen/xen-pciback/vpci.c
drivers/xen/xen-stub.c [deleted file]
fs/btrfs/zoned.c
fs/cifs/Kconfig
fs/cifs/Makefile
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/coda/file.c
fs/crypto/Kconfig
fs/debugfs/file.c
fs/debugfs/inode.c
fs/direct-io.c
fs/file.c
fs/hostfs/hostfs_kern.c
fs/io-wq.c
fs/io_uring.c
fs/namei.c
fs/ocfs2/aops.c
fs/ocfs2/file.c
fs/overlayfs/file.c
fs/readdir.c
fs/verity/Kconfig
include/asm-generic/hyperv-tlfs.h
include/asm-generic/mshyperv.h
include/clocksource/hyperv_timer.h
include/crypto/acompress.h
include/crypto/aead.h
include/crypto/akcipher.h
include/crypto/chacha.h
include/crypto/ecc_curve.h [new file with mode: 0644]
include/crypto/ecdh.h
include/crypto/hash.h
include/crypto/internal/poly1305.h
include/crypto/kpp.h
include/crypto/poly1305.h
include/crypto/rng.h
include/crypto/skcipher.h
include/dt-bindings/bus/moxtet.h
include/dt-bindings/interconnect/qcom,sdm660.h [new file with mode: 0644]
include/dt-bindings/interconnect/qcom,sm8350.h [new file with mode: 0644]
include/dt-bindings/mux/ti-serdes.h
include/dt-bindings/phy/phy-cadence-torrent.h [deleted file]
include/dt-bindings/phy/phy-cadence.h [new file with mode: 0644]
include/dt-bindings/phy/phy-ti.h [new file with mode: 0644]
include/keys/asymmetric-type.h
include/keys/system_keyring.h
include/keys/trusted-type.h
include/keys/trusted_tee.h [new file with mode: 0644]
include/keys/trusted_tpm.h
include/linux/armada-37xx-rwtm-mailbox.h
include/linux/asn1_encoder.h [new file with mode: 0644]
include/linux/avf/virtchnl.h
include/linux/bottom_half.h
include/linux/bpf.h
include/linux/clocksource.h
include/linux/cpuhotplug.h
include/linux/device.h
include/linux/devm-helpers.h [new file with mode: 0644]
include/linux/entry-common.h
include/linux/ethtool.h
include/linux/hardirq.h
include/linux/hyperv.h
include/linux/interrupt.h
include/linux/irq.h
include/linux/irqchip/arm-gic-v4.h
include/linux/irqdesc.h
include/linux/irqdomain.h
include/linux/jump_label.h
include/linux/kasan.h
include/linux/marvell_phy.h
include/linux/mhi.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/moxtet.h
include/linux/nd.h
include/linux/netfilter_arp/arp_tables.h
include/linux/netfilter_bridge/ebtables.h
include/linux/nvmem-consumer.h
include/linux/oid_registry.h
include/linux/phy/phy.h
include/linux/platform_data/gpio-omap.h
include/linux/platform_device.h
include/linux/pps-gpio.h [deleted file]
include/linux/preempt.h
include/linux/property.h
include/linux/randomize_kstack.h [new file with mode: 0644]
include/linux/rcupdate.h
include/linux/sched.h
include/linux/skmsg.h
include/linux/soundwire/sdw.h
include/linux/stacktrace.h
include/linux/static_call.h
include/linux/static_call_types.h
include/linux/surface_aggregator/controller.h
include/linux/surface_aggregator/device.h
include/linux/timecounter.h
include/linux/timex.h
include/linux/tpm.h
include/linux/user_namespace.h
include/linux/virtio_net.h
include/linux/wmi.h
include/net/act_api.h
include/net/netns/xfrm.h
include/net/red.h
include/net/rtnetlink.h
include/net/sock.h
include/net/xfrm.h
include/trace/events/random.h
include/uapi/linux/android/binder.h
include/uapi/linux/can.h
include/uapi/linux/capability.h
include/uapi/linux/elf.h
include/uapi/linux/ethtool.h
include/uapi/linux/idxd.h
include/uapi/linux/map_to_7segment.h
include/uapi/linux/prctl.h
include/uapi/linux/rfkill.h
include/uapi/linux/surface_aggregator/dtx.h [new file with mode: 0644]
include/uapi/misc/habanalabs.h
include/uapi/misc/uacce/hisi_qm.h
include/xen/acpi.h
include/xen/arm/swiotlb-xen.h [new file with mode: 0644]
include/xen/interface/features.h
include/xen/swiotlb-xen.h
init/main.c
kernel/bpf/disasm.c
kernel/bpf/inode.c
kernel/bpf/stackmap.c
kernel/bpf/trampoline.c
kernel/bpf/verifier.c
kernel/entry/common.c
kernel/gcov/clang.c
kernel/irq/chip.c
kernel/irq/dummychip.c
kernel/irq/ipi.c
kernel/irq/irq_sim.c
kernel/irq/irqdesc.c
kernel/irq/irqdomain.c
kernel/irq/manage.c
kernel/irq/matrix.c
kernel/irq/migration.c
kernel/irq/msi.c
kernel/irq/proc.c
kernel/irq/resend.c
kernel/irq/spurious.c
kernel/irq/timings.c
kernel/locking/lockdep.c
kernel/locking/lockdep_internals.h
kernel/locking/qrwlock.c
kernel/profile.c
kernel/sched/cputime.c
kernel/softirq.c
kernel/sys.c
kernel/time/alarmtimer.c
kernel/time/clocksource.c
kernel/time/hrtimer.c
kernel/time/jiffies.c
kernel/time/ntp.c
kernel/time/posix-cpu-timers.c
kernel/time/posix-timers.c
kernel/time/test_udelay.c
kernel/time/tick-broadcast-hrtimer.c
kernel/time/tick-broadcast.c
kernel/time/tick-common.c
kernel/time/tick-oneshot.c
kernel/time/tick-sched.c
kernel/time/tick-sched.h
kernel/time/time.c
kernel/time/timecounter.c
kernel/time/timekeeping.c
kernel/time/timer.c
kernel/time/vsyscall.c
kernel/trace/trace.c
kernel/trace/trace_dynevent.c
kernel/user_namespace.c
kernel/watchdog.c
kernel/workqueue.c
lib/Kconfig
lib/Kconfig.debug
lib/Kconfig.kasan
lib/Makefile
lib/asn1_encoder.c [new file with mode: 0644]
lib/crypto/chacha.c
lib/crypto/poly1305-donna32.c
lib/crypto/poly1305-donna64.c
lib/crypto/poly1305.c
lib/earlycpio.c
lib/kobject_uevent.c
lib/lru_cache.c
lib/oid_registry.c
lib/parman.c
lib/radix-tree.c
lib/test_kasan.c
lib/test_kasan_module.c
mm/filemap.c
mm/gup.c
mm/internal.h
mm/kasan/common.c
mm/kasan/hw_tags.c
mm/kasan/kasan.h
mm/kasan/report.c
mm/kasan/report_generic.c
mm/mapping_dirty_helpers.c
mm/mmap.c
mm/mmu_gather.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_poison.c
mm/percpu-internal.h
mm/percpu-stats.c
mm/percpu.c
mm/ptdump.c
mm/shuffle.c
mm/slab.h
net/batman-adv/translation-table.c
net/bluetooth/ecdh_helper.c
net/bluetooth/hci_request.c
net/bluetooth/selftest.c
net/bluetooth/smp.c
net/bridge/netfilter/ebtable_broute.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/bridge/netfilter/ebtables.c
net/can/bcm.c
net/can/isotp.c
net/can/raw.c
net/core/dev.c
net/core/neighbour.c
net/core/rtnetlink.c
net/core/skmsg.c
net/core/sock.c
net/core/xdp.c
net/dsa/dsa2.c
net/dsa/switch.c
net/ethtool/common.c
net/ethtool/eee.c
net/ethtool/ioctl.c
net/ethtool/netlink.h
net/ethtool/pause.c
net/hsr/hsr_device.c
net/hsr/hsr_forward.c
net/ieee802154/nl-mac.c
net/ieee802154/nl802154.c
net/ipv4/ah4.c
net/ipv4/devinet.c
net/ipv4/esp4.c
net/ipv4/esp4_offload.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/arptable_filter.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/ah6.c
net/ipv6/esp6.c
net/ipv6/esp6_offload.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/sit.c
net/mac80211/cfg.c
net/mac80211/mlme.c
net/mac80211/tx.c
net/mac802154/llsec.c
net/mptcp/protocol.c
net/ncsi/ncsi-manage.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_limit.c
net/netfilter/x_tables.c
net/netlink/af_netlink.c
net/nfc/llcp_sock.c
net/openvswitch/conntrack.c
net/qrtr/qrtr.c
net/rds/message.c
net/rfkill/core.c
net/sched/act_api.c
net/sched/cls_api.c
net/sched/sch_htb.c
net/sched/sch_teql.c
net/sctp/ipv6.c
net/sctp/socket.c
net/tipc/bearer.h
net/tipc/crypto.c
net/tipc/net.c
net/tipc/node.c
net/tipc/socket.c
net/wireless/nl80211.c
net/wireless/scan.c
net/wireless/sme.c
net/xfrm/xfrm_compat.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_state.c
scripts/Makefile
scripts/Makefile.kasan
scripts/spdxcheck.py
security/Kconfig.hardening
security/integrity/digsig_asymmetric.c
security/integrity/platform_certs/keyring_handler.c
security/integrity/platform_certs/load_uefi.c
security/keys/Kconfig
security/keys/trusted-keys/Makefile
security/keys/trusted-keys/tpm2key.asn1 [new file with mode: 0644]
security/keys/trusted-keys/trusted_core.c [new file with mode: 0644]
security/keys/trusted-keys/trusted_tee.c [new file with mode: 0644]
security/keys/trusted-keys/trusted_tpm1.c
security/keys/trusted-keys/trusted_tpm2.c
security/selinux/ss/avtab.c
security/selinux/ss/avtab.h
security/selinux/ss/conditional.c
security/selinux/ss/services.c
security/selinux/ss/sidtab.c
security/selinux/ss/sidtab.h
sound/drivers/aloop.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/soc/bcm/cygnus-ssp.c
sound/soc/codecs/lpass-rx-macro.c
sound/soc/codecs/lpass-tx-macro.c
sound/soc/codecs/max98373-i2c.c
sound/soc/codecs/max98373-sdw.c
sound/soc/codecs/max98373.c
sound/soc/codecs/wm8960.c
sound/soc/fsl/fsl_esai.c
sound/soc/intel/atom/sst-mfld-platform-pcm.c
sound/soc/sof/core.c
sound/soc/sof/intel/apl.c
sound/soc/sof/intel/cnl.c
sound/soc/sof/intel/hda-dsp.c
sound/soc/sof/intel/hda.h
sound/soc/sof/intel/icl.c
sound/soc/sof/intel/pci-tgl.c
sound/soc/sof/intel/tgl.c
sound/soc/sunxi/sun4i-codec.c
tools/arch/ia64/include/asm/barrier.h
tools/arch/x86/include/asm/msr-index.h
tools/arch/x86/kcpuid/Makefile [new file with mode: 0644]
tools/arch/x86/kcpuid/cpuid.csv [new file with mode: 0644]
tools/arch/x86/kcpuid/kcpuid.c [new file with mode: 0644]
tools/cgroup/memcg_slabinfo.py
tools/include/linux/static_call_types.h
tools/include/uapi/asm/errno.h
tools/lib/bpf/ringbuf.c
tools/lib/bpf/xsk.c
tools/perf/builtin-ftrace.c
tools/perf/builtin-inject.c
tools/perf/trace/beauty/tracepoints/x86_msr.sh
tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c
tools/perf/util/auxtrace.c
tools/perf/util/block-info.c
tools/perf/util/data.c
tools/perf/util/map.c
tools/power/x86/intel-speed-select/isst-config.c
tools/power/x86/intel-speed-select/isst-display.c
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/arm64/Makefile
tools/testing/selftests/arm64/bti/.gitignore [new file with mode: 0644]
tools/testing/selftests/arm64/bti/Makefile [new file with mode: 0644]
tools/testing/selftests/arm64/bti/assembler.h [new file with mode: 0644]
tools/testing/selftests/arm64/bti/btitest.h [new file with mode: 0644]
tools/testing/selftests/arm64/bti/compiler.h [new file with mode: 0644]
tools/testing/selftests/arm64/bti/gen/.gitignore [new file with mode: 0644]
tools/testing/selftests/arm64/bti/signal.c [new file with mode: 0644]
tools/testing/selftests/arm64/bti/signal.h [new file with mode: 0644]
tools/testing/selftests/arm64/bti/start.S [new file with mode: 0644]
tools/testing/selftests/arm64/bti/syscall.S [new file with mode: 0644]
tools/testing/selftests/arm64/bti/system.c [new file with mode: 0644]
tools/testing/selftests/arm64/bti/system.h [new file with mode: 0644]
tools/testing/selftests/arm64/bti/test.c [new file with mode: 0644]
tools/testing/selftests/arm64/bti/teststubs.S [new file with mode: 0644]
tools/testing/selftests/arm64/bti/trampoline.S [new file with mode: 0644]
tools/testing/selftests/arm64/mte/Makefile
tools/testing/selftests/arm64/mte/check_ksm_options.c
tools/testing/selftests/arm64/mte/check_user_mem.c
tools/testing/selftests/arm64/mte/mte_common_util.c
tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c [new file with mode: 0644]
tools/testing/selftests/bpf/verifier/bounds.c
tools/testing/selftests/bpf/verifier/bounds_deduction.c
tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
tools/testing/selftests/bpf/verifier/map_ptr.c
tools/testing/selftests/bpf/verifier/unpriv.c
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
tools/testing/selftests/firmware/fw_namespace.c
tools/testing/selftests/lkdtm/.gitignore
tools/testing/selftests/lkdtm/Makefile
tools/testing/selftests/lkdtm/stack-entropy.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
tools/testing/selftests/sgx/defines.h
tools/testing/selftests/sgx/load.c
tools/testing/selftests/sgx/main.c
tools/testing/selftests/timers/clocksource-switch.c
tools/testing/selftests/timers/leap-a-day.c
tools/testing/selftests/timers/leapcrash.c
tools/testing/selftests/timers/threadtest.c
tools/testing/selftests/x86/thunks_32.S

index 541635d2e02ecd1fbfb376224eb2cf535b16a35d..2d93232ed72b80bd7a6995122fc177c8f599b0e1 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -168,6 +168,7 @@ Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
 Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
 John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
+Jordan Crouse <jordan@cosmicpenguin.net> <jcrouse@codeaurora.org>
 <josh@joshtriplett.org> <josh@freedesktop.org>
 <josh@joshtriplett.org> <josh@kernel.org>
 <josh@joshtriplett.org> <josht@linux.vnet.ibm.com>
@@ -253,8 +254,14 @@ Morten Welinder <welinder@anemone.rentec.com>
 Morten Welinder <welinder@darter.rentec.com>
 Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
+Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
 Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
+Nicholas Piggin <npiggin@gmail.com> <npiggen@suse.de>
+Nicholas Piggin <npiggin@gmail.com> <npiggin@kernel.dk>
+Nicholas Piggin <npiggin@gmail.com> <npiggin@suse.de>
+Nicholas Piggin <npiggin@gmail.com> <nickpiggin@yahoo.com.au>
+Nicholas Piggin <npiggin@gmail.com> <piggin@cyberone.com.au>
 Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
 Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
 Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
index d447a611c41b7707f00ec9c8e925c52013d25f75..c78fc9282876f3d12e01cf7dca56330cc75cf850 100644 (file)
@@ -82,6 +82,24 @@ Description:    Allows the root user to read or write 64 bit data directly
                 If the IOMMU is disabled, it also allows the root user to read
                 or write from the host a device VA of a host mapped memory
 
+What:           /sys/kernel/debug/habanalabs/hl<n>/data_dma
+Date:           Apr 2021
+KernelVersion:  5.13
+Contact:        ogabbay@kernel.org
+Description:    Allows the root user to read from the device's internal
+                memory (DRAM/SRAM) through a DMA engine.
+                This property is a binary blob that contains the result of the
+                DMA transfer.
+                This custom interface is needed (instead of using the generic
+                Linux user-space PCI mapping) because the amount of internal
+                memory is huge (>32GB) and reading it via the PCI bar will take
+                a very long time.
+                This interface doesn't support concurrency in the same device.
+                In GAUDI and GOYA, this action can cause undefined behavior
+                in case it is done while the device is executing user
+                workloads.
+                Only supported on GAUDI at this stage.
+
 What:           /sys/kernel/debug/habanalabs/hl<n>/device
 Date:           Jan 2019
 KernelVersion:  5.1
@@ -90,6 +108,24 @@ Description:    Enables the root user to set the device to specific state.
                 Valid values are "disable", "enable", "suspend", "resume".
                 User can read this property to see the valid values
 
+What:           /sys/kernel/debug/habanalabs/hl<n>/dma_size
+Date:           Apr 2021
+KernelVersion:  5.13
+Contact:        ogabbay@kernel.org
+Description:    Specify the size of the DMA transaction when using DMA to read
+                from the device's internal memory. The value can not be larger
+                than 128MB. Writing to this value initiates the DMA transfer.
+                When the write is finished, the user can read the "data_dma"
+                blob
+
+What:           /sys/kernel/debug/habanalabs/hl<n>/dump_security_violations
+Date:           Jan 2021
+KernelVersion:  5.12
+Contact:        ogabbay@kernel.org
+Description:    Dumps all security violations to dmesg. This will also ack
+                all security violations, meaning those violations will not be
+                dumped next time user calls this API
+
 What:           /sys/kernel/debug/habanalabs/hl<n>/engines
 Date:           Jul 2019
 KernelVersion:  5.3
@@ -154,6 +190,16 @@ Description:    Displays the hop values and physical address for a given ASID
                 e.g. to display info about VA 0x1000 for ASID 1 you need to do:
                 echo "1 0x1000" > /sys/kernel/debug/habanalabs/hl0/mmu
 
+What:           /sys/kernel/debug/habanalabs/hl<n>/mmu_error
+Date:           Mar 2021
+KernelVersion:  5.12
+Contact:        fkassabri@habana.ai
+Description:    Check and display page fault or access violation mmu errors for
+                all MMUs specified in mmu_cap_mask.
+                e.g. to display error info for MMU hw cap bit 9, you need to do:
+                echo "0x200" > /sys/kernel/debug/habanalabs/hl0/mmu_error
+                cat /sys/kernel/debug/habanalabs/hl0/mmu_error
+
 What:           /sys/kernel/debug/habanalabs/hl<n>/set_power_state
 Date:           Jan 2019
 KernelVersion:  5.1
@@ -161,6 +207,13 @@ Contact:        ogabbay@kernel.org
 Description:    Sets the PCI power state. Valid values are "1" for D0 and "2"
                 for D3Hot
 
+What:           /sys/kernel/debug/habanalabs/hl<n>/stop_on_err
+Date:           Mar 2020
+KernelVersion:  5.6
+Contact:        ogabbay@kernel.org
+Description:    Sets the stop-on-error option for the device engines. A value
+                of "0" disables it; any other value enables it.
+
 What:           /sys/kernel/debug/habanalabs/hl<n>/userptr
 Date:           Jan 2019
 KernelVersion:  5.1
@@ -174,19 +227,4 @@ Date:           Jan 2019
 KernelVersion:  5.1
 Contact:        ogabbay@kernel.org
 Description:    Displays a list with information about all the active virtual
-                address mappings per ASID
-
-What:           /sys/kernel/debug/habanalabs/hl<n>/stop_on_err
-Date:           Mar 2020
-KernelVersion:  5.6
-Contact:        ogabbay@kernel.org
-Description:    Sets the stop-on_error option for the device engines. Value of
-                "0" is for disable, otherwise enable.
-
-What:           /sys/kernel/debug/habanalabs/hl<n>/dump_security_violations
-Date:           Jan 2021
-KernelVersion:  5.12
-Contact:        ogabbay@kernel.org
-Description:    Dumps all security violations to dmesg. This will also ack
-                all security violations meanings those violations will not be
-                dumped next time user calls this API
+                address mappings per ASID and all user mappings of HW blocks
index 6eee10c3d5a1ae6f5e30f114fea6200ba8d2e44f..637d8587d03d21d54989e1314e92205920cf738c 100644 (file)
@@ -1,7 +1,7 @@
 What:          /sys/kernel/debug/moxtet/input
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Read input from the shift registers, in hexadecimal.
                Returns N+1 bytes, where N is the number of Moxtet connected
                modules. The first byte is from the CPU board itself.
@@ -19,7 +19,7 @@ Description:  (Read) Read input from the shift registers, in hexadecimal.
 What:          /sys/kernel/debug/moxtet/output
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (RW) Read last written value to the shift registers, in
                hexadecimal, or write values to the shift registers, also
                in hexadecimal.
index 326df1b74707e1753a6d351b1dd1f783d4b32a43..813987d5de4e96d0f48ed0d238526b28faa2f5e5 100644 (file)
@@ -1,7 +1,7 @@
 What:          /sys/kernel/debug/turris-mox-rwtm/do_sign
 Date:          Jun 2020
 KernelVersion: 5.8
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:
 
                ======= ===========================================================
index 4a6d61b44f3f4fb59323c21963691455f4dc8be3..32dccc00d57dff427f1342dd08d92231e398393d 100644 (file)
@@ -1,17 +1,17 @@
 What:          /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_description
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Moxtet module description. Format: string
 
 What:          /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_id
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Moxtet module ID. Format: %x
 
 What:          /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_name
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Moxtet module name. Format: string
index 1936f732415571326a24ca147833451917cabdc1..4ec03cd363579c176e971fd55602c2ef6ae74a6d 100644 (file)
@@ -1,4 +1,5 @@
-What:          /sys/devices/pci0000:00/*/QEMU0001:00/capability
+What:          /sys/devices/pci0000:00/*/QEMU0001:00/capability for MMIO
+               /sys/bus/pci/drivers/pvpanic-pci/0000:00:0*.0/capability for PCI
 Date:          Jan 2021
 Contact:       zhenwei pi <pizhenwei@bytedance.com>
 Description:
@@ -12,6 +13,7 @@ Description:
                https://git.qemu.org/?p=qemu.git;a=blob_plain;f=docs/specs/pvpanic.txt
 
 What:          /sys/devices/pci0000:00/*/QEMU0001:00/events
+               /sys/bus/pci/drivers/pvpanic-pci/0000:00:0*.0/events for PCI
 Date:          Jan 2021
 Contact:       zhenwei pi <pizhenwei@bytedance.com>
 Description:
index 795a5de12fc139f59245df6f15efd09bd01f1f49..c4d46970c1cf923ae12341e5cef2333f2dc08667 100644 (file)
@@ -1,7 +1,7 @@
 What:          /sys/class/leds/<led>/device/brightness
 Date:          July 2020
 KernelVersion: 5.9
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (RW) On the front panel of the Turris Omnia router there is also
                a button which can be used to control the intensity of all the
                LEDs at once, so that if they are too bright, user can dim them.
diff --git a/Documentation/ABI/testing/sysfs-driver-xdata b/Documentation/ABI/testing/sysfs-driver-xdata
new file mode 100644 (file)
index 0000000..f574e8e
--- /dev/null
@@ -0,0 +1,49 @@
+What:          /sys/class/misc/drivers/dw-xdata-pcie.<device>/write
+Date:          April 2021
+KernelVersion: 5.13
+Contact:       Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+Description:   Allows the user to enable the PCIe traffic generator which
+               will create write TLPs frames - from the Root Complex to the
+               Endpoint direction or to disable the PCIe traffic generator
+               in all directions.
+
+               Write y/1/on to enable, n/0/off to disable
+
+               Usage e.g.
+                echo 1 > /sys/class/misc/dw-xdata-pcie.<device>/write
+               or
+                echo 0 > /sys/class/misc/dw-xdata-pcie.<device>/write
+
+               The user can read the current PCIe link throughput generated
+               through this generator in MB/s.
+
+               Usage e.g.
+                cat /sys/class/misc/dw-xdata-pcie.<device>/write
+                204
+
+               The file is read and write.
+
+What:          /sys/class/misc/dw-xdata-pcie.<device>/read
+Date:          April 2021
+KernelVersion: 5.13
+Contact:       Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+Description:   Allows the user to enable the PCIe traffic generator which
+               will create read TLPs frames - from the Endpoint to the Root
+               Complex direction or to disable the PCIe traffic generator
+                in all directions.
+
+               Write y/1/on to enable, n/0/off to disable
+
+               Usage e.g.
+                echo 1 > /sys/class/misc/dw-xdata-pcie.<device>/read
+               or
+                echo 0 > /sys/class/misc/dw-xdata-pcie.<device>/read
+
+               The user can read the current PCIe link throughput generated
+               through this generator in MB/s.
+
+               Usage e.g.
+                cat /sys/class/misc/dw-xdata-pcie.<device>/read
+                199
+
+               The file is read and write.
index 637c668cbe45c24e02b8c7725bcfb5d676869521..12ed843e1d3e0a8826f2e189d2bfd1287e5ede8a 100644 (file)
@@ -39,8 +39,8 @@ Description:
 
                The uv_type entry contains the hub revision number.
                This value can be used to identify the UV system version::
-                       "0.*" = Hubless UV ('*' is subtype)
 
+                       "0.*" = Hubless UV ('*' is subtype)
                        "3.0" = UV2
                        "5.0" = UV3
                        "7.0" = UV4
index b8631f5a29c4c5779347333098ac14252293dbe3..ea5e5b489bc77ba6f429481f8216f99d77d5bd2e 100644 (file)
@@ -1,21 +1,21 @@
 What:          /sys/firmware/turris-mox-rwtm/board_version
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Board version burned into eFuses of this Turris Mox board.
                Format: %i
 
 What:          /sys/firmware/turris-mox-rwtm/mac_address*
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) MAC addresses burned into eFuses of this Turris Mox board.
                Format: %pM
 
 What:          /sys/firmware/turris-mox-rwtm/pubkey
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) ECDSA public key (in pubkey hex compressed form) computed
                as pair to the ECDSA private key burned into eFuses of this
                Turris Mox Board.
@@ -24,7 +24,7 @@ Description:  (Read) ECDSA public key (in pubkey hex compressed form) computed
 What:          /sys/firmware/turris-mox-rwtm/ram_size
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) RAM size in MiB of this Turris Mox board as was detected
                during manufacturing and burned into eFuses. Can be 512 or 1024.
                Format: %i
@@ -32,6 +32,6 @@ Description:  (Read) RAM size in MiB of this Turris Mox board as was detected
 What:          /sys/firmware/turris-mox-rwtm/serial_number
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Serial number burned into eFuses of this Turris Mox device.
                Format: %016X
diff --git a/Documentation/ABI/testing/sysfs-platform-intel-pmc b/Documentation/ABI/testing/sysfs-platform-intel-pmc
new file mode 100644 (file)
index 0000000..ef199af
--- /dev/null
@@ -0,0 +1,20 @@
+What:          /sys/devices/platform/<platform>/etr3
+Date:          Apr 2021
+KernelVersion: 5.13
+Contact:       "Tomas Winkler" <tomas.winkler@intel.com>
+Description:
+               The file exposes "Extended Test Mode Register 3" global
+               reset bits. The bits are used during an Intel platform
+               manufacturing process to indicate that consequent reset
+               of the platform is a "global reset". This type of reset
+               is required in order for manufacturing configurations
+               to take effect.
+
+               Display global reset setting bits for PMC.
+                       * bit 31 - global reset is locked
+                       * bit 20 - global reset is set
+               Writing bit 20 value to the etr3 will induce
+               a platform "global reset" upon consequent platform reset,
+               in case the register is not locked.
+               The "global reset bit" should be locked on a production
+               system and the file is in read-only mode.
index 04545725f187ffe787f3a226a328b1c951251f16..9b3c086d4266b0075f7eea763a70921ad2e4d8aa 100644 (file)
                                   state is kept private from the host.
                                   Not valid if the kernel is running in EL2.
 
-                       Defaults to VHE/nVHE based on hardware support and
-                       the value of CONFIG_ARM64_VHE.
+                       Defaults to VHE/nVHE based on hardware support.
 
        kvm-arm.vgic_v3_group0_trap=
                        [KVM,ARM] Trap guest accesses to GICv3 group-0
                        fully seed the kernel's CRNG. Default is controlled
                        by CONFIG_RANDOM_TRUST_CPU.
 
+       randomize_kstack_offset=
+                       [KNL] Enable or disable kernel stack offset
+                       randomization, which provides roughly 5 bits of
+                       entropy, frustrating memory corruption attacks
+                       that depend on stack address determinism or
+                       cross-syscall address exposures. This is only
+                       available on architectures that have defined
+                       CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET.
+                       Format: <bool>  (1/Y/y=enable, 0/N/n=disable)
+                       Default is CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT.
+
        ras=option[,option,...] [KNL] RAS-specific options
 
                cec_disable     [X86]
        spia_peddr=
 
        split_lock_detect=
-                       [X86] Enable split lock detection
+                       [X86] Enable split lock detection or bus lock detection
 
                        When enabled (and if hardware support is present), atomic
                        instructions that access data across cache line
-                       boundaries will result in an alignment check exception.
+                       boundaries will result in an alignment check exception
+                       for split lock detection or a debug exception for
+                       bus lock detection.
 
                        off     - not enabled
 
-                       warn    - the kernel will emit rate limited warnings
+                       warn    - the kernel will emit rate-limited warnings
                                  about applications triggering the #AC
-                                 exception. This mode is the default on CPUs
-                                 that supports split lock detection.
+                                 exception or the #DB exception. This mode is
+                                 the default on CPUs that support split lock
+                                 detection or bus lock detection. Default
+                                 behavior is by #AC if both features are
+                                 enabled in hardware.
 
                        fatal   - the kernel will send SIGBUS to applications
-                                 that trigger the #AC exception.
+                                 that trigger the #AC exception or the #DB
+                                 exception. Default behavior is by #AC if
+                                 both features are enabled in hardware.
 
                        If an #AC exception is hit in the kernel or in
                        firmware (i.e. not while executing in user mode)
                        the kernel will oops in either "warn" or "fatal"
                        mode.
 
+                       #DB exception for bus lock is triggered only when
+                       CPL > 0.
+
        srbds=          [X86,INTEL]
                        Control the Special Register Buffer Data Sampling
                        (SRBDS) mitigation.
                        See Documentation/admin-guide/mm/transhuge.rst
                        for more details.
 
+       trusted.source= [KEYS]
+                       Format: <string>
+                       This parameter identifies the trust source as a backend
+                       for trusted keys implementation. Supported trust
+                       sources:
+                       - "tpm"
+                       - "tee"
+                       If not specified then it defaults to iterating through
+                       the trust source list starting with TPM and assigns the
+                       first trust source as a backend which is initialized
+                       successfully during iteration.
+
        tsc=            Disable clocksource stability checks for TSC.
                        Format: <string>
                        [x86] reliable: mark tsc clocksource as reliable, this
index 91fd6846ce17ca032309d376d58f8f0a9e39e412..6721a80a2d4fbfd48328a4fe2ca18d892a311808 100644 (file)
@@ -52,6 +52,7 @@ detailed description):
        - LCD Shadow (PrivacyGuard) enable and disable
        - Lap mode sensor
        - Setting keyboard language
+       - WWAN Antenna type
 
 A compatibility table by model and feature is maintained on the web
 site, http://ibm-acpi.sf.net/. I appreciate any success or failure
@@ -1490,6 +1491,25 @@ fr(French), fr-ch(French(Switzerland)), hu(Hungarian), it(Italy), jp (Japan),
 nl(Dutch), nn(Norway), pl(Polish), pt(portugese), sl(Slovenian), sv(Sweden),
 tr(Turkey)
 
+WWAN Antenna type
+-----------------
+
+sysfs: wwan_antenna_type
+
+On some newer Thinkpads we need to set SAR value based on the antenna
+type. This interface will be used by userspace to get the antenna type
+and set the corresponding SAR value, as is required for FCC certification.
+
+The available commands are::
+
+        cat /sys/devices/platform/thinkpad_acpi/wwan_antenna_type
+
+Currently 2 antenna types are supported as mentioned below:
+- type a
+- type b
+
+The property is read-only. If the platform doesn't have support, the sysfs
+class is not created.
 
 Adaptive keyboard
 -----------------
index 404a5c3d9d00d782b98ea0d76cea01acd85700a7..546979360513154824e221ae2739f25c59bbc4d1 100644 (file)
@@ -53,6 +53,60 @@ Example usage of perf::
   $# perf stat -a -e hisi_sccl3_l3c0/rd_hit_cpipe/ sleep 5
   $# perf stat -a -e hisi_sccl3_l3c0/config=0x02/ sleep 5
 
+For HiSilicon uncore PMU v2 whose identifier is 0x30, the topology is the same
+as PMU v1, but some new functions are added to the hardware.
+
+(a) L3C PMU supports filtering by core/thread within the cluster which can be
+specified as a bitmap::
+
+  $# perf stat -a -e hisi_sccl3_l3c0/config=0x02,tt_core=0x3/ sleep 5
+
+This will only count the operations from core/thread 0 and 1 in this cluster.
+
+(b) Tracetag allows the user to choose to count only read, write or atomic
+operations via the tt_req parameter in perf. The default value counts all
+operations. tt_req is 3bits, 3'b100 represents read operations, 3'b101
+represents write operations, 3'b110 represents atomic store operations and
+3'b111 represents atomic non-store operations, other values are reserved::
+
+  $# perf stat -a -e hisi_sccl3_l3c0/config=0x02,tt_req=0x4/ sleep 5
+
+This will only count the read operations in this cluster.
+
+(c) Datasrc allows the user to check where the data comes from. It is 5 bits.
+Some important codes are as follows:
+5'b00001: comes from L3C in this die;
+5'b01000: comes from L3C in the cross-die;
+5'b01001: comes from L3C which is in another socket;
+5'b01110: comes from the local DDR;
+5'b01111: comes from the cross-die DDR;
+5'b10000: comes from cross-socket DDR;
+etc. It is mainly helpful for finding the data source nearest to the CPU
+cores. If datasrc_cfg is used on multi-chip systems, datasrc_skt shall also
+be configured in the perf command::
+
+  $# perf stat -a -e hisi_sccl3_l3c0/config=0xb9,datasrc_cfg=0xE/,
+  hisi_sccl3_l3c0/config=0xb9,datasrc_cfg=0xF/ sleep 5
+
+(d) Some HiSilicon SoCs encapsulate multiple CPU and IO dies. Each CPU die
+contains several Compute Clusters (CCLs). The I/O dies are called Super I/O
+clusters (SICL) containing multiple I/O clusters (ICLs). Each CCL/ICL in the
+SoC has a unique ID. Each ID is 11 bits, including a 6-bit SCCL-ID and a
+5-bit CCL/ICL-ID. For an I/O die, the ICL-ID is followed by:
+5'b00000: I/O_MGMT_ICL;
+5'b00001: Network_ICL;
+5'b00011: HAC_ICL;
+5'b10000: PCIe_ICL;
+
+Users can configure IDs to count data coming from a specific CCL/ICL by
+setting srcid_cmd & srcid_msk, and data destined for a specific CCL/ICL by setting
+tgtid_cmd & tgtid_msk. A set bit in srcid_msk/tgtid_msk means the PMU will not
+check the bit when matching against the srcid_cmd/tgtid_cmd.
+
+If all of these options are disabled, the PMU works with the default settings,
+which do not distinguish the filter condition and ID information, and returns
+the total counter values in the PMU counters.
+
 The current driver does not support sampling. So "perf record" is unsupported.
 Also attach to a task is unsupported as the events are all uncore.
 
index 7552dbc1cc54c76d9b6ea3b6983d9ee800453d84..4fcc00add117bc55ad958728db2af1407b8fe084 100644 (file)
@@ -202,9 +202,10 @@ Before jumping into the kernel, the following conditions must be met:
 
 - System registers
 
-  All writable architected system registers at the exception level where
-  the kernel image will be entered must be initialised by software at a
-  higher exception level to prevent execution in an UNKNOWN state.
+  All writable architected system registers at or below the exception
+  level where the kernel image will be entered must be initialised by
+  software at a higher exception level to prevent execution in an UNKNOWN
+  state.
 
   - SCR_EL3.FIQ must have the same value across all CPUs the kernel is
     executing on.
@@ -270,6 +271,12 @@ Before jumping into the kernel, the following conditions must be met:
       having 0b1 set for the corresponding bit for each of the auxiliary
       counters present.
 
+  For CPUs with the Fine Grained Traps (FEAT_FGT) extension present:
+
+  - If EL3 is present and the kernel is entered at EL2:
+
+    - SCR_EL3.FGTEn (bit 27) must be initialised to 0b1.
+
 The requirements described above for CPU mode, caches, MMUs, architected
 timers, coherency and system registers apply to all CPUs.  All CPUs must
 enter the kernel in the same exception level.
index 30b2ab06526b27bdd5f6f59922db85cf8b2939bf..f127666ea3a81659daf3bba59c4c64c38bb6ff20 100644 (file)
@@ -107,3 +107,37 @@ filter out the Pointer Authentication system key registers from
 KVM_GET/SET_REG_* ioctls and mask those features from cpufeature ID
 register. Any attempt to use the Pointer Authentication instructions will
 result in an UNDEFINED exception being injected into the guest.
+
+
+Enabling and disabling keys
+---------------------------
+
+The prctl PR_PAC_SET_ENABLED_KEYS allows the user program to control which
+PAC keys are enabled in a particular task. It takes two arguments, the
+first being a bitmask of PR_PAC_APIAKEY, PR_PAC_APIBKEY, PR_PAC_APDAKEY
+and PR_PAC_APDBKEY specifying which keys shall be affected by this prctl,
+and the second being a bitmask of the same bits specifying whether the key
+should be enabled or disabled. For example::
+
+  prctl(PR_PAC_SET_ENABLED_KEYS,
+        PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY,
+        PR_PAC_APIBKEY, 0, 0);
+
+disables all keys except the IB key.
+
+The main reason why this is useful is to enable a userspace ABI that uses PAC
+instructions to sign and authenticate function pointers and other pointers
+exposed outside of the function, while still allowing binaries conforming to
+the ABI to interoperate with legacy binaries that do not sign or authenticate
+pointers.
+
+The idea is that a dynamic loader or early startup code would issue this
+prctl very early after establishing that a process may load legacy binaries,
+but before executing any PAC instructions.
+
+For compatibility with previous kernel versions, processes start up with IA,
+IB, DA and DB enabled, and are reset to this state on exec(). Processes created
+via fork() and clone() inherit the key enabled state from the calling process.
+
+It is recommended to avoid disabling the IA key, as this has higher performance
+overhead than disabling any of the other keys.
index 4a9d9c794ee5d889638d1a3af008dec06d11feaf..cbc4d4500241852d43d1f9fe4bea7729021b49ba 100644 (file)
@@ -40,7 +40,7 @@ space obtained in one of the following ways:
   during creation and with the same restrictions as for ``mmap()`` above
   (e.g. data, bss, stack).
 
-The AArch64 Tagged Address ABI has two stages of relaxation depending
+The AArch64 Tagged Address ABI has two stages of relaxation depending on
 how the user addresses are used by the kernel:
 
 1. User addresses not accessed by the kernel but used for address space
index ddf4239a58908cef17c30b716d86277bd49911c4..6f6ab3ed7b793aecc569b96128c7ab34c8e8a753 100644 (file)
@@ -161,6 +161,15 @@ particular KASAN features.
 
 - ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
 
+- ``kasan.mode=sync`` or ``=async`` controls whether KASAN is configured in
+  synchronous or asynchronous mode of execution (default: ``sync``).
+  Synchronous mode: a bad access is detected immediately when a tag
+  check fault occurs.
+  Asynchronous mode: a bad access detection is delayed. When a tag check
+  fault occurs, the information is stored in hardware (in the TFSR_EL1
+  register for arm64). The kernel periodically checks the hardware and
+  only reports tag faults during these checks.
+
 - ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
   traces collection (default: ``on``).
 
index 1d48ac712b23c0268db1ad961802fc0a37deb6a2..a410d2cedde6317216fa6aa188af8730fc9c66e2 100644 (file)
@@ -14,6 +14,7 @@ properties:
     enum:
       - ti,j721e-sa2ul
       - ti,am654-sa2ul
+      - ti,am64-sa2ul
 
   reg:
     maxItems: 1
@@ -45,6 +46,18 @@ properties:
     description:
       Address translation for the possible RNG child node for SA2UL
 
+  clocks:
+    items:
+      - description: Clock used by PKA
+      - description: Main Input Clock
+      - description: Clock used by rng
+
+  clock-names:
+    items:
+      - const: pka_in_clk
+      - const: x1_clk
+      - const: x2_clk
+
 patternProperties:
   "^rng@[a-f0-9]+$":
     type: object
@@ -57,7 +70,16 @@ required:
   - power-domains
   - dmas
   - dma-names
-  - dma-coherent
+
+if:
+  properties:
+    compatible:
+      enum:
+        - ti,j721e-sa2ul
+        - ti,am654-sa2ul
+then:
+  required:
+    - dma-coherent
 
 additionalProperties: false
 
diff --git a/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.txt b/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.txt
deleted file mode 100644 (file)
index 35383ad..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-Qualcomm's PM8941 USB ID Extcon device
-
-Some Qualcomm PMICs have a "misc" module that can be used to detect when
-the USB ID pin has been pulled low or high.
-
-PROPERTIES
-
-- compatible:
-    Usage: required
-    Value type: <string>
-    Definition: Should contain "qcom,pm8941-misc";
-
-- reg:
-    Usage: required
-    Value type: <u32>
-    Definition: Should contain the offset to the misc address space
-
-- interrupts:
-    Usage: required
-    Value type: <prop-encoded-array>
-    Definition: Should contain the usb id interrupt
-
-- interrupt-names:
-    Usage: required
-    Value type: <stringlist>
-    Definition: Should contain the string "usb_id" for the usb id interrupt
-
-Example:
-
-       pmic {
-               usb_id: misc@900 {
-                       compatible = "qcom,pm8941-misc";
-                       reg = <0x900>;
-                       interrupts = <0x0 0x9 0 IRQ_TYPE_EDGE_BOTH>;
-                       interrupt-names = "usb_id";
-               };
-       }
-
-       usb-controller {
-               extcon = <&usb_id>;
-       };
diff --git a/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.yaml b/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.yaml
new file mode 100644 (file)
index 0000000..6a9c96f
--- /dev/null
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/extcon/qcom,pm8941-misc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. PM8941 USB ID Extcon device
+
+maintainers:
+  - Guru Das Srinagesh <gurus@codeaurora.org>
+
+description: |
+  Some Qualcomm PMICs have a "misc" module that can be used to detect when
+  the USB ID pin has been pulled low or high.
+
+properties:
+  compatible:
+    items:
+      - const: qcom,pm8941-misc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    minItems: 1
+    maxItems: 2
+
+  interrupt-names:
+    minItems: 1
+    items:
+      - const: usb_id
+      - const: usb_vbus
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - interrupt-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    pmic {
+            #address-cells = <1>;
+            #size-cells = <0>;
+            interrupt-controller;
+            #interrupt-cells = <4>;
+
+            usb_id: misc@900 {
+                    compatible = "qcom,pm8941-misc";
+                    reg = <0x900>;
+                    interrupts = <0x0 0x9 0 IRQ_TYPE_EDGE_BOTH>;
+                    interrupt-names = "usb_id";
+            };
+    };
+
+    usb-controller {
+           extcon = <&usb_id>;
+    };
index e811cf8250199b14205787cfdedf8cd6e4aa4df2..d787d57491a1c537786a9d537a9a976c85624e77 100644 (file)
@@ -245,36 +245,31 @@ Base tree contains:
 
 Overlay contains:
 
-/dts-v1/ /plugin/;
-/ {
-       fragment@0 {
-               target = <&fpga_region0>;
-               #address-cells = <1>;
-               #size-cells = <1>;
-               __overlay__ {
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-
-                       firmware-name = "soc_system.rbf";
-                       fpga-bridges = <&fpga_bridge1>;
-                       ranges = <0x20000 0xff200000 0x100000>,
-                                <0x0 0xc0000000 0x20000000>;
-
-                       gpio@10040 {
-                               compatible = "altr,pio-1.0";
-                               reg = <0x10040 0x20>;
-                               altr,ngpio = <4>;
-                               #gpio-cells = <2>;
-                               clocks = <2>;
-                               gpio-controller;
-                       };
-
-                       onchip-memory {
-                               device_type = "memory";
-                               compatible = "altr,onchipmem-15.1";
-                               reg = <0x0 0x10000>;
-                       };
-               };
+/dts-v1/;
+/plugin/;
+
+&fpga_region0 {
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       firmware-name = "soc_system.rbf";
+       fpga-bridges = <&fpga_bridge1>;
+       ranges = <0x20000 0xff200000 0x100000>,
+                <0x0 0xc0000000 0x20000000>;
+
+       gpio@10040 {
+               compatible = "altr,pio-1.0";
+               reg = <0x10040 0x20>;
+               altr,ngpio = <4>;
+               #gpio-cells = <2>;
+               clocks = <2>;
+               gpio-controller;
+       };
+
+       onchip-memory {
+               device_type = "memory";
+               compatible = "altr,onchipmem-15.1";
+               reg = <0x0 0x10000>;
        };
 };
 
@@ -371,25 +366,22 @@ Live Device Tree contains:
        };
 
 DT Overlay contains:
-/dts-v1/ /plugin/;
-/ {
-fragment@0 {
-       target = <&fpga_region0>;
+
+/dts-v1/;
+/plugin/;
+
+&fpga_region0 {
        #address-cells = <1>;
        #size-cells = <1>;
-       __overlay__ {
-               #address-cells = <1>;
-               #size-cells = <1>;
 
-               firmware-name = "zynq-gpio.bin";
+       firmware-name = "zynq-gpio.bin";
 
-               gpio1: gpio@40000000 {
-                       compatible = "xlnx,xps-gpio-1.00.a";
-                       reg = <0x40000000 0x10000>;
-                       gpio-controller;
-                       #gpio-cells = <0x2>;
-                       xlnx,gpio-width= <0x6>;
-               };
+       gpio1: gpio@40000000 {
+               compatible = "xlnx,xps-gpio-1.00.a";
+               reg = <0x40000000 0x10000>;
+               gpio-controller;
+               #gpio-cells = <0x2>;
+               xlnx,gpio-width= <0x6>;
        };
 };
 
@@ -402,41 +394,37 @@ This example programs the FPGA to have two regions that can later be partially
 configured.  Each region has its own bridge in the FPGA fabric.
 
 DT Overlay contains:
-/dts-v1/ /plugin/;
-/ {
-       fragment@0 {
-               target = <&fpga_region0>;
-               #address-cells = <1>;
-               #size-cells = <1>;
-               __overlay__ {
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-
-                       firmware-name = "base.rbf";
-
-                       fpga-bridge@4400 {
-                               compatible = "altr,freeze-bridge-controller";
-                               reg = <0x4400 0x10>;
-
-                               fpga_region1: fpga-region1 {
-                                       compatible = "fpga-region";
-                                       #address-cells = <0x1>;
-                                       #size-cells = <0x1>;
-                                       ranges;
-                               };
-                       };
-
-                       fpga-bridge@4420 {
-                               compatible = "altr,freeze-bridge-controller";
-                               reg = <0x4420 0x10>;
-
-                               fpga_region2: fpga-region2 {
-                                       compatible = "fpga-region";
-                                       #address-cells = <0x1>;
-                                       #size-cells = <0x1>;
-                                       ranges;
-                               };
-                       };
+
+/dts-v1/;
+/plugin/;
+
+&fpga_region0 {
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       firmware-name = "base.rbf";
+
+       fpga-bridge@4400 {
+               compatible = "altr,freeze-bridge-controller";
+               reg = <0x4400 0x10>;
+
+               fpga_region1: fpga-region1 {
+                       compatible = "fpga-region";
+                       #address-cells = <0x1>;
+                       #size-cells = <0x1>;
+                       ranges;
+               };
+       };
+
+       fpga-bridge@4420 {
+               compatible = "altr,freeze-bridge-controller";
+               reg = <0x4420 0x10>;
+
+               fpga_region2: fpga-region2 {
+                       compatible = "fpga-region";
+                       #address-cells = <0x1>;
+                       #size-cells = <0x1>;
+                       ranges;
                };
        };
 };
@@ -451,28 +439,23 @@ differences are that the FPGA is partially reconfigured due to the
 "partial-fpga-config" boolean and the only bridge that is controlled during
 programming is the FPGA based bridge of fpga_region1.
 
-/dts-v1/ /plugin/;
-/ {
-       fragment@0 {
-               target = <&fpga_region1>;
-               #address-cells = <1>;
-               #size-cells = <1>;
-               __overlay__ {
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-
-                       firmware-name = "soc_image2.rbf";
-                       partial-fpga-config;
-
-                       gpio@10040 {
-                               compatible = "altr,pio-1.0";
-                               reg = <0x10040 0x20>;
-                               clocks = <0x2>;
-                               altr,ngpio = <0x4>;
-                               #gpio-cells = <0x2>;
-                               gpio-controller;
-                       };
-               };
+/dts-v1/;
+/plugin/;
+
+&fpga_region1 {
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       firmware-name = "soc_image2.rbf";
+       partial-fpga-config;
+
+       gpio@10040 {
+               compatible = "altr,pio-1.0";
+               reg = <0x10040 0x20>;
+               clocks = <0x2>;
+               altr,ngpio = <0x4>;
+               #gpio-cells = <0x2>;
+               gpio-controller;
        };
 };
 
index 4284d293fa6135bf757d53f2bc8faeb29b86e8e2..0acdfa6d62a4d026d2ff49d4123723f6580d68b5 100644 (file)
@@ -7,13 +7,24 @@ changes from passing through the bridge.  The controller can also
 couple / enable the bridges which allows traffic to pass through the
 bridge normally.
 
+Xilinx LogiCORE Dynamic Function eXchange (DFX) AXI shutdown manager
+Softcore is compatible with the Xilinx LogiCORE pr-decoupler.
+
+The Dynamic Function eXchange AXI shutdown manager prevents AXI traffic
+from passing through the bridge. The controller safely handles AXI4MM
+and AXI4-Lite interfaces on a Reconfigurable Partition when it is
+undergoing dynamic reconfiguration, preventing the system deadlock
+that can occur if AXI transactions are interrupted by DFX.
+
 The Driver supports only MMIO handling. A PR region can have multiple
 PR Decouplers which can be handled independently or chained via decouple/
 decouple_status signals.
 
 Required properties:
 - compatible           : Should contain "xlnx,pr-decoupler-1.00" followed by
-                          "xlnx,pr-decoupler"
+                          "xlnx,pr-decoupler" or
+                          "xlnx,dfx-axi-shutdown-manager-1.00" followed by
+                          "xlnx,dfx-axi-shutdown-manager"
 - regs                 : base address and size for decoupler module
 - clocks               : input clock to IP
 - clock-names          : should contain "aclk"
@@ -22,6 +33,7 @@ See Documentation/devicetree/bindings/fpga/fpga-region.txt and
 Documentation/devicetree/bindings/fpga/fpga-bridge.txt for generic bindings.
 
 Example:
+Partial Reconfig Decoupler:
        fpga-bridge@100000450 {
                compatible = "xlnx,pr-decoupler-1.00",
                             "xlnx,pr-decoupler";
@@ -30,3 +42,13 @@ Example:
                clock-names = "aclk";
                bridge-enable = <0>;
        };
+
+Dynamic Function eXchange AXI shutdown manager:
+       fpga-bridge@100000450 {
+               compatible = "xlnx,dfx-axi-shutdown-manager-1.00",
+                            "xlnx,dfx-axi-shutdown-manager";
+               regs = <0x10000045 0x10>;
+               clocks = <&clkc 15>;
+               clock-names = "aclk";
+               bridge-enable = <0>;
+       };
index 37f18d684f6a6925ff0cccb35ff3bb5cd296b86f..4c5c3712970e5cd0a77de4ec1e14e0771c7826cf 100644 (file)
@@ -32,7 +32,7 @@ Optional node properties:
 - "#thermal-sensor-cells" Used to expose itself to thermal fw.
 
 Read more about iio bindings at
-       Documentation/devicetree/bindings/iio/iio-bindings.txt
+       https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/
 
 Example:
        ncp15wb473@0 {
index ff99344788ab84374e797b6375fa05d41f45b152..fd040284561f89b70e8c3037e85acc2453cb2c82 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Bindings for GPIO bitbanged I2C
 
 maintainers:
-  - Wolfram Sang <wolfram@the-dreams.de>
+  - Wolfram Sang <wsa@kernel.org>
 
 allOf:
   - $ref: /schemas/i2c/i2c-controller.yaml#
index f23966b0d6c6c6d1cf616ab6b840a7bac001ca50..3592d49235e0907eea73766265e1edf4f4627b3e 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Freescale Inter IC (I2C) and High Speed Inter IC (HS-I2C) for i.MX
 
 maintainers:
-  - Wolfram Sang <wolfram@the-dreams.de>
+  - Oleksij Rempel <o.rempel@pengutronix.de>
 
 allOf:
   - $ref: /schemas/i2c/i2c-controller.yaml#
index 9f414dbdae863454d02361fb642ab8d70b141618..433a3fb55a2e42d3bcb78fee5ab23c02d22e69a7 100644 (file)
@@ -14,8 +14,9 @@ description: >
   Industrial I/O subsystem bindings for ADC controller found in
   Ingenic JZ47xx SoCs.
 
-  ADC clients must use the format described in iio-bindings.txt, giving
-  a phandle and IIO specifier pair ("io-channels") to the ADC controller.
+  ADC clients must use the format described in
+  https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml,
+  giving a phandle and IIO specifier pair ("io-channels") to the ADC controller.
 
 properties:
   compatible:
index 054406bbd22b6a81e068dad6b27ff2375246e0d7..721878d5b7af231d28fb336ad08acba58ba9da05 100644 (file)
@@ -24,7 +24,9 @@ properties:
     description: >
       List of phandle and IIO specifier pairs.
       Each pair defines one ADC channel to which a joystick axis is connected.
-      See Documentation/devicetree/bindings/iio/iio-bindings.txt for details.
+      See
+      https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml
+      for details.
 
   '#address-cells':
     const: 1
index 51456c0e9a27870e7efd4e6938492ccad66cc7ed..af5223bb5bdd456985897db3da2607fc34a320c0 100644 (file)
@@ -5,7 +5,10 @@ Required properties:
  - compatible: must be "resistive-adc-touch"
 The device must be connected to an ADC device that provides channels for
 position measurement and optional pressure.
-Refer to ../iio/iio-bindings.txt for details
+Refer to
+https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml
+for details
+
  - iio-channels: must have at least two channels connected to an ADC device.
 These should correspond to the channels exposed by the ADC device and should
 have the right index as the ADC device registers them. These channels
index 799e73cdb90b494af79c98a8dac61ecaf61b74fd..cb6498108b78a67832fc44a8c13530d066ed2f39 100644 (file)
@@ -71,6 +71,16 @@ properties:
       - qcom,sm8250-mmss-noc
       - qcom,sm8250-npu-noc
       - qcom,sm8250-system-noc
+      - qcom,sm8350-aggre1-noc
+      - qcom,sm8350-aggre2-noc
+      - qcom,sm8350-config-noc
+      - qcom,sm8350-dc-noc
+      - qcom,sm8350-gem-noc
+      - qcom,sm8350-lpass-ag-noc
+      - qcom,sm8350-mc-virt
+      - qcom,sm8350-mmss-noc
+      - qcom,sm8350-compute-noc
+      - qcom,sm8350-system-noc
 
   '#interconnect-cells':
     enum: [ 1, 2 ]
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml b/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
new file mode 100644 (file)
index 0000000..29de780
--- /dev/null
@@ -0,0 +1,147 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,sdm660.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SDM660 Network-On-Chip interconnect
+
+maintainers:
+  - AngeloGioacchino Del Regno <kholk11@gmail.com>
+
+description: |
+  The Qualcomm SDM660 interconnect providers support adjusting the
+  bandwidth requirements between the various NoC fabrics.
+
+properties:
+  reg:
+    maxItems: 1
+
+  compatible:
+    enum:
+      - qcom,sdm660-a2noc
+      - qcom,sdm660-bimc
+      - qcom,sdm660-cnoc
+      - qcom,sdm660-gnoc
+      - qcom,sdm660-mnoc
+      - qcom,sdm660-snoc
+
+  '#interconnect-cells':
+    const: 1
+
+  clocks:
+    minItems: 1
+    maxItems: 3
+
+  clock-names:
+    minItems: 1
+    maxItems: 3
+
+required:
+  - compatible
+  - reg
+  - '#interconnect-cells'
+  - clock-names
+  - clocks
+
+additionalProperties: false
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,sdm660-mnoc
+    then:
+      properties:
+        clocks:
+          items:
+            - description: Bus Clock.
+            - description: Bus A Clock.
+            - description: CPU-NoC High-performance Bus Clock.
+        clock-names:
+          items:
+            - const: bus
+            - const: bus_a
+            - const: iface
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,sdm660-a2noc
+              - qcom,sdm660-bimc
+              - qcom,sdm660-cnoc
+              - qcom,sdm660-gnoc
+              - qcom,sdm660-snoc
+    then:
+      properties:
+        clocks:
+          items:
+            - description: Bus Clock.
+            - description: Bus A Clock.
+        clock-names:
+          items:
+            - const: bus
+            - const: bus_a
+
+examples:
+  - |
+      #include <dt-bindings/clock/qcom,rpmcc.h>
+      #include <dt-bindings/clock/qcom,mmcc-sdm660.h>
+
+      bimc: interconnect@1008000 {
+              compatible = "qcom,sdm660-bimc";
+              reg = <0x01008000 0x78000>;
+              #interconnect-cells = <1>;
+              clock-names = "bus", "bus_a";
+              clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
+                       <&rpmcc RPM_SMD_BIMC_A_CLK>;
+      };
+
+      cnoc: interconnect@1500000 {
+              compatible = "qcom,sdm660-cnoc";
+              reg = <0x01500000 0x10000>;
+              #interconnect-cells = <1>;
+              clock-names = "bus", "bus_a";
+              clocks = <&rpmcc RPM_SMD_CNOC_CLK>,
+                       <&rpmcc RPM_SMD_CNOC_A_CLK>;
+      };
+
+      snoc: interconnect@1626000 {
+              compatible = "qcom,sdm660-snoc";
+              reg = <0x01626000 0x7090>;
+              #interconnect-cells = <1>;
+              clock-names = "bus", "bus_a";
+              clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
+                       <&rpmcc RPM_SMD_SNOC_A_CLK>;
+      };
+
+      a2noc: interconnect@1704000 {
+              compatible = "qcom,sdm660-a2noc";
+              reg = <0x01704000 0xc100>;
+              #interconnect-cells = <1>;
+              clock-names = "bus", "bus_a";
+              clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
+                       <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>;
+      };
+
+      mnoc: interconnect@1745000 {
+              compatible = "qcom,sdm660-mnoc";
+              reg = <0x01745000 0xa010>;
+              #interconnect-cells = <1>;
+              clock-names = "bus", "bus_a", "iface";
+              clocks = <&rpmcc RPM_SMD_MMSSNOC_AXI_CLK>,
+                       <&rpmcc RPM_SMD_MMSSNOC_AXI_CLK_A>,
+                       <&mmcc AHB_CLK_SRC>;
+      };
+
+      gnoc: interconnect@17900000 {
+              compatible = "qcom,sdm660-gnoc";
+              reg = <0x17900000 0xe000>;
+              #interconnect-cells = <1>;
+              clock-names = "bus", "bus_a";
+              clocks = <&xo_board>, <&xo_board>;
+      };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/idt,32434-pic.yaml b/Documentation/devicetree/bindings/interrupt-controller/idt,32434-pic.yaml
new file mode 100644 (file)
index 0000000..df5d8d1
--- /dev/null
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/idt,32434-pic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: IDT 79RC32434 Interrupt Controller Device Tree Bindings
+
+maintainers:
+  - Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+
+allOf:
+  - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+  "#interrupt-cells":
+    const: 1
+
+  compatible:
+    const: idt,32434-pic
+
+  reg:
+    maxItems: 1
+
+  interrupt-controller: true
+
+required:
+  - "#interrupt-cells"
+  - compatible
+  - reg
+  - interrupt-controller
+
+additionalProperties: false
+
+examples:
+  - |
+    idtpic3: interrupt-controller@3800c {
+        compatible = "idt,32434-pic";
+        reg = <0x3800c 0x0c>;
+
+        interrupt-controller;
+        #interrupt-cells = <1>;
+
+        interrupt-parent = <&cpuintc>;
+        interrupts = <3>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/interrupt-controller/nuvoton,wpcm450-aic.yaml b/Documentation/devicetree/bindings/interrupt-controller/nuvoton,wpcm450-aic.yaml
new file mode 100644 (file)
index 0000000..9ce6804
--- /dev/null
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/nuvoton,wpcm450-aic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton WPCM450 Advanced Interrupt Controller bindings
+
+maintainers:
+  - Jonathan Neuschäfer <j.neuschaefer@gmx.net>
+
+properties:
+  '#interrupt-cells':
+    const: 2
+
+  compatible:
+    const: nuvoton,wpcm450-aic
+
+  interrupt-controller: true
+
+  reg:
+    maxItems: 1
+
+additionalProperties: false
+
+required:
+  - '#interrupt-cells'
+  - compatible
+  - reg
+  - interrupt-controller
+
+examples:
+  - |
+    aic: interrupt-controller@b8002000 {
+        compatible = "nuvoton,wpcm450-aic";
+        reg = <0xb8002000 0x1000>;
+        interrupt-controller;
+        #interrupt-cells = <2>;
+    };
index e9afb48182c78f8fa241ac0239782328210a2b3d..98d89e53013dc0c7b7d441224616b045bd211afe 100644 (file)
@@ -19,6 +19,7 @@ Properties:
        Value type: <string>
        Definition: Should contain "qcom,<soc>-pdc" and "qcom,pdc"
                    - "qcom,sc7180-pdc": For SC7180
+                   - "qcom,sc7280-pdc": For SC7280
                    - "qcom,sdm845-pdc": For SDM845
                    - "qcom,sdm8250-pdc": For SM8250
                    - "qcom,sdm8350-pdc": For SM8350
index fe7fa25877fd20741551573b6d653664abd0a86c..c7ed2871da06a1b86ad829d0cdf26f3b4e4ed42d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: CZ.NIC's Turris Omnia LEDs driver
 
 maintainers:
-  - Marek Behún <marek.behun@nic.cz>
+  - Marek Behún <kabel@kernel.org>
 
 description:
   This module adds support for the RGB LEDs found on the front panel of the
index d2a6e835c2575f36679b6a903c8638699b6995a7..937b3e5505e00dcfd9d446d12314f1165a4e5f84 100644 (file)
@@ -72,7 +72,9 @@ Required child device properties:
                                                pwm|regulator|rtc|sysctrl|usb]";
 
   A few child devices require ADC channels from the GPADC node. Those follow the
-  standard bindings from iio/iio-bindings.txt and iio/adc/adc.txt
+  standard bindings from
+  https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml
+  and Documentation/devicetree/bindings/iio/adc/adc.yaml
 
   abx500-temp           : io-channels "aux1" and "aux2" for measuring external
                           temperatures.
index 5ddcc8f4febc08b631a23d9bd15d35921267ffaa..b52e7a33f0f90274cfa15914346e48a26fa4cc6d 100644 (file)
@@ -16,14 +16,14 @@ Optional subnodes:
 The sub-functions of CPCAP get their own node with their own compatible values,
 which are described in the following files:
 
-- ../power/supply/cpcap-battery.txt
-- ../power/supply/cpcap-charger.txt
-- ../regulator/cpcap-regulator.txt
-- ../phy/phy-cpcap-usb.txt
-- ../input/cpcap-pwrbutton.txt
-- ../rtc/cpcap-rtc.txt
-- ../leds/leds-cpcap.txt
-- ../iio/adc/cpcap-adc.txt
+- Documentation/devicetree/bindings/power/supply/cpcap-battery.txt
+- Documentation/devicetree/bindings/power/supply/cpcap-charger.txt
+- Documentation/devicetree/bindings/regulator/cpcap-regulator.txt
+- Documentation/devicetree/bindings/phy/phy-cpcap-usb.txt
+- Documentation/devicetree/bindings/input/cpcap-pwrbutton.txt
+- Documentation/devicetree/bindings/rtc/cpcap-rtc.txt
+- Documentation/devicetree/bindings/leds/leds-cpcap.txt
+- Documentation/devicetree/bindings/iio/adc/motorola,cpcap-adc.yaml
 
 The only exception is the audio codec. Instead of a compatible value its
 node must be named "audio-codec".
index 79c38ea142372ab6e5befa4a940ff5d522a919ea..13c26f23a8209c018ee3b74f0bdf51da0af87688 100644 (file)
@@ -32,7 +32,7 @@ required:
   - interrupts
   - interrupt-names
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 4b7d1e5d003c7f282aa1e61852ed4b7bcce4af1d..e8f04687a3e096b5430c9443cc95a356a28442a1 100644 (file)
@@ -49,7 +49,7 @@ properties:
     description:
       Reference to an nvmem node for the MAC address
 
-  nvmem-cells-names:
+  nvmem-cell-names:
     const: mac-address
 
   phy-connection-type:
index b921731cd970e35a9221cae0f33fb982209949df..df9e844dd6bc6e7bcb7c88d93bbbc4af3888f79e 100644 (file)
@@ -65,6 +65,71 @@ KSZ9031:
   step is 60ps. The default value is the neutral setting, so setting
   rxc-skew-ps=<0> actually results in -900 picoseconds adjustment.
 
+  The KSZ9031 hardware supports a range of skew values from negative to
+  positive, where the specific range is property dependent. All values
+  specified in the devicetree are offset by the minimum value so they
+  can be represented as positive integers in the devicetree since it's
+difficult to represent a negative number in the devicetree.
+
+  The following 5-bit values table applies to rxc-skew-ps and txc-skew-ps.
+
+  Pad Skew Value       Delay (ps)      Devicetree Value
+  ------------------------------------------------------
+  0_0000               -900ps          0
+  0_0001               -840ps          60
+  0_0010               -780ps          120
+  0_0011               -720ps          180
+  0_0100               -660ps          240
+  0_0101               -600ps          300
+  0_0110               -540ps          360
+  0_0111               -480ps          420
+  0_1000               -420ps          480
+  0_1001               -360ps          540
+  0_1010               -300ps          600
+  0_1011               -240ps          660
+  0_1100               -180ps          720
+  0_1101               -120ps          780
+  0_1110               -60ps           840
+  0_1111               0ps             900
+  1_0000               60ps            960
+  1_0001               120ps           1020
+  1_0010               180ps           1080
+  1_0011               240ps           1140
+  1_0100               300ps           1200
+  1_0101               360ps           1260
+  1_0110               420ps           1320
+  1_0111               480ps           1380
+  1_1000               540ps           1440
+  1_1001               600ps           1500
+  1_1010               660ps           1560
+  1_1011               720ps           1620
+  1_1100               780ps           1680
+  1_1101               840ps           1740
+  1_1110               900ps           1800
+  1_1111               960ps           1860
+
+  The following 4-bit values table applies to the txdX-skew-ps, rxdX-skew-ps
+  data pads, and the rxdv-skew-ps, txen-skew-ps control pads.
+
+  Pad Skew Value       Delay (ps)      Devicetree Value
+  ------------------------------------------------------
+  0000                 -420ps          0
+  0001                 -360ps          60
+  0010                 -300ps          120
+  0011                 -240ps          180
+  0100                 -180ps          240
+  0101                 -120ps          300
+  0110                 -60ps           360
+  0111                 0ps             420
+  1000                 60ps            480
+  1001                 120ps           540
+  1010                 180ps           600
+  1011                 240ps           660
+  1100                 300ps           720
+  1101                 360ps           780
+  1110                 420ps           840
+  1111                 480ps           900
+
   Optional properties:
 
     Maximum value of 1860, default value 900:
@@ -120,11 +185,21 @@ KSZ9131:
 
 Examples:
 
+       /* Attach to an Ethernet device with autodetected PHY */
+       &enet {
+               rxc-skew-ps = <1800>;
+               rxdv-skew-ps = <0>;
+               txc-skew-ps = <1800>;
+               txen-skew-ps = <0>;
+               status = "okay";
+       };
+
+       /* Attach to an explicitly-specified PHY */
        mdio {
                phy0: ethernet-phy@0 {
-                       rxc-skew-ps = <3000>;
+                       rxc-skew-ps = <1800>;
                        rxdv-skew-ps = <0>;
-                       txc-skew-ps = <3000>;
+                       txc-skew-ps = <1800>;
                        txen-skew-ps = <0>;
                        reg = <0>;
                };
@@ -133,3 +208,20 @@ Examples:
                phy = <&phy0>;
                phy-mode = "rgmii-id";
        };
+
+References
+
+  Micrel ksz9021rl/rn Data Sheet, Revision 1.2. Dated 2/13/2014.
+  http://www.micrel.com/_PDF/Ethernet/datasheets/ksz9021rl-rn_ds.pdf
+
+  Micrel ksz9031rnx Data Sheet, Revision 2.1. Dated 11/20/2014.
+  http://www.micrel.com/_PDF/Ethernet/datasheets/KSZ9031RNX.pdf
+
+Notes:
+
+  Note that a previous version of the Micrel ksz9021rl/rn Data Sheet
+  was missing extended register 106 (transmit data pad skews), and
+  incorrectly specified the ps per step as 200ps/step instead of
+  120ps/step. The latest update to this document reflects the latest
+  revision of the Micrel specification even though usage in the kernel
+  still reflects that incorrect document.
diff --git a/Documentation/devicetree/bindings/nvmem/brcm,nvram.yaml b/Documentation/devicetree/bindings/nvmem/brcm,nvram.yaml
new file mode 100644 (file)
index 0000000..58ff6b0
--- /dev/null
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/brcm,nvram.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom's NVRAM
+
+description: |
+  Broadcom's NVRAM is a structure containing device specific environment
+  variables. It is used for storing device configuration, booting parameters
+  and calibration data.
+
+  NVRAM can be accessed on Broadcom BCM47xx MIPS and Northstar ARM Cortex-A9
+  devices using I/O mapped memory.
+
+maintainers:
+  - Rafał Miłecki <rafal@milecki.pl>
+
+allOf:
+  - $ref: "nvmem.yaml#"
+
+properties:
+  compatible:
+    const: brcm,nvram
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    nvram@1eff0000 {
+            compatible = "brcm,nvram";
+            reg = <0x1eff0000 0x10000>;
+    };
index ef93c3b95424d911b64f99d1ecdc14d37097ff16..2f2895b1f06dc950c1bbcc365b1bb616d9f25d12 100644 (file)
@@ -8,6 +8,7 @@ Required properties:
              "mediatek,mt7623-efuse", "mediatek,efuse": for MT7623
              "mediatek,mt8173-efuse" or "mediatek,efuse": for MT8173
              "mediatek,mt8516-efuse", "mediatek,efuse": for MT8516
+             "mediatek,mt8192-efuse", "mediatek,efuse": for MT8192
 - reg: Should contain registers location and length
 
 = Data cells =
index 992777c90a0bf3803ed7fb2edfe0c4a065353a2f..861b205016b184743c0f55e07972ed70fe46ae61 100644 (file)
@@ -24,6 +24,7 @@ properties:
           - qcom,msm8998-qfprom
           - qcom,qcs404-qfprom
           - qcom,sc7180-qfprom
+          - qcom,sc7280-qfprom
           - qcom,sdm845-qfprom
       - const: qcom,qfprom
 
diff --git a/Documentation/devicetree/bindings/phy/bcm-ns-usb2-phy.txt b/Documentation/devicetree/bindings/phy/bcm-ns-usb2-phy.txt
deleted file mode 100644 (file)
index a7aee9e..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-Driver for Broadcom Northstar USB 2.0 PHY
-
-Required properties:
-- compatible: brcm,ns-usb2-phy
-- reg: iomem address range of DMU (Device Management Unit)
-- reg-names: "dmu", the only needed & supported reg right now
-- clocks: USB PHY reference clock
-- clock-names: "phy-ref-clk", the only needed & supported clock right now
-
-To initialize USB 2.0 PHY driver needs to setup PLL correctly. To do this it
-requires passing phandle to the USB PHY reference clock.
-
-Example:
-       usb2-phy {
-               compatible = "brcm,ns-usb2-phy";
-               reg = <0x1800c000 0x1000>;
-               reg-names = "dmu";
-               #phy-cells = <0>;
-               clocks = <&genpll BCM_NSP_GENPLL_USB_PHY_REF_CLK>;
-               clock-names = "phy-ref-clk";
-       };
diff --git a/Documentation/devicetree/bindings/phy/bcm-ns-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/bcm-ns-usb2-phy.yaml
new file mode 100644 (file)
index 0000000..05b4dcd
--- /dev/null
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/bcm-ns-usb2-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Northstar USB 2.0 PHY
+
+description: >
+  To initialize USB 2.0 PHY driver needs to setup PLL correctly.
+  To do this it requires passing phandle to the USB PHY reference clock.
+
+maintainers:
+  - Rafał Miłecki <rafal@milecki.pl>
+
+properties:
+  compatible:
+    const: brcm,ns-usb2-phy
+
+  reg:
+    items:
+      - description: iomem address range of DMU (Device Management Unit)
+
+  reg-names:
+    items:
+      - const: dmu
+
+  clocks:
+    items:
+      - description: USB PHY reference clock
+
+  clock-names:
+    items:
+      - const: phy-ref-clk
+
+  "#phy-cells":
+    const: 0
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - clocks
+  - clock-names
+  - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/bcm-nsp.h>
+    phy@1800c000 {
+        compatible = "brcm,ns-usb2-phy";
+        reg = <0x1800c000 0x1000>;
+        reg-names = "dmu";
+        clocks = <&genpll BCM_NSP_GENPLL_USB_PHY_REF_CLK>;
+        clock-names = "phy-ref-clk";
+        #phy-cells = <0>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.txt b/Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.txt
deleted file mode 100644 (file)
index 32f0572..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-Driver for Broadcom Northstar USB 3.0 PHY
-
-Required properties:
-
-- compatible: one of: "brcm,ns-ax-usb3-phy", "brcm,ns-bx-usb3-phy".
-- reg: address of MDIO bus device
-- usb3-dmp-syscon: phandle to syscon with DMP (Device Management Plugin)
-                  registers
-- #phy-cells: must be 0
-
-Initialization of USB 3.0 PHY depends on Northstar version. There are currently
-three known series: Ax, Bx and Cx.
-Known A0: BCM4707 rev 0
-Known B0: BCM4707 rev 4, BCM53573 rev 2
-Known B1: BCM4707 rev 6
-Known C0: BCM47094 rev 0
-
-Example:
-       mdio: mdio@0 {
-               reg = <0x0>;
-               #size-cells = <1>;
-               #address-cells = <0>;
-
-               usb3-phy@10 {
-                       compatible = "brcm,ns-ax-usb3-phy";
-                       reg = <0x10>;
-                       usb3-dmp-syscon = <&usb3_dmp>;
-                       #phy-cells = <0>;
-               };
-       };
-
-       usb3_dmp: syscon@18105000 {
-               reg = <0x18105000 0x1000>;
-       };
diff --git a/Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.yaml b/Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.yaml
new file mode 100644 (file)
index 0000000..7fd419d
--- /dev/null
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/bcm-ns-usb3-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Northstar USB 3.0 PHY
+
+description: |
+  Initialization of USB 3.0 PHY depends on Northstar version. There are currently
+  three known series: Ax, Bx and Cx.
+  Known A0: BCM4707 rev 0
+  Known B0: BCM4707 rev 4, BCM53573 rev 2
+  Known B1: BCM4707 rev 6
+  Known C0: BCM47094 rev 0
+
+maintainers:
+  - Rafał Miłecki <rafal@milecki.pl>
+
+properties:
+  compatible:
+    enum:
+      - brcm,ns-ax-usb3-phy
+      - brcm,ns-bx-usb3-phy
+
+  reg:
+    description: address of MDIO bus device
+    maxItems: 1
+
+  usb3-dmp-syscon:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      Phandle to the DMP (Device Management Plugin) syscon
+
+  "#phy-cells":
+    const: 0
+
+required:
+  - compatible
+  - reg
+  - usb3-dmp-syscon
+  - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    mdio {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        usb3-phy@10 {
+            compatible = "brcm,ns-ax-usb3-phy";
+            reg = <0x10>;
+            usb3-dmp-syscon = <&usb3_dmp>;
+            #phy-cells = <0>;
+        };
+    };
+
+    usb3_dmp: syscon@18105000 {
+        reg = <0x18105000 0x1000>;
+    };
index 0497368d1fcafb1f0a903de8da1f38466347ea31..5f9e91bfb5ff4383fe15fb8d519ac93e9bfb2d94 100644 (file)
@@ -42,6 +42,9 @@ properties:
       - const: usb_mdio
       - const: bdc_ec
 
+  power-domains:
+    maxItems: 1
+
   clocks:
     minItems: 1
     maxItems: 2
diff --git a/Documentation/devicetree/bindings/phy/marvell,armada-3700-utmi-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,armada-3700-utmi-phy.yaml
new file mode 100644 (file)
index 0000000..2437c36
--- /dev/null
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/marvell,armada-3700-utmi-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Marvell Armada UTMI/UTMI+ PHY
+
+maintainers:
+  - Miquel Raynal <miquel.raynal@bootlin.com>
+
+description:
+  On Armada 3700, there are two USB controllers, one is compatible with
+  the USB2 and USB3 specifications and supports OTG. The other one is USB2
+  compliant and only supports host mode. Both of these controllers come with
+  a slightly different UTMI PHY.
+
+properties:
+  compatible:
+    enum:
+      - marvell,a3700-utmi-host-phy
+      - marvell,a3700-utmi-otg-phy
+  reg:
+    maxItems: 1
+
+  "#phy-cells":
+    const: 0
+
+  marvell,usb-misc-reg:
+    description:
+      Phandle on the "USB miscellaneous registers" shared region
+      covering registers related to both the host controller and
+      the PHY.
+    $ref: /schemas/types.yaml#/definitions/phandle
+
+required:
+  - compatible
+  - reg
+  - "#phy-cells"
+  - marvell,usb-misc-reg
+
+additionalProperties: false
+
+examples:
+  - |
+    usb2_utmi_host_phy: phy@5f000 {
+      compatible = "marvell,a3700-utmi-host-phy";
+      reg = <0x5f000 0x800>;
+      marvell,usb-misc-reg = <&usb2_syscon>;
+      #phy-cells = <0>;
+    };
+
+    usb2_syscon: system-controller@5f800 {
+      compatible = "marvell,armada-3700-usb2-host-misc", "syscon";
+      reg = <0x5f800 0x800>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/marvell,armada-cp110-utmi-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,armada-cp110-utmi-phy.yaml
new file mode 100644 (file)
index 0000000..30f3b5f
--- /dev/null
@@ -0,0 +1,109 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/marvell,armada-cp110-utmi-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Marvell Armada CP110/CP115 UTMI PHY
+
+maintainers:
+  - Konstantin Porotchkin <kostap@marvell.com>
+
+description:
+  On Armada 7k/8k and CN913x, there are two host and one device USB controllers.
+  Each of the two existing UTMI PHYs can be connected to either USB host or USB device
+  controller.
+  The USB device controller can only be connected to a single UTMI PHY port
+                      0.H----- USB HOST0
+  UTMI PHY0  --------/
+                      0.D-----0
+                               \------ USB DEVICE
+                      1.D-----1
+  UTMI PHY1  --------\
+                      1.H----- USB HOST1
+
+properties:
+  compatible:
+    const: marvell,cp110-utmi-phy
+
+  reg:
+    maxItems: 1
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+  marvell,system-controller:
+    description:
+      Phandle to the system controller node
+    $ref: /schemas/types.yaml#/definitions/phandle
+
+#Required child nodes:
+
+patternProperties:
+  "^usb-phy@[0|1]$":
+    type: object
+    description:
+      Each UTMI PHY port must be represented as a sub-node.
+
+    properties:
+      reg:
+        description: phy port index.
+        maxItems: 1
+
+      "#phy-cells":
+        const: 0
+
+    required:
+      - reg
+      - "#phy-cells"
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - "#address-cells"
+  - "#size-cells"
+  - marvell,system-controller
+
+additionalProperties: false
+
+examples:
+  - |
+    cp0_utmi: utmi@580000 {
+      compatible = "marvell,cp110-utmi-phy";
+      reg = <0x580000 0x2000>;
+      marvell,system-controller = <&cp0_syscon0>;
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      cp0_utmi0: usb-phy@0 {
+        reg = <0>;
+        #phy-cells = <0>;
+      };
+
+      cp0_utmi1: usb-phy@1 {
+        reg = <1>;
+        #phy-cells = <0>;
+      };
+    };
+
+    cp0_usb3_0 {
+      usb-phy = <&cp0_usb3_0_phy0>;
+      phys = <&cp0_utmi0>;
+      phy-names = "utmi";
+      /* UTMI0 is connected to USB host controller (default mode) */
+      dr_mode = "host";
+    };
+
+    cp0_usb3_1 {
+      usb-phy = <&cp0_usb3_0_phy1>;
+      phys = <&cp0_utmi1>;
+      phy-names = "utmi";
+      /* UTMI1 is connected to USB device controller */
+      dr_mode = "peripheral";
+    };
index 71d4acea1f669a7d01a6cde25e67ea1ed4ea2093..6e4d795f9b02aeab819ce7d0a649e8ea08a27b6a 100644 (file)
@@ -19,11 +19,14 @@ properties:
     pattern: "^dsi-phy@[0-9a-f]+$"
 
   compatible:
-    enum:
-      - mediatek,mt2701-mipi-tx
-      - mediatek,mt7623-mipi-tx
-      - mediatek,mt8173-mipi-tx
-      - mediatek,mt8183-mipi-tx
+    oneOf:
+      - items:
+          - enum:
+              - mediatek,mt7623-mipi-tx
+          - const: mediatek,mt2701-mipi-tx
+      - const: mediatek,mt2701-mipi-tx
+      - const: mediatek,mt8173-mipi-tx
+      - const: mediatek,mt8183-mipi-tx
 
   reg:
     maxItems: 1
index 4752517a144604cadcbd761d861f9e8d07645f45..0d94950b84ca3fc3b2ed9755d6bbf2e2ff558ca4 100644 (file)
@@ -21,10 +21,13 @@ properties:
     pattern: "^hdmi-phy@[0-9a-f]+$"
 
   compatible:
-    enum:
-      - mediatek,mt2701-hdmi-phy
-      - mediatek,mt7623-hdmi-phy
-      - mediatek,mt8173-hdmi-phy
+    oneOf:
+      - items:
+          - enum:
+              - mediatek,mt7623-hdmi-phy
+          - const: mediatek,mt2701-hdmi-phy
+      - const: mediatek,mt2701-hdmi-phy
+      - const: mediatek,mt8173-hdmi-phy
 
   reg:
     maxItems: 1
index 602e6ff45785e8f2e6797155263e0a1d5f8a30ec..b8a7651a3d9aea654b4552bb1df6e4f2fd679acc 100644 (file)
@@ -79,6 +79,7 @@ properties:
               - mediatek,mt2712-tphy
               - mediatek,mt7629-tphy
               - mediatek,mt8183-tphy
+              - mediatek,mt8195-tphy
           - const: mediatek,generic-tphy-v2
       - const: mediatek,mt2701-u3phy
         deprecated: true
@@ -117,7 +118,7 @@ properties:
 
 # Required child node:
 patternProperties:
-  "^usb-phy@[0-9a-f]+$":
+  "^(usb|pcie|sata)-phy@[0-9a-f]+$":
     type: object
     description:
       A sub-node is required for each port the controller provides.
index 3a9be82e7f13f7775d7b6dc233d1aa6dbd982332..74cc32c1d2e85cac5e18090de2dbb1d2ccdd87be 100644 (file)
@@ -22,7 +22,12 @@ properties:
     pattern: "^ufs-phy@[0-9a-f]+$"
 
   compatible:
-    const: mediatek,mt8183-ufsphy
+    oneOf:
+      - items:
+          - enum:
+              - mediatek,mt8195-ufsphy
+          - const: mediatek,mt8183-ufsphy
+      - const: mediatek,mt8183-ufsphy
 
   reg:
     maxItems: 1
diff --git a/Documentation/devicetree/bindings/phy/microchip,sparx5-serdes.yaml b/Documentation/devicetree/bindings/phy/microchip,sparx5-serdes.yaml
new file mode 100644 (file)
index 0000000..bdbdb3b
--- /dev/null
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/microchip,sparx5-serdes.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip Sparx5 Serdes controller
+
+maintainers:
+  - Steen Hegelund <steen.hegelund@microchip.com>
+
+description: |
+  The Sparx5 SERDES interfaces share the same basic functionality, but
+  support different operating modes and line rates.
+
+  The following list lists the SERDES features:
+
+  * RX Adaptive Decision Feedback Equalizer (DFE)
+  * Programmable continuous time linear equalizer (CTLE)
+  * Rx variable gain control
+  * Rx built-in fault detector (loss-of-lock/loss-of-signal)
+  * Adjustable tx de-emphasis (FFE)
+  * Tx output amplitude control
+  * Supports rx eye monitor
+  * Multiple loopback modes
+  * Prbs generator and checker
+  * Polarity inversion control
+
+  SERDES6G:
+
+  The SERDES6G is a high-speed SERDES interface, which can operate at
+  the following data rates:
+
+  * 100 Mbps (100BASE-FX)
+  * 1.25 Gbps (SGMII/1000BASE-X/1000BASE-KX)
+  * 3.125 Gbps (2.5GBASE-X/2.5GBASE-KX)
+  * 5.15625 Gbps (5GBASE-KR/5G-USXGMII)
+
+  SERDES10G
+
+  The SERDES10G is a high-speed SERDES interface, which can operate at
+  the following data rates:
+
+  * 100 Mbps (100BASE-FX)
+  * 1.25 Gbps (SGMII/1000BASE-X/1000BASE-KX)
+  * 3.125 Gbps (2.5GBASE-X/2.5GBASE-KX)
+  * 5 Gbps (QSGMII/USGMII)
+  * 5.15625 Gbps (5GBASE-KR/5G-USXGMII)
+  * 10 Gbps (10G-USGMII)
+  * 10.3125 Gbps (10GBASE-R/10GBASE-KR/USXGMII)
+
+  SERDES25G
+
+  The SERDES25G is a high-speed SERDES interface, which can operate at
+  the following data rates:
+
+  * 1.25 Gbps (SGMII/1000BASE-X/1000BASE-KX)
+  * 3.125 Gbps (2.5GBASE-X/2.5GBASE-KX)
+  * 5 Gbps (QSGMII/USGMII)
+  * 5.15625 Gbps (5GBASE-KR/5G-USXGMII)
+  * 10 Gbps (10G-USGMII)
+  * 10.3125 Gbps (10GBASE-R/10GBASE-KR/USXGMII)
+  * 25.78125 Gbps (25GBASE-KR/25GBASE-CR/25GBASE-SR/25GBASE-LR/25GBASE-ER)
+
+properties:
+  $nodename:
+    pattern: "^serdes@[0-9a-f]+$"
+
+  compatible:
+    const: microchip,sparx5-serdes
+
+  reg:
+    minItems: 1
+
+  '#phy-cells':
+    const: 1
+    description: |
+      - The main serdes input port
+
+  clocks:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - '#phy-cells'
+  - clocks
+
+additionalProperties: false
+
+examples:
+  - |
+    serdes: serdes@10808000 {
+      compatible = "microchip,sparx5-serdes";
+      #phy-cells = <1>;
+      clocks = <&sys_clk>;
+      reg = <0x10808000 0x5d0000>;
+    };
+
+...
index d210843863df084b646b5f07224117c7086562fd..84383e2e0b34d254fe2406a9cb35900d991280a8 100644 (file)
@@ -26,6 +26,9 @@ properties:
   '#size-cells':
     const: 0
 
+  '#clock-cells':
+    const: 1
+
   resets:
     minItems: 1
     maxItems: 2
@@ -49,12 +52,24 @@ properties:
     const: serdes
 
   clocks:
-    maxItems: 2
+    minItems: 2
+    maxItems: 4
 
   clock-names:
+    minItems: 2
     items:
       - const: cmn_refclk_dig_div
       - const: cmn_refclk1_dig_div
+      - const: pll0_refclk
+      - const: pll1_refclk
+
+  assigned-clocks:
+    minItems: 1
+    maxItems: 2
+
+  assigned-clock-parents:
+    minItems: 1
+    maxItems: 2
 
   cdns,autoconf:
     type: boolean
index e266ade53d87aef686221606ad54d72aeafc6b55..01dcd14e7b2ad21248efdd588efcaf279d1fa54e 100644 (file)
@@ -28,13 +28,27 @@ properties:
   '#size-cells':
     const: 0
 
+  '#clock-cells':
+    const: 1
+
   clocks:
-    maxItems: 1
+    minItems: 1
+    maxItems: 2
     description:
-      PHY reference clock. Must contain an entry in clock-names.
+      PHY reference clock (first item). Must contain an entry in clock-names.
+      The optional second clock enables the output reference clock.
 
   clock-names:
-    const: refclk
+    minItems: 1
+    items:
+      - const: refclk
+      - const: phy_en_refclk
+
+  assigned-clocks:
+    maxItems: 3
+
+  assigned-clock-parents:
+    maxItems: 3
 
   reg:
     minItems: 1
@@ -170,7 +184,7 @@ examples:
     };
   - |
     #include <dt-bindings/phy/phy.h>
-    #include <dt-bindings/phy/phy-cadence-torrent.h>
+    #include <dt-bindings/phy/phy-cadence.h>
 
     bus {
         #address-cells = <2>;
diff --git a/Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt b/Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt
deleted file mode 100644 (file)
index aa99cee..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-MVEBU A3700 UTMI PHY
---------------------
-
-USB2 UTMI+ PHY controllers can be found on the following Marvell MVEBU SoCs:
-* Armada 3700
-
-On Armada 3700, there are two USB controllers, one is compatible with the USB2
-and USB3 specifications and supports OTG. The other one is USB2 compliant and
-only supports host mode. Both of these controllers come with a slightly
-different UTMI PHY.
-
-Required Properties:
-
-- compatible: Should be one of:
-             * "marvell,a3700-utmi-host-phy" for the PHY connected to
-               the USB2 host-only controller.
-             * "marvell,a3700-utmi-otg-phy" for the PHY connected to
-               the USB3 and USB2 OTG capable controller.
-- reg: PHY IP register range.
-- marvell,usb-misc-reg: handle on the "USB miscellaneous registers" shared
-                       region covering registers related to both the host
-                       controller and the PHY.
-- #phy-cells: Standard property (Documentation: phy-bindings.txt) Should be 0.
-
-
-Example:
-
-       usb2_utmi_host_phy: phy@5f000 {
-               compatible = "marvell,armada-3700-utmi-host-phy";
-               reg = <0x5f000 0x800>;
-               marvell,usb-misc-reg = <&usb2_syscon>;
-               #phy-cells = <0>;
-       };
-
-       usb2_syscon: system-controller@5f800 {
-               compatible = "marvell,armada-3700-usb2-host-misc", "syscon";
-               reg = <0x5f800 0x800>;
-       };
index 46df6786727a6b40343ddb454469a5c51246b4fb..018cc1246ee1917ac8a8510a16752708e886386f 100644 (file)
@@ -51,6 +51,10 @@ properties:
   vdda1v8-supply:
     description: regulator providing 1V8 power supply to the PLL block
 
+  '#clock-cells':
+    description: number of clock cells for ck_usbo_48m consumer
+    const: 0
+
 #Required child nodes:
 
 patternProperties:
@@ -120,6 +124,7 @@ examples:
         vdda1v8-supply = <&reg18>;
         #address-cells = <1>;
         #size-cells = <0>;
+        #clock-cells = <0>;
 
         usbphyc_port0: usb-phy@0 {
             reg = <0>;
index 626447fee09252d179e40b5192edc757159245c3..7808ec8bc7128032366a6582aab043a3e2a2e1b5 100644 (file)
@@ -25,11 +25,13 @@ properties:
       - qcom,msm8998-qmp-pcie-phy
       - qcom,msm8998-qmp-ufs-phy
       - qcom,msm8998-qmp-usb3-phy
+      - qcom,sc7180-qmp-usb3-phy
       - qcom,sc8180x-qmp-ufs-phy
       - qcom,sc8180x-qmp-usb3-phy
       - qcom,sdm845-qhp-pcie-phy
       - qcom,sdm845-qmp-pcie-phy
       - qcom,sdm845-qmp-ufs-phy
+      - qcom,sdm845-qmp-usb3-phy
       - qcom,sdm845-qmp-usb3-uni-phy
       - qcom,sm8150-qmp-ufs-phy
       - qcom,sm8150-qmp-usb3-phy
index 33974ad10afe4749a5cc961fe6cca8778fbc5090..217aa6c91893d250991d407d8fdb6037b0c643f8 100644 (file)
@@ -14,9 +14,8 @@ properties:
   compatible:
     enum:
       - qcom,sc7180-qmp-usb3-dp-phy
-      - qcom,sc7180-qmp-usb3-phy
       - qcom,sdm845-qmp-usb3-dp-phy
-      - qcom,sdm845-qmp-usb3-phy
+      - qcom,sm8250-qmp-usb3-dp-phy
   reg:
     items:
       - description: Address and length of PHY's USB serdes block.
index ee77c645832667d70ed150d634a66215dce48cb1..20203a8a9e41a76a37e3f345d353d1b20bdc7bac 100644 (file)
@@ -16,6 +16,7 @@ properties:
   compatible:
     enum:
       - qcom,usb-snps-hs-7nm-phy
+      - qcom,sc7280-usb-hs-phy
       - qcom,sm8150-usb-hs-phy
       - qcom,sm8250-usb-hs-phy
       - qcom,sm8350-usb-hs-phy
index bbbd85501ada84565dd648da6057917f8b9192e8..57e1d013a5028badeaf05f5ed88c68d946c9e73c 100644 (file)
@@ -15,6 +15,7 @@ properties:
     enum:
       - ti,j721e-wiz-16g
       - ti,j721e-wiz-10g
+      - ti,am64-wiz-10g
 
   power-domains:
     maxItems: 1
@@ -42,6 +43,9 @@ properties:
   "#reset-cells":
     const: 1
 
+  "#clock-cells":
+    const: 1
+
   ranges: true
 
   assigned-clocks:
index c147900f9041ab93ed9ba0031551456fa8ee9ed6..6da674666d45610390e4819b8f26005df79a0326 100644 (file)
@@ -28,6 +28,12 @@ properties:
   clock-names:
     const: ipsec
 
+  resets:
+    maxItems: 1
+
+  reset-names:
+    const: ipsec
+
   interrupts:
     maxItems: 1
 
@@ -35,6 +41,18 @@ required:
   - compatible
   - reg
 
+if:
+  properties:
+    compatible:
+      enum:
+        - brcm,bcm6368-rng
+then:
+  required:
+    - clocks
+    - clock-names
+    - resets
+    - reset-names
+
 additionalProperties: false
 
 examples:
@@ -58,4 +76,7 @@ examples:
 
         clocks = <&periph_clk 18>;
         clock-names = "ipsec";
+
+        resets = <&periph_rst 4>;
+        reset-names = "ipsec";
     };
index b104be131235db792adb10d428583edde0b8b724..b93a2b3e029d952c4bd76d4b3a98876dbdf0cb25 100644 (file)
@@ -54,6 +54,8 @@ board specific bus parameters.
        Value type: <prop-encoded-array>
        Definition: should specify payload transport window offset1 of each
                    data port. Out ports followed by In ports.
+                   Value of 0xFF indicates that this option is not implemented
+                   or applicable for the respective data port.
                    More info in MIPI Alliance SoundWire 1.0 Specifications.
 
 - qcom,ports-offset2:
@@ -61,6 +63,8 @@ board specific bus parameters.
        Value type: <prop-encoded-array>
        Definition: should specify payload transport window offset2 of each
                    data port. Out ports followed by In ports.
+                   Value of 0xFF indicates that this option is not implemented
+                   or applicable for the respective data port.
                    More info in MIPI Alliance SoundWire 1.0 Specifications.
 
 - qcom,ports-sinterval-low:
@@ -69,12 +73,16 @@ board specific bus parameters.
        Definition: should be sample interval low of each data port.
                    Out ports followed by In ports. Used for Sample Interval
                    calculation.
+                   Value of 0xFF indicates that this option is not implemented
+                   or applicable for the respective data port.
                    More info in MIPI Alliance SoundWire 1.0 Specifications.
 
 - qcom,ports-word-length:
        Usage: optional
        Value type: <prop-encoded-array>
        Definition: should be size of payload channel sample.
+                   Value of 0xFF indicates that this option is not implemented
+                   or applicable for the respective data port.
                    More info in MIPI Alliance SoundWire 1.0 Specifications.
 
 - qcom,ports-block-pack-mode:
@@ -84,6 +92,8 @@ board specific bus parameters.
                    0 to indicate Blocks are per Channel
                    1 to indicate Blocks are per Port.
                    Out ports followed by In ports.
+                   Value of 0xFF indicates that this option is not implemented
+                   or applicable for the respective data port.
                    More info in MIPI Alliance SoundWire 1.0 Specifications.
 
 - qcom,ports-block-group-count:
@@ -92,6 +102,8 @@ board specific bus parameters.
        Definition: should be in range 1 to 4 to indicate how many sample
                    intervals are combined into a payload.
                    Out ports followed by In ports.
+                   Value of 0xFF indicates that this option is not implemented
+                   or applicable for the respective data port.
                    More info in MIPI Alliance SoundWire 1.0 Specifications.
 
 - qcom,ports-lane-control:
@@ -100,6 +112,8 @@ board specific bus parameters.
        Definition: should be in range 0 to 7 to identify which data lane
                    the data port uses.
                    Out ports followed by In ports.
+                   Value of 0xFF indicates that this option is not implemented
+                   or applicable for the respective data port.
                    More info in MIPI Alliance SoundWire 1.0 Specifications.
 
 - qcom,ports-hstart:
@@ -109,6 +123,8 @@ board specific bus parameters.
                    SoundWire Frame, i.e. left edge of the Transport sub-frame
                    for each port. Values between 0 and 15 are valid.
                    Out ports followed by In ports.
+                   Value of 0xFF indicates that this option is not implemented
+                   or applicable for the respective data port.
                    More info in MIPI Alliance SoundWire 1.0 Specifications.
 
 - qcom,ports-hstop:
@@ -118,6 +134,8 @@ board specific bus parameters.
                    SoundWire Frame, i.e. the right edge of the Transport
                    sub-frame for each port. Values between 0 and 15 are valid.
                    Out ports followed by In ports.
+                   Value of 0xFF indicates that this option is not implemented
+                   or applicable for the respective data port.
                    More info in MIPI Alliance SoundWire 1.0 Specifications.
 
 - qcom,dports-type:
@@ -128,6 +146,8 @@ board specific bus parameters.
                    1 for simple ports
                    2 for full port
                    Out ports followed by In ports.
+                   Value of 0xFF indicates that this option is not implemented
+                   or applicable for the respective data port.
                    More info in MIPI Alliance SoundWire 1.0 Specifications.
 
 Note:
index 024bcad75101f112c3a26cf879513a39818ef9af..8165df4599cf5338be1de82119d8adc98ea294cd 100644 (file)
@@ -20,6 +20,8 @@ select:
         enum:
           - ingenic,jz4740-tcu
           - ingenic,jz4725b-tcu
+          - ingenic,jz4760-tcu
+          - ingenic,jz4760b-tcu
           - ingenic,jz4770-tcu
           - ingenic,jz4780-tcu
           - ingenic,x1000-tcu
@@ -52,12 +54,15 @@ properties:
           - enum:
               - ingenic,jz4740-tcu
               - ingenic,jz4725b-tcu
-              - ingenic,jz4770-tcu
+              - ingenic,jz4760-tcu
               - ingenic,x1000-tcu
           - const: simple-mfd
       - items:
-          - const: ingenic,jz4780-tcu
-          - const: ingenic,jz4770-tcu
+          - enum:
+              - ingenic,jz4780-tcu
+              - ingenic,jz4770-tcu
+              - ingenic,jz4760b-tcu
+          - const: ingenic,jz4760-tcu
           - const: simple-mfd
 
   reg:
@@ -118,6 +123,8 @@ patternProperties:
           - items:
               - enum:
                   - ingenic,jz4770-watchdog
+                  - ingenic,jz4760b-watchdog
+                  - ingenic,jz4760-watchdog
                   - ingenic,jz4725b-watchdog
               - const: ingenic,jz4740-watchdog
 
@@ -147,6 +154,8 @@ patternProperties:
               - ingenic,jz4725b-pwm
           - items:
               - enum:
+                  - ingenic,jz4760-pwm
+                  - ingenic,jz4760b-pwm
                   - ingenic,jz4770-pwm
                   - ingenic,jz4780-pwm
               - const: ingenic,jz4740-pwm
@@ -183,10 +192,15 @@ patternProperties:
         oneOf:
           - enum:
               - ingenic,jz4725b-ost
-              - ingenic,jz4770-ost
+              - ingenic,jz4760b-ost
           - items:
-              - const: ingenic,jz4780-ost
-              - const: ingenic,jz4770-ost
+              - const: ingenic,jz4760-ost
+              - const: ingenic,jz4725b-ost
+          - items:
+              - enum:
+                  - ingenic,jz4780-ost
+                  - ingenic,jz4770-ost
+              - const: ingenic,jz4760b-ost
 
       reg:
         maxItems: 1
@@ -226,7 +240,7 @@ examples:
     #include <dt-bindings/clock/jz4770-cgu.h>
     #include <dt-bindings/clock/ingenic,tcu.h>
     tcu: timer@10002000 {
-      compatible = "ingenic,jz4770-tcu", "simple-mfd";
+      compatible = "ingenic,jz4770-tcu", "ingenic,jz4760-tcu", "simple-mfd";
       reg = <0x10002000 0x1000>;
       #address-cells = <1>;
       #size-cells = <1>;
@@ -272,7 +286,7 @@ examples:
       };
 
       ost: timer@e0 {
-        compatible = "ingenic,jz4770-ost";
+        compatible = "ingenic,jz4770-ost", "ingenic,jz4760b-ost";
         reg = <0xe0 0x20>;
 
         clocks = <&tcu TCU_CLK_OST>;
index 97258f1a1505bf504fdda399945187e20c59b287..ac3a5e887455d69eacdd2ce34790605ee72107b8 100644 (file)
@@ -4,7 +4,8 @@ Nuvoton NPCM7xx have three timer modules, each timer module provides five 24-bit
 timer counters.
 
 Required properties:
-- compatible      : "nuvoton,npcm750-timer" for Poleg NPCM750.
+- compatible      : "nuvoton,npcm750-timer" for Poleg NPCM750, or
+                    "nuvoton,wpcm450-timer" for Hermon WPCM450.
 - reg             : Offset and length of the register set for the device.
 - interrupts      : Contain the timer interrupt of timer 0.
 - clocks          : phandle of timer reference clock (usually a 25 MHz clock).
index 428db3a21bb9c38419a61e77c9e88710afe23a63..53dd6d9f518f96f2ff9bff65f08da01ef9767dbc 100644 (file)
@@ -74,11 +74,13 @@ properties:
               - renesas,r8a774e1-cmt0     # 32-bit CMT0 on RZ/G2H
               - renesas,r8a7795-cmt0      # 32-bit CMT0 on R-Car H3
               - renesas,r8a7796-cmt0      # 32-bit CMT0 on R-Car M3-W
+              - renesas,r8a77961-cmt0     # 32-bit CMT0 on R-Car M3-W+
               - renesas,r8a77965-cmt0     # 32-bit CMT0 on R-Car M3-N
               - renesas,r8a77970-cmt0     # 32-bit CMT0 on R-Car V3M
               - renesas,r8a77980-cmt0     # 32-bit CMT0 on R-Car V3H
               - renesas,r8a77990-cmt0     # 32-bit CMT0 on R-Car E3
               - renesas,r8a77995-cmt0     # 32-bit CMT0 on R-Car D3
+              - renesas,r8a779a0-cmt0     # 32-bit CMT0 on R-Car V3U
           - const: renesas,rcar-gen3-cmt0 # 32-bit CMT0 on R-Car Gen3 and RZ/G2
 
       - items:
@@ -89,11 +91,13 @@ properties:
               - renesas,r8a774e1-cmt1     # 48-bit CMT on RZ/G2H
               - renesas,r8a7795-cmt1      # 48-bit CMT on R-Car H3
               - renesas,r8a7796-cmt1      # 48-bit CMT on R-Car M3-W
+              - renesas,r8a77961-cmt1     # 48-bit CMT on R-Car M3-W+
               - renesas,r8a77965-cmt1     # 48-bit CMT on R-Car M3-N
               - renesas,r8a77970-cmt1     # 48-bit CMT on R-Car V3M
               - renesas,r8a77980-cmt1     # 48-bit CMT on R-Car V3H
               - renesas,r8a77990-cmt1     # 48-bit CMT on R-Car E3
               - renesas,r8a77995-cmt1     # 48-bit CMT on R-Car D3
+              - renesas,r8a779a0-cmt1     # 48-bit CMT on R-Car V3U
           - const: renesas,rcar-gen3-cmt1 # 48-bit CMT on R-Car Gen3 and RZ/G2
 
   reg:
index c54188731a1bd7c11d1efbf5de56e27b25e93290..f0f0f121c355b159b25826d59514f3dcdaf98169 100644 (file)
@@ -28,8 +28,14 @@ properties:
           - renesas,tmu-r8a774e1 # RZ/G2H
           - renesas,tmu-r8a7778  # R-Car M1A
           - renesas,tmu-r8a7779  # R-Car H1
+          - renesas,tmu-r8a7795  # R-Car H3
+          - renesas,tmu-r8a7796  # R-Car M3-W
+          - renesas,tmu-r8a77961 # R-Car M3-W+
+          - renesas,tmu-r8a77965 # R-Car M3-N
           - renesas,tmu-r8a77970 # R-Car V3M
           - renesas,tmu-r8a77980 # R-Car V3H
+          - renesas,tmu-r8a77990 # R-Car E3
+          - renesas,tmu-r8a77995 # R-Car D3
       - const: renesas,tmu
 
   reg:
index 26d13085a11754aaed8d31d20574d10b796fabe3..e519d374c378912c0f09b327a2459cd7170ea2bc 100644 (file)
@@ -248,7 +248,7 @@ This example defines a function
 
 .. code-block:: c
 
-   int __ssam_tmp_perf_mode_set(struct ssam_controller *ctrl, const __le32 *arg);
+   static int __ssam_tmp_perf_mode_set(struct ssam_controller *ctrl, const __le32 *arg);
 
 executing the specified request, with the controller passed in when calling
 said function. In this example, the argument is provided via the ``arg``
@@ -296,7 +296,7 @@ This invocation of the macro defines a function
 
 .. code-block:: c
 
-   int ssam_bat_get_sta(struct ssam_device *sdev, __le32 *ret);
+   static int ssam_bat_get_sta(struct ssam_device *sdev, __le32 *ret);
 
 executing the specified request, using the device IDs and controller given
 in the client device. The full list of such macros for client devices is:
diff --git a/Documentation/driver-api/surface_aggregator/clients/dtx.rst b/Documentation/driver-api/surface_aggregator/clients/dtx.rst
new file mode 100644 (file)
index 0000000..e7e7c20
--- /dev/null
@@ -0,0 +1,718 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+.. |__u16| replace:: :c:type:`__u16 <__u16>`
+.. |sdtx_event| replace:: :c:type:`struct sdtx_event <sdtx_event>`
+.. |sdtx_event_code| replace:: :c:type:`enum sdtx_event_code <sdtx_event_code>`
+.. |sdtx_base_info| replace:: :c:type:`struct sdtx_base_info <sdtx_base_info>`
+.. |sdtx_device_mode| replace:: :c:type:`struct sdtx_device_mode <sdtx_device_mode>`
+
+======================================================
+User-Space DTX (Clipboard Detachment System) Interface
+======================================================
+
+The ``surface_dtx`` driver is responsible for proper clipboard detachment
+and re-attachment handling. To this end, it provides the ``/dev/surface/dtx``
+device file, through which it can interface with a user-space daemon. This
+daemon is then ultimately responsible for determining and taking necessary
+actions, such as unmounting devices attached to the base,
+unloading/reloading the graphics-driver, user-notifications, etc.
+
+There are two basic communication principles used in this driver: Commands
+(in other parts of the documentation also referred to as requests) and
+events. Commands are sent to the EC and may have different implications in
+different contexts. Events are sent by the EC upon some internal state
+change. Commands are always driver-initiated, whereas events are always
+initiated by the EC.
+
+.. contents::
+
+Nomenclature
+============
+
+* **Clipboard:**
+  The detachable upper part of the Surface Book, housing the screen and CPU.
+
+* **Base:**
+  The lower part of the Surface Book from which the clipboard can be
+  detached, optionally (model dependent) housing the discrete GPU (dGPU).
+
+* **Latch:**
+  The mechanism keeping the clipboard attached to the base in normal
+  operation and allowing it to be detached when requested.
+
+* **Silently ignored commands:**
+  The command is accepted by the EC as a valid command and acknowledged
+  (following the standard communication protocol), but the EC does not act
+  upon it, i.e. ignores it.
+
+
+Detachment Process
+==================
+
+Warning: This part of the documentation is based on reverse engineering and
+testing and thus may contain errors or be incomplete.
+
+Latch States
+------------
+
+The latch mechanism has two major states: *open* and *closed*. In the
+*closed* state (default), the clipboard is secured to the base, whereas in
+the *open* state, the clipboard can be removed by a user.
+
+The latch can additionally be locked and, correspondingly, unlocked, which
+can influence the detachment procedure. Specifically, this locking mechanism
+is intended to prevent the dGPU, positioned in the base of the device, from
+being hot-unplugged while in use. More details can be found in the
+documentation for the detachment procedure below. By default, the latch is
+unlocked.
+
+Detachment Procedure
+--------------------
+
+Note that the detachment process is governed fully by the EC. The
+``surface_dtx`` driver only relays events from the EC to user-space and
+commands from user-space to the EC, i.e. it does not influence this process.
+
+The detachment process is started with the user pressing the *detach* button
+on the base of the device or executing the ``SDTX_IOCTL_LATCH_REQUEST`` IOCTL.
+Following that:
+
+1. The EC turns on the indicator led on the detach-button, sends a
+   *detach-request* event (``SDTX_EVENT_REQUEST``), and awaits further
+   instructions/commands. In case the latch is unlocked, the led will flash
+   green. If the latch has been locked, the led will be solid red.
+
+2. The event is, via the ``surface_dtx`` driver, relayed to user-space, where
+   an appropriate user-space daemon can handle it and send instructions back
+   to the EC via IOCTLs provided by this driver.
+
+3. The EC waits for instructions from user-space and acts according to them.
+   If the EC does not receive any instructions in a given period, it will
+   time out and continue as follows:
+
+   - If the latch is unlocked, the EC will open the latch and the clipboard
+     can be detached from the base. This is the exact behavior as without
+     this driver or any user-space daemon. See the ``SDTX_IOCTL_LATCH_CONFIRM``
+     description below for more details on the follow-up behavior of the EC.
+
+   - If the latch is locked, the EC will *not* open the latch, meaning the
+     clipboard cannot be detached from the base. Furthermore, the EC sends
+     a cancel event (``SDTX_EVENT_CANCEL``) detailing this with the cancel
+     reason ``SDTX_DETACH_TIMEDOUT`` (see :ref:`events` for details).
+
+Valid responses by a user-space daemon to a detachment request event are:
+
+- Execute ``SDTX_IOCTL_LATCH_REQUEST``. This will immediately abort the
+  detachment process. Furthermore, the EC will send a detach-request event,
+  similar to the user pressing the detach-button to cancel said process (see
+  below).
+
+- Execute ``SDTX_IOCTL_LATCH_CONFIRM``. This will cause the EC to open the
+  latch, after which the user can separate clipboard and base.
+
+  As this changes the latch state, a *latch-status* event
+  (``SDTX_EVENT_LATCH_STATUS``) will be sent once the latch has been opened
+  successfully. If the EC fails to open the latch, e.g. due to hardware
+  error or low battery, a latch-cancel event (``SDTX_EVENT_CANCEL``) will be
+  sent with the cancel reason indicating the specific failure.
+
+  If the latch is currently locked, the latch will automatically be
+  unlocked before it is opened.
+
+- Execute ``SDTX_IOCTL_LATCH_HEARTBEAT``. This will reset the internal timeout.
+  No other actions will be performed, i.e. the detachment process will neither
+  be completed nor canceled, and the EC will still be waiting for further
+  responses.
+
+- Execute ``SDTX_IOCTL_LATCH_CANCEL``. This will abort the detachment process,
+  similar to ``SDTX_IOCTL_LATCH_REQUEST``, described above, or the button
+  press, described below. A *generic request* event (``SDTX_EVENT_REQUEST``)
+  is sent in response to this. In contrast to those, however, this command
+  does not trigger a new detachment process if none is currently in
+  progress.
+
+- Do nothing. The detachment process eventually times out as described in
+  point 3.
+
+See :ref:`ioctls` for more details on these responses.
+
+It is important to note that, if the user presses the detach button at any
+point when a detachment operation is in progress (i.e. after the EC has sent
+the initial *detach-request* event (``SDTX_EVENT_REQUEST``) and before it
+received the corresponding response concluding the process), the detachment
+process is canceled on the EC-level and an identical event is being sent.
+Thus a *detach-request* event, by itself, does not signal the start of the
+detachment process.
+
+The detachment process may further be canceled by the EC due to hardware
+failures or a low clipboard battery. This is done via a cancel event
+(``SDTX_EVENT_CANCEL``) with the corresponding cancel reason.
+
+
+User-Space Interface Documentation
+==================================
+
+Error Codes and Status Values
+-----------------------------
+
+Error and status codes are divided into different categories, which can be
+used to determine if the status code is an error, and, if it is, the
+severity and type of that error. The current categories are:
+
+.. flat-table:: Overview of Status/Error Categories.
+   :widths: 2 1 3
+   :header-rows: 1
+
+   * - Name
+     - Value
+     - Short Description
+
+   * - ``STATUS``
+     - ``0x0000``
+     - Non-error status codes.
+
+   * - ``RUNTIME_ERROR``
+     - ``0x1000``
+     - Non-critical runtime errors.
+
+   * - ``HARDWARE_ERROR``
+     - ``0x2000``
+     - Critical hardware failures.
+
+   * - ``UNKNOWN``
+     - ``0xF000``
+     - Unknown error codes.
+
+Other categories are reserved for future use. The ``SDTX_CATEGORY()`` macro
+can be used to determine the category of any status value. The
+``SDTX_SUCCESS()`` macro can be used to check if the status value is a
+success value (``SDTX_CATEGORY_STATUS``) or if it indicates a failure.
+
+Unknown status or error codes sent by the EC are assigned to the ``UNKNOWN``
+category by the driver and may be implemented via their own code in the
+future.
+
+Currently used error codes are:
+
+.. flat-table:: Overview of Error Codes.
+   :widths: 2 1 1 3
+   :header-rows: 1
+
+   * - Name
+     - Category
+     - Value
+     - Short Description
+
+   * - ``SDTX_DETACH_NOT_FEASIBLE``
+     - ``RUNTIME``
+     - ``0x1001``
+     - Detachment not feasible due to low clipboard battery.
+
+   * - ``SDTX_DETACH_TIMEDOUT``
+     - ``RUNTIME``
+     - ``0x1002``
+     - Detachment process timed out while the latch was locked.
+
+   * - ``SDTX_ERR_FAILED_TO_OPEN``
+     - ``HARDWARE``
+     - ``0x2001``
+     - Failed to open latch.
+
+   * - ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``
+     - ``HARDWARE``
+     - ``0x2002``
+     - Failed to keep latch open.
+
+   * - ``SDTX_ERR_FAILED_TO_CLOSE``
+     - ``HARDWARE``
+     - ``0x2003``
+     - Failed to close latch.
+
+Other error codes are reserved for future use. Non-error status codes may
+overlap and are generally only unique within their use-case:
+
+.. flat-table:: Latch Status Codes.
+   :widths: 2 1 1 3
+   :header-rows: 1
+
+   * - Name
+     - Category
+     - Value
+     - Short Description
+
+   * - ``SDTX_LATCH_CLOSED``
+     - ``STATUS``
+     - ``0x0000``
+     - Latch is closed/has been closed.
+
+   * - ``SDTX_LATCH_OPENED``
+     - ``STATUS``
+     - ``0x0001``
+     - Latch is open/has been opened.
+
+.. flat-table:: Base State Codes.
+   :widths: 2 1 1 3
+   :header-rows: 1
+
+   * - Name
+     - Category
+     - Value
+     - Short Description
+
+   * - ``SDTX_BASE_DETACHED``
+     - ``STATUS``
+     - ``0x0000``
+     - Base has been detached/is not present.
+
+   * - ``SDTX_BASE_ATTACHED``
+     - ``STATUS``
+     - ``0x0001``
+     - Base has been attached/is present.
+
+Again, other codes are reserved for future use.
+
+.. _events:
+
+Events
+------
+
+Events can be received by reading from the device file. They are disabled by
+default and have to be enabled by executing ``SDTX_IOCTL_EVENTS_ENABLE``
+first. All events follow the layout prescribed by |sdtx_event|. Specific
+event types can be identified by their event code, described in
+|sdtx_event_code|. Note that other event codes are reserved for future use,
+thus an event parser must be able to handle any unknown/unsupported event
+types gracefully, by relying on the payload length given in the event header.
+
+Currently provided event types are:
+
+.. flat-table:: Overview of DTX events.
+   :widths: 2 1 1 3
+   :header-rows: 1
+
+   * - Name
+     - Code
+     - Payload
+     - Short Description
+
+   * - ``SDTX_EVENT_REQUEST``
+     - ``1``
+     - ``0`` bytes
+     - Detachment process initiated/aborted.
+
+   * - ``SDTX_EVENT_CANCEL``
+     - ``2``
+     - ``2`` bytes
+     - EC canceled detachment process.
+
+   * - ``SDTX_EVENT_BASE_CONNECTION``
+     - ``3``
+     - ``4`` bytes
+     - Base connection state changed.
+
+   * - ``SDTX_EVENT_LATCH_STATUS``
+     - ``4``
+     - ``2`` bytes
+     - Latch status changed.
+
+   * - ``SDTX_EVENT_DEVICE_MODE``
+     - ``5``
+     - ``2`` bytes
+     - Device mode changed.
+
+Individual events in more detail:
+
+``SDTX_EVENT_REQUEST``
+^^^^^^^^^^^^^^^^^^^^^^
+
+Sent when a detachment process is started or, if in progress, aborted by the
+user, either via a detach button press or a detach request
+(``SDTX_IOCTL_LATCH_REQUEST``) being sent from user-space.
+
+Does not have any payload.
+
+``SDTX_EVENT_CANCEL``
+^^^^^^^^^^^^^^^^^^^^^
+
+Sent when a detachment process is canceled by the EC due to unfulfilled
+preconditions (e.g. clipboard battery too low to detach) or hardware
+failure. The reason for cancellation is given in the event payload detailed
+below and can be one of
+
+* ``SDTX_DETACH_TIMEDOUT``: Detachment timed out while the latch was locked.
+  The latch has neither been opened nor unlocked.
+
+* ``SDTX_DETACH_NOT_FEASIBLE``: Detachment not feasible due to low clipboard
+  battery.
+
+* ``SDTX_ERR_FAILED_TO_OPEN``: Could not open the latch (hardware failure).
+
+* ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``: Could not keep the latch open (hardware
+  failure).
+
+* ``SDTX_ERR_FAILED_TO_CLOSE``: Could not close the latch (hardware failure).
+
+Other error codes in this context are reserved for future use.
+
+These codes can be classified via the ``SDTX_CATEGORY()`` macro to discern
+between critical hardware errors (``SDTX_CATEGORY_HARDWARE_ERROR``) or
+runtime errors (``SDTX_CATEGORY_RUNTIME_ERROR``), the latter of which may
+happen during normal operation if certain preconditions for detachment are
+not given.
+
+.. flat-table:: Detachment Cancel Event Payload
+   :widths: 1 1 4
+   :header-rows: 1
+
+   * - Field
+     - Type
+     - Description
+
+   * - ``reason``
+     - |__u16|
+     - Reason for cancellation.
+
+``SDTX_EVENT_BASE_CONNECTION``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Sent when the base connection state has changed, i.e. when the base has been
+attached, detached, or detachment has become infeasible due to low clipboard
+battery. The new state and, if a base is connected, ID of the base is
+provided as payload of type |sdtx_base_info| with its layout presented
+below:
+
+.. flat-table:: Base-Connection-Change Event Payload
+   :widths: 1 1 4
+   :header-rows: 1
+
+   * - Field
+     - Type
+     - Description
+
+   * - ``state``
+     - |__u16|
+     - Base connection state.
+
+   * - ``base_id``
+     - |__u16|
+     - Type of base connected (zero if none).
+
+Possible values for ``state`` are:
+
+* ``SDTX_BASE_DETACHED``,
+* ``SDTX_BASE_ATTACHED``, and
+* ``SDTX_DETACH_NOT_FEASIBLE``.
+
+Other values are reserved for future use.
+
+``SDTX_EVENT_LATCH_STATUS``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Sent when the latch status has changed, i.e. when the latch has been opened,
+closed, or an error occurred. The current status is provided as payload:
+
+.. flat-table:: Latch-Status-Change Event Payload
+   :widths: 1 1 4
+   :header-rows: 1
+
+   * - Field
+     - Type
+     - Description
+
+   * - ``status``
+     - |__u16|
+     - Latch status.
+
+Possible values for ``status`` are:
+
+* ``SDTX_LATCH_CLOSED``,
+* ``SDTX_LATCH_OPENED``,
+* ``SDTX_ERR_FAILED_TO_OPEN``,
+* ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``, and
+* ``SDTX_ERR_FAILED_TO_CLOSE``.
+
+Other values are reserved for future use.
+
+``SDTX_EVENT_DEVICE_MODE``
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Sent when the device mode has changed. The new device mode is provided as
+payload:
+
+.. flat-table:: Device-Mode-Change Event Payload
+   :widths: 1 1 4
+   :header-rows: 1
+
+   * - Field
+     - Type
+     - Description
+
+   * - ``mode``
+     - |__u16|
+     - Device operation mode.
+
+Possible values for ``mode`` are:
+
+* ``SDTX_DEVICE_MODE_TABLET``,
+* ``SDTX_DEVICE_MODE_LAPTOP``, and
+* ``SDTX_DEVICE_MODE_STUDIO``.
+
+Other values are reserved for future use.
+
+.. _ioctls:
+
+IOCTLs
+------
+
+The following IOCTLs are provided:
+
+.. flat-table:: Overview of DTX IOCTLs
+   :widths: 1 1 1 1 4
+   :header-rows: 1
+
+   * - Type
+     - Number
+     - Direction
+     - Name
+     - Description
+
+   * - ``0xA5``
+     - ``0x21``
+     - ``-``
+     - ``EVENTS_ENABLE``
+     - Enable events for the current file descriptor.
+
+   * - ``0xA5``
+     - ``0x22``
+     - ``-``
+     - ``EVENTS_DISABLE``
+     - Disable events for the current file descriptor.
+
+   * - ``0xA5``
+     - ``0x23``
+     - ``-``
+     - ``LATCH_LOCK``
+     - Lock the latch.
+
+   * - ``0xA5``
+     - ``0x24``
+     - ``-``
+     - ``LATCH_UNLOCK``
+     - Unlock the latch.
+
+   * - ``0xA5``
+     - ``0x25``
+     - ``-``
+     - ``LATCH_REQUEST``
+     - Request clipboard detachment.
+
+   * - ``0xA5``
+     - ``0x26``
+     - ``-``
+     - ``LATCH_CONFIRM``
+     - Confirm clipboard detachment request.
+
+   * - ``0xA5``
+     - ``0x27``
+     - ``-``
+     - ``LATCH_HEARTBEAT``
+     - Send heartbeat signal to EC.
+
+   * - ``0xA5``
+     - ``0x28``
+     - ``-``
+     - ``LATCH_CANCEL``
+     - Cancel detachment process.
+
+   * - ``0xA5``
+     - ``0x29``
+     - ``R``
+     - ``GET_BASE_INFO``
+     - Get current base/connection information.
+
+   * - ``0xA5``
+     - ``0x2A``
+     - ``R``
+     - ``GET_DEVICE_MODE``
+     - Get current device operation mode.
+
+   * - ``0xA5``
+     - ``0x2B``
+     - ``R``
+     - ``GET_LATCH_STATUS``
+     - Get current device latch status.
+
+``SDTX_IOCTL_EVENTS_ENABLE``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Defined as ``_IO(0xA5, 0x21)``.
+
+Enable events for the current file descriptor. Events can be obtained by
+reading from the device, if enabled. Events are disabled by default.
+
+``SDTX_IOCTL_EVENTS_DISABLE``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Defined as ``_IO(0xA5, 0x22)``.
+
+Disable events for the current file descriptor. Events can be obtained by
+reading from the device, if enabled. Events are disabled by default.
+
+``SDTX_IOCTL_LATCH_LOCK``
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Defined as ``_IO(0xA5, 0x23)``.
+
+Locks the latch, causing the detachment procedure to abort without opening
+the latch on timeout. The latch is unlocked by default. This command will be
+silently ignored if the latch is already locked.
+
+``SDTX_IOCTL_LATCH_UNLOCK``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Defined as ``_IO(0xA5, 0x24)``.
+
+Unlocks the latch, causing the detachment procedure to open the latch on
+timeout. The latch is unlocked by default. This command will not open the
+latch when sent during an ongoing detachment process. It will be silently
+ignored if the latch is already unlocked.
+
+``SDTX_IOCTL_LATCH_REQUEST``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Defined as ``_IO(0xA5, 0x25)``.
+
+Generic latch request. Behavior depends on the context: If no
+detachment-process is active, detachment is requested. Otherwise the
+currently active detachment-process will be aborted.
+
+If a detachment process is canceled by this operation, a generic detachment
+request event (``SDTX_EVENT_REQUEST``) will be sent.
+
+This essentially behaves the same as a detachment button press.
+
+``SDTX_IOCTL_LATCH_CONFIRM``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Defined as ``_IO(0xA5, 0x26)``.
+
+Acknowledges and confirms a latch request. If sent during an ongoing
+detachment process, this command causes the latch to be opened immediately.
+The latch will also be opened if it has been locked. In this case, the latch
+lock is reset to the unlocked state.
+
+This command will be silently ignored if there is currently no detachment
+procedure in progress.
+
+``SDTX_IOCTL_LATCH_HEARTBEAT``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Defined as ``_IO(0xA5, 0x27)``.
+
+Sends a heartbeat, essentially resetting the detachment timeout. This
+command can be used to keep the detachment process alive while work required
+for the detachment to succeed is still in progress.
+
+This command will be silently ignored if there is currently no detachment
+procedure in progress.
+
+``SDTX_IOCTL_LATCH_CANCEL``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Defined as ``_IO(0xA5, 0x28)``.
+
+Cancels detachment in progress (if any). If a detachment process is canceled
+by this operation, a generic detachment request event
+(``SDTX_EVENT_REQUEST``) will be sent.
+
+This command will be silently ignored if there is currently no detachment
+procedure in progress.
+
+``SDTX_IOCTL_GET_BASE_INFO``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Defined as ``_IOR(0xA5, 0x29, struct sdtx_base_info)``.
+
+Get the current base connection state (i.e. attached/detached) and the type
+of the base connected to the clipboard. This command essentially provides
+a way to query the information provided by the base connection change event
+(``SDTX_EVENT_BASE_CONNECTION``).
+
+Possible values for ``struct sdtx_base_info.state`` are:
+
+* ``SDTX_BASE_DETACHED``,
+* ``SDTX_BASE_ATTACHED``, and
+* ``SDTX_DETACH_NOT_FEASIBLE``.
+
+Other values are reserved for future use.
+
+``SDTX_IOCTL_GET_DEVICE_MODE``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Defined as ``_IOR(0xA5, 0x2A, __u16)``.
+
+Returns the device operation mode, indicating if and how the base is
+attached to the clipboard. This command essentially provides a way to
+query the information provided by the device mode change event
+(``SDTX_EVENT_DEVICE_MODE``).
+
+Returned values are:
+
+* ``SDTX_DEVICE_MODE_LAPTOP``
+* ``SDTX_DEVICE_MODE_TABLET``
+* ``SDTX_DEVICE_MODE_STUDIO``
+
+See |sdtx_device_mode| for details. Other values are reserved for future
+use.
+
+
+``SDTX_IOCTL_GET_LATCH_STATUS``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Defined as ``_IOR(0xA5, 0x2B, __u16)``.
+
+Get the current latch status or (presumably) the last error encountered when
+trying to open/close the latch. This command essentially provides a way
+to query the information provided by the latch status change event
+(``SDTX_EVENT_LATCH_STATUS``).
+
+Returned values are:
+
+* ``SDTX_LATCH_CLOSED``,
+* ``SDTX_LATCH_OPENED``,
+* ``SDTX_ERR_FAILED_TO_OPEN``,
+* ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``, and
+* ``SDTX_ERR_FAILED_TO_CLOSE``.
+
+Other values are reserved for future use.
+
+A Note on Base IDs
+------------------
+
+Base types/IDs provided via ``SDTX_EVENT_BASE_CONNECTION`` or
+``SDTX_IOCTL_GET_BASE_INFO`` are directly forwarded from the EC in the lower
+byte of the combined |__u16| value, with the driver storing the EC type from
+which this ID comes in the high byte (without this, base IDs over different
+types of ECs may be overlapping).
+
+The ``SDTX_DEVICE_TYPE()`` macro can be used to determine the EC device
+type. This can be one of
+
+* ``SDTX_DEVICE_TYPE_HID``, for Surface Aggregator Module over HID, and
+
+* ``SDTX_DEVICE_TYPE_SSH``, for Surface Aggregator Module over Surface Serial
+  Hub.
+
+Note that currently only the ``SSH`` type EC is supported, however ``HID``
+type is reserved for future use.
+
+Structures and Enums
+--------------------
+
+.. kernel-doc:: include/uapi/linux/surface_aggregator/dtx.h
+
+API Users
+=========
+
+A user-space daemon utilizing this API can be found at
+https://github.com/linux-surface/surface-dtx-daemon.
index 3ccabce2327150c67bf83258be46ddf231e7c63e..98ea9946b8a2e8f42db4914b6bab52dc60aa194b 100644 (file)
@@ -11,6 +11,7 @@ This is the documentation for client drivers themselves. Refer to
    :maxdepth: 1
 
    cdev
+   dtx
    san
 
 .. only::  subproject and html
index c41ac76ffaae110368ab7126d7fe9d01320eaf55..f3a1223f2517e9d01f905a94a874f51c54ca0604 100644 (file)
@@ -7,6 +7,7 @@ Authors:
 - Enno Luebbers <enno.luebbers@intel.com>
 - Xiao Guangrong <guangrong.xiao@linux.intel.com>
 - Wu Hao <hao.wu@intel.com>
+- Xu Yilun <yilun.xu@intel.com>
 
 The Device Feature List (DFL) FPGA framework (and drivers according to
 this framework) hides the very details of low layer hardwares and provides
@@ -530,6 +531,31 @@ Being able to specify more than one DFL per BAR has been considered, but it
 was determined the use case did not provide value.  Specifying a single DFL
 per BAR simplifies the implementation and allows for extra error checking.
 
+
+Userspace driver support for DFL devices
+========================================
+The purpose of an FPGA is to be reprogrammed with newly developed hardware
+components. New hardware can instantiate a new private feature in the DFL, and
+then present a DFL device in the system. In some cases users may need a
+userspace driver for the DFL device:
+
+* Users may need to run some diagnostic test for their hardware.
+* Users may prototype the kernel driver in user space.
+* Some hardware is designed for specific purposes and does not fit into one of
+  the standard kernel subsystems.
+
+This requires direct access to MMIO space and interrupt handling from
+userspace. The uio_dfl module exposes the UIO device interfaces for this
+purpose.
+
+Currently the uio_dfl driver only supports the Ether Group sub feature, which
+has no irq in hardware. So the interrupt handling is not added in this driver.
+
+UIO_DFL should be selected to enable the uio_dfl module driver. To support a
+new DFL feature via UIO direct access, its feature id should be added to the
+driver's id_table.
+
+
 Open discussion
 ===============
 FME driver exports one ioctl (DFL_FPGA_FME_PORT_PR) for partial reconfiguration
diff --git a/Documentation/misc-devices/dw-xdata-pcie.rst b/Documentation/misc-devices/dw-xdata-pcie.rst
new file mode 100644 (file)
index 0000000..781c679
--- /dev/null
@@ -0,0 +1,64 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================================================================
+Driver for Synopsys DesignWare PCIe traffic generator (also known as xData)
+===========================================================================
+
+Supported chips:
+Synopsys DesignWare PCIe prototype solution
+
+Datasheet:
+Not freely available
+
+Author:
+Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+
+Description
+-----------
+
+This driver should be used as a host-side (Root Complex) driver and Synopsys
+DesignWare prototype that includes this IP.
+
+The dw-xdata-pcie driver can be used to enable/disable PCIe traffic
+generator in either direction (mutual exclusion) besides allowing the
+PCIe link performance analysis.
+
+The interaction with this driver is done through the module parameter and
+can be changed in runtime. The driver outputs the requested command state
+information to ``/var/log/kern.log`` or dmesg.
+
+Example
+-------
+
+Write TLPs traffic generation - Root Complex to Endpoint direction
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Generate traffic::
+
+ # echo 1 > /sys/class/misc/dw-xdata-pcie.0/write
+
+Get link throughput in MB/s::
+
+ # cat /sys/class/misc/dw-xdata-pcie.0/write
+ 204
+
+Stop traffic in any direction::
+
+ # echo 0 > /sys/class/misc/dw-xdata-pcie.0/write
+
+Read TLPs traffic generation - Endpoint to Root Complex direction
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Generate traffic::
+
+ # echo 1 > /sys/class/misc/dw-xdata-pcie.0/read
+
+Get link throughput in MB/s::
+
+ # cat /sys/class/misc/dw-xdata-pcie.0/read
+ 199
+
+Stop traffic in any direction::
+
+ # echo 0 > /sys/class/misc/dw-xdata-pcie.0/read
+
index 64420b3314feb4bb1399edbbeb541f0a09030b0e..30ac58f8190132c1781e4d99fc84700df0bbef4d 100644 (file)
@@ -19,6 +19,7 @@ fit into other categories.
    bh1770glc
    eeprom
    c2port
+   dw-xdata-pcie
    ibmvmc
    ics932s401
    isl29003
index 05073482db055eafa731e13662c015e9e1498e5f..dc03ff88454112235e73651817c39255a4a032fb 100644 (file)
@@ -976,9 +976,9 @@ constraints on coalescing parameters and their values.
 
 
 PAUSE_GET
-============
+=========
 
-Gets channel counts like ``ETHTOOL_GPAUSE`` ioctl request.
+Gets pause frame settings like ``ETHTOOL_GPAUSEPARAM`` ioctl request.
 
 Request contents:
 
@@ -1007,7 +1007,7 @@ the statistics in the following structure:
 Each member has a corresponding attribute defined.
 
 PAUSE_SET
-============
+=========
 
 Sets pause parameters like ``ETHTOOL_GPAUSEPARAM`` ioctl request.
 
@@ -1024,7 +1024,7 @@ Request contents:
 EEE_GET
 =======
 
-Gets channel counts like ``ETHTOOL_GEEE`` ioctl request.
+Gets Energy Efficient Ethernet settings like ``ETHTOOL_GEEE`` ioctl request.
 
 Request contents:
 
@@ -1054,7 +1054,7 @@ first 32 are provided by the ``ethtool_ops`` callback.
 EEE_SET
 =======
 
-Sets pause parameters like ``ETHTOOL_GEEEPARAM`` ioctl request.
+Sets Energy Efficient Ethernet parameters like ``ETHTOOL_SEEE`` ioctl request.
 
 Request contents:
 
index c7952ac5bd2f1f5f5afccfcc1bf1a7952d7d8e28..3feb5e565b1a1d59b3593f03d6fb554a73e70861 100644 (file)
@@ -1849,21 +1849,6 @@ ip6frag_low_thresh - INTEGER
 ip6frag_time - INTEGER
        Time in seconds to keep an IPv6 fragment in memory.
 
-IPv6 Segment Routing:
-
-seg6_flowlabel - INTEGER
-       Controls the behaviour of computing the flowlabel of outer
-       IPv6 header in case of SR T.encaps
-
-        == =======================================================
-        -1  set flowlabel to zero.
-         0  copy flowlabel from Inner packet in case of Inner IPv6
-            (Set flowlabel to 0 in case IPv4/L2)
-         1  Compute the flowlabel using seg6_make_flowlabel()
-        == =======================================================
-
-       Default is 0.
-
 ``conf/default/*``:
        Change the interface-specific default settings.
 
index ec73e144503075b7fa25f6f9d25351d8c4749884..07c20e470bafe60f40eb4753cdddbdd3a1dca229 100644 (file)
@@ -24,3 +24,16 @@ seg6_require_hmac - INTEGER
        * 1 - Drop SR packets without HMAC, validate SR packets with HMAC
 
        Default is 0.
+
+seg6_flowlabel - INTEGER
+       Controls the behaviour of computing the flowlabel of outer
+       IPv6 header in case of SR T.encaps
+
+        == =======================================================
+        -1  set flowlabel to zero.
+         0  copy flowlabel from Inner packet in case of Inner IPv6
+            (Set flowlabel to 0 in case IPv4/L2)
+         1  Compute the flowlabel using seg6_make_flowlabel()
+        == =======================================================
+
+       Default is 0.
index 1da879a68640bdac88a558fa830586070cfa49f5..80d5a5af62a1de0a571ab749319958e7c9912e75 100644 (file)
@@ -6,30 +6,127 @@ Trusted and Encrypted Keys are two new key types added to the existing kernel
 key ring service.  Both of these new types are variable length symmetric keys,
 and in both cases all keys are created in the kernel, and user space sees,
 stores, and loads only encrypted blobs.  Trusted Keys require the availability
-of a Trusted Platform Module (TPM) chip for greater security, while Encrypted
-Keys can be used on any system.  All user level blobs, are displayed and loaded
-in hex ascii for convenience, and are integrity verified.
+of a Trust Source for greater security, while Encrypted Keys can be used on any
+system. All user level blobs, are displayed and loaded in hex ASCII for
+convenience, and are integrity verified.
 
-Trusted Keys use a TPM both to generate and to seal the keys.  Keys are sealed
-under a 2048 bit RSA key in the TPM, and optionally sealed to specified PCR
-(integrity measurement) values, and only unsealed by the TPM, if PCRs and blob
-integrity verifications match.  A loaded Trusted Key can be updated with new
-(future) PCR values, so keys are easily migrated to new pcr values, such as
-when the kernel and initramfs are updated.  The same key can have many saved
-blobs under different PCR values, so multiple boots are easily supported.
 
-TPM 1.2
--------
+Trust Source
+============
 
-By default, trusted keys are sealed under the SRK, which has the default
-authorization value (20 zeros).  This can be set at takeownership time with the
-trouser's utility: "tpm_takeownership -u -z".
+A trust source provides the source of security for Trusted Keys.  This
+section lists currently supported trust sources, along with their security
+considerations.  Whether or not a trust source is sufficiently safe depends
+on the strength and correctness of its implementation, as well as the threat
+environment for a specific use case.  Since the kernel doesn't know what the
+environment is, and there is no metric of trust, it is dependent on the
+consumer of the Trusted Keys to determine if the trust source is sufficiently
+safe.
 
-TPM 2.0
--------
+  *  Root of trust for storage
 
-The user must first create a storage key and make it persistent, so the key is
-available after reboot. This can be done using the following commands.
+     (1) TPM (Trusted Platform Module: hardware device)
+
+         Rooted to Storage Root Key (SRK) which never leaves the TPM that
+         provides crypto operation to establish root of trust for storage.
+
+     (2) TEE (Trusted Execution Environment: OP-TEE based on Arm TrustZone)
+
+         Rooted to Hardware Unique Key (HUK) which is generally burnt in on-chip
+         fuses and is accessible to TEE only.
+
+  *  Execution isolation
+
+     (1) TPM
+
+         Fixed set of operations running in isolated execution environment.
+
+     (2) TEE
+
+         Customizable set of operations running in isolated execution
+         environment verified via Secure/Trusted boot process.
+
+  * Optional binding to platform integrity state
+
+     (1) TPM
+
+         Keys can be optionally sealed to specified PCR (integrity measurement)
+         values, and only unsealed by the TPM, if PCRs and blob integrity
+         verifications match. A loaded Trusted Key can be updated with new
+         (future) PCR values, so keys are easily migrated to new PCR values,
+         such as when the kernel and initramfs are updated. The same key can
+         have many saved blobs under different PCR values, so multiple boots are
+         easily supported.
+
+     (2) TEE
+
+         Relies on Secure/Trusted boot process for platform integrity. It can
+         be extended with TEE based measured boot process.
+
+  *  Interfaces and APIs
+
+     (1) TPM
+
+         TPMs have well-documented, standardized interfaces and APIs.
+
+     (2) TEE
+
+         TEEs have well-documented, standardized client interface and APIs. For
+         more details refer to ``Documentation/staging/tee.rst``.
+
+
+  *  Threat model
+
+     The strength and appropriateness of a particular TPM or TEE for a given
+     purpose must be assessed when using them to protect security-relevant data.
+
+
+Key Generation
+==============
+
+Trusted Keys
+------------
+
+New keys are created from random numbers generated in the trust source. They
+are encrypted/decrypted using a child key in the storage key hierarchy.
+Encryption and decryption of the child key must be protected by a strong
+access control policy within the trust source.
+
+  *  TPM (hardware device) based RNG
+
+     Strength of random numbers may vary from one device manufacturer to
+     another.
+
+  *  TEE (OP-TEE based on Arm TrustZone) based RNG
+
+     RNG is customizable as per platform needs. It can either be direct output
+     from platform specific hardware RNG or a software based Fortuna CSPRNG
+     which can be seeded via multiple entropy sources.
+
+Encrypted Keys
+--------------
+
+Encrypted keys do not depend on a trust source, and are faster, as they use AES
+for encryption/decryption. New keys are created from kernel-generated random
+numbers, and are encrypted/decrypted using a specified ‘master’ key. The
+‘master’ key can either be a trusted-key or user-key type. The main disadvantage
+of encrypted keys is that if they are not rooted in a trusted key, they are only
+as secure as the user key encrypting them. The master user key should therefore
+be loaded in as secure a way as possible, preferably early in boot.
+
+
+Usage
+=====
+
+Trusted Keys usage: TPM
+-----------------------
+
+TPM 1.2: By default, trusted keys are sealed under the SRK, which has the
+default authorization value (20 bytes of 0s).  This can be set at takeownership
+time with the TrouSerS utility: "tpm_takeownership -u -z".
+
+TPM 2.0: The user must first create a storage key and make it persistent, so the
+key is available after reboot. This can be done using the following commands.
 
 With the IBM TSS 2 stack::
 
@@ -78,14 +175,21 @@ TPM_STORED_DATA format.  The key length for new keys are always in bytes.
 Trusted Keys can be 32 - 128 bytes (256 - 1024 bits), the upper limit is to fit
 within the 2048 bit SRK (RSA) keylength, with all necessary structure/padding.
 
-Encrypted keys do not depend on a TPM, and are faster, as they use AES for
-encryption/decryption.  New keys are created from kernel generated random
-numbers, and are encrypted/decrypted using a specified 'master' key.  The
-'master' key can either be a trusted-key or user-key type.  The main
-disadvantage of encrypted keys is that if they are not rooted in a trusted key,
-they are only as secure as the user key encrypting them.  The master user key
-should therefore be loaded in as secure a way as possible, preferably early in
-boot.
+Trusted Keys usage: TEE
+-----------------------
+
+Usage::
+
+    keyctl add trusted name "new keylen" ring
+    keyctl add trusted name "load hex_blob" ring
+    keyctl print keyid
+
+"keyctl print" returns an ASCII hex copy of the sealed key, which is in format
+specific to TEE device implementation.  The key length for new keys is always
+in bytes. Trusted Keys can be 32 - 128 bytes (256 - 1024 bits).
+
+Encrypted Keys usage
+--------------------
 
 The decrypted portion of encrypted keys can contain either a simple symmetric
 key or a more complex structure. The format of the more complex structure is
@@ -103,8 +207,8 @@ Where::
        format:= 'default | ecryptfs | enc32'
        key-type:= 'trusted' | 'user'
 
-
-Examples of trusted and encrypted key usage:
+Examples of trusted and encrypted key usage
+-------------------------------------------
 
 Create and save a trusted key named "kmk" of length 32 bytes.
 
@@ -150,7 +254,7 @@ Load a trusted key from the saved blob::
     f1f8fff03ad0acb083725535636addb08d73dedb9832da198081e5deae84bfaf0409c22b
     e4a8aea2b607ec96931e6f4d4fe563ba
 
-Reseal a trusted key under new pcr values::
+Reseal (TPM specific) a trusted key under new PCR values::
 
     $ keyctl update 268728824 "update pcrinfo=`cat pcr.blob`"
     $ keyctl print 268728824
@@ -164,11 +268,12 @@ Reseal a trusted key under new pcr values::
     7ef6a24defe4846104209bf0c3eced7fa1a672ed5b125fc9d8cd88b476a658a4434644ef
     df8ae9a178e9f83ba9f08d10fa47e4226b98b0702f06b3b8
 
+
 The initial consumer of trusted keys is EVM, which at boot time needs a high
-quality symmetric key for HMAC protection of file metadata.  The use of a
+quality symmetric key for HMAC protection of file metadata. The use of a
 trusted key provides strong guarantees that the EVM key has not been
-compromised by a user level problem, and when sealed to specific boot PCR
-values, protects against boot and offline attacks.  Create and save an
+compromised by a user level problem, and when sealed to a platform integrity
+state, protects against boot and offline attacks. Create and save an
 encrypted key "evm" using the above trusted key "kmk":
 
 option 1: omitting 'format'::
@@ -207,3 +312,61 @@ about the usage can be found in the file
 Another new format 'enc32' has been defined in order to support encrypted keys
 with payload size of 32 bytes. This will initially be used for nvdimm security
 but may expand to other usages that require 32 bytes payload.
+
+
+TPM 2.0 ASN.1 Key Format
+------------------------
+
+The TPM 2.0 ASN.1 key format is designed to be easily recognisable,
+even in binary form (fixing a problem we had with the TPM 1.2 ASN.1
+format) and to be extensible for additions like importable keys and
+policy::
+
+    TPMKey ::= SEQUENCE {
+        type           OBJECT IDENTIFIER
+        emptyAuth      [0] EXPLICIT BOOLEAN OPTIONAL
+        parent         INTEGER
+        pubkey         OCTET STRING
+        privkey                OCTET STRING
+    }
+
+type is what distinguishes the key even in binary form since the OID
+is provided by the TCG to be unique and thus forms a recognizable
+binary pattern at offset 3 in the key.  The OIDs currently made
+available are::
+
+    2.23.133.10.1.3 TPM Loadable key.  This is an asymmetric key (Usually
+                    RSA2048 or Elliptic Curve) which can be imported by a
+                    TPM2_Load() operation.
+
+    2.23.133.10.1.4 TPM Importable Key.  This is an asymmetric key (Usually
+                    RSA2048 or Elliptic Curve) which can be imported by a
+                    TPM2_Import() operation.
+
+    2.23.133.10.1.5 TPM Sealed Data.  This is a set of data (up to 128
+                    bytes) which is sealed by the TPM.  It usually
+                    represents a symmetric key and must be unsealed before
+                    use.
+
+The trusted key code only uses the TPM Sealed Data OID.
+
+emptyAuth is true if the key has well known authorization "".  If it
+is false or not present, the key requires an explicit authorization
+phrase.  This is used by most user space consumers to decide whether
+to prompt for a password.
+
+parent represents the parent key handle, either in the 0x81 MSO space,
+like 0x81000001 for the RSA primary storage key.  Userspace programmes
+also support specifying the primary handle in the 0x40 MSO space.  If
+this happens the Elliptic Curve variant of the primary key using the
+TCG defined template will be generated on the fly into a volatile
+object and used as the parent.  The current kernel code only supports
+the 0x81 MSO form.
+
+pubkey is the binary representation of TPM2B_PRIVATE excluding the
+initial TPM2B header, which can be reconstructed from the ASN.1 octet
+string length.
+
+privkey is the binary representation of TPM2B_PUBLIC excluding the
+initial TPM2B header which can be reconstructed from the ASN.1 octed
+string length.
index 96b2ae9f277f2ed2056c2f56db9f0166f8ca3469..5618a39456df4e18d5053b3088b6f781384b8bda 100644 (file)
@@ -328,6 +328,8 @@ Code  Seq#    Include File                                           Comments
 0xA4  00-1F  uapi/asm/sgx.h                                          <mailto:linux-sgx@vger.kernel.org>
 0xA5  01     linux/surface_aggregator/cdev.h                         Microsoft Surface Platform System Aggregator
                                                                      <mailto:luzmaximilian@gmail.com>
+0xA5  20-2F  linux/surface_aggregator/dtx.h                          Microsoft Surface DTX driver
+                                                                     <mailto:luzmaximilian@gmail.com>
 0xAA  00-3F  linux/uapi/linux/userfaultfd.h
 0xAB  00-1F  linux/nbd.h
 0xAC  00-1F  linux/raw.h
index eaee1368b4fd8b838afba5d69d626fad1ec9e39d..dd0ac96ff9efba9021b0398a8c0323618e1c33b1 100644 (file)
@@ -209,3 +209,44 @@ An application may be loaded into a container enclave which is specially
 configured with a library OS and run-time which permits the application to run.
 The enclave run-time and library OS work together to execute the application
 when a thread enters the enclave.
+
+Impact of Potential Kernel SGX Bugs
+===================================
+
+EPC leaks
+---------
+
+When EPC page leaks happen, a WARNING like this is shown in dmesg:
+
+"EREMOVE returned ... and an EPC page was leaked.  SGX may become unusable..."
+
+This is effectively a kernel use-after-free of an EPC page, and due
+to the way SGX works, the bug is detected at freeing. Rather than
+adding the page back to the pool of available EPC pages, the kernel
+intentionally leaks the page to avoid additional errors in the future.
+
+When this happens, the kernel will likely soon leak more EPC pages, and
+SGX will likely become unusable because the memory available to SGX is
+limited. However, while this may be fatal to SGX, the rest of the kernel
+is unlikely to be impacted and should continue to work.
+
+As a result, when this happpens, user should stop running any new
+SGX workloads, (or just any new workloads), and migrate all valuable
+workloads. Although a machine reboot can recover all EPC memory, the bug
+should be reported to Linux developers.
+
+
+Virtual EPC
+===========
+
+The implementation has also a virtual EPC driver to support SGX enclaves
+in guests. Unlike the SGX driver, an EPC page allocated by the virtual
+EPC driver doesn't have a specific enclave associated with it. This is
+because KVM doesn't track how a guest uses EPC pages.
+
+As a result, the SGX core page reclaimer doesn't support reclaiming EPC
+pages allocated to KVM guests through the virtual EPC driver. If the
+user wants to deploy SGX applications both on the host and in guests
+on the same machine, the user should reserve enough EPC (by taking out
+total virtual EPC size of all SGX VMs from the physical EPC size) for
+host SGX applications so they can run with acceptable performance.
index 7ea89f5f4886f057304e2d9997e2e82b889e231d..1d08d7a76470d730768ac00004f912ae2939fb83 100644 (file)
@@ -572,6 +572,12 @@ S: Maintained
 F:     Documentation/scsi/advansys.rst
 F:     drivers/scsi/advansys.c
 
+ADVANTECH SWBTN DRIVER
+M:     Andrea Ho <Andrea.Ho@advantech.com.tw>
+L:     platform-driver-x86@vger.kernel.org
+S:     Maintained
+F:     drivers/platform/x86/adv_swbutton.c
+
 ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
 M:     Michael Hennerich <michael.hennerich@analog.com>
 S:     Supported
@@ -696,6 +702,11 @@ S: Maintained
 F:     Documentation/i2c/busses/i2c-ali1563.rst
 F:     drivers/i2c/busses/i2c-ali1563.c
 
+ALIENWARE WMI DRIVER
+L:     Dell.Client.Kernel@dell.com
+S:     Maintained
+F:     drivers/platform/x86/dell/alienware-wmi.c
+
 ALL SENSORS DLH SERIES PRESSURE SENSORS DRIVER
 M:     Tomislav Denis <tomislav.denis@avl.com>
 L:     linux-iio@vger.kernel.org
@@ -1575,11 +1586,13 @@ R:      Jernej Skrabec <jernej.skrabec@siol.net>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sunxi/linux.git
+L:     linux-sunxi@lists.linux.dev
 F:     arch/arm/mach-sunxi/
 F:     arch/arm64/boot/dts/allwinner/
 F:     drivers/clk/sunxi-ng/
 F:     drivers/pinctrl/sunxi/
 F:     drivers/soc/sunxi/
+N:     allwinner
 N:     sun[x456789]i
 N:     sun50i
 
@@ -1763,6 +1776,7 @@ F:        Documentation/devicetree/bindings/arm/coresight.txt
 F:     Documentation/trace/coresight/*
 F:     drivers/hwtracing/coresight/*
 F:     include/dt-bindings/arm/coresight-cti-dt.h
+F:     include/linux/coresight*
 F:     tools/perf/arch/arm/util/auxtrace.c
 F:     tools/perf/arch/arm/util/cs-etm.c
 F:     tools/perf/arch/arm/util/cs-etm.h
@@ -1789,19 +1803,26 @@ F:      drivers/net/ethernet/cortina/
 F:     drivers/pinctrl/pinctrl-gemini.c
 F:     drivers/rtc/rtc-ftrtc010.c
 
-ARM/CZ.NIC TURRIS MOX SUPPORT
-M:     Marek Behun <marek.behun@nic.cz>
+ARM/CZ.NIC TURRIS SUPPORT
+M:     Marek Behun <kabel@kernel.org>
 S:     Maintained
-W:     http://mox.turris.cz
+W:     https://www.turris.cz/
 F:     Documentation/ABI/testing/debugfs-moxtet
 F:     Documentation/ABI/testing/sysfs-bus-moxtet-devices
 F:     Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
 F:     Documentation/devicetree/bindings/bus/moxtet.txt
 F:     Documentation/devicetree/bindings/firmware/cznic,turris-mox-rwtm.txt
 F:     Documentation/devicetree/bindings/gpio/gpio-moxtet.txt
+F:     Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml
+F:     Documentation/devicetree/bindings/watchdog/armada-37xx-wdt.txt
 F:     drivers/bus/moxtet.c
 F:     drivers/firmware/turris-mox-rwtm.c
+F:     drivers/leds/leds-turris-omnia.c
+F:     drivers/mailbox/armada-37xx-rwtm-mailbox.c
 F:     drivers/gpio/gpio-moxtet.c
+F:     drivers/watchdog/armada_37xx_wdt.c
+F:     include/dt-bindings/bus/moxtet.h
+F:     include/linux/armada-37xx-rwtm-mailbox.h
 F:     include/linux/moxtet.h
 
 ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)
@@ -5039,19 +5060,19 @@ F:      drivers/platform/x86/dell/dell_rbu.c
 
 DELL SMBIOS DRIVER
 M:     Pali Rohár <pali@kernel.org>
-M:     Mario Limonciello <mario.limonciello@dell.com>
+L:     Dell.Client.Kernel@dell.com
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/dell/dell-smbios.*
 
 DELL SMBIOS SMM DRIVER
-M:     Mario Limonciello <mario.limonciello@dell.com>
+L:     Dell.Client.Kernel@dell.com
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/dell/dell-smbios-smm.c
 
 DELL SMBIOS WMI DRIVER
-M:     Mario Limonciello <mario.limonciello@dell.com>
+L:     Dell.Client.Kernel@dell.com
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/dell/dell-smbios-wmi.c
@@ -5065,14 +5086,14 @@ F:      Documentation/driver-api/dcdbas.rst
 F:     drivers/platform/x86/dell/dcdbas.*
 
 DELL WMI DESCRIPTOR DRIVER
-M:     Mario Limonciello <mario.limonciello@dell.com>
+L:     Dell.Client.Kernel@dell.com
 S:     Maintained
 F:     drivers/platform/x86/dell/dell-wmi-descriptor.c
 
 DELL WMI SYSMAN DRIVER
 M:     Divya Bharathi <divya.bharathi@dell.com>
-M:     Mario Limonciello <mario.limonciello@dell.com>
 M:     Prasanth Ksr <prasanth.ksr@dell.com>
+L:     Dell.Client.Kernel@dell.com
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-class-firmware-attributes
@@ -5104,6 +5125,13 @@ S:       Maintained
 F:     drivers/dma/dw-edma/
 F:     include/linux/dma/edma.h
 
+DESIGNWARE XDATA IP DRIVER
+M:     Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+L:     linux-pci@vger.kernel.org
+S:     Maintained
+F:     Documentation/misc-devices/dw-xdata-pcie.rst
+F:     drivers/misc/dw-xdata-pcie.c
+
 DESIGNWARE USB2 DRD IP DRIVER
 M:     Minas Harutyunyan <hminas@synopsys.com>
 L:     linux-usb@vger.kernel.org
@@ -5174,6 +5202,12 @@ M:       Torben Mathiasen <device@lanana.org>
 S:     Maintained
 W:     http://lanana.org/docs/device-list/index.html
 
+DEVICE RESOURCE MANAGEMENT HELPERS
+M:     Hans de Goede <hdegoede@redhat.com>
+R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+S:     Maintained
+F:     include/linux/devm-helpers.h
+
 DEVICE-MAPPER  (LVM)
 M:     Alasdair Kergon <agk@redhat.com>
 M:     Mike Snitzer <snitzer@redhat.com>
@@ -6995,6 +7029,7 @@ S:        Maintained
 F:     Documentation/ABI/testing/sysfs-bus-dfl*
 F:     Documentation/fpga/dfl.rst
 F:     drivers/fpga/dfl*
+F:     drivers/uio/uio_dfl.c
 F:     include/linux/dfl.h
 F:     include/uapi/linux/fpga-dfl.h
 
@@ -7094,7 +7129,7 @@ S:        Maintained
 F:     drivers/i2c/busses/i2c-cpm.c
 
 FREESCALE IMX / MXC FEC DRIVER
-M:     Fugang Duan <fugang.duan@nxp.com>
+M:     Joakim Zhang <qiangqing.zhang@nxp.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -7539,6 +7574,12 @@ F:       Documentation/filesystems/gfs2*
 F:     fs/gfs2/
 F:     include/uapi/linux/gfs2_ondisk.h
 
+GIGABYTE WMI DRIVER
+M:     Thomas Weißschuh <thomas@weissschuh.net>
+L:     platform-driver-x86@vger.kernel.org
+S:     Maintained
+F:     drivers/platform/x86/gigabyte-wmi.c
+
 GNSS SUBSYSTEM
 M:     Johan Hovold <johan@kernel.org>
 S:     Maintained
@@ -7888,6 +7929,11 @@ W:       https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
 F:     drivers/media/usb/hdpvr/
 
+HEWLETT PACKARD ENTERPRISE ILO CHIF DRIVER
+M:     Matt Hsiao <matt.hsiao@hpe.com>
+S:     Supported
+F:     drivers/misc/hpilo.[ch]
+
 HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER
 M:     Jerry Hoemann <jerry.hoemann@hpe.com>
 S:     Supported
@@ -8514,9 +8560,9 @@ F:        drivers/pci/hotplug/rpaphp*
 
 IBM Power SRIOV Virtual NIC Device Driver
 M:     Dany Madden <drt@linux.ibm.com>
-M:     Lijun Pan <ljp@linux.ibm.com>
 M:     Sukadev Bhattiprolu <sukadev@linux.ibm.com>
 R:     Thomas Falcon <tlfalcon@linux.ibm.com>
+R:     Lijun Pan <lijunp213@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/ibm/ibmvnic.*
@@ -8542,7 +8588,8 @@ S:        Supported
 F:     drivers/scsi/ibmvscsi/ibmvfc*
 
 IBM Power Virtual Management Channel Driver
-M:     Steven Royer <seroyer@linux.ibm.com>
+M:     Brad Warrum <bwarrum@linux.ibm.com>
+M:     Ritu Agarwal <rituagar@linux.ibm.com>
 S:     Supported
 F:     drivers/misc/ibmvmc.*
 
@@ -8600,9 +8647,8 @@ F:        drivers/ide/
 F:     include/linux/ide.h
 
 IDE/ATAPI DRIVERS
-M:     Borislav Petkov <bp@alien8.de>
 L:     linux-ide@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     Documentation/cdrom/ide-cd.rst
 F:     drivers/ide/ide-cd*
 
@@ -9135,6 +9181,7 @@ M:        Rajneesh Bhardwaj <irenic.rajneesh@gmail.com>
 M:     David E Box <david.e.box@intel.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
+F:     Documentation/ABI/testing/sysfs-platform-intel-pmc
 F:     drivers/platform/x86/intel_pmc_core*
 
 INTEL PMIC GPIO DRIVERS
@@ -9245,7 +9292,7 @@ W:        https://slimbootloader.github.io/security/firmware-update.html
 F:     drivers/platform/x86/intel-wmi-sbl-fw-update.c
 
 INTEL WMI THUNDERBOLT FORCE POWER DRIVER
-M:     Mario Limonciello <mario.limonciello@dell.com>
+L:     Dell.Client.Kernel@dell.com
 S:     Maintained
 F:     drivers/platform/x86/intel-wmi-thunderbolt.c
 
@@ -9275,6 +9322,7 @@ Q:        https://patchwork.kernel.org/project/intel-sgx/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/sgx
 F:     Documentation/x86/sgx.rst
 F:     arch/x86/entry/vdso/vsgx.S
+F:     arch/x86/include/asm/sgx.h
 F:     arch/x86/include/uapi/asm/sgx.h
 F:     arch/x86/kernel/cpu/sgx/*
 F:     tools/testing/selftests/sgx/*
@@ -9284,6 +9332,7 @@ INTERCONNECT API
 M:     Georgi Djakov <djakov@kernel.org>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/djakov/icc.git
 F:     Documentation/devicetree/bindings/interconnect/
 F:     Documentation/driver-api/interconnect.rst
 F:     drivers/interconnect/
@@ -9887,6 +9936,14 @@ F:       include/keys/trusted-type.h
 F:     include/keys/trusted_tpm.h
 F:     security/keys/trusted-keys/
 
+KEYS-TRUSTED-TEE
+M:     Sumit Garg <sumit.garg@linaro.org>
+L:     linux-integrity@vger.kernel.org
+L:     keyrings@vger.kernel.org
+S:     Supported
+F:     include/keys/trusted_tee.h
+F:     security/keys/trusted-keys/trusted_tee.c
+
 KEYS/KEYRINGS
 M:     David Howells <dhowells@redhat.com>
 M:     Jarkko Sakkinen <jarkko@kernel.org>
@@ -11444,8 +11501,8 @@ Q:      https://patchwork.kernel.org/project/netdevbpf/list/
 F:     drivers/net/ethernet/mellanox/mlxfw/
 
 MELLANOX HARDWARE PLATFORM SUPPORT
-M:     Andy Shevchenko <andy@infradead.org>
-M:     Darren Hart <dvhart@infradead.org>
+M:     Hans de Goede <hdegoede@redhat.com>
+M:     Mark Gross <mgross@linux.intel.com>
 M:     Vadim Pasternak <vadimp@nvidia.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Supported
@@ -11868,6 +11925,14 @@ F:     drivers/scsi/smartpqi/smartpqi*.[ch]
 F:     include/linux/cciss*.h
 F:     include/uapi/linux/cciss*.h
 
+MICROSOFT SURFACE DTX DRIVER
+M:     Maximilian Luz <luzmaximilian@gmail.com>
+L:     platform-driver-x86@vger.kernel.org
+S:     Maintained
+F:     Documentation/driver-api/surface_aggregator/clients/dtx.rst
+F:     drivers/platform/surface/surface_dtx.c
+F:     include/uapi/linux/surface_aggregator/dtx.h
+
 MICROSOFT SURFACE GPE LID SUPPORT DRIVER
 M:     Maximilian Luz <luzmaximilian@gmail.com>
 L:     platform-driver-x86@vger.kernel.org
@@ -11889,6 +11954,12 @@ L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/surface/surface_hotplug.c
 
+MICROSOFT SURFACE PLATFORM PROFILE DRIVER
+M:     Maximilian Luz <luzmaximilian@gmail.com>
+L:     platform-driver-x86@vger.kernel.org
+S:     Maintained
+F:     drivers/platform/surface/surface_platform_profile.c
+
 MICROSOFT SURFACE PRO 3 BUTTON DRIVER
 M:     Chen Yu <yu.c.chen@intel.com>
 L:     platform-driver-x86@vger.kernel.org
@@ -11904,6 +11975,7 @@ F:      Documentation/driver-api/surface_aggregator/
 F:     drivers/platform/surface/aggregator/
 F:     drivers/platform/surface/surface_acpi_notify.c
 F:     drivers/platform/surface/surface_aggregator_cdev.c
+F:     drivers/platform/surface/surface_aggregator_registry.c
 F:     include/linux/surface_acpi_notify.h
 F:     include/linux/surface_aggregator/
 F:     include/uapi/linux/surface_aggregator/
@@ -14859,6 +14931,14 @@ L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
 F:     drivers/iommu/arm/arm-smmu/qcom_iommu.c
 
+QUALCOMM IPC ROUTER (QRTR) DRIVER
+M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+L:     linux-arm-msm@vger.kernel.org
+S:     Maintained
+F:     include/trace/events/qrtr.h
+F:     include/uapi/linux/qrtr.h
+F:     net/qrtr/
+
 QUALCOMM IPCC MAILBOX DRIVER
 M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 L:     linux-arm-msm@vger.kernel.org
@@ -15208,6 +15288,7 @@ F:      fs/reiserfs/
 REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+M:     Mathieu Poirier <mathieu.poirier@linaro.org>
 L:     linux-remoteproc@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rproc-next
@@ -15221,6 +15302,7 @@ F:      include/linux/remoteproc/
 REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+M:     Mathieu Poirier <mathieu.poirier@linaro.org>
 L:     linux-remoteproc@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rpmsg-next
index cc77fd45ca64e107673251929b454047573b4057..70bfa5067c873c49325d1b10e2b335eadace3f44 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Frozen Wasteland
 
 # *DOCUMENTATION*
@@ -813,6 +813,10 @@ KBUILD_CFLAGS      += -ftrivial-auto-var-init=zero
 KBUILD_CFLAGS  += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
 endif
 
+# While VLAs have been removed, GCC produces unreachable stack probes
+# for the randomize_kstack_offset feature. Disable it for all compilers.
+KBUILD_CFLAGS  += $(call cc-option, -fno-stack-clash-protection)
+
 DEBUG_CFLAGS   :=
 
 # Workaround for GCC versions < 5.0
index ecfd3520b676b014ee9d1dbddd181b4fd37e73bb..6b11c825fc3688c334e41b1fabd87e75f0fd4020 100644 (file)
@@ -1054,6 +1054,29 @@ config VMAP_STACK
          backing virtual mappings with real shadow memory, and KASAN_VMALLOC
          must be enabled.
 
+config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+       def_bool n
+       help
+         An arch should select this symbol if it can support kernel stack
+         offset randomization with calls to add_random_kstack_offset()
+         during syscall entry and choose_random_kstack_offset() during
+         syscall exit. Careful removal of -fstack-protector-strong and
+         -fstack-protector should also be applied to the entry code and
+         closely examined, as the artificial stack bump looks like an array
+         to the compiler, so it will attempt to add canary checks regardless
+         of the static branch state.
+
+config RANDOMIZE_KSTACK_OFFSET_DEFAULT
+       bool "Randomize kernel stack offset on syscall entry"
+       depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+       help
+         The kernel stack offset can be randomized (after pt_regs) by
+         roughly 5 bits of entropy, frustrating memory corruption
+         attacks that depend on stack address determinism or
+         cross-syscall address exposures. This feature is controlled
+         by kernel boot param "randomize_kstack_offset=on/off", and this
+         config chooses the default boot state.
+
 config ARCH_OPTIONAL_KERNEL_RWX
        def_bool n
 
index 60d578e2781ff93b332aad1dfe4998fabe8dfe91..76ad527a084708af0da79f3a83708e4532710125 100644 (file)
@@ -16,7 +16,7 @@
        memory {
                device_type = "memory";
                /* CONFIG_LINUX_RAM_BASE needs to match low mem start */
-               reg = <0x0 0x80000000 0x0 0x20000000    /* 512 MB low mem */
+               reg = <0x0 0x80000000 0x0 0x40000000    /* 1 GB low mem */
                       0x1 0x00000000 0x0 0x40000000>;  /* 1 GB highmem */
        };
 
index a78d8f745a6787604108070286ad1f32db6008c4..fdbe06c98895ea34d306e780021f06b6ffa14214 100644 (file)
@@ -96,7 +96,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
                             sizeof(sf->uc.uc_mcontext.regs.scratch));
        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
 
-       return err;
+       return err ? -EFAULT : 0;
 }
 
 static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
@@ -110,7 +110,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
                                &(sf->uc.uc_mcontext.regs.scratch),
                                sizeof(sf->uc.uc_mcontext.regs.scratch));
        if (err)
-               return err;
+               return -EFAULT;
 
        set_current_blocked(&set);
        regs->bta       = uregs.scratch.bta;
index 74ad4256022e4b80ac9b2937a1e86471197d1909..47bab67f8649b53f0fffddca05ea5d87983ec37d 100644 (file)
@@ -187,25 +187,26 @@ static void init_unwind_table(struct unwind_table *table, const char *name,
                              const void *table_start, unsigned long table_size,
                              const u8 *header_start, unsigned long header_size)
 {
-       const u8 *ptr = header_start + 4;
-       const u8 *end = header_start + header_size;
-
        table->core.pc = (unsigned long)core_start;
        table->core.range = core_size;
        table->init.pc = (unsigned long)init_start;
        table->init.range = init_size;
        table->address = table_start;
        table->size = table_size;
-
-       /* See if the linker provided table looks valid. */
-       if (header_size <= 4
-           || header_start[0] != 1
-           || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
-           || header_start[2] == DW_EH_PE_omit
-           || read_pointer(&ptr, end, header_start[2]) <= 0
-           || header_start[3] == DW_EH_PE_omit)
-               header_start = NULL;
-
+       /* To avoid the pointer addition with NULL pointer.*/
+       if (header_start != NULL) {
+               const u8 *ptr = header_start + 4;
+               const u8 *end = header_start + header_size;
+               /* See if the linker provided table looks valid. */
+               if (header_size <= 4
+               || header_start[0] != 1
+               || (void *)read_pointer(&ptr, end, header_start[1])
+                               != table_start
+               || header_start[2] == DW_EH_PE_omit
+               || read_pointer(&ptr, end, header_start[2]) <= 0
+               || header_start[3] == DW_EH_PE_omit)
+                       header_start = NULL;
+       }
        table->hdrsz = header_size;
        smp_wmb();
        table->header = header_start;
index 5da96f5df48f356b39e40dc8e5b8bf1b84d72e76..2fae14857dcf8695696dd14bc938ac5d6b5b1eeb 100644 (file)
@@ -1293,9 +1293,15 @@ config KASAN_SHADOW_OFFSET
 
 config NR_CPUS
        int "Maximum number of CPUs (2-32)"
-       range 2 32
+       range 2 16 if DEBUG_KMAP_LOCAL
+       range 2 32 if !DEBUG_KMAP_LOCAL
        depends on SMP
        default "4"
+       help
+         The maximum number of CPUs that the kernel can support.
+         Up to 32 CPUs can be supported, or up to 16 if kmap_local()
+         debugging is enabled, which uses half of the per-CPU fixmap
+         slots as guard regions.
 
 config HOTPLUG_CPU
        bool "Support for hot-pluggable CPUs"
index 646a06420c77ea6183fb98950caf153eca8b608f..5bd6a66d2c2b411647c59e21cd684503c2571615 100644 (file)
@@ -32,7 +32,8 @@
                ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
                          MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
-                         MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
+                         MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000
+                         MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
 
                internal-regs {
 
        phy1: ethernet-phy@1 {
                compatible = "ethernet-phy-ieee802.3-c22";
                reg = <1>;
+               marvell,reg-init = <3 18 0 0x4985>;
 
                /* irq is connected to &pcawan pin 7 */
        };
index 462b1dfb038548f22abbb5b5cedbf80df3e5a698..720beec54d61788440c5c6a24c6491f23cdaf4e3 100644 (file)
                        #reset-cells = <1>;
                };
 
-               bsc_intr: interrupt-controller@7ef00040 {
-                       compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
-                       reg = <0x7ef00040 0x30>;
-                       interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupt-controller;
-                       #interrupt-cells = <1>;
-               };
-
                aon_intr: interrupt-controller@7ef00100 {
                        compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
                        reg = <0x7ef00100 0x30>;
                        reg = <0x7ef04500 0x100>, <0x7ef00b00 0x300>;
                        reg-names = "bsc", "auto-i2c";
                        clock-frequency = <97500>;
-                       interrupt-parent = <&bsc_intr>;
-                       interrupts = <0>;
                        status = "disabled";
                };
 
                        reg = <0x7ef09500 0x100>, <0x7ef05b00 0x300>;
                        reg-names = "bsc", "auto-i2c";
                        clock-frequency = <97500>;
-                       interrupt-parent = <&bsc_intr>;
-                       interrupts = <1>;
                        status = "disabled";
                };
        };
index 3bf90d9e33353b366d826b876d0b40101ad01e36..a294a02f2d232e0994d037d536c3a3df32e30def 100644 (file)
                        };
                };
 
-               target-module@34000 {                   /* 0x48034000, ap 7 46.0 */
+               timer3_target: target-module@34000 {    /* 0x48034000, ap 7 46.0 */
                        compatible = "ti,sysc-omap4-timer", "ti,sysc";
                        reg = <0x34000 0x4>,
                              <0x34010 0x4>;
                        };
                };
 
-               target-module@36000 {                   /* 0x48036000, ap 9 4e.0 */
+               timer4_target: target-module@36000 {    /* 0x48036000, ap 9 4e.0 */
                        compatible = "ti,sysc-omap4-timer", "ti,sysc";
                        reg = <0x36000 0x4>,
                              <0x36010 0x4>;
index ce1194744f84025210f5c706e412e10b1267abd0..53d68786a61f243b80edc918ac848b4985ca88c5 100644 (file)
@@ -46,6 +46,7 @@
 
        timer {
                compatible = "arm,armv7-timer";
+               status = "disabled";    /* See ARM architected timer wrap erratum i940 */
                interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
                assigned-clock-parents = <&sys_32k_ck>;
        };
 };
+
+/* Local timers, see ARM architected timer wrap erratum i940 */
+&timer3_target {
+       ti,no-reset-on-init;
+       ti,no-idle;
+       timer@0 {
+               assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>;
+               assigned-clock-parents = <&timer_sys_clk_div>;
+       };
+};
+
+&timer4_target {
+       ti,no-reset-on-init;
+       ti,no-idle;
+       timer@0 {
+               assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>;
+               assigned-clock-parents = <&timer_sys_clk_div>;
+       };
+};
index 7a1e53195785b3e2d1c6a13ec0cb2c432625c40f..f28a96fcf23e8fec54e0b2885dfa281a15141d22 100644 (file)
        pinctrl-0 = <&pinctrl_usdhc2>;
        cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+       vmmc-supply = <&vdd_sd1_reg>;
        status = "disabled";
 };
 
                     &pinctrl_usdhc3_cdwp>;
        cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+       vmmc-supply = <&vdd_sd0_reg>;
        status = "disabled";
 };
index 9dcae1f2bc99f4093ac1266440c13cfbba2de1d4..c5b9da0d7e6cea368d61123b1c7cca14745e2614 100644 (file)
@@ -24,6 +24,9 @@
                i2c0 = &i2c1;
                i2c1 = &i2c2;
                i2c2 = &i2c3;
+               mmc0 = &mmc1;
+               mmc1 = &mmc2;
+               mmc2 = &mmc3;
                serial0 = &uart1;
                serial1 = &uart2;
                serial2 = &uart3;
index 72e4f6481776c7d49a7af009941febbdffbab91c..4a9f9496a8677777fa167aed82ef91eb9067b69a 100644 (file)
                i2c1 = &i2c2;
                i2c2 = &i2c3;
                i2c3 = &i2c4;
+               mmc0 = &mmc1;
+               mmc1 = &mmc2;
+               mmc2 = &mmc3;
+               mmc3 = &mmc4;
+               mmc4 = &mmc5;
                serial0 = &uart1;
                serial1 = &uart2;
                serial2 = &uart3;
index 532868591107b5eb65b98cda509da388b3eebd21..1f1c04d8f4721225870c18fddb5f969bdfa26a59 100644 (file)
                ti,max-div = <2>;
        };
 
-       sha2md5_fck: sha2md5_fck@15c8 {
-               #clock-cells = <0>;
-               compatible = "ti,gate-clock";
-               clocks = <&l3_div_ck>;
-               ti,bit-shift = <1>;
-               reg = <0x15c8>;
-       };
-
        usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
index e025b7c9a3572e45c8bb6187edf4816357f72021..ee821d0ab3648ade7c90f2a5a8e57b4f44ff8a6e 100644 (file)
                i2c2 = &i2c3;
                i2c3 = &i2c4;
                i2c4 = &i2c5;
+               mmc0 = &mmc1;
+               mmc1 = &mmc2;
+               mmc2 = &mmc3;
+               mmc3 = &mmc4;
+               mmc4 = &mmc5;
                serial0 = &uart1;
                serial1 = &uart2;
                serial2 = &uart3;
index 472e56d09eeae6d4f4fd4211960c49750bd4c649..1da3f41359aa86cc808471077dc1ee9a273fd089 100644 (file)
        __hround        \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op, \oldcpsr
        .endm
 
-       .macro          __rev, out, in
-       .if             __LINUX_ARM_ARCH__ < 6
-       lsl             t0, \in, #24
-       and             t1, \in, #0xff00
-       and             t2, \in, #0xff0000
-       orr             \out, t0, \in, lsr #24
-       orr             \out, \out, t1, lsl #8
-       orr             \out, \out, t2, lsr #8
-       .else
-       rev             \out, \in
-       .endif
-       .endm
-
-       .macro          __adrl, out, sym, c
-       .if             __LINUX_ARM_ARCH__ < 7
-       ldr\c           \out, =\sym
-       .else
-       movw\c          \out, #:lower16:\sym
-       movt\c          \out, #:upper16:\sym
-       .endif
-       .endm
-
        .macro          do_crypt, round, ttab, ltab, bsz
        push            {r3-r11, lr}
 
        ldr             r7, [in, #12]
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
-       __rev           r4, r4
-       __rev           r5, r5
-       __rev           r6, r6
-       __rev           r7, r7
+       rev_l           r4, t0
+       rev_l           r5, t0
+       rev_l           r6, t0
+       rev_l           r7, t0
 #endif
 
        eor             r4, r4, r8
        eor             r6, r6, r10
        eor             r7, r7, r11
 
-       __adrl          ttab, \ttab
+       mov_l           ttab, \ttab
        /*
         * Disable interrupts and prefetch the 1024-byte 'ft' or 'it' table into
         * L1 cache, assuming cacheline size >= 32.  This is a hardening measure
 2:     .ifb            \ltab
        add             ttab, ttab, #1
        .else
-       __adrl          ttab, \ltab
+       mov_l           ttab, \ltab
        // Prefetch inverse S-box for final round; see explanation above
        .set            i, 0
        .rept           256 / 64
        \round          r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b, rounds
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
-       __rev           r4, r4
-       __rev           r5, r5
-       __rev           r6, r6
-       __rev           r7, r7
+       rev_l           r4, t0
+       rev_l           r5, t0
+       rev_l           r6, t0
+       rev_l           r7, t0
 #endif
 
        ldr             out, [sp]
index 34d73200e7fa6eff750bb2ae5fe9a9017c24af0f..4b59d027ba4acde262603caecda9d11bd2a952f1 100644 (file)
@@ -85,8 +85,8 @@ static int __init blake2b_neon_mod_init(void)
 
 static void __exit blake2b_neon_mod_exit(void)
 {
-       return crypto_unregister_shashes(blake2b_neon_algs,
-                                        ARRAY_SIZE(blake2b_neon_algs));
+       crypto_unregister_shashes(blake2b_neon_algs,
+                                 ARRAY_SIZE(blake2b_neon_algs));
 }
 
 module_init(blake2b_neon_mod_init);
index bed897e9a181a1ae0c7104fdd7b73e34e4f74975..86345751bbf3a3d8a7e3af72684985833cf6734b 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
        // Registers used to hold message words temporarily.  There aren't
        // enough ARM registers to hold the whole message block, so we have to
 #endif
 .endm
 
+.macro _le32_bswap     a, tmp
+#ifdef __ARMEB__
+       rev_l           \a, \tmp
+#endif
+.endm
+
+.macro _le32_bswap_8x  a, b, c, d, e, f, g, h,  tmp
+       _le32_bswap     \a, \tmp
+       _le32_bswap     \b, \tmp
+       _le32_bswap     \c, \tmp
+       _le32_bswap     \d, \tmp
+       _le32_bswap     \e, \tmp
+       _le32_bswap     \f, \tmp
+       _le32_bswap     \g, \tmp
+       _le32_bswap     \h, \tmp
+.endm
+
 // Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals.
 // (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two
 // columns/diagonals.  s0-s1 are the word offsets to the message words the first
@@ -180,8 +198,10 @@ ENTRY(blake2s_compress_arch)
        tst             r1, #3
        bne             .Lcopy_block_misaligned
        ldmia           r1!, {r2-r9}
+       _le32_bswap_8x  r2, r3, r4, r5, r6, r7, r8, r9,  r14
        stmia           r12!, {r2-r9}
        ldmia           r1!, {r2-r9}
+       _le32_bswap_8x  r2, r3, r4, r5, r6, r7, r8, r9,  r14
        stmia           r12, {r2-r9}
 .Lcopy_block_done:
        str             r1, [sp, #68]           // Update message pointer
@@ -268,6 +288,7 @@ ENTRY(blake2s_compress_arch)
 1:
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        ldr             r3, [r1], #4
+       _le32_bswap     r3, r4
 #else
        ldrb            r3, [r1, #0]
        ldrb            r4, [r1, #1]
index 2985b80a45b57032b91785732df8a139616d6e4c..083fe1ab96d03fdce3de83e58381e557c96dfcf5 100644 (file)
        X14     .req    r12
        X15     .req    r14
 
-.macro __rev           out, in,  t0, t1, t2
-.if __LINUX_ARM_ARCH__ >= 6
-       rev             \out, \in
-.else
-       lsl             \t0, \in, #24
-       and             \t1, \in, #0xff00
-       and             \t2, \in, #0xff0000
-       orr             \out, \t0, \in, lsr #24
-       orr             \out, \out, \t1, lsl #8
-       orr             \out, \out, \t2, lsr #8
-.endif
-.endm
-
-.macro _le32_bswap     x,  t0, t1, t2
+.macro _le32_bswap_4x  a, b, c, d,  tmp
 #ifdef __ARMEB__
-       __rev           \x, \x,  \t0, \t1, \t2
+       rev_l           \a,  \tmp
+       rev_l           \b,  \tmp
+       rev_l           \c,  \tmp
+       rev_l           \d,  \tmp
 #endif
 .endm
 
-.macro _le32_bswap_4x  a, b, c, d,  t0, t1, t2
-       _le32_bswap     \a,  \t0, \t1, \t2
-       _le32_bswap     \b,  \t0, \t1, \t2
-       _le32_bswap     \c,  \t0, \t1, \t2
-       _le32_bswap     \d,  \t0, \t1, \t2
-.endm
-
 .macro __ldrd          a, b, src, offset
 #if __LINUX_ARM_ARCH__ >= 6
        ldrd            \a, \b, [\src, #\offset]
        add             X1, X1, r9
        add             X2, X2, r10
        add             X3, X3, r11
-       _le32_bswap_4x  X0, X1, X2, X3,  r8, r9, r10
+       _le32_bswap_4x  X0, X1, X2, X3,  r8
        ldmia           r12!, {r8-r11}
        eor             X0, X0, r8
        eor             X1, X1, r9
        ldmia           r12!, {X0-X3}
        add             X6, r10, X6, ror #brot
        add             X7, r11, X7, ror #brot
-       _le32_bswap_4x  X4, X5, X6, X7,  r8, r9, r10
+       _le32_bswap_4x  X4, X5, X6, X7,  r8
        eor             X4, X4, X0
        eor             X5, X5, X1
        eor             X6, X6, X2
        add             r1, r1, r9              // x9
        add             r6, r6, r10             // x10
        add             r7, r7, r11             // x11
-       _le32_bswap_4x  r0, r1, r6, r7,  r8, r9, r10
+       _le32_bswap_4x  r0, r1, r6, r7,  r8
        ldmia           r12!, {r8-r11}
        eor             r0, r0, r8              // x8
        eor             r1, r1, r9              // x9
        add             r3, r9, r3, ror #drot   // x13
        add             r4, r10, r4, ror #drot  // x14
        add             r5, r11, r5, ror #drot  // x15
-       _le32_bswap_4x  r2, r3, r4, r5,  r9, r10, r11
+       _le32_bswap_4x  r2, r3, r4, r5,  r9
          ldr           r9, [sp, #72]           // load LEN
        eor             r2, r2, r0              // x12
        eor             r3, r3, r1              // x13
        add             X1, X1, r9
        add             X2, X2, r10
        add             X3, X3, r11
-       _le32_bswap_4x  X0, X1, X2, X3,  r8, r9, r10
+       _le32_bswap_4x  X0, X1, X2, X3,  r8
        stmia           r14!, {X0-X3}
 
        // Save keystream for x4-x7
        add             X5, r9, X5, ror #brot
        add             X6, r10, X6, ror #brot
        add             X7, r11, X7, ror #brot
-       _le32_bswap_4x  X4, X5, X6, X7,  r8, r9, r10
+       _le32_bswap_4x  X4, X5, X6, X7,  r8
          add           r8, sp, #64
        stmia           r14!, {X4-X7}
 
        add             r1, r1, r9              // x9
        add             r6, r6, r10             // x10
        add             r7, r7, r11             // x11
-       _le32_bswap_4x  r0, r1, r6, r7,  r8, r9, r10
+       _le32_bswap_4x  r0, r1, r6, r7,  r8
        stmia           r14!, {r0,r1,r6,r7}
        __ldrd          r8, r9, sp, 144
        __ldrd          r10, r11, sp, 152
        add             r3, r9, r3, ror #drot   // x13
        add             r4, r10, r4, ror #drot  // x14
        add             r5, r11, r5, ror #drot  // x15
-       _le32_bswap_4x  r2, r3, r4, r5,  r9, r10, r11
+       _le32_bswap_4x  r2, r3, r4, r5,  r9
        stmia           r14, {r2-r5}
 
        // Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN
index be18af52e7dc9a5657af1a9a3acb50f56f66d8d3..b697fa5d059a2389b32b58ee1e7abff876da99c7 100644 (file)
@@ -10,8 +10,8 @@
 #include <linux/linkage.h>
 
 .text
-.fpu neon
 .arch armv7-a
+.fpu neon
 .align 4
 
 ENTRY(curve25519_neon)
index 3023c1acfa19475e2972ff47342ba958cb8b7ef3..c31bd8f7c0927e5226984b6573c3d4d76a1d445c 100644 (file)
@@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
 
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
 {
        poly1305_init_arm(&dctx->h, key);
        dctx->s[0] = get_unaligned_le32(key + 16);
index cdbf02d9c1d45ce0ea2c83d7d1ee2d1f2d05565f..95d5b0d625cd35ecf127180534098bfa6c5e3b40 100644 (file)
@@ -3,23 +3,19 @@
 #define _ASM_ARM_PARAVIRT_H
 
 #ifdef CONFIG_PARAVIRT
+#include <linux/static_call_types.h>
+
 struct static_key;
 extern struct static_key paravirt_steal_enabled;
 extern struct static_key paravirt_steal_rq_enabled;
 
-struct pv_time_ops {
-       unsigned long long (*steal_clock)(int cpu);
-};
-
-struct paravirt_patch_template {
-       struct pv_time_ops time;
-};
+u64 dummy_steal_clock(int cpu);
 
-extern struct paravirt_patch_template pv_ops;
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
 
 static inline u64 paravirt_steal_clock(int cpu)
 {
-       return pv_ops.time.steal_clock(cpu);
+       return static_call(pv_steal_clock)(cpu);
 }
 #endif
 
diff --git a/arch/arm/include/asm/xen/swiotlb-xen.h b/arch/arm/include/asm/xen/swiotlb-xen.h
new file mode 100644 (file)
index 0000000..455ade5
--- /dev/null
@@ -0,0 +1 @@
+#include <xen/arm/swiotlb-xen.h>
index 4cfed91fe256e5c94f6d6386f6673918ee593398..7dd9806369fb08217eeb4ef905ec15f134202cbf 100644 (file)
@@ -9,10 +9,15 @@
 #include <linux/export.h>
 #include <linux/jump_label.h>
 #include <linux/types.h>
+#include <linux/static_call.h>
 #include <asm/paravirt.h>
 
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
 
-struct paravirt_patch_template pv_ops;
-EXPORT_SYMBOL_GPL(pv_ops);
+static u64 native_steal_clock(int cpu)
+{
+       return 0;
+}
+
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
index 0b2fd7e2e9b429fd40ecef879a76f8f7cbb3bacb..90b1e9be430e97e779e63a506174de7c26ad4bd5 100644 (file)
 #include <asm/mach-types.h>
 
 /* cats host-specific stuff */
-static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
+static int irqmap_cats[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
 
 static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin)
 {
        return 0;
 }
 
-static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        if (dev->irq >= 255)
                return -1;      /* not a valid interrupt. */
index 6f28aaa9ca79b2c849b892535c192cb3fc290f5f..c3f280d08fa7fde7fa55c35a8b71908744adc95b 100644 (file)
@@ -14,9 +14,9 @@
 #include <asm/mach/pci.h>
 #include <asm/mach-types.h>
 
-static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
+static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
 
-static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        if (dev->vendor == PCI_VENDOR_ID_CONTAQ &&
            dev->device == PCI_DEVICE_ID_CONTAQ_82C693)
index 9473aa0305e5f77883bb65e1f343ce7a8b9452e1..e8304392074b845d8ddfbbab20aac596ca7ba067 100644 (file)
@@ -18,7 +18,7 @@
  * We now use the slot ID instead of the device identifiers to select
  * which interrupt is routed where.
  */
-static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        switch (slot) {
        case 0:  /* host bridge */
index 4391e433a4b2fc3f9bd0843fe1a4a68077818809..9d19aa98a663e490e4853406effbd4aa9c86a326 100644 (file)
 #include <asm/mach/pci.h>
 #include <asm/mach-types.h>
 
-static int irqmap_personal_server[] __initdata = {
+static int irqmap_personal_server[] = {
        IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0,
        IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI
 };
 
-static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot,
-       u8 pin)
+static int personal_server_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        unsigned char line;
 
index cd711bfc591f21755e79838de164ed54f1f9dd83..2c647bdf8d2583de6d1cdc32967b19c394759f0f 100644 (file)
@@ -65,7 +65,7 @@ static void __init keystone_init(void)
 static long long __init keystone_pv_fixup(void)
 {
        long long offset;
-       phys_addr_t mem_start, mem_end;
+       u64 mem_start, mem_end;
 
        mem_start = memblock_start_of_DRAM();
        mem_end = memblock_end_of_DRAM();
@@ -78,7 +78,7 @@ static long long __init keystone_pv_fixup(void)
        if (mem_start < KEYSTONE_HIGH_PHYS_START ||
            mem_end   > KEYSTONE_HIGH_PHYS_END) {
                pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
-                       (u64)mem_start, (u64)mem_end);
+                       mem_start, mem_end);
                return 0;
        }
 
index 14a6c3eb329850dab50f29908cade0e8ab68dca6..f745a65d3bd7a3239eaabc25a6771c5d91323cf5 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/platform_data/gpio-omap.h>
 
 #include <asm/assembler.h>
+#include <asm/irq.h>
 
 #include "ams-delta-fiq.h"
 #include "board-ams-delta.h"
index 7290f033fd2dad44ec16dfc9c11590f1be433ab6..1610c567a6a3a10302d17898ee6cacf7ba0bfb1a 100644 (file)
@@ -33,7 +33,7 @@ static void __init __maybe_unused omap_generic_init(void)
 }
 
 /* Clocks are needed early, see drivers/clocksource for the rest */
-void __init __maybe_unused omap_init_time_of(void)
+static void __init __maybe_unused omap_init_time_of(void)
 {
        omap_clk_init();
        timer_probe();
index f70d561f37f713892dad70df4c0cd3e195240797..0659ab4cb0af315994efd0de966c0db645e8ca12 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/arm-smccc.h>
+#include <linux/cpu_pm.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -20,6 +21,7 @@
 
 #include "common.h"
 #include "omap-secure.h"
+#include "soc.h"
 
 static phys_addr_t omap_secure_memblock_base;
 
@@ -213,3 +215,40 @@ void __init omap_secure_init(void)
 {
        omap_optee_init_check();
 }
+
+/*
+ * Dummy dispatcher call after core OSWR and MPU off. Updates the ROM return
+ * address after MMU has been re-enabled after CPU1 has been woken up again.
+ * Otherwise the ROM code will attempt to use the earlier physical return
+ * address that got set with MMU off when waking up CPU1. Only used on secure
+ * devices.
+ */
+static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v)
+{
+       switch (cmd) {
+       case CPU_CLUSTER_PM_EXIT:
+               omap_secure_dispatcher(OMAP4_PPA_SERVICE_0,
+                                      FLAG_START_CRITICAL,
+                                      0, 0, 0, 0, 0);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block secure_notifier_block = {
+       .notifier_call = cpu_notifier,
+};
+
+static int __init secure_pm_init(void)
+{
+       if (omap_type() == OMAP2_DEVICE_TYPE_GP || !soc_is_omap44xx())
+               return 0;
+
+       cpu_pm_register_notifier(&secure_notifier_block);
+
+       return 0;
+}
+omap_arch_initcall(secure_pm_init);
index 4aaa95706d39f2fd20a388c8d31b465b6dbe84d7..172069f3161642f7a27a4290d19ccc4e4ebb6b2a 100644 (file)
@@ -50,6 +50,7 @@
 #define OMAP5_DRA7_MON_SET_ACR_INDEX   0x107
 
 /* Secure PPA(Primary Protected Application) APIs */
+#define OMAP4_PPA_SERVICE_0            0x21
 #define OMAP4_PPA_L2_POR_INDEX         0x23
 #define OMAP4_PPA_CPU_ACTRL_SMP_INDEX  0x25
 
index 09076ad0576d98d31cd4d16d97b3fe904143f95c..668dc84fd31e0435f041b0f672e4d295eb185e92 100644 (file)
@@ -246,10 +246,10 @@ int __init omap4_cpcap_init(void)
        omap_voltage_register_pmic(voltdm, &omap443x_max8952_mpu);
 
        if (of_machine_is_compatible("motorola,droid-bionic")) {
-               voltdm = voltdm_lookup("mpu");
+               voltdm = voltdm_lookup("core");
                omap_voltage_register_pmic(voltdm, &omap_cpcap_core);
 
-               voltdm = voltdm_lookup("mpu");
+               voltdm = voltdm_lookup("iva");
                omap_voltage_register_pmic(voltdm, &omap_cpcap_iva);
        } else {
                voltdm = voltdm_lookup("core");
index 17b66f0d0deef07dc6f48c60c1baf874664a8360..605925684b0aa4d1015c9d0d3eb4a17d76f28f75 100644 (file)
@@ -188,7 +188,7 @@ static const char * const dra7_sr_instances[] = {
 
 int __init omap_devinit_smartreflex(void)
 {
-       const char * const *sr_inst;
+       const char * const *sr_inst = NULL;
        int i, nr_sr = 0;
 
        if (soc_is_omap44xx()) {
index d1010ec26e9f6d54e430f7e65c7d937e873ed83a..d237bd030238148049442252d4492449ba6f34ce 100644 (file)
@@ -502,16 +502,20 @@ static inline void mainstone_init_keypad(void) {}
 #endif
 
 static int mst_pcmcia0_irqs[11] = {
-       [0 ... 10] = -1,
+       [0 ... 4] = -1,
        [5] = MAINSTONE_S0_CD_IRQ,
+       [6 ... 7] = -1,
        [8] = MAINSTONE_S0_STSCHG_IRQ,
+       [9] = -1,
        [10] = MAINSTONE_S0_IRQ,
 };
 
 static int mst_pcmcia1_irqs[11] = {
-       [0 ... 10] = -1,
+       [0 ... 4] = -1,
        [5] = MAINSTONE_S1_CD_IRQ,
+       [6 ... 7] = -1,
        [8] = MAINSTONE_S1_STSCHG_IRQ,
+       [9] = -1,
        [10] = MAINSTONE_S1_IRQ,
 };
 
index 45c19ca96f7a650142112c9d9ec8c9deac8122d3..ec0d9b094744db67ae99bb80faababa817c3845e 100644 (file)
@@ -147,22 +147,20 @@ static int cplds_probe(struct platform_device *pdev)
        }
 
        irq_set_irq_wake(fpga->irq, 1);
-       fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
-                                              CPLDS_NB_IRQ,
-                                              &cplds_irq_domain_ops, fpga);
+       if (base_irq)
+               fpga->irqdomain = irq_domain_add_legacy(pdev->dev.of_node,
+                                                       CPLDS_NB_IRQ,
+                                                       base_irq, 0,
+                                                       &cplds_irq_domain_ops,
+                                                       fpga);
+       else
+               fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
+                                                       CPLDS_NB_IRQ,
+                                                       &cplds_irq_domain_ops,
+                                                       fpga);
        if (!fpga->irqdomain)
                return -ENODEV;
 
-       if (base_irq) {
-               ret = irq_create_strict_mappings(fpga->irqdomain, base_irq, 0,
-                                                CPLDS_NB_IRQ);
-               if (ret) {
-                       dev_err(&pdev->dev, "couldn't create the irq mapping %d..%d\n",
-                               base_irq, base_irq + CPLDS_NB_IRQ);
-                       return ret;
-               }
-       }
-
        return 0;
 }
 
index a25b660c3017263a3ec403368f35ac7a26498b5e..c1e12aab67b8e38e7a0ef2b5964cf01242453ffc 100644 (file)
@@ -387,8 +387,7 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
        pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
 
        /* Make sure fixmap region does not exceed available allocation. */
-       BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
-                    FIXADDR_END);
+       BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) < FIXADDR_START);
        BUG_ON(idx >= __end_of_fixed_addresses);
 
        /* we only support device mappings until pgprot_kernel has been set */
index 88950e41a3a9e7eb50b043560d89536d2dbde2c7..59d916ccdf25fe7ffbf6724051145bd657a8cd1b 100644 (file)
@@ -235,6 +235,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
        phys_addr_t mem_end;
        phys_addr_t reg_start, reg_end;
        unsigned int mem_max_regions;
+       bool first = true;
        int num;
        u64 i;
 
@@ -263,7 +264,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
 #endif
 
        for_each_mem_range(i, &reg_start, &reg_end) {
-               if (i == 0) {
+               if (first) {
                        phys_addr_t phys_offset = PHYS_OFFSET;
 
                        /*
@@ -275,6 +276,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
                        mem_start = reg_start;
                        mem_end = reg_end;
                        specified_mem_size = mem_end - mem_start;
+                       first = false;
                } else {
                        /*
                         * memblock auto merges contiguous blocks, remove
index 2de019f7503e87d9f5229b5672c901a0dae63958..8359748a19a11a6a206c59facacc0378704c6cc4 100644 (file)
@@ -95,10 +95,11 @@ void __init pmsav8_adjust_lowmem_bounds(void)
 {
        phys_addr_t mem_end;
        phys_addr_t reg_start, reg_end;
+       bool first = true;
        u64 i;
 
        for_each_mem_range(i, &reg_start, &reg_end) {
-               if (i == 0) {
+               if (first) {
                        phys_addr_t phys_offset = PHYS_OFFSET;
 
                        /*
@@ -107,6 +108,7 @@ void __init pmsav8_adjust_lowmem_bounds(void)
                        if (reg_start != phys_offset)
                                panic("First memory bank must be contiguous from PHYS_OFFSET");
                        mem_end = reg_end;
+                       first = false;
                } else {
                        /*
                         * memblock auto merges contiguous blocks, remove
index c4b49b322e8a88238c1b4192ee112c22a7c00ec9..f5f790c6e5f896590b8394c7d6e0fa54c85d3879 100644 (file)
@@ -204,7 +204,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
 static struct undef_hook uprobes_arm_break_hook = {
        .instr_mask     = 0x0fffffff,
        .instr_val      = (UPROBE_SWBP_ARM_INSN & 0x0fffffff),
-       .cpsr_mask      = MODE_MASK,
+       .cpsr_mask      = (PSR_T_BIT | MODE_MASK),
        .cpsr_val       = USR_MODE,
        .fn             = uprobe_trap_handler,
 };
@@ -212,7 +212,7 @@ static struct undef_hook uprobes_arm_break_hook = {
 static struct undef_hook uprobes_arm_ss_hook = {
        .instr_mask     = 0x0fffffff,
        .instr_val      = (UPROBE_SS_ARM_INSN & 0x0fffffff),
-       .cpsr_mask      = MODE_MASK,
+       .cpsr_mask      = (PSR_T_BIT | MODE_MASK),
        .cpsr_val       = USR_MODE,
        .fn             = uprobe_trap_handler,
 };
index 467fa225c3d0ed5c9793107f583c2be74d7a88c9..e1b12b242a3204751c05ea99e620df869eeddc01 100644 (file)
@@ -135,10 +135,22 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
        return;
 }
 
+int xen_swiotlb_detect(void)
+{
+       if (!xen_domain())
+               return 0;
+       if (xen_feature(XENFEAT_direct_mapped))
+               return 1;
+       /* legacy case */
+       if (!xen_feature(XENFEAT_not_direct_mapped) && xen_initial_domain())
+               return 1;
+       return 0;
+}
+
 static int __init xen_mm_init(void)
 {
        struct gnttab_cache_flush cflush;
-       if (!xen_initial_domain())
+       if (!xen_swiotlb_detect())
                return 0;
        xen_swiotlb_init(1, false);
 
index e4e1b65501156e037c7225c742af0b8ef8735983..406b42c05ee10f75166770e50922e5e0a865f61f 100644 (file)
@@ -108,9 +108,9 @@ config ARM64
        select GENERIC_CPU_AUTOPROBE
        select GENERIC_CPU_VULNERABILITIES
        select GENERIC_EARLY_IOREMAP
+       select GENERIC_FIND_FIRST_BIT
        select GENERIC_IDLE_POLL_SETUP
        select GENERIC_IRQ_IPI
-       select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
        select GENERIC_IRQ_SHOW_LEVEL
@@ -138,6 +138,7 @@ config ARM64
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+       select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
        select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
        select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
        select HAVE_ARCH_KFENCE
@@ -146,6 +147,7 @@ config ARM64
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
        select HAVE_ARCH_PFN_VALID
        select HAVE_ARCH_PREL32_RELOCATIONS
+       select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_STACKLEAK
        select HAVE_ARCH_THREAD_STRUCT_WHITELIST
@@ -194,6 +196,7 @@ config ARM64
        select IOMMU_DMA if IOMMU_SUPPORT
        select IRQ_DOMAIN
        select IRQ_FORCED_THREADING
+       select KASAN_VMALLOC if KASAN_GENERIC
        select MODULES_USE_ELF_RELA
        select NEED_DMA_MAP_STATE
        select NEED_SG_DMA_LENGTH
@@ -1068,6 +1071,9 @@ config SYS_SUPPORTS_HUGETLBFS
 config ARCH_HAS_CACHE_LINE_SIZE
        def_bool y
 
+config ARCH_HAS_FILTER_PGPROT
+       def_bool y
+
 config ARCH_ENABLE_SPLIT_PMD_PTLOCK
        def_bool y if PGTABLE_LEVELS > 2
 
@@ -1406,10 +1412,13 @@ config ARM64_PAN
 config AS_HAS_LDAPR
        def_bool $(as-instr,.arch_extension rcpc)
 
+config AS_HAS_LSE_ATOMICS
+       def_bool $(as-instr,.arch_extension lse)
+
 config ARM64_LSE_ATOMICS
        bool
        default ARM64_USE_LSE_ATOMICS
-       depends on $(as-instr,.arch_extension lse)
+       depends on AS_HAS_LSE_ATOMICS
 
 config ARM64_USE_LSE_ATOMICS
        bool "Atomic instructions"
@@ -1426,19 +1435,6 @@ config ARM64_USE_LSE_ATOMICS
          built with binutils >= 2.25 in order for the new instructions
          to be used.
 
-config ARM64_VHE
-       bool "Enable support for Virtualization Host Extensions (VHE)"
-       default y
-       help
-         Virtualization Host Extensions (VHE) allow the kernel to run
-         directly at EL2 (instead of EL1) on processors that support
-         it. This leads to better performance for KVM, as they reduce
-         the cost of the world switch.
-
-         Selecting this option allows the VHE feature to be detected
-         at runtime, and does not affect processors that do not
-         implement this feature.
-
 endmenu
 
 menu "ARMv8.2 architectural features"
@@ -1666,6 +1662,7 @@ config ARM64_MTE
        default y
        depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
        depends on AS_HAS_ARMV8_5
+       depends on AS_HAS_LSE_ATOMICS
        # Required for tag checking in the uaccess routines
        depends on ARM64_PAN
        select ARCH_USES_HIGH_VMA_FLAGS
@@ -1691,10 +1688,23 @@ config ARM64_MTE
 
 endmenu
 
+menu "ARMv8.7 architectural features"
+
+config ARM64_EPAN
+       bool "Enable support for Enhanced Privileged Access Never (EPAN)"
+       default y
+       depends on ARM64_PAN
+       help
+        Enhanced Privileged Access Never (EPAN) allows Privileged
+        Access Never to be used with Execute-only mappings.
+
+        The feature is detected at runtime, and will remain disabled
+        if the cpu does not implement the feature.
+endmenu
+
 config ARM64_SVE
        bool "ARM Scalable Vector Extension support"
        default y
-       depends on !KVM || ARM64_VHE
        help
          The Scalable Vector Extension (SVE) is an extension to the AArch64
          execution state which complements and extends the SIMD functionality
@@ -1723,12 +1733,6 @@ config ARM64_SVE
          booting the kernel.  If unsure and you are not observing these
          symptoms, you should assume that it is safe to say Y.
 
-         CPUs that support SVE are architecturally required to support the
-         Virtualization Host Extensions (VHE), so the kernel makes no
-         provision for supporting SVE alongside KVM without VHE enabled.
-         Thus, you will need to enable CONFIG_ARM64_VHE if you want to support
-         KVM in the same kernel image.
-
 config ARM64_MODULE_PLTS
        bool "Use PLTs to allow module memory to spill over into vmalloc area"
        depends on MODULES
index 437ffe3628a5c281cf3cd8efc66b808f56476085..596a25907432bc0d3223402f5e3b0d7c2d47750d 100644 (file)
@@ -19,3 +19,7 @@
                };
        };
 };
+
+&mmc0 {
+       broken-cd;              /* card detect is broken on *some* boards */
+};
index 3402cec87035bbea4ee851482323d99b4191c94c..df62044ff7a7a9d404eaded80e1ebf7d4d8a1d25 100644 (file)
@@ -34,7 +34,7 @@
        vmmc-supply = <&reg_dcdc1>;
        disable-wp;
        bus-width = <4>;
-       cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+       cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 push-pull switch */
        status = "okay";
 };
 
index 4f4755152fcea822da8318f2926e0ab54de95d48..b5808047d6e4ac4250a486dd0944c204b4a4b2fb 100644 (file)
        vcc-pm-supply = <&reg_aldo1>;
 };
 
-&rtc {
-       clocks = <&ext_osc32k>;
-};
-
 &spdif {
        status = "okay";
 };
index 49e979794094f415bb880883188bcce3b8bd2b34..af8b7d0ef75068040ce7bcc569cdca8e618d30e6 100644 (file)
                        compatible = "allwinner,sun8i-a23-rsb";
                        reg = <0x07083000 0x400>;
                        interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&r_ccu 13>;
+                       clocks = <&r_ccu CLK_R_APB2_RSB>;
                        clock-frequency = <3000000>;
-                       resets = <&r_ccu 7>;
+                       resets = <&r_ccu RST_R_APB2_RSB>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&r_rsb_pins>;
                        status = "disabled";
index 5ccc4cc91959dd5ed325f99720c115138e7967bf..a003e6af33533d7f00bbba92a3f410c48c585398 100644 (file)
 #define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD                                     0x0A4 0x30C 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1                                      0x0A4 0x30C 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0                                 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x31  0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x310 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1                                 0x0AC 0x314 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3                                    0x0AC 0x314 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2                                 0x0B0 0x318 0x000 0x0 0x0
index b94b02080a34443a1ed6bcf553e07ded6a05c722..68e8fa17297416cac43aa885917e1e994c3bd60e 100644 (file)
 #define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD                                     0x0A4 0x30C 0x000 0x0 0x0
 #define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1                                      0x0A4 0x30C 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0                                 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x31  0x000 0x5 0x0
+#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x310 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1                                 0x0AC 0x314 0x000 0x0 0x0
 #define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3                                    0x0AC 0x314 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2                                 0x0B0 0x318 0x000 0x0 0x0
index d239ab70ed995ccb053176381606fd00973af2cc..53e817c5f6f36bd867f7f6e8ae479a1892239ac7 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
 /*
  * Device Tree file for CZ.NIC Turris Mox Board
- * 2019 by Marek Behun <marek.behun@nic.cz>
+ * 2019 by Marek Behún <kabel@kernel.org>
  */
 
 /dts-v1/;
index 64179a372ecf279ed70230b83b8b6c79fe4c5b93..c6f5df2deccfeb87a60456f29199b1da74713419 100644 (file)
                };
 
                CP11X_LABEL(sata0): sata@540000 {
-                       compatible = "marvell,armada-8k-ahci";
+                       compatible = "marvell,armada-8k-ahci",
+                       "generic-ahci";
                        reg = <0x540000 0x30000>;
                        dma-coherent;
+                       interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&CP11X_LABEL(clk) 1 15>,
                                 <&CP11X_LABEL(clk) 1 16>;
                        #address-cells = <1>;
                        status = "disabled";
 
                        sata-port@0 {
-                               interrupts = <109 IRQ_TYPE_LEVEL_HIGH>;
                                reg = <0>;
                        };
 
                        sata-port@1 {
-                               interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
                                reg = <1>;
                        };
                };
index 9f5f5e1fa82e269dbabf6d80308772f7dbf476d7..683743f81849acf2c4cb3e9f6d4d5253f0fa20c3 100644 (file)
@@ -10,7 +10,7 @@
        model = "NVIDIA Jetson TX2 Developer Kit";
        compatible = "nvidia,p2771-0000", "nvidia,tegra186";
 
-       aconnect {
+       aconnect@2900000 {
                status = "okay";
 
                dma-controller@2930000 {
index fd91774477115d91f0977c149f104e9d2634fe5a..fcd71bfc67079079a637abd07d06d587f63077c5 100644 (file)
@@ -23,7 +23,7 @@
        };
 
        chosen {
-               bootargs = "earlycon console=ttyS0,115200n8";
+               bootargs = "earlycon console=ttyS0,115200n8 fw_devlink=on";
                stdout-path = "serial0:115200n8";
        };
 
index 02b26b39cedc1671553a9829c327447fadf213dd..9f75bbf00cf7a53d28b85c919d6935f313d76003 100644 (file)
@@ -73,7 +73,7 @@
                snps,rxpbl = <8>;
        };
 
-       aconnect {
+       aconnect@2900000 {
                compatible = "nvidia,tegra186-aconnect",
                             "nvidia,tegra210-aconnect";
                clocks = <&bpmp TEGRA186_CLK_APE>,
index 2888efc42ba1b72b024a6bbe3b67c7d264197966..d618f197a1d3cd6aa015f5baf4d3c21fb2f62149 100644 (file)
                                reg = <0x1a>;
                                interrupt-parent = <&gpio>;
                                interrupts = <TEGRA194_MAIN_GPIO(S, 5) GPIO_ACTIVE_HIGH>;
+                               clocks = <&bpmp TEGRA194_CLK_AUD_MCLK>;
+                               clock-names = "mclk";
                                realtek,jd-src = <2>;
                                sound-name-prefix = "CVB-RT";
 
                                        rt5658_ep: endpoint {
                                                remote-endpoint = <&i2s1_dap_ep>;
                                                mclk-fs = <256>;
-                                               clocks = <&bpmp TEGRA194_CLK_AUD_MCLK>;
                                        };
                                };
                        };
index 7da3d48cb410608398eba304cc65604544ca0ba8..14da4206ea66a0dde009bee42fa2bebcd57d62f2 100644 (file)
@@ -5,6 +5,10 @@
        model = "NVIDIA Jetson Xavier NX (SD-card)";
        compatible = "nvidia,p3668-0000", "nvidia,tegra194";
 
+       aliases {
+               mmc0 = "/bus@0/mmc@3400000";
+       };
+
        bus@0 {
                /* SDMMC1 (SD/MMC) */
                mmc@3400000 {
index b7808648cfe4d226b26304f5d6b8e5ee904dfcca..f5a9ebbfb12f68dc0369346aa95631591627040d 100644 (file)
@@ -5,6 +5,10 @@
        model = "NVIDIA Jetson Xavier NX (eMMC)";
        compatible = "nvidia,p3668-0001", "nvidia,tegra194";
 
+       aliases {
+               mmc0 = "/bus@0/mmc@3460000";
+       };
+
        bus@0 {
                /* SDMMC4 (eMMC) */
                mmc@3460000 {
index 4f12721c332bcca2b3d2763204da6e35a79b2dff..f16b0aa8a374e7519d90c79258152d2e5c55047e 100644 (file)
@@ -14,7 +14,6 @@
                i2c5 = "/bus@0/i2c@31c0000";
                i2c6 = "/bus@0/i2c@c250000";
                i2c7 = "/bus@0/i2c@31e0000";
-               mmc0 = "/bus@0/mmc@3460000";
                rtc0 = "/bpmp/i2c/pmic@3c";
                rtc1 = "/bus@0/rtc@c2a0000";
                serial0 = &tcu;
index d612f633b7719e6f79e3c66da4a92331d2c5fdb2..8793a9cb9d4b36a9f43cefdffdfc188fb321ef06 100644 (file)
@@ -1156,6 +1156,7 @@ CONFIG_CRYPTO_DEV_HISI_TRNG=m
 CONFIG_CMA_SIZE_MBYTES=32
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_REDUCED=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
index bbdb54702aa7a45d40fd26b7c2db5e2c98ffbca9..b495de22bb38081e20577798ccb59c0e785d47ac 100644 (file)
@@ -359,6 +359,7 @@ ST5(        mov             v4.16b, vctr.16b                )
        ins             vctr.d[0], x8
 
        /* apply carry to N counter blocks for N := x12 */
+       cbz             x12, 2f
        adr             x16, 1f
        sub             x16, x16, x12, lsl #3
        br              x16
@@ -700,7 +701,7 @@ AES_FUNC_START(aes_mac_update)
        cbz             w5, .Lmacout
        encrypt_block   v0, w2, x1, x7, w8
        st1             {v0.16b}, [x4]                  /* return dg */
-       cond_yield      .Lmacout, x7
+       cond_yield      .Lmacout, x7, x8
        b               .Lmacloop4x
 .Lmac1x:
        add             w3, w3, #4
index 683de671741a7f0de196a1e9947d6f6a8510c9c0..9c3d86e397bf3a1fd9302c0a3057913f9a6ed9ca 100644 (file)
@@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
 
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
 {
        poly1305_init_arm64(&dctx->h, key);
        dctx->s[0] = get_unaligned_le32(key + 16);
index 8c02bbc2684ed7f88428bf3f4b96482a64c202a5..889ca0f8972b3736c044a1f80bdccf5ddc41e4df 100644 (file)
@@ -121,7 +121,7 @@ CPU_LE(     rev32           v11.16b, v11.16b        )
        add             dgav.4s, dgav.4s, dg0v.4s
 
        cbz             w2, 2f
-       cond_yield      3f, x5
+       cond_yield      3f, x5, x6
        b               0b
 
        /*
index 6cdea7d560593ba90adc0047958823fd8057da7b..491179922f49808f1144a7a313b3eb647067d17e 100644 (file)
@@ -129,7 +129,7 @@ CPU_LE(     rev32           v19.16b, v19.16b        )
 
        /* handled all input blocks? */
        cbz             w2, 2f
-       cond_yield      3f, x5
+       cond_yield      3f, x5, x6
        b               0b
 
        /*
index 6f5208414fe3fdb03c33482a26eda8d0cddc09e4..9c77313f5a60885231e9374b8853a0a3252d3504 100644 (file)
@@ -184,11 +184,11 @@ SYM_FUNC_START(sha3_ce_transform)
        eor      v0.16b,  v0.16b, v31.16b
 
        cbnz    w8, 3b
-       cond_yield 3f, x8
+       cond_yield 4f, x8, x9
        cbnz    w2, 0b
 
        /* save state */
-3:     st1     { v0.1d- v3.1d}, [x0], #32
+4:     st1     { v0.1d- v3.1d}, [x0], #32
        st1     { v4.1d- v7.1d}, [x0], #32
        st1     { v8.1d-v11.1d}, [x0], #32
        st1     {v12.1d-v15.1d}, [x0], #32
index d6e7f6c95fa6f26f20471426ca04bc7498178c5b..b6a3a36e15f58cf98c7829bc2ad746349d23a74e 100644 (file)
@@ -195,7 +195,7 @@ CPU_LE(     rev64           v19.16b, v19.16b        )
        add             v10.2d, v10.2d, v2.2d
        add             v11.2d, v11.2d, v3.2d
 
-       cond_yield      3f, x4
+       cond_yield      3f, x4, x5
        /* handled all input blocks? */
        cbnz            w2, 0b
 
index 5df500dcc627a61a81667ff94cdee9c3c90abd73..8a078fc662ac5df176785caa5f04746b55f50063 100644 (file)
@@ -97,9 +97,9 @@
        .popsection
        .subsection 1
 663:   \insn2
-664:   .previous
-       .org    . - (664b-663b) + (662b-661b)
+664:   .org    . - (664b-663b) + (662b-661b)
        .org    . - (662b-661b) + (664b-663b)
+       .previous
        .endif
 .endm
 
  */
 .macro alternative_endif
 664:
+       .org    . - (664b-663b) + (662b-661b)
+       .org    . - (662b-661b) + (664b-663b)
        .if .Lasm_alt_mode==0
        .previous
        .endif
-       .org    . - (664b-663b) + (662b-661b)
-       .org    . - (662b-661b) + (664b-663b)
 .endm
 
 /*
index 880b9054d75c9b1e92432cc680d7d111801dc70e..934b9be582d21deb1f7d7d98d036918642e80180 100644 (file)
@@ -173,7 +173,7 @@ static inline void gic_pmr_mask_irqs(void)
 
 static inline void gic_arch_enable_irqs(void)
 {
-       asm volatile ("msr daifclr, #2" : : : "memory");
+       asm volatile ("msr daifclr, #3" : : : "memory");
 }
 
 #endif /* __ASSEMBLY__ */
index 9f0ec21d6327f49b8e06549ebfb2e218733a4638..88d20f04c64a5863c8d4c04c46a7790751cfaf95 100644 (file)
@@ -165,25 +165,6 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
        isb();
 }
 
-/*
- * Ensure that reads of the counter are treated the same as memory reads
- * for the purposes of ordering by subsequent memory barriers.
- *
- * This insanity brought to you by speculative system register reads,
- * out-of-order memory accesses, sequence locks and Thomas Gleixner.
- *
- * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
- */
-#define arch_counter_enforce_ordering(val) do {                                \
-       u64 tmp, _val = (val);                                          \
-                                                                       \
-       asm volatile(                                                   \
-       "       eor     %0, %1, %1\n"                                   \
-       "       add     %0, sp, %0\n"                                   \
-       "       ldr     xzr, [%0]"                                      \
-       : "=r" (tmp) : "r" (_val));                                     \
-} while (0)
-
 static __always_inline u64 __arch_counter_get_cntpct_stable(void)
 {
        u64 cnt;
@@ -224,8 +205,6 @@ static __always_inline u64 __arch_counter_get_cntvct(void)
        return cnt;
 }
 
-#undef arch_counter_enforce_ordering
-
 static inline int arch_timer_arch_init(void)
 {
        return 0;
index 52dead2a8640d2ef4acf30a0059e2d2c3413e906..8ca2dc0661ee298a503d6f794ec8bfff95932da2 100644 (file)
  * so use the base value of ldp as thread.keys_user and offset as
  * thread.keys_user.ap*.
  */
-       .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
+       .macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
        mov     \tmp1, #THREAD_KEYS_USER
        add     \tmp1, \tsk, \tmp1
-alternative_if_not ARM64_HAS_ADDRESS_AUTH
-       b       .Laddr_auth_skip_\@
-alternative_else_nop_endif
        ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
        msr_s   SYS_APIAKEYLO_EL1, \tmp2
        msr_s   SYS_APIAKEYHI_EL1, \tmp3
-       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIB]
-       msr_s   SYS_APIBKEYLO_EL1, \tmp2
-       msr_s   SYS_APIBKEYHI_EL1, \tmp3
-       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDA]
-       msr_s   SYS_APDAKEYLO_EL1, \tmp2
-       msr_s   SYS_APDAKEYHI_EL1, \tmp3
-       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDB]
-       msr_s   SYS_APDBKEYLO_EL1, \tmp2
-       msr_s   SYS_APDBKEYHI_EL1, \tmp3
-.Laddr_auth_skip_\@:
-alternative_if ARM64_HAS_GENERIC_AUTH
-       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APGA]
-       msr_s   SYS_APGAKEYLO_EL1, \tmp2
-       msr_s   SYS_APGAKEYHI_EL1, \tmp3
-alternative_else_nop_endif
        .endm
 
        .macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
index ca31594d3d6c36cb61809fc016fcff747d7526ec..ab569b0b45fc17381cbeda7f5e16f69e57051f09 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm-generic/export.h>
 
 #include <asm/asm-offsets.h>
+#include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/cputype.h>
 #include <asm/debug-monitors.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 
+       /*
+        * Provide a wxN alias for each wN register so what we can paste a xN
+        * reference after a 'w' to obtain the 32-bit version.
+        */
+       .irp    n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+       wx\n    .req    w\n
+       .endr
+
        .macro save_and_disable_daif, flags
        mrs     \flags, daif
        msr     daifset, #0xf
@@ -40,9 +49,9 @@
        msr     daif, \flags
        .endm
 
-       /* IRQ is the lowest priority flag, unconditionally unmask the rest. */
-       .macro enable_da_f
-       msr     daifclr, #(8 | 4 | 1)
+       /* IRQ/FIQ are the lowest priority flags, unconditionally unmask the rest. */
+       .macro enable_da
+       msr     daifclr, #(8 | 4)
        .endm
 
 /*
@@ -50,7 +59,7 @@
  */
        .macro  save_and_disable_irq, flags
        mrs     \flags, daif
-       msr     daifset, #2
+       msr     daifset, #3
        .endm
 
        .macro  restore_irq, flags
@@ -692,90 +701,33 @@ USER(\label, ic   ivau, \tmp2)                    // invalidate I line PoU
        isb
 .endm
 
-/*
- * Check whether to yield to another runnable task from kernel mode NEON code
- * (which runs with preemption disabled).
- *
- * if_will_cond_yield_neon
- *        // pre-yield patchup code
- * do_cond_yield_neon
- *        // post-yield patchup code
- * endif_yield_neon    <label>
- *
- * where <label> is optional, and marks the point where execution will resume
- * after a yield has been performed. If omitted, execution resumes right after
- * the endif_yield_neon invocation. Note that the entire sequence, including
- * the provided patchup code, will be omitted from the image if
- * CONFIG_PREEMPTION is not defined.
- *
- * As a convenience, in the case where no patchup code is required, the above
- * sequence may be abbreviated to
- *
- * cond_yield_neon <label>
- *
- * Note that the patchup code does not support assembler directives that change
- * the output section, any use of such directives is undefined.
- *
- * The yield itself consists of the following:
- * - Check whether the preempt count is exactly 1 and a reschedule is also
- *   needed. If so, calling of preempt_enable() in kernel_neon_end() will
- *   trigger a reschedule. If it is not the case, yielding is pointless.
- * - Disable and re-enable kernel mode NEON, and branch to the yield fixup
- *   code.
- *
- * This macro sequence may clobber all CPU state that is not guaranteed by the
- * AAPCS to be preserved across an ordinary function call.
- */
-
-       .macro          cond_yield_neon, lbl
-       if_will_cond_yield_neon
-       do_cond_yield_neon
-       endif_yield_neon        \lbl
-       .endm
-
-       .macro          if_will_cond_yield_neon
-#ifdef CONFIG_PREEMPTION
-       get_current_task        x0
-       ldr             x0, [x0, #TSK_TI_PREEMPT]
-       sub             x0, x0, #PREEMPT_DISABLE_OFFSET
-       cbz             x0, .Lyield_\@
-       /* fall through to endif_yield_neon */
-       .subsection     1
-.Lyield_\@ :
-#else
-       .section        ".discard.cond_yield_neon", "ax"
-#endif
-       .endm
-
-       .macro          do_cond_yield_neon
-       bl              kernel_neon_end
-       bl              kernel_neon_begin
-       .endm
-
-       .macro          endif_yield_neon, lbl
-       .ifnb           \lbl
-       b               \lbl
-       .else
-       b               .Lyield_out_\@
-       .endif
-       .previous
-.Lyield_out_\@ :
-       .endm
-
        /*
-        * Check whether preempt-disabled code should yield as soon as it
-        * is able. This is the case if re-enabling preemption a single
-        * time results in a preempt count of zero, and the TIF_NEED_RESCHED
-        * flag is set. (Note that the latter is stored negated in the
-        * top word of the thread_info::preempt_count field)
+        * Check whether preempt/bh-disabled asm code should yield as soon as
+        * it is able. This is the case if we are currently running in task
+        * context, and either a softirq is pending, or the TIF_NEED_RESCHED
+        * flag is set and re-enabling preemption a single time would result in
+        * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
+        * stored negated in the top word of the thread_info::preempt_count
+        * field)
         */
-       .macro          cond_yield, lbl:req, tmp:req
-#ifdef CONFIG_PREEMPTION
+       .macro          cond_yield, lbl:req, tmp:req, tmp2:req
        get_current_task \tmp
        ldr             \tmp, [\tmp, #TSK_TI_PREEMPT]
+       /*
+        * If we are serving a softirq, there is no point in yielding: the
+        * softirq will not be preempted no matter what we do, so we should
+        * run to completion as quickly as we can.
+        */
+       tbnz            \tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
+#ifdef CONFIG_PREEMPTION
        sub             \tmp, \tmp, #PREEMPT_DISABLE_OFFSET
        cbz             \tmp, \lbl
 #endif
+       adr_l           \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
+       this_cpu_offset \tmp2
+       ldr             w\tmp, [\tmp, \tmp2]
+       cbnz            w\tmp, \lbl     // yield on pending softirq in task context
+.Lnoyield_\@:
        .endm
 
 /*
index c3009b0e52393bfe49f770dae6ee749bca00669c..065ba482daf063c87fdde854ad26c4eabb24ccda 100644 (file)
 #define psb_csync()    asm volatile("hint #17" : : : "memory")
 #define csdb()         asm volatile("hint #20" : : : "memory")
 
-#define spec_bar()     asm volatile(ALTERNATIVE("dsb nsh\nisb\n",              \
-                                                SB_BARRIER_INSN"nop\n",        \
-                                                ARM64_HAS_SB))
-
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 #define pmr_sync()                                             \
        do {                                                    \
@@ -70,6 +66,25 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
        return mask;
 }
 
+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ */
+#define arch_counter_enforce_ordering(val) do {                                \
+       u64 tmp, _val = (val);                                          \
+                                                                       \
+       asm volatile(                                                   \
+       "       eor     %0, %1, %1\n"                                   \
+       "       add     %0, sp, %0\n"                                   \
+       "       ldr     xzr, [%0]"                                      \
+       : "=r" (tmp) : "r" (_val));                                     \
+} while (0)
+
 #define __smp_mb()     dmb(ish)
 #define __smp_rmb()    dmb(ishld)
 #define __smp_wmb()    dmb(ishst)
index c40f2490cd7b7ddca4e814681e6a87a63a3813a0..b0c5eda0498f2ab64df582f46bfcbb868410fbd4 100644 (file)
@@ -67,7 +67,8 @@
 #define ARM64_HAS_LDAPR                                59
 #define ARM64_KVM_PROTECTED_MODE               60
 #define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP     61
+#define ARM64_HAS_EPAN                         62
 
-#define ARM64_NCAPS                            62
+#define ARM64_NCAPS                            63
 
 #endif /* __ASM_CPUCAPS_H */
index 61177bac49fa7fe7831c381598708a3946a868fe..338840c00e8ed72f339aa8fd513c2b2729ce14d7 100644 (file)
@@ -63,6 +63,23 @@ struct arm64_ftr_bits {
        s64             safe_val; /* safe value for FTR_EXACT features */
 };
 
+/*
+ * Describe the early feature override to the core override code:
+ *
+ * @val                        Values that are to be merged into the final
+ *                     sanitised value of the register. Only the bitfields
+ *                     set to 1 in @mask are valid
+ * @mask               Mask of the features that are overridden by @val
+ *
+ * A @mask field set to full-1 indicates that the corresponding field
+ * in @val is a valid override.
+ *
+ * A @mask field set to full-0 with the corresponding @val field set
+ * to full-0 denotes that this field has no override
+ *
+ * A @mask field set to full-0 with the corresponding @val field set
+ * to full-1 denotes thath this field has an invalid override.
+ */
 struct arm64_ftr_override {
        u64             val;
        u64             mask;
index 1c26d7baa67f8d1e12a009bd0e22230f511abeea..5eb7af9c455792a55c8c93a70ac8a8b861e288d8 100644 (file)
@@ -13,8 +13,8 @@
 #include <asm/ptrace.h>
 
 #define DAIF_PROCCTX           0
-#define DAIF_PROCCTX_NOIRQ     PSR_I_BIT
-#define DAIF_ERRCTX            (PSR_I_BIT | PSR_A_BIT)
+#define DAIF_PROCCTX_NOIRQ     (PSR_I_BIT | PSR_F_BIT)
+#define DAIF_ERRCTX            (PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
 #define DAIF_MASK              (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
 
 
@@ -47,7 +47,7 @@ static inline unsigned long local_daif_save_flags(void)
        if (system_uses_irq_prio_masking()) {
                /* If IRQs are masked with PMR, reflect it in the flags */
                if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON)
-                       flags |= PSR_I_BIT;
+                       flags |= PSR_I_BIT | PSR_F_BIT;
        }
 
        return flags;
@@ -69,7 +69,7 @@ static inline void local_daif_restore(unsigned long flags)
        bool irq_disabled = flags & PSR_I_BIT;
 
        WARN_ON(system_has_prio_mask_debugging() &&
-               !(read_sysreg(daif) & PSR_I_BIT));
+               (read_sysreg(daif) & (PSR_I_BIT | PSR_F_BIT)) != (PSR_I_BIT | PSR_F_BIT));
 
        if (!irq_disabled) {
                trace_hardirqs_on();
@@ -86,7 +86,7 @@ static inline void local_daif_restore(unsigned long flags)
                         * If interrupts are disabled but we can take
                         * asynchronous errors, we can take NMIs
                         */
-                       flags &= ~PSR_I_BIT;
+                       flags &= ~(PSR_I_BIT | PSR_F_BIT);
                        pmr = GIC_PRIO_IRQOFF;
                } else {
                        pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;
index d77d358f9395ccf714543a8a4b27ac8cc844bdbf..b3f2d3bb0938cd94c69ba6379200f9a0f6d8f589 100644 (file)
 .Lskip_sve_\@:
 .endm
 
+/* Disable any fine grained traps */
+.macro __init_el2_fgt
+       mrs     x1, id_aa64mmfr0_el1
+       ubfx    x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
+       cbz     x1, .Lskip_fgt_\@
+
+       msr_s   SYS_HDFGRTR_EL2, xzr
+       msr_s   SYS_HDFGWTR_EL2, xzr
+       msr_s   SYS_HFGRTR_EL2, xzr
+       msr_s   SYS_HFGWTR_EL2, xzr
+       msr_s   SYS_HFGITR_EL2, xzr
+
+       mrs     x1, id_aa64pfr0_el1             // AMU traps UNDEF without AMU
+       ubfx    x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4
+       cbz     x1, .Lskip_fgt_\@
+
+       msr_s   SYS_HAFGRTR_EL2, xzr
+.Lskip_fgt_\@:
+.endm
+
 .macro __init_el2_nvhe_prepare_eret
        mov     x0, #INIT_PSTATE_EL1
        msr     spsr_el2, x0
        __init_el2_nvhe_idregs
        __init_el2_nvhe_cptr
        __init_el2_nvhe_sve
+       __init_el2_fgt
        __init_el2_nvhe_prepare_eret
 .endm
 
index bec5f14b622ae2fd50e471803418ca6c91c624f0..ebb263b2d3b19ebec07f68e4fdadb2e684106076 100644 (file)
@@ -73,6 +73,7 @@ extern void sve_flush_live(void);
 extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
                                       unsigned long vq_minus_1);
 extern unsigned int sve_get_vl(void);
+extern void sve_set_vq(unsigned long vq_minus_1);
 
 struct arm64_cpu_capabilities;
 extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
index b2b0c6405eb082fea7c99e607e7f2c9d9cca4ee7..fac08e18bcd512eec9bbafe7fa79a38ccea1d48c 100644 (file)
@@ -8,6 +8,10 @@
 
 struct pt_regs;
 
+int set_handle_irq(void (*handle_irq)(struct pt_regs *));
+#define set_handle_irq set_handle_irq
+int set_handle_fiq(void (*handle_fiq)(struct pt_regs *));
+
 static inline int nr_legacy_irqs(void)
 {
        return 0;
index a1020285ea75045c08006b4e068181fac4338a51..81bbfa3a035bd2152f5239a37607136091ffb156 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef __ASM_IRQ_WORK_H
 #define __ASM_IRQ_WORK_H
 
+extern void arch_irq_work_raise(void);
+
 static inline bool arch_irq_work_has_interrupt(void)
 {
        return true;
index ff328e5bbb75721e93d1df93bf43cf55aae6f492..b57b9b1e434485d5c869f953ce969da70aeca6dd 100644 (file)
 
 /*
  * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
- * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
+ * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
  * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
- * Masking SError masks irq, but not debug exceptions. Masking irqs has no
- * side effects for other flags. Keeping to this order makes it easier for
- * entry.S to know which exceptions should be unmasked.
- *
- * FIQ is never expected, but we mask it when we disable debug exceptions, and
- * unmask it at all other times.
+ * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are
+ * always masked and unmasked together, and have no side effects for other
+ * flags. Keeping to this order makes it easier for entry.S to know which
+ * exceptions should be unmasked.
  */
 
 /*
@@ -35,7 +33,7 @@ static inline void arch_local_irq_enable(void)
        }
 
        asm volatile(ALTERNATIVE(
-               "msr    daifclr, #2             // arch_local_irq_enable",
+               "msr    daifclr, #3             // arch_local_irq_enable",
                __msr_s(SYS_ICC_PMR_EL1, "%0"),
                ARM64_HAS_IRQ_PRIO_MASKING)
                :
@@ -54,7 +52,7 @@ static inline void arch_local_irq_disable(void)
        }
 
        asm volatile(ALTERNATIVE(
-               "msr    daifset, #2             // arch_local_irq_disable",
+               "msr    daifset, #3             // arch_local_irq_disable",
                __msr_s(SYS_ICC_PMR_EL1, "%0"),
                ARM64_HAS_IRQ_PRIO_MASKING)
                :
index 0aabc3be9a75953fe02b48d535198cc9eed7f6bf..b943879c1c242feeebf0adfce6fd13e509738edc 100644 (file)
@@ -243,8 +243,10 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 }
 
 #ifdef CONFIG_KASAN_HW_TAGS
-#define arch_enable_tagging()                  mte_enable_kernel()
+#define arch_enable_tagging_sync()             mte_enable_kernel_sync()
+#define arch_enable_tagging_async()            mte_enable_kernel_async()
 #define arch_set_tagging_report_once(state)    mte_set_report_once(state)
+#define arch_force_async_tag_fault()           mte_check_tfsr_exit()
 #define arch_init_tags(max_tag)                        mte_init_tags(max_tag)
 #define arch_get_random_tag()                  mte_get_random_tag()
 #define arch_get_mem_tag(addr)                 mte_get_mem_tag(addr)
index 7ab500e2ad1762c054e4f36f0840a78696ba5985..4acf8bf41cade68d70e48a0a05f95b7fe25f2aea 100644 (file)
@@ -77,7 +77,8 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
        } while (curr != end);
 }
 
-void mte_enable_kernel(void);
+void mte_enable_kernel_sync(void);
+void mte_enable_kernel_async(void);
 void mte_init_tags(u64 max_tag);
 
 void mte_set_report_once(bool state);
@@ -104,7 +105,11 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
 {
 }
 
-static inline void mte_enable_kernel(void)
+static inline void mte_enable_kernel_sync(void)
+{
+}
+
+static inline void mte_enable_kernel_async(void)
 {
 }
 
index 9b557a457f244c7f8f79c129a916fc0fb1c12563..bc88a1ced0d7e588070437f1ac34ba9fa01aec02 100644 (file)
@@ -39,16 +39,15 @@ void mte_free_tag_storage(char *storage);
 
 void mte_sync_tags(pte_t *ptep, pte_t pte);
 void mte_copy_page_tags(void *kto, const void *kfrom);
-void flush_mte_state(void);
+void mte_thread_init_user(void);
 void mte_thread_switch(struct task_struct *next);
+void mte_suspend_enter(void);
 void mte_suspend_exit(void);
 long set_mte_ctrl(struct task_struct *task, unsigned long arg);
 long get_mte_ctrl(struct task_struct *task);
 int mte_ptrace_copy_tags(struct task_struct *child, long request,
                         unsigned long addr, unsigned long data);
 
-void mte_assign_mem_tag_range(void *addr, size_t size);
-
 #else /* CONFIG_ARM64_MTE */
 
 /* unused if !CONFIG_ARM64_MTE, silence the compiler */
@@ -60,12 +59,15 @@ static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
 static inline void mte_copy_page_tags(void *kto, const void *kfrom)
 {
 }
-static inline void flush_mte_state(void)
+static inline void mte_thread_init_user(void)
 {
 }
 static inline void mte_thread_switch(struct task_struct *next)
 {
 }
+static inline void mte_suspend_enter(void)
+{
+}
 static inline void mte_suspend_exit(void)
 {
 }
@@ -84,11 +86,51 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
        return -EIO;
 }
 
-static inline void mte_assign_mem_tag_range(void *addr, size_t size)
+#endif /* CONFIG_ARM64_MTE */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+/* Whether the MTE asynchronous mode is enabled. */
+DECLARE_STATIC_KEY_FALSE(mte_async_mode);
+
+static inline bool system_uses_mte_async_mode(void)
 {
+       return static_branch_unlikely(&mte_async_mode);
 }
 
-#endif /* CONFIG_ARM64_MTE */
+void mte_check_tfsr_el1(void);
+
+static inline void mte_check_tfsr_entry(void)
+{
+       mte_check_tfsr_el1();
+}
+
+static inline void mte_check_tfsr_exit(void)
+{
+       /*
+        * The asynchronous faults are sync'ed automatically with
+        * TFSR_EL1 on kernel entry but for exit an explicit dsb()
+        * is required.
+        */
+       dsb(nsh);
+       isb();
+
+       mte_check_tfsr_el1();
+}
+#else
+static inline bool system_uses_mte_async_mode(void)
+{
+       return false;
+}
+static inline void mte_check_tfsr_el1(void)
+{
+}
+static inline void mte_check_tfsr_entry(void)
+{
+}
+static inline void mte_check_tfsr_exit(void)
+{
+}
+#endif /* CONFIG_KASAN_HW_TAGS */
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_MTE_H  */
index cf3a0fd7c1a7e00ef146b4a19aa4abf711a33b6c..9aa193e0e8f28d9309bc18013230e714152f4f93 100644 (file)
@@ -3,23 +3,19 @@
 #define _ASM_ARM64_PARAVIRT_H
 
 #ifdef CONFIG_PARAVIRT
+#include <linux/static_call_types.h>
+
 struct static_key;
 extern struct static_key paravirt_steal_enabled;
 extern struct static_key paravirt_steal_rq_enabled;
 
-struct pv_time_ops {
-       unsigned long long (*steal_clock)(int cpu);
-};
-
-struct paravirt_patch_template {
-       struct pv_time_ops time;
-};
+u64 dummy_steal_clock(int cpu);
 
-extern struct paravirt_patch_template pv_ops;
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
 
 static inline u64 paravirt_steal_clock(int cpu)
 {
-       return pv_ops.time.steal_clock(cpu);
+       return static_call(pv_steal_clock)(cpu);
 }
 
 int __init pv_time_init(void);
index 3c6a7f5988b127e54b14d422489feda06d03f847..31fbab3d6f99234c301cde17d856dec02f12b17c 100644 (file)
@@ -27,7 +27,10 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
 {
-       __pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
+       pudval_t pudval = PUD_TYPE_TABLE;
+
+       pudval |= (mm == &init_mm) ? PUD_TABLE_UXN : PUD_TABLE_PXN;
+       __pud_populate(pudp, __pa(pmdp), pudval);
 }
 #else
 static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
@@ -45,7 +48,10 @@ static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
 
 static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4dp, pud_t *pudp)
 {
-       __p4d_populate(p4dp, __pa(pudp), PUD_TYPE_TABLE);
+       p4dval_t p4dval = P4D_TYPE_TABLE;
+
+       p4dval |= (mm == &init_mm) ? P4D_TABLE_UXN : P4D_TABLE_PXN;
+       __p4d_populate(p4dp, __pa(pudp), p4dval);
 }
 #else
 static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
@@ -70,16 +76,15 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 {
-       /*
-        * The pmd must be loaded with the physical address of the PTE table
-        */
-       __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE);
+       VM_BUG_ON(mm != &init_mm);
+       __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
 }
 
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
 {
-       __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE);
+       VM_BUG_ON(mm == &init_mm);
+       __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE | PMD_TABLE_PXN);
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
index 42442a0ae2ab9b3d37afd3f2cc3ae920c37f76db..b82575a33f8b62f638c0f728bfc0681dfa6b0cad 100644 (file)
 /*
  * Hardware page table definitions.
  *
+ * Level 0 descriptor (P4D).
+ */
+#define P4D_TYPE_TABLE         (_AT(p4dval_t, 3) << 0)
+#define P4D_TABLE_BIT          (_AT(p4dval_t, 1) << 1)
+#define P4D_TYPE_MASK          (_AT(p4dval_t, 3) << 0)
+#define P4D_TYPE_SECT          (_AT(p4dval_t, 1) << 0)
+#define P4D_SECT_RDONLY                (_AT(p4dval_t, 1) << 7)         /* AP[2] */
+#define P4D_TABLE_PXN          (_AT(p4dval_t, 1) << 59)
+#define P4D_TABLE_UXN          (_AT(p4dval_t, 1) << 60)
+
+/*
  * Level 1 descriptor (PUD).
  */
 #define PUD_TYPE_TABLE         (_AT(pudval_t, 3) << 0)
 #define PUD_TYPE_MASK          (_AT(pudval_t, 3) << 0)
 #define PUD_TYPE_SECT          (_AT(pudval_t, 1) << 0)
 #define PUD_SECT_RDONLY                (_AT(pudval_t, 1) << 7)         /* AP[2] */
+#define PUD_TABLE_PXN          (_AT(pudval_t, 1) << 59)
+#define PUD_TABLE_UXN          (_AT(pudval_t, 1) << 60)
 
 /*
  * Level 2 descriptor (PMD).
 #define PMD_SECT_CONT          (_AT(pmdval_t, 1) << 52)
 #define PMD_SECT_PXN           (_AT(pmdval_t, 1) << 53)
 #define PMD_SECT_UXN           (_AT(pmdval_t, 1) << 54)
+#define PMD_TABLE_PXN          (_AT(pmdval_t, 1) << 59)
+#define PMD_TABLE_UXN          (_AT(pmdval_t, 1) << 60)
 
 /*
  * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
index 9a65fb5281100a7e08b29ee380c91b84cd887ca6..fab2f573f7a4fa239710b2e20857403da6929436 100644 (file)
@@ -87,12 +87,13 @@ extern bool arm64_use_ng_mappings;
 #define PAGE_SHARED_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_READONLY          __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC     __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY          __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
 
 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_READONLY
 #define __P011  PAGE_READONLY
-#define __P100  PAGE_READONLY_EXEC
+#define __P100  PAGE_EXECONLY
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_READONLY_EXEC
 #define __P111  PAGE_READONLY_EXEC
@@ -101,7 +102,7 @@ extern bool arm64_use_ng_mappings;
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_READONLY_EXEC
+#define __S100  PAGE_EXECONLY
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
index 47027796c2f934a0207013044dcaee2f9e58078c..0b10204e72fcbca2586d0106a610833e9f6b194c 100644 (file)
@@ -113,11 +113,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_dirty(pte)         (pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
 #define pte_valid(pte)         (!!(pte_val(pte) & PTE_VALID))
+/*
+ * Execute-only user mappings do not have the PTE_USER bit set. All valid
+ * kernel mappings have the PTE_UXN bit set.
+ */
 #define pte_valid_not_user(pte) \
-       ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
-#define pte_valid_user(pte) \
-       ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
-
+       ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
 /*
  * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
  * so that we don't erroneously return false for pages that have been
@@ -130,12 +131,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
        (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
 
 /*
- * p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
- * set.
+ * p??_access_permitted() is true for valid user mappings (PTE_USER
+ * bit set, subject to the write permission check). For execute-only
+ * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
+ * not set) must return false. PROT_NONE mappings do not have the
+ * PTE_VALID bit set.
  */
 #define pte_access_permitted(pte, write) \
-       (pte_valid_user(pte) && (!(write) || pte_write(pte)))
+       (((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
 #define pmd_access_permitted(pmd, write) \
        (pte_access_permitted(pmd_pte(pmd), (write)))
 #define pud_access_permitted(pud, write) \
@@ -995,6 +998,18 @@ static inline bool arch_wants_old_prefaulted_pte(void)
 }
 #define arch_wants_old_prefaulted_pte  arch_wants_old_prefaulted_pte
 
+static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+{
+       if (cpus_have_const_cap(ARM64_HAS_EPAN))
+               return prot;
+
+       if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
+               return prot;
+
+       return PAGE_READONLY_EXEC;
+}
+
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
index b112a11e93024f4cff245b294fdf48637cd01f6d..d50416be99be0bf0d903f0a985b476546bc83d63 100644 (file)
@@ -3,6 +3,7 @@
 #define __ASM_POINTER_AUTH_H
 
 #include <linux/bitops.h>
+#include <linux/prctl.h>
 #include <linux/random.h>
 
 #include <asm/cpufeature.h>
@@ -34,6 +35,25 @@ struct ptrauth_keys_kernel {
        struct ptrauth_key apia;
 };
 
+#define __ptrauth_key_install_nosync(k, v)                     \
+do {                                                           \
+       struct ptrauth_key __pki_v = (v);                       \
+       write_sysreg_s(__pki_v.lo, SYS_ ## k ## KEYLO_EL1);     \
+       write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1);     \
+} while (0)
+
+static inline void ptrauth_keys_install_user(struct ptrauth_keys_user *keys)
+{
+       if (system_supports_address_auth()) {
+               __ptrauth_key_install_nosync(APIB, keys->apib);
+               __ptrauth_key_install_nosync(APDA, keys->apda);
+               __ptrauth_key_install_nosync(APDB, keys->apdb);
+       }
+
+       if (system_supports_generic_auth())
+               __ptrauth_key_install_nosync(APGA, keys->apga);
+}
+
 static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
 {
        if (system_supports_address_auth()) {
@@ -45,14 +65,9 @@ static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
 
        if (system_supports_generic_auth())
                get_random_bytes(&keys->apga, sizeof(keys->apga));
-}
 
-#define __ptrauth_key_install_nosync(k, v)                     \
-do {                                                           \
-       struct ptrauth_key __pki_v = (v);                       \
-       write_sysreg_s(__pki_v.lo, SYS_ ## k ## KEYLO_EL1);     \
-       write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1);     \
-} while (0)
+       ptrauth_keys_install_user(keys);
+}
 
 static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
 {
@@ -71,6 +86,10 @@ static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kerne
 
 extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
 
+extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
+                                   unsigned long enabled);
+extern int ptrauth_get_enabled_keys(struct task_struct *tsk);
+
 static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
 {
        return ptrauth_clear_pac(ptr);
@@ -85,8 +104,23 @@ static __always_inline void ptrauth_enable(void)
        isb();
 }
 
-#define ptrauth_thread_init_user(tsk)                                  \
-       ptrauth_keys_init_user(&(tsk)->thread.keys_user)
+#define ptrauth_suspend_exit()                                                 \
+       ptrauth_keys_install_user(&current->thread.keys_user)
+
+#define ptrauth_thread_init_user()                                             \
+       do {                                                                   \
+               ptrauth_keys_init_user(&current->thread.keys_user);            \
+                                                                              \
+               /* enable all keys */                                          \
+               if (system_supports_address_auth())                            \
+                       set_task_sctlr_el1(current->thread.sctlr_user |        \
+                                          SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |   \
+                                          SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);   \
+       } while (0)
+
+#define ptrauth_thread_switch_user(tsk)                                        \
+       ptrauth_keys_install_user(&(tsk)->thread.keys_user)
+
 #define ptrauth_thread_init_kernel(tsk)                                        \
        ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
 #define ptrauth_thread_switch_kernel(tsk)                              \
@@ -95,10 +129,17 @@ static __always_inline void ptrauth_enable(void)
 #else /* CONFIG_ARM64_PTR_AUTH */
 #define ptrauth_enable()
 #define ptrauth_prctl_reset_keys(tsk, arg)     (-EINVAL)
+#define ptrauth_set_enabled_keys(tsk, keys, enabled)   (-EINVAL)
+#define ptrauth_get_enabled_keys(tsk)  (-EINVAL)
 #define ptrauth_strip_insn_pac(lr)     (lr)
-#define ptrauth_thread_init_user(tsk)
+#define ptrauth_suspend_exit()
+#define ptrauth_thread_init_user()
 #define ptrauth_thread_init_kernel(tsk)
+#define ptrauth_thread_switch_user(tsk)
 #define ptrauth_thread_switch_kernel(tsk)
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
+#define PR_PAC_ENABLED_KEYS_MASK                                               \
+       (PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
+
 #endif /* __ASM_POINTER_AUTH_H */
index efc10e9041a035850bf343757c7fcf6092f8a462..9df3feeee8909b5a0794a8d15c6331d855e072f4 100644 (file)
@@ -151,11 +151,15 @@ struct thread_struct {
        struct ptrauth_keys_kernel      keys_kernel;
 #endif
 #ifdef CONFIG_ARM64_MTE
-       u64                     sctlr_tcf0;
        u64                     gcr_user_excl;
 #endif
+       u64                     sctlr_user;
 };
 
+#define SCTLR_USER_MASK                                                        \
+       (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB |   \
+        SCTLR_EL1_TCF0_MASK)
+
 static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
 {
@@ -247,6 +251,8 @@ extern void release_thread(struct task_struct *);
 
 unsigned long get_wchan(struct task_struct *p);
 
+void set_task_sctlr_el1(u64 sctlr);
+
 /* Thread switching */
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                         struct task_struct *next);
@@ -303,6 +309,11 @@ extern void __init minsigstksz_setup(void);
 /* PR_PAC_RESET_KEYS prctl */
 #define PAC_RESET_KEYS(tsk, arg)       ptrauth_prctl_reset_keys(tsk, arg)
 
+/* PR_PAC_{SET,GET}_ENABLED_KEYS prctl */
+#define PAC_SET_ENABLED_KEYS(tsk, keys, enabled)                               \
+       ptrauth_set_enabled_keys(tsk, keys, enabled)
+#define PAC_GET_ENABLED_KEYS(tsk) ptrauth_get_enabled_keys(tsk)
+
 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
 /* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
 long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
index 38187f74e0896525cba3783870dc19fb61bde388..b1dd7ecff7efb4a3b1edcf954bb3368080d9bb48 100644 (file)
@@ -23,7 +23,7 @@ struct ptdump_info {
 
 void ptdump_walk(struct seq_file *s, struct ptdump_info *info);
 #ifdef CONFIG_PTDUMP_DEBUGFS
-void ptdump_debugfs_register(struct ptdump_info *info, const char *name);
+void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name);
 #else
 static inline void ptdump_debugfs_register(struct ptdump_info *info,
                                           const char *name) { }
index bcb01ca15325182bcdda653d5990bfd363ec811d..0e357757c0ccaa997cd061df3847a3f18c319d0c 100644 (file)
@@ -145,6 +145,7 @@ bool cpus_are_stuck_in_kernel(void);
 
 extern void crash_smp_send_stop(void);
 extern bool smp_crash_stop_failed(void);
+extern void panic_smp_self_stop(void);
 
 #endif /* ifndef __ASSEMBLY__ */
 
index eb29b1fe8255eb23d0cf2e9e588dd19ddee5061b..4b33ca6206793361fad25b23258c05ccfe06b024 100644 (file)
@@ -148,27 +148,7 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
        return false;
 }
 
-static inline void start_backtrace(struct stackframe *frame,
-                                  unsigned long fp, unsigned long pc)
-{
-       frame->fp = fp;
-       frame->pc = pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       frame->graph = 0;
-#endif
-
-       /*
-        * Prime the first unwind.
-        *
-        * In unwind_frame() we'll check that the FP points to a valid stack,
-        * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
-        * treated as a transition to whichever stack that happens to be. The
-        * prev_fp value won't be used, but we set it to 0 such that it is
-        * definitely not an accessible stack address.
-        */
-       bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
-       frame->prev_fp = 0;
-       frame->prev_type = STACK_TYPE_UNKNOWN;
-}
+void start_backtrace(struct stackframe *frame, unsigned long fp,
+                    unsigned long pc);
 
 #endif /* __ASM_STACKTRACE_H */
index d4a5fca984c3e228594e19a80fa68bc37e43e16c..b31ac5ccc8ab78a6cf4d5b50085f221322557214 100644 (file)
 #define SYS_PMCCFILTR_EL0              sys_reg(3, 3, 14, 15, 7)
 
 #define SYS_SCTLR_EL2                  sys_reg(3, 4, 1, 0, 0)
+#define SYS_HFGRTR_EL2                 sys_reg(3, 4, 1, 1, 4)
+#define SYS_HFGWTR_EL2                 sys_reg(3, 4, 1, 1, 5)
+#define SYS_HFGITR_EL2                 sys_reg(3, 4, 1, 1, 6)
 #define SYS_ZCR_EL2                    sys_reg(3, 4, 1, 2, 0)
 #define SYS_TRFCR_EL2                  sys_reg(3, 4, 1, 2, 1)
 #define SYS_DACR32_EL2                 sys_reg(3, 4, 3, 0, 0)
+#define SYS_HDFGRTR_EL2                        sys_reg(3, 4, 3, 1, 4)
+#define SYS_HDFGWTR_EL2                        sys_reg(3, 4, 3, 1, 5)
+#define SYS_HAFGRTR_EL2                        sys_reg(3, 4, 3, 1, 6)
 #define SYS_SPSR_EL2                   sys_reg(3, 4, 4, 0, 0)
 #define SYS_ELR_EL2                    sys_reg(3, 4, 4, 0, 1)
 #define SYS_IFSR32_EL2                 sys_reg(3, 4, 5, 0, 1)
 #define SCTLR_ELx_TCF_ASYNC    (UL(0x2) << SCTLR_ELx_TCF_SHIFT)
 #define SCTLR_ELx_TCF_MASK     (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
 
+#define SCTLR_ELx_ENIA_SHIFT   31
+
 #define SCTLR_ELx_ITFSB        (BIT(37))
-#define SCTLR_ELx_ENIA (BIT(31))
+#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT))
 #define SCTLR_ELx_ENIB (BIT(30))
 #define SCTLR_ELx_ENDA (BIT(27))
 #define SCTLR_ELx_EE    (BIT(25))
        (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
 
 /* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_EPAN         (BIT(57))
 #define SCTLR_EL1_ATA0         (BIT(42))
 
 #define SCTLR_EL1_TCF0_SHIFT   38
         SCTLR_EL1_SED  | SCTLR_ELx_I    | SCTLR_EL1_DZE  | SCTLR_EL1_UCT   | \
         SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
         SCTLR_ELx_ATA  | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI   | \
-        SCTLR_EL1_RES1)
+        SCTLR_EL1_EPAN | SCTLR_EL1_RES1)
 
 /* MAIR_ELx memory attributes (used by Linux) */
 #define MAIR_ATTR_DEVICE_nGnRnE                UL(0x00)
index 0deb88467111cccb0ec87cae36644834a516041e..b5f08621fa296168968506e0ae545c4c2dfd9406 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm/mmu.h>
+#include <asm/mte.h>
 #include <asm/ptrace.h>
 #include <asm/memory.h>
 #include <asm/extable.h>
@@ -188,6 +189,23 @@ static inline void __uaccess_enable_tco(void)
                                 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
 }
 
+/*
+ * These functions disable tag checking only if in MTE async mode
+ * since the sync mode generates exceptions synchronously and the
+ * nofault or load_unaligned_zeropad can handle them.
+ */
+static inline void __uaccess_disable_tco_async(void)
+{
+       if (system_uses_mte_async_mode())
+                __uaccess_disable_tco();
+}
+
+static inline void __uaccess_enable_tco_async(void)
+{
+       if (system_uses_mte_async_mode())
+               __uaccess_enable_tco();
+}
+
 static inline void uaccess_disable_privileged(void)
 {
        __uaccess_disable_tco();
@@ -307,8 +325,10 @@ do {                                                                       \
 do {                                                                   \
        int __gkn_err = 0;                                              \
                                                                        \
+       __uaccess_enable_tco_async();                                   \
        __raw_get_mem("ldr", *((type *)(dst)),                          \
                      (__force type *)(src), __gkn_err);                \
+       __uaccess_disable_tco_async();                                  \
        if (unlikely(__gkn_err))                                        \
                goto err_label;                                         \
 } while (0)
@@ -380,8 +400,10 @@ do {                                                                       \
 do {                                                                   \
        int __pkn_err = 0;                                              \
                                                                        \
+       __uaccess_enable_tco_async();                                   \
        __raw_put_mem("str", *((type *)(src)),                          \
                      (__force type *)(dst), __pkn_err);                \
+       __uaccess_disable_tco_async();                                  \
        if (unlikely(__pkn_err))                                        \
                goto err_label;                                         \
 } while(0)
index 631ab12816335ff0bed9eda6b9eb4ad56c657b72..4b4c0dac0e1494298e821e859c2a32737989109a 100644 (file)
@@ -83,11 +83,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
         */
        isb();
        asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
-       /*
-        * This isb() is required to prevent that the seq lock is
-        * speculated.#
-        */
-       isb();
+       arch_counter_enforce_ordering(res);
 
        return res;
 }
index 3333950b590939bebb65118ea308db17613f09e8..2dcb104c645b634134044828a0fd048e11e752c3 100644 (file)
@@ -53,7 +53,9 @@ static inline unsigned long find_zero(unsigned long mask)
  */
 static inline unsigned long load_unaligned_zeropad(const void *addr)
 {
-       unsigned long ret, offset;
+       unsigned long ret, tmp;
+
+       __uaccess_enable_tco_async();
 
        /* Load word from unaligned pointer addr */
        asm(
@@ -61,9 +63,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
        "2:\n"
        "       .pushsection .fixup,\"ax\"\n"
        "       .align 2\n"
-       "3:     and     %1, %2, #0x7\n"
-       "       bic     %2, %2, #0x7\n"
-       "       ldr     %0, [%2]\n"
+       "3:     bic     %1, %2, #0x7\n"
+       "       ldr     %0, [%1]\n"
+       "       and     %1, %2, #0x7\n"
        "       lsl     %1, %1, #0x3\n"
 #ifndef __AARCH64EB__
        "       lsr     %0, %0, %1\n"
@@ -73,9 +75,11 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
        "       b       2b\n"
        "       .popsection\n"
        _ASM_EXTABLE(1b, 3b)
-       : "=&r" (ret), "=&r" (offset)
+       : "=&r" (ret), "=&r" (tmp)
        : "r" (addr), "Q" (*(unsigned long *)addr));
 
+       __uaccess_disable_tco_async();
+
        return ret;
 }
 
diff --git a/arch/arm64/include/asm/xen/swiotlb-xen.h b/arch/arm64/include/asm/xen/swiotlb-xen.h
new file mode 100644 (file)
index 0000000..455ade5
--- /dev/null
@@ -0,0 +1 @@
+#include <xen/arm/swiotlb-xen.h>
index ed65576ce710b0277cd52393640f75a78f4fa6a0..6cc97730790e7167a69995aba58c94c40ec40e61 100644 (file)
@@ -9,6 +9,11 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_insn.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
 
+# Remove stack protector to avoid triggering unneeded stack canary
+# checks due to randomize_kstack_offset.
+CFLAGS_REMOVE_syscall.o         = -fstack-protector -fstack-protector-strong
+CFLAGS_syscall.o       += -fno-stack-protector
+
 # Object file lists.
 obj-y                  := debug-monitors.o entry.o irq.o fpsimd.o              \
                           entry-common.o entry-fpsimd.o process.o ptrace.o     \
index a36e2fc330d430a714f901595ee9efc02bedab99..e797603e55b70ae46d44827aae0891240366cede 100644 (file)
@@ -43,6 +43,7 @@ int main(void)
 #endif
   BLANK();
   DEFINE(THREAD_CPU_CONTEXT,   offsetof(struct task_struct, thread.cpu_context));
+  DEFINE(THREAD_SCTLR_USER,    offsetof(struct task_struct, thread.sctlr_user));
 #ifdef CONFIG_ARM64_PTR_AUTH
   DEFINE(THREAD_KEYS_USER,     offsetof(struct task_struct, thread.keys_user));
   DEFINE(THREAD_KEYS_KERNEL,   offsetof(struct task_struct, thread.keys_kernel));
@@ -95,6 +96,8 @@ int main(void)
   DEFINE(DMA_FROM_DEVICE,      DMA_FROM_DEVICE);
   BLANK();
   DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
+  DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
+  DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
   BLANK();
   DEFINE(CPU_BOOT_STACK,       offsetof(struct secondary_data, stack));
   DEFINE(CPU_BOOT_TASK,                offsetof(struct secondary_data, task));
@@ -147,10 +150,6 @@ int main(void)
 #endif
 #ifdef CONFIG_ARM64_PTR_AUTH
   DEFINE(PTRAUTH_USER_KEY_APIA,                offsetof(struct ptrauth_keys_user, apia));
-  DEFINE(PTRAUTH_USER_KEY_APIB,                offsetof(struct ptrauth_keys_user, apib));
-  DEFINE(PTRAUTH_USER_KEY_APDA,                offsetof(struct ptrauth_keys_user, apda));
-  DEFINE(PTRAUTH_USER_KEY_APDB,                offsetof(struct ptrauth_keys_user, apdb));
-  DEFINE(PTRAUTH_USER_KEY_APGA,                offsetof(struct ptrauth_keys_user, apga));
   DEFINE(PTRAUTH_KERNEL_KEY_APIA,      offsetof(struct ptrauth_keys_kernel, apia));
   BLANK();
 #endif
index e5281e1c8f1d291020790d0319922ee8c1b3a4ea..76c60b3cda5348ab3cd33b2d67dd57573745978e 100644 (file)
@@ -808,6 +808,12 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
                                        reg->name,
                                        ftrp->shift + ftrp->width - 1,
                                        ftrp->shift, str, tmp);
+               } else if ((ftr_mask & reg->override->val) == ftr_mask) {
+                       reg->override->val &= ~ftr_mask;
+                       pr_warn("%s[%d:%d]: impossible override, ignored\n",
+                               reg->name,
+                               ftrp->shift + ftrp->width - 1,
+                               ftrp->shift);
                }
 
                val = arm64_ftr_set_value(ftrp, val, ftr_new);
@@ -1619,7 +1625,6 @@ int get_cpu_with_amu_feat(void)
 }
 #endif
 
-#ifdef CONFIG_ARM64_VHE
 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
 {
        return is_kernel_in_hyp_mode();
@@ -1638,7 +1643,6 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
        if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
                write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
 }
-#endif
 
 static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
 {
@@ -1823,6 +1827,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .cpu_enable = cpu_enable_pan,
        },
 #endif /* CONFIG_ARM64_PAN */
+#ifdef CONFIG_ARM64_EPAN
+       {
+               .desc = "Enhanced Privileged Access Never",
+               .capability = ARM64_HAS_EPAN,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64MMFR1_EL1,
+               .field_pos = ID_AA64MMFR1_PAN_SHIFT,
+               .sign = FTR_UNSIGNED,
+               .min_field_value = 3,
+       },
+#endif /* CONFIG_ARM64_EPAN */
 #ifdef CONFIG_ARM64_LSE_ATOMICS
        {
                .desc = "LSE atomic instructions",
@@ -1841,7 +1857,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
                .matches = has_no_hw_prefetch,
        },
-#ifdef CONFIG_ARM64_VHE
        {
                .desc = "Virtualization Host Extensions",
                .capability = ARM64_HAS_VIRT_HOST_EXTN,
@@ -1849,7 +1864,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .matches = runs_at_el2,
                .cpu_enable = cpu_copy_el2regs,
        },
-#endif /* CONFIG_ARM64_VHE */
        {
                .desc = "32-bit EL0 Support",
                .capability = ARM64_HAS_32BIT_EL0,
index 9d35884504737173efb02d6c127946ba42cfe932..a1ec351c36bd8aa6085eebd98c9f5cf2084a92d0 100644 (file)
@@ -37,6 +37,8 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
        lockdep_hardirqs_off(CALLER_ADDR0);
        rcu_irq_enter_check_tick();
        trace_hardirqs_off_finish();
+
+       mte_check_tfsr_entry();
 }
 
 /*
@@ -47,6 +49,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
 {
        lockdep_assert_irqs_disabled();
 
+       mte_check_tfsr_exit();
+
        if (interrupts_enabled(regs)) {
                if (regs->exit_rcu) {
                        trace_hardirqs_on_prepare();
@@ -293,6 +297,8 @@ asmlinkage void noinstr enter_from_user_mode(void)
 
 asmlinkage void noinstr exit_to_user_mode(void)
 {
+       mte_check_tfsr_exit();
+
        trace_hardirqs_on_prepare();
        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        user_enter_irqoff();
index 2ca395c25448f45f29e55ca67992d3ebd491aeb4..3ecec60d3295832b8c7a5917767c638e3866d92e 100644 (file)
@@ -48,6 +48,11 @@ SYM_FUNC_START(sve_get_vl)
        ret
 SYM_FUNC_END(sve_get_vl)
 
+SYM_FUNC_START(sve_set_vq)
+       sve_load_vq x0, x1, x2
+       ret
+SYM_FUNC_END(sve_set_vq)
+
 /*
  * Load SVE state from FPSIMD state.
  *
index a31a0a713c85df8fabd31182ba5d973d229903bc..4ac5455c0eaddef032d49ae06fd8b470e354f27d 100644 (file)
@@ -148,16 +148,18 @@ alternative_cb_end
        .endm
 
        /* Check for MTE asynchronous tag check faults */
-       .macro check_mte_async_tcf, flgs, tmp
+       .macro check_mte_async_tcf, tmp, ti_flags
 #ifdef CONFIG_ARM64_MTE
+       .arch_extension lse
 alternative_if_not ARM64_MTE
        b       1f
 alternative_else_nop_endif
        mrs_s   \tmp, SYS_TFSRE0_EL1
        tbz     \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
        /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
-       orr     \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
-       str     \flgs, [tsk, #TSK_TI_FLAGS]
+       mov     \tmp, #_TIF_MTE_ASYNC_FAULT
+       add     \ti_flags, tsk, #TSK_TI_FLAGS
+       stset   \tmp, [\ti_flags]
        msr_s   SYS_TFSRE0_EL1, xzr
 1:
 #endif
@@ -244,10 +246,32 @@ alternative_else_nop_endif
        disable_step_tsk x19, x20
 
        /* Check for asynchronous tag check faults in user space */
-       check_mte_async_tcf x19, x22
+       check_mte_async_tcf x22, x23
        apply_ssbd 1, x22, x23
 
-       ptrauth_keys_install_kernel tsk, x20, x22, x23
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if ARM64_HAS_ADDRESS_AUTH
+       /*
+        * Enable IA for in-kernel PAC if the task had it disabled. Although
+        * this could be implemented with an unconditional MRS which would avoid
+        * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
+        *
+        * Install the kernel IA key only if IA was enabled in the task. If IA
+        * was disabled on kernel exit then we would have left the kernel IA
+        * installed so there is no need to install it again.
+        */
+       ldr     x0, [tsk, THREAD_SCTLR_USER]
+       tbz     x0, SCTLR_ELx_ENIA_SHIFT, 1f
+       __ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
+       b       2f
+1:
+       mrs     x0, sctlr_el1
+       orr     x0, x0, SCTLR_ELx_ENIA
+       msr     sctlr_el1, x0
+2:
+       isb
+alternative_else_nop_endif
+#endif
 
        mte_set_kernel_gcr x22, x23
 
@@ -351,8 +375,26 @@ alternative_else_nop_endif
 3:
        scs_save tsk, x0
 
-       /* No kernel C function calls after this as user keys are set. */
-       ptrauth_keys_install_user tsk, x0, x1, x2
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if ARM64_HAS_ADDRESS_AUTH
+       /*
+        * IA was enabled for in-kernel PAC. Disable it now if needed, or
+        * alternatively install the user's IA. All other per-task keys and
+        * SCTLR bits were updated on task switch.
+        *
+        * No kernel C function calls after this.
+        */
+       ldr     x0, [tsk, THREAD_SCTLR_USER]
+       tbz     x0, SCTLR_ELx_ENIA_SHIFT, 1f
+       __ptrauth_keys_install_user tsk, x0, x1, x2
+       b       2f
+1:
+       mrs     x0, sctlr_el1
+       bic     x0, x0, SCTLR_ELx_ENIA
+       msr     sctlr_el1, x0
+2:
+alternative_else_nop_endif
+#endif
 
        mte_set_user_gcr tsk, x0, x1
 
@@ -491,28 +533,14 @@ tsk       .req    x28             // current thread_info
 /*
  * Interrupt handling.
  */
-       .macro  irq_handler
-       ldr_l   x1, handle_arch_irq
+       .macro  irq_handler, handler:req
+       ldr_l   x1, \handler
        mov     x0, sp
        irq_stack_entry
        blr     x1
        irq_stack_exit
        .endm
 
-#ifdef CONFIG_ARM64_PSEUDO_NMI
-       /*
-        * Set res to 0 if irqs were unmasked in interrupted context.
-        * Otherwise set res to non-0 value.
-        */
-       .macro  test_irqs_unmasked res:req, pmr:req
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
-       sub     \res, \pmr, #GIC_PRIO_IRQON
-alternative_else
-       mov     \res, xzr
-alternative_endif
-       .endm
-#endif
-
        .macro  gic_prio_kentry_setup, tmp:req
 #ifdef CONFIG_ARM64_PSEUDO_NMI
        alternative_if ARM64_HAS_IRQ_PRIO_MASKING
@@ -531,6 +559,47 @@ alternative_endif
 #endif
        .endm
 
+       .macro el1_interrupt_handler, handler:req
+       gic_prio_irq_setup pmr=x20, tmp=x1
+       enable_da
+
+       mov     x0, sp
+       bl      enter_el1_irq_or_nmi
+
+       irq_handler     \handler
+
+#ifdef CONFIG_PREEMPTION
+       ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+       /*
+        * DA were cleared at start of handling, and IF are cleared by
+        * the GIC irqchip driver using gic_arch_enable_irqs() for
+        * normal IRQs. If anything is set, it means we come back from
+        * an NMI instead of a normal IRQ, so skip preemption
+        */
+       mrs     x0, daif
+       orr     x24, x24, x0
+alternative_else_nop_endif
+       cbnz    x24, 1f                         // preempt count != 0 || NMI return path
+       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
+1:
+#endif
+
+       mov     x0, sp
+       bl      exit_el1_irq_or_nmi
+       .endm
+
+       .macro el0_interrupt_handler, handler:req
+       gic_prio_irq_setup pmr=x20, tmp=x0
+       user_exit_irqoff
+       enable_da
+
+       tbz     x22, #55, 1f
+       bl      do_el0_irq_bp_hardening
+1:
+       irq_handler     \handler
+       .endm
+
        .text
 
 /*
@@ -547,18 +616,18 @@ SYM_CODE_START(vectors)
 
        kernel_ventry   1, sync                         // Synchronous EL1h
        kernel_ventry   1, irq                          // IRQ EL1h
-       kernel_ventry   1, fiq_invalid                  // FIQ EL1h
+       kernel_ventry   1, fiq                          // FIQ EL1h
        kernel_ventry   1, error                        // Error EL1h
 
        kernel_ventry   0, sync                         // Synchronous 64-bit EL0
        kernel_ventry   0, irq                          // IRQ 64-bit EL0
-       kernel_ventry   0, fiq_invalid                  // FIQ 64-bit EL0
+       kernel_ventry   0, fiq                          // FIQ 64-bit EL0
        kernel_ventry   0, error                        // Error 64-bit EL0
 
 #ifdef CONFIG_COMPAT
        kernel_ventry   0, sync_compat, 32              // Synchronous 32-bit EL0
        kernel_ventry   0, irq_compat, 32               // IRQ 32-bit EL0
-       kernel_ventry   0, fiq_invalid_compat, 32       // FIQ 32-bit EL0
+       kernel_ventry   0, fiq_compat, 32               // FIQ 32-bit EL0
        kernel_ventry   0, error_compat, 32             // Error 32-bit EL0
 #else
        kernel_ventry   0, sync_invalid, 32             // Synchronous 32-bit EL0
@@ -624,12 +693,6 @@ SYM_CODE_START_LOCAL(el0_error_invalid)
        inv_entry 0, BAD_ERROR
 SYM_CODE_END(el0_error_invalid)
 
-#ifdef CONFIG_COMPAT
-SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
-       inv_entry 0, BAD_FIQ, 32
-SYM_CODE_END(el0_fiq_invalid_compat)
-#endif
-
 SYM_CODE_START_LOCAL(el1_sync_invalid)
        inv_entry 1, BAD_SYNC
 SYM_CODE_END(el1_sync_invalid)
@@ -660,35 +723,16 @@ SYM_CODE_END(el1_sync)
        .align  6
 SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
        kernel_entry 1
-       gic_prio_irq_setup pmr=x20, tmp=x1
-       enable_da_f
-
-       mov     x0, sp
-       bl      enter_el1_irq_or_nmi
-
-       irq_handler
-
-#ifdef CONFIG_PREEMPTION
-       ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
-       /*
-        * DA_F were cleared at start of handling. If anything is set in DAIF,
-        * we come back from an NMI, so skip preemption
-        */
-       mrs     x0, daif
-       orr     x24, x24, x0
-alternative_else_nop_endif
-       cbnz    x24, 1f                         // preempt count != 0 || NMI return path
-       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
-1:
-#endif
-
-       mov     x0, sp
-       bl      exit_el1_irq_or_nmi
-
+       el1_interrupt_handler handle_arch_irq
        kernel_exit 1
 SYM_CODE_END(el1_irq)
 
+SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
+       kernel_entry 1
+       el1_interrupt_handler handle_arch_fiq
+       kernel_exit 1
+SYM_CODE_END(el1_fiq)
+
 /*
  * EL0 mode handlers.
  */
@@ -715,6 +759,11 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
        b       el0_irq_naked
 SYM_CODE_END(el0_irq_compat)
 
+SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
+       kernel_entry 0, 32
+       b       el0_fiq_naked
+SYM_CODE_END(el0_fiq_compat)
+
 SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
        kernel_entry 0, 32
        b       el0_error_naked
@@ -725,18 +774,17 @@ SYM_CODE_END(el0_error_compat)
 SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
        kernel_entry 0
 el0_irq_naked:
-       gic_prio_irq_setup pmr=x20, tmp=x0
-       user_exit_irqoff
-       enable_da_f
-
-       tbz     x22, #55, 1f
-       bl      do_el0_irq_bp_hardening
-1:
-       irq_handler
-
+       el0_interrupt_handler handle_arch_irq
        b       ret_to_user
 SYM_CODE_END(el0_irq)
 
+SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
+       kernel_entry 0
+el0_fiq_naked:
+       el0_interrupt_handler handle_arch_fiq
+       b       ret_to_user
+SYM_CODE_END(el0_fiq)
+
 SYM_CODE_START_LOCAL(el1_error)
        kernel_entry 1
        mrs     x1, esr_el1
@@ -757,7 +805,7 @@ el0_error_naked:
        mov     x0, sp
        mov     x1, x25
        bl      do_serror
-       enable_da_f
+       enable_da
        b       ret_to_user
 SYM_CODE_END(el0_error)
 
index 062b21f30f9422aa03d6cf0eb8cc56e2ba8032d9..ad3dd34a83cf98db4593c4a406728aecd75bc96e 100644 (file)
@@ -180,7 +180,7 @@ static void __get_cpu_fpsimd_context(void)
  */
 static void get_cpu_fpsimd_context(void)
 {
-       preempt_disable();
+       local_bh_disable();
        __get_cpu_fpsimd_context();
 }
 
@@ -201,7 +201,7 @@ static void __put_cpu_fpsimd_context(void)
 static void put_cpu_fpsimd_context(void)
 {
        __put_cpu_fpsimd_context();
-       preempt_enable();
+       local_bh_enable();
 }
 
 static bool have_cpu_fpsimd_context(void)
@@ -285,7 +285,7 @@ static void task_fpsimd_load(void)
        WARN_ON(!system_supports_fpsimd());
        WARN_ON(!have_cpu_fpsimd_context());
 
-       if (system_supports_sve() && test_thread_flag(TIF_SVE))
+       if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE))
                sve_load_state(sve_pffr(&current->thread),
                               &current->thread.uw.fpsimd_state.fpsr,
                               sve_vq_from_vl(current->thread.sve_vl) - 1);
@@ -307,7 +307,8 @@ static void fpsimd_save(void)
        WARN_ON(!have_cpu_fpsimd_context());
 
        if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
-               if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
+               if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+                   test_thread_flag(TIF_SVE)) {
                        if (WARN_ON(sve_get_vl() != last->sve_vl)) {
                                /*
                                 * Can't save the user regs, so current would
@@ -926,9 +927,8 @@ void fpsimd_release_task(struct task_struct *dead_task)
  * Trapped SVE access
  *
  * Storage is allocated for the full SVE state, the current FPSIMD
- * register contents are migrated across, and TIF_SVE is set so that
- * the SVE access trap will be disabled the next time this task
- * reaches ret_to_user.
+ * register contents are migrated across, and the access trap is
+ * disabled.
  *
  * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
  * would have disabled the SVE access trap for userspace during
@@ -946,15 +946,24 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
 
        get_cpu_fpsimd_context();
 
-       fpsimd_save();
-
-       /* Force ret_to_user to reload the registers: */
-       fpsimd_flush_task_state(current);
-
-       fpsimd_to_sve(current);
        if (test_and_set_thread_flag(TIF_SVE))
                WARN_ON(1); /* SVE access shouldn't have trapped */
 
+       /*
+        * Convert the FPSIMD state to SVE, zeroing all the state that
+        * is not shared with FPSIMD. If (as is likely) the current
+        * state is live in the registers then do this there and
+        * update our metadata for the current task including
+        * disabling the trap, otherwise update our in-memory copy.
+        */
+       if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
+               sve_set_vq(sve_vq_from_vl(current->thread.sve_vl) - 1);
+               sve_flush_live();
+               fpsimd_bind_task_to_cpu();
+       } else {
+               fpsimd_to_sve(current);
+       }
+
        put_cpu_fpsimd_context();
 }
 
@@ -1092,7 +1101,7 @@ void fpsimd_preserve_current_state(void)
 void fpsimd_signal_preserve_current_state(void)
 {
        fpsimd_preserve_current_state();
-       if (system_supports_sve() && test_thread_flag(TIF_SVE))
+       if (test_thread_flag(TIF_SVE))
                sve_to_fpsimd(current);
 }
 
@@ -1181,7 +1190,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
        get_cpu_fpsimd_context();
 
        current->thread.uw.fpsimd_state = *state;
-       if (system_supports_sve() && test_thread_flag(TIF_SVE))
+       if (test_thread_flag(TIF_SVE))
                fpsimd_to_sve(current);
 
        task_fpsimd_load();
index 840bda1869e9ccb0d88b2e6bb0cb63ea1eb1f0c6..96873dfa67fd5214dca48d68a98483b6b1bf9d2c 100644 (file)
@@ -477,14 +477,13 @@ EXPORT_SYMBOL(kimage_vaddr)
  * booted in EL1 or EL2 respectively.
  */
 SYM_FUNC_START(init_kernel_el)
-       mov_q   x0, INIT_SCTLR_EL1_MMU_OFF
-       msr     sctlr_el1, x0
-
        mrs     x0, CurrentEL
        cmp     x0, #CurrentEL_EL2
        b.eq    init_el2
 
 SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
+       mov_q   x0, INIT_SCTLR_EL1_MMU_OFF
+       msr     sctlr_el1, x0
        isb
        mov_q   x0, INIT_PSTATE_EL1
        msr     spsr_el1, x0
@@ -504,9 +503,43 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
        msr     vbar_el2, x0
        isb
 
+       /*
+        * Fruity CPUs seem to have HCR_EL2.E2H set to RES1,
+        * making it impossible to start in nVHE mode. Is that
+        * compliant with the architecture? Absolutely not!
+        */
+       mrs     x0, hcr_el2
+       and     x0, x0, #HCR_E2H
+       cbz     x0, 1f
+
+       /* Switching to VHE requires a sane SCTLR_EL1 as a start */
+       mov_q   x0, INIT_SCTLR_EL1_MMU_OFF
+       msr_s   SYS_SCTLR_EL12, x0
+
+       /*
+        * Force an eret into a helper "function", and let it return
+        * to our original caller... This makes sure that we have
+        * initialised the basic PSTATE state.
+        */
+       mov     x0, #INIT_PSTATE_EL2
+       msr     spsr_el1, x0
+       adr     x0, __cpu_stick_to_vhe
+       msr     elr_el1, x0
+       eret
+
+1:
+       mov_q   x0, INIT_SCTLR_EL1_MMU_OFF
+       msr     sctlr_el1, x0
+
        msr     elr_el2, lr
        mov     w0, #BOOT_CPU_MODE_EL2
        eret
+
+__cpu_stick_to_vhe:
+       mov     x0, #HVC_VHE_RESTART
+       hvc     #0
+       mov     x0, #BOOT_CPU_MODE_EL2
+       ret
 SYM_FUNC_END(init_kernel_el)
 
 /*
index 5eccbd62fec8a84fd35dc715a2f81e907847ea3b..74ad3db061d13c7377bc55f7e3789629c98ccfec 100644 (file)
@@ -27,12 +27,12 @@ SYM_CODE_START(__hyp_stub_vectors)
        ventry  el2_fiq_invalid                 // FIQ EL2t
        ventry  el2_error_invalid               // Error EL2t
 
-       ventry  el2_sync_invalid                // Synchronous EL2h
+       ventry  elx_sync                        // Synchronous EL2h
        ventry  el2_irq_invalid                 // IRQ EL2h
        ventry  el2_fiq_invalid                 // FIQ EL2h
        ventry  el2_error_invalid               // Error EL2h
 
-       ventry  el1_sync                        // Synchronous 64-bit EL1
+       ventry  elx_sync                        // Synchronous 64-bit EL1
        ventry  el1_irq_invalid                 // IRQ 64-bit EL1
        ventry  el1_fiq_invalid                 // FIQ 64-bit EL1
        ventry  el1_error_invalid               // Error 64-bit EL1
@@ -45,7 +45,7 @@ SYM_CODE_END(__hyp_stub_vectors)
 
        .align 11
 
-SYM_CODE_START_LOCAL(el1_sync)
+SYM_CODE_START_LOCAL(elx_sync)
        cmp     x0, #HVC_SET_VECTORS
        b.ne    1f
        msr     vbar_el2, x1
@@ -71,7 +71,7 @@ SYM_CODE_START_LOCAL(el1_sync)
 
 9:     mov     x0, xzr
        eret
-SYM_CODE_END(el1_sync)
+SYM_CODE_END(elx_sync)
 
 // nVHE? No way! Give me the real thing!
 SYM_CODE_START_LOCAL(mutate_to_vhe)
@@ -224,7 +224,6 @@ SYM_FUNC_END(__hyp_reset_vectors)
  * Entry point to switch to VHE if deemed capable
  */
 SYM_FUNC_START(switch_to_vhe)
-#ifdef CONFIG_ARM64_VHE
        // Need to have booted at EL2
        adr_l   x1, __boot_cpu_mode
        ldr     w0, [x1]
@@ -240,6 +239,5 @@ SYM_FUNC_START(switch_to_vhe)
        mov     x0, #HVC_VHE_RESTART
        hvc     #0
 1:
-#endif
        ret
 SYM_FUNC_END(switch_to_vhe)
index 83f1c4b92095e9a6acf0c2eac454f965228bf040..e628c8ce1ffe2f6976fd20233f75632d73ce42c4 100644 (file)
@@ -25,14 +25,26 @@ struct ftr_set_desc {
        struct {
                char                    name[FTR_DESC_FIELD_LEN];
                u8                      shift;
+               bool                    (*filter)(u64 val);
        }                               fields[];
 };
 
+static bool __init mmfr1_vh_filter(u64 val)
+{
+       /*
+        * If we ever reach this point while running VHE, we're
+        * guaranteed to be on one of these funky, VHE-stuck CPUs. If
+        * the user was trying to force nVHE on us, proceed with
+        * attitude adjustment.
+        */
+       return !(is_kernel_in_hyp_mode() && val == 0);
+}
+
 static const struct ftr_set_desc mmfr1 __initconst = {
        .name           = "id_aa64mmfr1",
        .override       = &id_aa64mmfr1_override,
        .fields         = {
-               { "vh", ID_AA64MMFR1_VHE_SHIFT },
+               { "vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter },
                {}
        },
 };
@@ -124,6 +136,18 @@ static void __init match_options(const char *cmdline)
                        if (find_field(cmdline, regs[i], f, &v))
                                continue;
 
+                       /*
+                        * If an override gets filtered out, advertise
+                        * it by setting the value to 0xf, but
+                        * clearing the mask... Yes, this is fragile.
+                        */
+                       if (regs[i]->fields[f].filter &&
+                           !regs[i]->fields[f].filter(v)) {
+                               regs[i]->override->val  |= mask;
+                               regs[i]->override->mask &= ~mask;
+                               continue;
+                       }
+
                        regs[i]->override->val  &= ~mask;
                        regs[i]->override->val  |= (v << shift) & mask;
                        regs[i]->override->mask |= mask;
index dfb1feab867d43b7fd5ea029b3f69d9bbf873907..bda49430c9ea3c9beb0669689e760bc6bd26c3ce 100644 (file)
@@ -71,13 +71,44 @@ static void init_irq_stacks(void)
 }
 #endif
 
+static void default_handle_irq(struct pt_regs *regs)
+{
+       panic("IRQ taken without a root IRQ handler\n");
+}
+
+static void default_handle_fiq(struct pt_regs *regs)
+{
+       panic("FIQ taken without a root FIQ handler\n");
+}
+
+void (*handle_arch_irq)(struct pt_regs *) __ro_after_init = default_handle_irq;
+void (*handle_arch_fiq)(struct pt_regs *) __ro_after_init = default_handle_fiq;
+
+int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
+{
+       if (handle_arch_irq != default_handle_irq)
+               return -EBUSY;
+
+       handle_arch_irq = handle_irq;
+       pr_info("Root IRQ handler: %ps\n", handle_irq);
+       return 0;
+}
+
+int __init set_handle_fiq(void (*handle_fiq)(struct pt_regs *))
+{
+       if (handle_arch_fiq != default_handle_fiq)
+               return -EBUSY;
+
+       handle_arch_fiq = handle_fiq;
+       pr_info("Root FIQ handler: %ps\n", handle_fiq);
+       return 0;
+}
+
 void __init init_IRQ(void)
 {
        init_irq_stacks();
        init_irq_scs();
        irqchip_init();
-       if (!handle_arch_irq)
-               panic("No interrupt controller found.");
 
        if (system_uses_irq_prio_masking()) {
                /*
index 27f8939deb1b322839e9b09195adcdfd3b5ce7e1..341342b207f63286db6db7c48b72fe7629c12b9e 100644 (file)
@@ -128,15 +128,17 @@ u64 __init kaslr_early_init(void)
        /* use the top 16 bits to randomize the linear region */
        memstart_offset_seed = seed >> 48;
 
-       if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
-           IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+       if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) &&
+           (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+            IS_ENABLED(CONFIG_KASAN_SW_TAGS)))
                /*
-                * KASAN does not expect the module region to intersect the
-                * vmalloc region, since shadow memory is allocated for each
-                * module at load time, whereas the vmalloc region is shadowed
-                * by KASAN zero pages. So keep modules out of the vmalloc
-                * region if KASAN is enabled, and put the kernel well within
-                * 4 GB of the module region.
+                * KASAN without KASAN_VMALLOC does not expect the module region
+                * to intersect the vmalloc region, since shadow memory is
+                * allocated for each module at load time, whereas the vmalloc
+                * region is shadowed by KASAN zero pages. So keep modules
+                * out of the vmalloc region if KASAN is enabled without
+                * KASAN_VMALLOC, and put the kernel well within 4 GB of the
+                * module region.
                 */
                return offset % SZ_2G;
 
index fe21e0f06492ee5e4328288b191e25d08ef28d08..b5ec010c481f37c87c5d977a870a955ca86377af 100644 (file)
@@ -40,14 +40,16 @@ void *module_alloc(unsigned long size)
                                NUMA_NO_NODE, __builtin_return_address(0));
 
        if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
-           !IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-           !IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+           (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
+            (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+             !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
                /*
-                * KASAN can only deal with module allocations being served
-                * from the reserved module region, since the remainder of
-                * the vmalloc region is already backed by zero shadow pages,
-                * and punching holes into it is non-trivial. Since the module
-                * region is not randomized when KASAN is enabled, it is even
+                * KASAN without KASAN_VMALLOC can only deal with module
+                * allocations being served from the reserved module region,
+                * since the remainder of the vmalloc region is already
+                * backed by zero shadow pages, and punching holes into it
+                * is non-trivial. Since the module region is not randomized
+                * when KASAN is enabled without KASAN_VMALLOC, it is even
                 * less likely that the module region gets exhausted, so we
                 * can simply omit this fallback in that case.
                 */
index b3c70a612c7a93324b735559853069320519dd94..125a10e413e9f2967b7cbc56fb355924020d66e0 100644 (file)
@@ -26,6 +26,12 @@ u64 gcr_kernel_excl __ro_after_init;
 
 static bool report_fault_once = true;
 
+#ifdef CONFIG_KASAN_HW_TAGS
+/* Whether the MTE asynchronous mode is enabled. */
+DEFINE_STATIC_KEY_FALSE(mte_async_mode);
+EXPORT_SYMBOL_GPL(mte_async_mode);
+#endif
+
 static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
 {
        pte_t old_pte = READ_ONCE(*ptep);
@@ -107,13 +113,45 @@ void mte_init_tags(u64 max_tag)
        write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
 }
 
-void mte_enable_kernel(void)
+static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
 {
        /* Enable MTE Sync Mode for EL1. */
-       sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
+       sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf);
        isb();
+
+       pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
+}
+
+#ifdef CONFIG_KASAN_HW_TAGS
+void mte_enable_kernel_sync(void)
+{
+       /*
+        * Make sure we enter this function when no PE has set
+        * async mode previously.
+        */
+       WARN_ONCE(system_uses_mte_async_mode(),
+                       "MTE async mode enabled system wide!");
+
+       __mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
 }
 
+void mte_enable_kernel_async(void)
+{
+       __mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);
+
+       /*
+        * MTE async mode is set system wide by the first PE that
+        * executes this function.
+        *
+        * Note: If in future KASAN acquires a runtime switching
+        * mode in between sync and async, this strategy needs
+        * to be reviewed.
+        */
+       if (!system_uses_mte_async_mode())
+               static_branch_enable(&mte_async_mode);
+}
+#endif
+
 void mte_set_report_once(bool state)
 {
        WRITE_ONCE(report_fault_once, state);
@@ -124,25 +162,28 @@ bool mte_report_once(void)
        return READ_ONCE(report_fault_once);
 }
 
-static void update_sctlr_el1_tcf0(u64 tcf0)
+#ifdef CONFIG_KASAN_HW_TAGS
+void mte_check_tfsr_el1(void)
 {
-       /* ISB required for the kernel uaccess routines */
-       sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
-       isb();
-}
+       u64 tfsr_el1;
 
-static void set_sctlr_el1_tcf0(u64 tcf0)
-{
-       /*
-        * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
-        * optimisation. Disable preemption so that it does not see
-        * the variable update before the SCTLR_EL1.TCF0 one.
-        */
-       preempt_disable();
-       current->thread.sctlr_tcf0 = tcf0;
-       update_sctlr_el1_tcf0(tcf0);
-       preempt_enable();
+       if (!system_supports_mte())
+               return;
+
+       tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+
+       if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
+               /*
+                * Note: isb() is not required after this direct write
+                * because there is no indirect read subsequent to it
+                * (per ARM DDI 0487F.c table D13-1).
+                */
+               write_sysreg_s(0, SYS_TFSR_EL1);
+
+               kasan_report_async();
+       }
 }
+#endif
 
 static void update_gcr_el1_excl(u64 excl)
 {
@@ -166,7 +207,7 @@ static void set_gcr_el1_excl(u64 excl)
         */
 }
 
-void flush_mte_state(void)
+void mte_thread_init_user(void)
 {
        if (!system_supports_mte())
                return;
@@ -176,19 +217,39 @@ void flush_mte_state(void)
        write_sysreg_s(0, SYS_TFSRE0_EL1);
        clear_thread_flag(TIF_MTE_ASYNC_FAULT);
        /* disable tag checking */
-       set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
+       set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) |
+                          SCTLR_EL1_TCF0_NONE);
        /* reset tag generation mask */
        set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
 }
 
 void mte_thread_switch(struct task_struct *next)
+{
+       /*
+        * Check if an async tag exception occurred at EL1.
+        *
+        * Note: On the context switch path we rely on the dsb() present
+        * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
+        * are synchronized before this point.
+        */
+       isb();
+       mte_check_tfsr_el1();
+}
+
+void mte_suspend_enter(void)
 {
        if (!system_supports_mte())
                return;
 
-       /* avoid expensive SCTLR_EL1 accesses if no change */
-       if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
-               update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
+       /*
+        * The barriers are required to guarantee that the indirect writes
+        * to TFSR_EL1 are synchronized before we report the state.
+        */
+       dsb(nsh);
+       isb();
+
+       /* Report SYS_TFSR_EL1 before suspend entry */
+       mte_check_tfsr_el1();
 }
 
 void mte_suspend_exit(void)
@@ -201,7 +262,7 @@ void mte_suspend_exit(void)
 
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
-       u64 tcf0;
+       u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
        u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
                       SYS_GCR_EL1_EXCL_MASK;
 
@@ -210,23 +271,23 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 
        switch (arg & PR_MTE_TCF_MASK) {
        case PR_MTE_TCF_NONE:
-               tcf0 = SCTLR_EL1_TCF0_NONE;
+               sctlr |= SCTLR_EL1_TCF0_NONE;
                break;
        case PR_MTE_TCF_SYNC:
-               tcf0 = SCTLR_EL1_TCF0_SYNC;
+               sctlr |= SCTLR_EL1_TCF0_SYNC;
                break;
        case PR_MTE_TCF_ASYNC:
-               tcf0 = SCTLR_EL1_TCF0_ASYNC;
+               sctlr |= SCTLR_EL1_TCF0_ASYNC;
                break;
        default:
                return -EINVAL;
        }
 
        if (task != current) {
-               task->thread.sctlr_tcf0 = tcf0;
+               task->thread.sctlr_user = sctlr;
                task->thread.gcr_user_excl = gcr_excl;
        } else {
-               set_sctlr_el1_tcf0(tcf0);
+               set_task_sctlr_el1(sctlr);
                set_gcr_el1_excl(gcr_excl);
        }
 
@@ -243,7 +304,7 @@ long get_mte_ctrl(struct task_struct *task)
 
        ret = incl << PR_MTE_TAG_SHIFT;
 
-       switch (task->thread.sctlr_tcf0) {
+       switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) {
        case SCTLR_EL1_TCF0_NONE:
                ret |= PR_MTE_TCF_NONE;
                break;
index c07d7a03494108a9a75fea9e9bb6b98830b3d0fa..75fed4460407dee05914177a86c234423a6efe54 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/static_call.h>
 
 #include <asm/paravirt.h>
 #include <asm/pvclock-abi.h>
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
 
-struct paravirt_patch_template pv_ops;
-EXPORT_SYMBOL_GPL(pv_ops);
+static u64 native_steal_clock(int cpu)
+{
+       return 0;
+}
+
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
 
 struct pv_time_stolen_time_region {
        struct pvclock_vcpu_stolen_time *kaddr;
@@ -45,7 +50,7 @@ static int __init parse_no_stealacc(char *arg)
 early_param("no-steal-acc", parse_no_stealacc);
 
 /* return stolen time in ns by asking the hypervisor */
-static u64 pv_steal_clock(int cpu)
+static u64 para_steal_clock(int cpu)
 {
        struct pv_time_stolen_time_region *reg;
 
@@ -150,7 +155,7 @@ int __init pv_time_init(void)
        if (ret)
                return ret;
 
-       pv_ops.time.steal_clock = pv_steal_clock;
+       static_call_update(pv_steal_clock, para_steal_clock);
 
        static_key_slow_inc(&paravirt_steal_enabled);
        if (steal_acc)
index 4658fcf88c2b4ddff069e3f47858c6443eac0198..f594957e29bd1253fdfae7fdd8df6742033c122b 100644 (file)
@@ -470,9 +470,8 @@ static inline u64 armv8pmu_read_evcntr(int idx)
 static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
 {
        int idx = event->hw.idx;
-       u64 val = 0;
+       u64 val = armv8pmu_read_evcntr(idx);
 
-       val = armv8pmu_read_evcntr(idx);
        if (armv8pmu_event_is_chained(event))
                val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
        return val;
@@ -520,7 +519,7 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
-       u64 value = 0;
+       u64 value;
 
        if (idx == ARMV8_IDX_CYCLE_COUNTER)
                value = read_sysreg(pmccntr_el0);
index adb955fd9bdd9812e1ab29ac2735d8ea2b836997..60901ab0a7fe54f74dbe4eca272ef189390c9245 100644 (file)
@@ -43,6 +43,69 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
                get_random_bytes(&keys->apdb, sizeof(keys->apdb));
        if (arg & PR_PAC_APGAKEY)
                get_random_bytes(&keys->apga, sizeof(keys->apga));
+       ptrauth_keys_install_user(keys);
 
        return 0;
 }
+
+static u64 arg_to_enxx_mask(unsigned long arg)
+{
+       u64 sctlr_enxx_mask = 0;
+
+       WARN_ON(arg & ~PR_PAC_ENABLED_KEYS_MASK);
+       if (arg & PR_PAC_APIAKEY)
+               sctlr_enxx_mask |= SCTLR_ELx_ENIA;
+       if (arg & PR_PAC_APIBKEY)
+               sctlr_enxx_mask |= SCTLR_ELx_ENIB;
+       if (arg & PR_PAC_APDAKEY)
+               sctlr_enxx_mask |= SCTLR_ELx_ENDA;
+       if (arg & PR_PAC_APDBKEY)
+               sctlr_enxx_mask |= SCTLR_ELx_ENDB;
+       return sctlr_enxx_mask;
+}
+
+int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
+                            unsigned long enabled)
+{
+       u64 sctlr = tsk->thread.sctlr_user;
+
+       if (!system_supports_address_auth())
+               return -EINVAL;
+
+       if (is_compat_thread(task_thread_info(tsk)))
+               return -EINVAL;
+
+       if ((keys & ~PR_PAC_ENABLED_KEYS_MASK) || (enabled & ~keys))
+               return -EINVAL;
+
+       sctlr &= ~arg_to_enxx_mask(keys);
+       sctlr |= arg_to_enxx_mask(enabled);
+       if (tsk == current)
+               set_task_sctlr_el1(sctlr);
+       else
+               tsk->thread.sctlr_user = sctlr;
+
+       return 0;
+}
+
+int ptrauth_get_enabled_keys(struct task_struct *tsk)
+{
+       int retval = 0;
+
+       if (!system_supports_address_auth())
+               return -EINVAL;
+
+       if (is_compat_thread(task_thread_info(tsk)))
+               return -EINVAL;
+
+       if (tsk->thread.sctlr_user & SCTLR_ELx_ENIA)
+               retval |= PR_PAC_APIAKEY;
+       if (tsk->thread.sctlr_user & SCTLR_ELx_ENIB)
+               retval |= PR_PAC_APIBKEY;
+       if (tsk->thread.sctlr_user & SCTLR_ELx_ENDA)
+               retval |= PR_PAC_APDAKEY;
+       if (tsk->thread.sctlr_user & SCTLR_ELx_ENDB)
+               retval |= PR_PAC_APDBKEY;
+
+       return retval;
+}
index 66aac2881ba84ea06b2318b45f42919982c31c98..d607c9912025219469d1946b1ea6ac56128d8cc3 100644 (file)
@@ -264,13 +264,14 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
                 * normal page fault.
                 */
                instruction_pointer_set(regs, (unsigned long) cur->addr);
-               if (!instruction_pointer(regs))
-                       BUG();
+               BUG_ON(!instruction_pointer(regs));
 
-               if (kcb->kprobe_status == KPROBE_REENTER)
+               if (kcb->kprobe_status == KPROBE_REENTER) {
                        restore_previous_kprobe(kcb);
-               else
+               } else {
+                       kprobes_restore_local_irqflag(kcb, regs);
                        reset_current_kprobe();
+               }
 
                break;
        case KPROBE_HIT_ACTIVE:
index 6e60aa3b5ea960346f05647eb0decbc8f3f5cd58..cbf52109583bbb61c25301b589a51cfe080fbcb5 100644 (file)
@@ -86,7 +86,7 @@ static void noinstr __cpu_do_idle_irqprio(void)
        unsigned long daif_bits;
 
        daif_bits = read_sysreg(daif);
-       write_sysreg(daif_bits | PSR_I_BIT, daif);
+       write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif);
 
        /*
         * Unmask PMR before going idle to make sure interrupts can
@@ -341,7 +341,6 @@ void flush_thread(void)
        tls_thread_flush();
        flush_ptrace_hw_breakpoint(current);
        flush_tagged_addr_state();
-       flush_mte_state();
 }
 
 void release_thread(struct task_struct *dead_task)
@@ -531,6 +530,31 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
        write_sysreg(val, cntkctl_el1);
 }
 
+static void update_sctlr_el1(u64 sctlr)
+{
+       /*
+        * EnIA must not be cleared while in the kernel as this is necessary for
+        * in-kernel PAC. It will be cleared on kernel exit if needed.
+        */
+       sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);
+
+       /* ISB required for the kernel uaccess routines when setting TCF0. */
+       isb();
+}
+
+void set_task_sctlr_el1(u64 sctlr)
+{
+       /*
+        * __switch_to() checks current->thread.sctlr as an
+        * optimisation. Disable preemption so that it does not see
+        * the variable update before the SCTLR_EL1 one.
+        */
+       preempt_disable();
+       current->thread.sctlr_user = sctlr;
+       update_sctlr_el1(sctlr);
+       preempt_enable();
+}
+
 /*
  * Thread switching.
  */
@@ -546,6 +570,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
        entry_task_switch(next);
        ssbs_thread_switch(next);
        erratum_1418040_thread_switch(prev, next);
+       ptrauth_thread_switch_user(next);
 
        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
@@ -561,6 +586,9 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
         * registers.
         */
        mte_thread_switch(next);
+       /* avoid expensive SCTLR_EL1 accesses if no change */
+       if (prev->thread.sctlr_user != next->thread.sctlr_user)
+               update_sctlr_el1(next->thread.sctlr_user);
 
        /* the actual thread switch */
        last = cpu_switch_to(prev, next);
@@ -610,7 +638,8 @@ void arch_setup_new_exec(void)
 {
        current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
 
-       ptrauth_thread_init_user(current);
+       ptrauth_thread_init_user();
+       mte_thread_init_user();
 
        if (task_spec_ssb_noexec(current)) {
                arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
index 170f42fd6101a8cb9e9781064bfe7ded6c2a0ce2..eb2f73939b7bbbaf7feb4a5cb98dd127ad3c7a9b 100644 (file)
@@ -909,6 +909,38 @@ static int pac_mask_get(struct task_struct *target,
        return membuf_write(&to, &uregs, sizeof(uregs));
 }
 
+static int pac_enabled_keys_get(struct task_struct *target,
+                               const struct user_regset *regset,
+                               struct membuf to)
+{
+       long enabled_keys = ptrauth_get_enabled_keys(target);
+
+       if (IS_ERR_VALUE(enabled_keys))
+               return enabled_keys;
+
+       return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
+}
+
+static int pac_enabled_keys_set(struct task_struct *target,
+                               const struct user_regset *regset,
+                               unsigned int pos, unsigned int count,
+                               const void *kbuf, const void __user *ubuf)
+{
+       int ret;
+       long enabled_keys = ptrauth_get_enabled_keys(target);
+
+       if (IS_ERR_VALUE(enabled_keys))
+               return enabled_keys;
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
+                                sizeof(long));
+       if (ret)
+               return ret;
+
+       return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
+                                       enabled_keys);
+}
+
 #ifdef CONFIG_CHECKPOINT_RESTORE
 static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
 {
@@ -1074,6 +1106,7 @@ enum aarch64_regset {
 #endif
 #ifdef CONFIG_ARM64_PTR_AUTH
        REGSET_PAC_MASK,
+       REGSET_PAC_ENABLED_KEYS,
 #ifdef CONFIG_CHECKPOINT_RESTORE
        REGSET_PACA_KEYS,
        REGSET_PACG_KEYS,
@@ -1160,6 +1193,14 @@ static const struct user_regset aarch64_regsets[] = {
                .regset_get = pac_mask_get,
                /* this cannot be set dynamically */
        },
+       [REGSET_PAC_ENABLED_KEYS] = {
+               .core_note_type = NT_ARM_PAC_ENABLED_KEYS,
+               .n = 1,
+               .size = sizeof(long),
+               .align = sizeof(long),
+               .regset_get = pac_enabled_keys_get,
+               .set = pac_enabled_keys_set,
+       },
 #ifdef CONFIG_CHECKPOINT_RESTORE
        [REGSET_PACA_KEYS] = {
                .core_note_type = NT_ARM_PACA_KEYS,
index 5bfd9b87f85df784894e9ed2ed576da63f2c3ed1..4ea9392f86e017a4a509a4324ade8177caf977ff 100644 (file)
@@ -134,7 +134,7 @@ SYM_FUNC_START(_cpu_resume)
         */
        bl      cpu_do_resume
 
-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
        mov     x0, sp
        bl      kasan_unpoison_task_stack_below
 #endif
index 357590beaabb26de093c19d8007e4f8dc9830451..dcd7041b2b077f0ece865d5edfb4b1b67e04199c 100644 (file)
@@ -188,6 +188,7 @@ static void init_gic_priority_masking(void)
        cpuflags = read_sysreg(daif);
 
        WARN_ON(!(cpuflags & PSR_I_BIT));
+       WARN_ON(!(cpuflags & PSR_F_BIT));
 
        gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 }
index d55bdfb7789c1f05f118513ec091cc1846c8e49d..84b676bcf867674638e7903fed4e0bc2a3c45969 100644 (file)
  *     add     sp, sp, #0x10
  */
 
+
+void start_backtrace(struct stackframe *frame, unsigned long fp,
+                    unsigned long pc)
+{
+       frame->fp = fp;
+       frame->pc = pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       frame->graph = 0;
+#endif
+
+       /*
+        * Prime the first unwind.
+        *
+        * In unwind_frame() we'll check that the FP points to a valid stack,
+        * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
+        * treated as a transition to whichever stack that happens to be. The
+        * prev_fp value won't be used, but we set it to 0 such that it is
+        * definitely not an accessible stack address.
+        */
+       bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
+       frame->prev_fp = 0;
+       frame->prev_type = STACK_TYPE_UNKNOWN;
+}
+
 /*
  * Unwind from one frame record (A) to the next frame record (B).
  *
index d7564891ffe12f656d80f76363821870886c7ad6..e3f72df9509d72e18b192aa106dec182ace45293 100644 (file)
@@ -74,8 +74,9 @@ void notrace __cpu_suspend_exit(void)
         */
        spectre_v4_enable_mitigation(NULL);
 
-       /* Restore additional MTE-specific configuration */
+       /* Restore additional feature-specific configuration */
        mte_suspend_exit();
+       ptrauth_suspend_exit();
 }
 
 /*
@@ -91,6 +92,9 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
        unsigned long flags;
        struct sleep_stack_data state;
 
+       /* Report any MTE async fault before going to suspend */
+       mte_suspend_enter();
+
        /*
         * From this point debug exceptions are disabled to prevent
         * updates to mdscr register (saved and restored along with
index b9cf12b271d79a8fc8896960f2581a16d139adf4..263d6c1a525f3d6d06c944917454d4178fc3515c 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/errno.h>
 #include <linux/nospec.h>
 #include <linux/ptrace.h>
+#include <linux/randomize_kstack.h>
 #include <linux/syscalls.h>
 
 #include <asm/daifflags.h>
@@ -43,6 +44,8 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
 {
        long ret;
 
+       add_random_kstack_offset();
+
        if (scno < sc_nr) {
                syscall_fn_t syscall_fn;
                syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
@@ -55,6 +58,19 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
                ret = lower_32_bits(ret);
 
        regs->regs[0] = ret;
+
+       /*
+        * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+        * but not enough for arm64 stack utilization comfort. To keep
+        * reasonable stack head room, reduce the maximum offset to 9 bits.
+        *
+        * The actual entropy will be further reduced by the compiler when
+        * applying stack alignment constraints: the AAPCS mandates a
+        * 16-byte (i.e. 4-bit) aligned SP at function boundaries.
+        *
+        * The resulting 5 bits of entropy is seen in SP[8:4].
+        */
+       choose_random_kstack_offset(get_random_int() & 0x1FF);
 }
 
 static inline bool has_syscall_work(unsigned long flags)
index cee5d04ea9ad1a6e5f39ddbf393819aefe193a89..a61fc4f989b37bfe57f05931926edb03146ab89e 100644 (file)
@@ -86,7 +86,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
        return 0;
 }
 
-static int __vdso_init(enum vdso_abi abi)
+static int __init __vdso_init(enum vdso_abi abi)
 {
        int i;
        struct page **vdso_pagelist;
@@ -271,6 +271,14 @@ enum aarch32_map {
 static struct page *aarch32_vectors_page __ro_after_init;
 static struct page *aarch32_sig_page __ro_after_init;
 
+static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
+                                 struct vm_area_struct *new_vma)
+{
+       current->mm->context.sigpage = (void *)new_vma->vm_start;
+
+       return 0;
+}
+
 static struct vm_special_mapping aarch32_vdso_maps[] = {
        [AA32_MAP_VECTORS] = {
                .name   = "[vectors]", /* ABI */
@@ -279,6 +287,7 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
        [AA32_MAP_SIGPAGE] = {
                .name   = "[sigpage]", /* ABI */
                .pages  = &aarch32_sig_page,
+               .mremap = aarch32_sigpage_mremap,
        },
        [AA32_MAP_VVAR] = {
                .name = "[vvar]",
@@ -299,34 +308,35 @@ static int aarch32_alloc_kuser_vdso_page(void)
        if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
                return 0;
 
-       vdso_page = get_zeroed_page(GFP_ATOMIC);
+       vdso_page = get_zeroed_page(GFP_KERNEL);
        if (!vdso_page)
                return -ENOMEM;
 
        memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
               kuser_sz);
        aarch32_vectors_page = virt_to_page(vdso_page);
-       flush_dcache_page(aarch32_vectors_page);
        return 0;
 }
 
+#define COMPAT_SIGPAGE_POISON_WORD     0xe7fddef1
 static int aarch32_alloc_sigpage(void)
 {
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
        int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
-       unsigned long sigpage;
+       __le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
+       void *sigpage;
 
-       sigpage = get_zeroed_page(GFP_ATOMIC);
+       sigpage = (void *)__get_free_page(GFP_KERNEL);
        if (!sigpage)
                return -ENOMEM;
 
-       memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
+       memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
+       memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
        aarch32_sig_page = virt_to_page(sigpage);
-       flush_dcache_page(aarch32_sig_page);
        return 0;
 }
 
-static int __aarch32_alloc_vdso_pages(void)
+static int __init __aarch32_alloc_vdso_pages(void)
 {
 
        if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
index 15a6c98ee92f05cdf35f19c5ed5cbd7a6738f699..2f1b156021a6042e9a7a224c684e7b4bdf4db7be 100644 (file)
@@ -86,7 +86,7 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
                }
                break;
        case GICD_TYPER2:
-               if (kvm_vgic_global_state.has_gicv4_1)
+               if (kvm_vgic_global_state.has_gicv4_1 && gic_cpuif_has_vsgi())
                        value = GICD_TYPER2_nASSGIcap;
                break;
        case GICD_IIDR:
@@ -119,7 +119,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
                dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
 
                /* Not a GICv4.1? No HW SGIs */
-               if (!kvm_vgic_global_state.has_gicv4_1)
+               if (!kvm_vgic_global_state.has_gicv4_1 || !gic_cpuif_has_vsgi())
                        val &= ~GICD_CTLR_nASSGIreq;
 
                /* Dist stays enabled? nASSGIreq is RO */
index 93e87b2875567e09427b4d5e60a947a0e33be77c..4bf1dd3eb041912f8cad240fa606eb9efb5df2a1 100644 (file)
@@ -53,7 +53,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                iommu_setup_dma_ops(dev, dma_base, size);
 
 #ifdef CONFIG_XEN
-       if (xen_initial_domain())
+       if (xen_swiotlb_detect())
                dev->dma_ops = &xen_swiotlb_dma_ops;
 #endif
 }
index f37d4e3830b79a9459f1de8c4349afda05e1c347..871c82ab0a309550c3a140dc969afcc876525ef8 100644 (file)
@@ -527,7 +527,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
        const struct fault_info *inf;
        struct mm_struct *mm = current->mm;
        vm_fault_t fault;
-       unsigned long vm_flags = VM_ACCESS_FLAGS;
+       unsigned long vm_flags;
        unsigned int mm_flags = FAULT_FLAG_DEFAULT;
        unsigned long addr = untagged_addr(far);
 
@@ -544,12 +544,28 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
        if (user_mode(regs))
                mm_flags |= FAULT_FLAG_USER;
 
+       /*
+        * vm_flags tells us what bits we must have in vma->vm_flags
+        * for the fault to be benign; __do_page_fault() checks
+        * vma->vm_flags & vm_flags and returns an error if the
+        * intersection is empty.
+        */
        if (is_el0_instruction_abort(esr)) {
+               /* It was exec fault */
                vm_flags = VM_EXEC;
                mm_flags |= FAULT_FLAG_INSTRUCTION;
        } else if (is_write_abort(esr)) {
+               /* It was write fault */
                vm_flags = VM_WRITE;
                mm_flags |= FAULT_FLAG_WRITE;
+       } else {
+               /* It was read fault */
+               vm_flags = VM_READ;
+               /* Write implies read */
+               vm_flags |= VM_WRITE;
+               /* If EPAN is absent then exec implies read */
+               if (!cpus_have_const_cap(ARM64_HAS_EPAN))
+                       vm_flags |= VM_EXEC;
        }
 
        if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
index d8e66c78440ec38a9a8d2e9304a0f01dfc3cbd00..61b52a92b8b68b0d6a9d72bd816f6483bcb7c175 100644 (file)
@@ -79,7 +79,7 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
                phys_addr_t pmd_phys = early ?
                                __pa_symbol(kasan_early_shadow_pmd)
                                        : kasan_alloc_zeroed_page(node);
-               __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
+               __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
        }
 
        return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
@@ -92,7 +92,7 @@ static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
                phys_addr_t pud_phys = early ?
                                __pa_symbol(kasan_early_shadow_pud)
                                        : kasan_alloc_zeroed_page(node);
-               __p4d_populate(p4dp, pud_phys, PMD_TYPE_TABLE);
+               __p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
        }
 
        return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
@@ -214,15 +214,18 @@ static void __init kasan_init_shadow(void)
 {
        u64 kimg_shadow_start, kimg_shadow_end;
        u64 mod_shadow_start, mod_shadow_end;
+       u64 vmalloc_shadow_end;
        phys_addr_t pa_start, pa_end;
        u64 i;
 
-       kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
-       kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));
+       kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
+       kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));
 
        mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
        mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
 
+       vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
+
        /*
         * We are going to perform proper setup of shadow memory.
         * At first we should unmap early shadow (clear_pgds() call below).
@@ -237,16 +240,22 @@ static void __init kasan_init_shadow(void)
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
        kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
-                          early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
+                          early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));
 
        kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
                                   (void *)mod_shadow_start);
-       kasan_populate_early_shadow((void *)kimg_shadow_end,
-                                  (void *)KASAN_SHADOW_END);
 
-       if (kimg_shadow_start > mod_shadow_end)
-               kasan_populate_early_shadow((void *)mod_shadow_end,
-                                           (void *)kimg_shadow_start);
+       if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+               BUILD_BUG_ON(VMALLOC_START != MODULES_END);
+               kasan_populate_early_shadow((void *)vmalloc_shadow_end,
+                                           (void *)KASAN_SHADOW_END);
+       } else {
+               kasan_populate_early_shadow((void *)kimg_shadow_end,
+                                           (void *)KASAN_SHADOW_END);
+               if (kimg_shadow_start > mod_shadow_end)
+                       kasan_populate_early_shadow((void *)mod_shadow_end,
+                                                   (void *)kimg_shadow_start);
+       }
 
        for_each_mem_range(i, &pa_start, &pa_end) {
                void *start = (void *)__phys_to_virt(pa_start);
index 5d9550fdb9cf8d14871069d2c229db3245688756..d563335ad43f5ef21d3ac6579a4989ac770687d3 100644 (file)
@@ -39,6 +39,7 @@
 
 #define NO_BLOCK_MAPPINGS      BIT(0)
 #define NO_CONT_MAPPINGS       BIT(1)
+#define NO_EXEC_MAPPINGS       BIT(2)  /* assumes FEAT_HPDS is not used */
 
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
@@ -185,10 +186,14 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
 
        BUG_ON(pmd_sect(pmd));
        if (pmd_none(pmd)) {
+               pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN;
                phys_addr_t pte_phys;
+
+               if (flags & NO_EXEC_MAPPINGS)
+                       pmdval |= PMD_TABLE_PXN;
                BUG_ON(!pgtable_alloc);
                pte_phys = pgtable_alloc(PAGE_SHIFT);
-               __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
+               __pmd_populate(pmdp, pte_phys, pmdval);
                pmd = READ_ONCE(*pmdp);
        }
        BUG_ON(pmd_bad(pmd));
@@ -259,10 +264,14 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
         */
        BUG_ON(pud_sect(pud));
        if (pud_none(pud)) {
+               pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN;
                phys_addr_t pmd_phys;
+
+               if (flags & NO_EXEC_MAPPINGS)
+                       pudval |= PUD_TABLE_PXN;
                BUG_ON(!pgtable_alloc);
                pmd_phys = pgtable_alloc(PMD_SHIFT);
-               __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
+               __pud_populate(pudp, pmd_phys, pudval);
                pud = READ_ONCE(*pudp);
        }
        BUG_ON(pud_bad(pud));
@@ -306,10 +315,14 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
        p4d_t p4d = READ_ONCE(*p4dp);
 
        if (p4d_none(p4d)) {
+               p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN;
                phys_addr_t pud_phys;
+
+               if (flags & NO_EXEC_MAPPINGS)
+                       p4dval |= P4D_TABLE_PXN;
                BUG_ON(!pgtable_alloc);
                pud_phys = pgtable_alloc(PUD_SHIFT);
-               __p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
+               __p4d_populate(p4dp, pud_phys, p4dval);
                p4d = READ_ONCE(*p4dp);
        }
        BUG_ON(p4d_bad(p4d));
@@ -486,14 +499,24 @@ early_param("crashkernel", enable_crash_mem_map);
 
 static void __init map_mem(pgd_t *pgdp)
 {
+       static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
        phys_addr_t kernel_start = __pa_symbol(_stext);
        phys_addr_t kernel_end = __pa_symbol(__init_begin);
        phys_addr_t start, end;
-       int flags = 0;
+       int flags = NO_EXEC_MAPPINGS;
        u64 i;
 
+       /*
+        * Setting hierarchical PXNTable attributes on table entries covering
+        * the linear region is only possible if it is guaranteed that no table
+        * entries at any level are being shared between the linear region and
+        * the vmalloc region. Check whether this is true for the PGD level, in
+        * which case it is guaranteed to be true for all other levels as well.
+        */
+       BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
+
        if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
-               flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+               flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
        /*
         * Take care not to create a writable alias for the
@@ -1210,11 +1233,11 @@ void __init early_fixmap_init(void)
                pudp = pud_offset_kimg(p4dp, addr);
        } else {
                if (p4d_none(p4d))
-                       __p4d_populate(p4dp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
+                       __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
                pudp = fixmap_pud(addr);
        }
        if (pud_none(READ_ONCE(*pudp)))
-               __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
+               __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
        pmdp = fixmap_pmd(addr);
        __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
 
@@ -1480,7 +1503,7 @@ struct range arch_get_mappable_range(void)
 int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_params *params)
 {
-       int ret, flags = 0;
+       int ret, flags = NO_EXEC_MAPPINGS;
 
        VM_BUG_ON(!mhp_range_allowed(start, size, true));
 
@@ -1490,7 +1513,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
         */
        if (rodata_full || debug_pagealloc_enabled() ||
            IS_ENABLED(CONFIG_KFENCE))
-               flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+               flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
        __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
                             size, params->pgprot, __pgd_pgtable_alloc,
index c967bfd30d2b545f9836dad23cec8a83bdad764d..0a48191534ff6360787b17c428b017a79190ed7e 100644 (file)
@@ -419,14 +419,17 @@ SYM_FUNC_START(__cpu_setup)
        reset_amuserenr_el0 x1                  // Disable AMU access from EL0
 
        /*
-        * Memory region attributes
+        * Default values for VMSA control registers. These will be adjusted
+        * below depending on detected CPU features.
         */
-       mov_q   x5, MAIR_EL1_SET
-#ifdef CONFIG_ARM64_MTE
-       mte_tcr .req    x20
-
-       mov     mte_tcr, #0
+       mair    .req    x17
+       tcr     .req    x16
+       mov_q   mair, MAIR_EL1_SET
+       mov_q   tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+                       TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
+                       TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
 
+#ifdef CONFIG_ARM64_MTE
        /*
         * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
         * (ID_AA64PFR1_EL1[11:8] > 1).
@@ -438,7 +441,7 @@ SYM_FUNC_START(__cpu_setup)
 
        /* Normal Tagged memory type at the corresponding MAIR index */
        mov     x10, #MAIR_ATTR_NORMAL_TAGGED
-       bfi     x5, x10, #(8 *  MT_NORMAL_TAGGED), #8
+       bfi     mair, x10, #(8 *  MT_NORMAL_TAGGED), #8
 
        /* initialize GCR_EL1: all non-zero tags excluded by default */
        mov     x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
@@ -449,37 +452,26 @@ SYM_FUNC_START(__cpu_setup)
        msr_s   SYS_TFSRE0_EL1, xzr
 
        /* set the TCR_EL1 bits */
-       mov_q   mte_tcr, TCR_KASAN_HW_FLAGS
+       mov_q   x10, TCR_KASAN_HW_FLAGS
+       orr     tcr, tcr, x10
 1:
 #endif
-       msr     mair_el1, x5
-       /*
-        * Set/prepare TCR and TTBR. TCR_EL1.T1SZ gets further
-        * adjusted if the kernel is compiled with 52bit VA support.
-        */
-       mov_q   x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
-                       TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
-                       TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
-#ifdef CONFIG_ARM64_MTE
-       orr     x10, x10, mte_tcr
-       .unreq  mte_tcr
-#endif
-       tcr_clear_errata_bits x10, x9, x5
+       tcr_clear_errata_bits tcr, x9, x5
 
 #ifdef CONFIG_ARM64_VA_BITS_52
        ldr_l           x9, vabits_actual
        sub             x9, xzr, x9
        add             x9, x9, #64
-       tcr_set_t1sz    x10, x9
+       tcr_set_t1sz    tcr, x9
 #else
        ldr_l           x9, idmap_t0sz
 #endif
-       tcr_set_t0sz    x10, x9
+       tcr_set_t0sz    tcr, x9
 
        /*
         * Set the IPS bits in TCR_EL1.
         */
-       tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
+       tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
 #ifdef CONFIG_ARM64_HW_AFDBM
        /*
         * Enable hardware update of the Access Flags bit.
@@ -489,13 +481,17 @@ SYM_FUNC_START(__cpu_setup)
        mrs     x9, ID_AA64MMFR1_EL1
        and     x9, x9, #0xf
        cbz     x9, 1f
-       orr     x10, x10, #TCR_HA               // hardware Access flag update
+       orr     tcr, tcr, #TCR_HA               // hardware Access flag update
 1:
 #endif /* CONFIG_ARM64_HW_AFDBM */
-       msr     tcr_el1, x10
+       msr     mair_el1, mair
+       msr     tcr_el1, tcr
        /*
         * Prepare SCTLR
         */
        mov_q   x0, INIT_SCTLR_EL1_MMU_ON
        ret                                     // return to head.S
+
+       .unreq  mair
+       .unreq  tcr
 SYM_FUNC_END(__cpu_setup)
index 0e050d76b83aab01bf39493fb8c31829bc48c767..a50e92ea1878e0e787771333e736cc1000d104df 100644 (file)
@@ -337,7 +337,7 @@ void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
        ptdump_walk_pgd(&st.ptdump, info->mm, NULL);
 }
 
-static void ptdump_initialize(void)
+static void __init ptdump_initialize(void)
 {
        unsigned i, j;
 
@@ -381,7 +381,7 @@ void ptdump_check_wx(void)
                pr_info("Checked W+X mappings: passed, no W+X pages found\n");
 }
 
-static int ptdump_init(void)
+static int __init ptdump_init(void)
 {
        address_markers[PAGE_END_NR].start_address = PAGE_END;
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
index d29d722ec3ec6e58f1ca795913a4515ae639365f..68bf1a125502dab30fb60903a33d65bb20eb93e6 100644 (file)
@@ -16,7 +16,7 @@ static int ptdump_show(struct seq_file *m, void *v)
 }
 DEFINE_SHOW_ATTRIBUTE(ptdump);
 
-void ptdump_debugfs_register(struct ptdump_info *info, const char *name)
+void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name)
 {
        debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
 }
index 34e91224adc395c5c6912042f8c8fdccfc364bbc..8de5b987edb9f04487e70a76dcc3454dbed43c45 100644 (file)
@@ -314,7 +314,7 @@ config FORCE_MAX_ZONEORDER
        int "Maximum zone order"
        default "11"
 
-config RAM_BASE
+config DRAM_BASE
        hex "DRAM start addr (the same with memory-section in dts)"
        default 0x0
 
index 3b91fc3cf36f9e5b12d1dfdade4a59684f9729b1..ed7451478b1b03b9ebcf20b32ec4478dee0131f1 100644 (file)
@@ -28,7 +28,7 @@
 #define SSEG_SIZE      0x20000000
 #define LOWMEM_LIMIT   (SSEG_SIZE * 2)
 
-#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1))
+#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))
 
 #ifndef __ASSEMBLY__
 
index ca0d596c800d80505e1aab0d992aa8486d101865..8916a2850c48b2d9f060106b6f512774a30ca7ac 100644 (file)
@@ -55,8 +55,6 @@ CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_QLOGIC_1280=y
-CONFIG_ATA=y
-CONFIG_ATA_PIIX=y
 CONFIG_SATA_VITESSE=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
index b3aa460901012946dc720a29c168d4bf80cf7a02..08179135905cdb93cedc9a513e623d73049b2af6 100644 (file)
@@ -54,8 +54,7 @@
 
 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
 {
-       /* FIXME: should this be bspstore + nr_dirty regs? */
-       return regs->ar_bspstore;
+       return regs->r12;
 }
 
 static inline int is_syscall_success(struct pt_regs *regs)
@@ -79,11 +78,6 @@ static inline long regs_return_value(struct pt_regs *regs)
        unsigned long __ip = instruction_pointer(regs);                 \
        (__ip & ~3UL) + ((__ip & 3UL) << 2);                            \
 })
-/*
- * Why not default?  Because user_stack_pointer() on ia64 gives register
- * stack backing store instead...
- */
-#define current_user_stack_pointer() (current_pt_regs()->r12)
 
   /* given a pointer to a task_struct, return the user's pt_regs */
 # define task_pt_regs(t)               (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
index 03b3a02375ff36a8a6eb8cf965f7e7c4ad3d4b2e..c310b4c99fb306cba40e09a532cd9c7dfdc912ec 100644 (file)
@@ -95,7 +95,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
  * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
  * called yet.  Note that node 0 will also count all non-existent cpus.
  */
-static int __meminit early_nr_cpus_node(int node)
+static int early_nr_cpus_node(int node)
 {
        int cpu, n = 0;
 
@@ -110,7 +110,7 @@ static int __meminit early_nr_cpus_node(int node)
  * compute_pernodesize - compute size of pernode data
  * @node: the node id.
  */
-static unsigned long __meminit compute_pernodesize(int node)
+static unsigned long compute_pernodesize(int node)
 {
        unsigned long pernodesize = 0, cpus;
 
@@ -367,7 +367,7 @@ static void __init reserve_pernode_space(void)
        }
 }
 
-static void __meminit scatter_node_data(void)
+static void scatter_node_data(void)
 {
        pg_data_t **dst;
        int node;
index 786656090c502956f71902a3770968a6b69f61af..59b727b693575eb32d0b9567d3866ef03e150f6f 100644 (file)
@@ -580,12 +580,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -598,7 +594,6 @@ CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
index 9bb12be4a38e8a36be22de9f827765eca9b8a305..8d4ddcebe7b8d2e54c1f425a4e782ebe050be4ba 100644 (file)
@@ -536,12 +536,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -554,7 +550,6 @@ CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
index 413232626d9d573072c0dbd03fcdf27007cf9257..9cc9f1a06516434e96f24acfbb10ba9c9d9602f7 100644 (file)
@@ -558,12 +558,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -576,7 +572,6 @@ CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
index 819cc70b06d8663dde5136da36529c19f66a0e05..c3f3f462e6ce6fa5456cd58284890389b03734e7 100644 (file)
@@ -529,12 +529,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -547,7 +543,6 @@ CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
index 8f8d5968713bfe2d1dd77b29b143adf70c19d265..8c908fc5c1910aa07739c03d009ba44c82381b71 100644 (file)
@@ -538,12 +538,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -556,7 +552,6 @@ CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
index bf15e6c1c939bb3097bd7baa0566f9e186a1dbcd..4e68b72d9c50f15b2ad936dd078d11abb7715095 100644 (file)
@@ -561,12 +561,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -579,7 +575,6 @@ CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
index 5466d48fcd9d51c9479ae4b5fcfdcce04f9f61c4..d31896293c394c21f98276a81f75c77d49740b58 100644 (file)
@@ -647,12 +647,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -665,7 +661,6 @@ CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
index 93c3059188389742a1b1ebfefe0c01b29ddbf2ba..c7442f9dd469a19e65f47fd0032f7fbb9a09f90d 100644 (file)
@@ -528,12 +528,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -546,7 +542,6 @@ CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
index cacd6c617f6951ee3d15cad78aab0b8ab0cced4a..233b82ea103a448b47a3dede20f4103619579ddd 100644 (file)
@@ -529,12 +529,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -547,7 +543,6 @@ CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
index 3ae421cb24a43986826b2e8153d1f383117939cd..664025a0f6a4173eb40e9b061a65cd1d12072978 100644 (file)
@@ -547,12 +547,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -565,7 +561,6 @@ CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
index 6da97e28c48ef66e133b3850c3dee61ab95664ab..73293a0b3dc86e58c7c393aec4fb788e7a7127df 100644 (file)
@@ -531,12 +531,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -549,7 +545,6 @@ CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
index f54481bb789aecb853a673e209d6814996e67fc5..bca8a6f3e92f55a87de8ca339d17ed655e764984 100644 (file)
@@ -530,12 +530,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_TI=m
@@ -548,7 +544,6 @@ CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
index aab04d372ae7d5abf38fa115602d5c668d6cb5bd..834ae9471b8810bff20b2057b266dbf67fa5218f 100644 (file)
@@ -10,7 +10,3 @@ obj-y    := bindec.o binstr.o decbin.o do_func.o gen_except.o get_op.o \
            ssin.o ssinh.o stan.o stanh.o sto_res.o stwotox.o tbldo.o util.o \
            x_bsun.o x_fline.o x_operr.o x_ovfl.o x_snan.o x_store.o \
            x_unfl.o x_unimp.o x_unsupp.o bugfix.o skeleton.o
-
-EXTRA_LDFLAGS := -x
-
-$(OS_OBJS): fpsp.h
index 43b435049452b1daafd360d3ed33192c99cd4df7..56b530a96c2f33b5a71d3d67a6dffdf6e8136a5c 100644 (file)
@@ -5,5 +5,3 @@
 # for more details.
 
 obj-y := fskeleton.o iskeleton.o os.o
-
-EXTRA_LDFLAGS := -x
index 257b29184af913f8613b3cbcacb91a08e14ef924..e28eb1c0e0bfb37986e520102214fd6e5e1cd5b3 100644 (file)
@@ -66,6 +66,9 @@ struct pcc_regs {
 #define PCC_INT_ENAB           0x08
 
 #define PCC_TIMER_INT_CLR      0x80
+
+#define PCC_TIMER_TIC_EN       0x01
+#define PCC_TIMER_COC_EN       0x02
 #define PCC_TIMER_CLR_OVF      0x04
 
 #define PCC_LEVEL_ABORT                0x07
index 9e8f0cc30a2ccb9d2459228f1fef4ae28d4ca0ff..2411ea9ef578b19dbc5d369440d30d708830b682 100644 (file)
@@ -167,7 +167,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
        ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn;          \
 })
 #else
-#define ARCH_PFN_OFFSET (m68k_memory[0].addr)
+#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)
 #include <asm-generic/memory_model.h>
 #endif
 
index 93f2a8431c0e90797c00a835f35c3c43480859e3..bce8aabb5380d80bacc47ee04d27521c8d34b069 100644 (file)
@@ -106,7 +106,7 @@ static void sun3x_82072_fd_outb(unsigned char value, int port)
        case 4: /* FD_STATUS */
                *(sun3x_fdc.status_r) = value;
                break;
-       };
+       }
        return;
 }
 
index 1c235d8f53f36dbb5669e15be107de1a7abe3920..f55bdcb8e4f15723d1cc4333ea04a05e9835a08b 100644 (file)
@@ -388,6 +388,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
                ret = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out;
+
+               mmap_read_lock(current->mm);
        } else {
                struct vm_area_struct *vma;
 
index 285aaba832d93936c3f2b7edb2938dbfe13fc21f..6713c65a25e15cfe96824cbcdba6bb8f222ef274 100644 (file)
@@ -6,20 +6,14 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')     \
          $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
 
 syscall := $(src)/syscall.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
 
 quiet_cmd_syshdr = SYSHDR  $@
-      cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@'       \
-                  '$(syshdr_abis_$(basetarget))'               \
-                  '$(syshdr_pfx_$(basetarget))'                \
-                  '$(syshdr_offset_$(basetarget))'
+      cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@
 
 quiet_cmd_systbl = SYSTBL  $@
-      cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@'       \
-                  '$(systbl_abis_$(basetarget))'               \
-                  '$(systbl_abi_$(basetarget))'                \
-                  '$(systbl_offset_$(basetarget))'
+      cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
 
 $(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
        $(call if_changed,syshdr)
diff --git a/arch/m68k/kernel/syscalls/syscallhdr.sh b/arch/m68k/kernel/syscalls/syscallhdr.sh
deleted file mode 100644 (file)
index 6f357d6..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_UAPI_ASM_M68K_`basename "$out" | sed \
-       -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
-       -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
-       printf "#ifndef %s\n" "${fileguard}"
-       printf "#define %s\n" "${fileguard}"
-       printf "\n"
-
-       nxt=0
-       while read nr abi name entry ; do
-               if [ -z "$offset" ]; then
-                       printf "#define __NR_%s%s\t%s\n" \
-                               "${prefix}" "${name}" "${nr}"
-               else
-                       printf "#define __NR_%s%s\t(%s + %s)\n" \
-                               "${prefix}" "${name}" "${offset}" "${nr}"
-               fi
-               nxt=$((nr+1))
-       done
-
-       printf "\n"
-       printf "#ifdef __KERNEL__\n"
-       printf "#define __NR_syscalls\t%s\n" "${nxt}"
-       printf "#endif\n"
-       printf "\n"
-       printf "#endif /* %s */\n" "${fileguard}"
-) > "$out"
diff --git a/arch/m68k/kernel/syscalls/syscalltbl.sh b/arch/m68k/kernel/syscalls/syscalltbl.sh
deleted file mode 100644 (file)
index 85d78d9..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-my_abi="$4"
-offset="$5"
-
-emit() {
-       t_nxt="$1"
-       t_nr="$2"
-       t_entry="$3"
-
-       while [ $t_nxt -lt $t_nr ]; do
-               printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
-               t_nxt=$((t_nxt+1))
-       done
-       printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
-}
-
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
-       nxt=0
-       if [ -z "$offset" ]; then
-               offset=0
-       fi
-
-       while read nr abi name entry ; do
-               emit $((nxt+offset)) $((nr+offset)) $entry
-               nxt=$((nr+1))
-       done
-) > "$out"
index d329cc7b481cd1a4b3a72f05b766f4f684ab443e..e25ef4a9df3027dc07bd0205f615aaefa9fdd760 100644 (file)
@@ -18,9 +18,8 @@
 #define sys_mmap2      sys_mmap_pgoff
 #endif
 
-#define __SYSCALL(nr, entry, nargs) .long entry
+#define __SYSCALL(nr, entry) .long entry
        .section .rodata
 ALIGN
 ENTRY(sys_call_table)
 #include <asm/syscall_table.h>
-#undef __SYSCALL
index cfdc7f912e14ed20a98c0843e373a56a76db4d12..e1e90c49a49624ccf2cc81f427723de0c89dd28f 100644 (file)
@@ -114,8 +114,10 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
        unsigned long flags;
 
        local_irq_save(flags);
-       m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
-       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF;
+       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
+                            PCC_TIMER_TIC_EN;
+       m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
+                                PCC_LEVEL_TIMER1;
        clk_total += PCC_TIMER_CYCLES;
        legacy_timer_tick(1);
        local_irq_restore(flags);
@@ -133,10 +135,10 @@ void mvme147_sched_init (void)
        /* Init the clock with a value */
        /* The clock counter increments until 0xFFFF then reloads */
        m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
-       m147_pcc->t1_cntrl = 0x0;       /* clear timer */
-       m147_pcc->t1_cntrl = 0x3;       /* start timer */
-       m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;  /* clear pending ints */
-       m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
+       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
+                            PCC_TIMER_TIC_EN;
+       m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
+                                PCC_LEVEL_TIMER1;
 
        clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ);
 }
index 30357fe4ba6c8b6eea7c99896a19b10756829782..b59593c7cfb9dfbfd9073b18166bca1cdd5c0515 100644 (file)
@@ -366,6 +366,7 @@ static u32 clk_total;
 #define PCCTOVR1_COC_EN      0x02
 #define PCCTOVR1_OVR_CLR     0x04
 
+#define PCCTIC1_INT_LEVEL    6
 #define PCCTIC1_INT_CLR      0x08
 #define PCCTIC1_INT_EN       0x10
 
@@ -374,8 +375,8 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
        unsigned long flags;
 
        local_irq_save(flags);
-       out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR);
-       out_8(PCCTOVR1, PCCTOVR1_OVR_CLR);
+       out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
+       out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
        clk_total += PCC_TIMER_CYCLES;
        legacy_timer_tick(1);
        local_irq_restore(flags);
@@ -389,14 +390,15 @@ void mvme16x_sched_init(void)
     int irq;
 
     /* Using PCCchip2 or MC2 chip tick timer 1 */
-    out_be32(PCCTCNT1, 0);
-    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
-    out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
-    out_8(PCCTIC1, PCCTIC1_INT_EN | 6);
     if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer",
                     NULL))
        panic ("Couldn't register timer int");
 
+    out_be32(PCCTCNT1, 0);
+    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
+    out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
+    out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
+
     clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ);
 
     if (brdno == 0x0162 || brdno == 0x172)
index fc881b46d9111572842cae886d267511922ffbc0..bc6110fb98e0ab6540e0008bec541972c8406a4c 100644 (file)
@@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key);
 asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
 asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
 
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
 {
        poly1305_init_mips(&dctx->h, key);
        dctx->s[0] = get_unaligned_le32(key + 16);
index cf33dd8a487ee8c401f418b789b8345b92173b15..c25a2ce5e29f516661c544dcc4b99e93210f3598 100644 (file)
@@ -276,10 +276,6 @@ asmlinkage void plat_irq_dispatch(void)
 }
 
 #ifdef CONFIG_CPU_XLP
-static const struct irq_domain_ops xlp_pic_irq_domain_ops = {
-       .xlate = irq_domain_xlate_onetwocell,
-};
-
 static int __init xlp_of_pic_init(struct device_node *node,
                                        struct device_node *parent)
 {
@@ -324,7 +320,7 @@ static int __init xlp_of_pic_init(struct device_node *node,
 
        xlp_pic_domain = irq_domain_add_legacy(node, n_picirqs,
                nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE,
-               &xlp_pic_irq_domain_ops, NULL);
+               &irq_domain_simple_ops, NULL);
        if (xlp_pic_domain == NULL) {
                pr_err("PIC %pOFn: Creating legacy domain failed!\n", node);
                return -EINVAL;
index 6eb98a7ad27d22ebf441e259caffc1e2a6aa8b6b..ad5344ef5d3348f1ca4a3cdcb058e5bcb076f715 100644 (file)
@@ -238,7 +238,7 @@ void flush_dcache_page(struct page *page)
 {
        struct address_space *mapping;
 
-       mapping = page_mapping(page);
+       mapping = page_mapping_file(page);
        if (mapping && !mapping_mapped(mapping))
                set_bit(PG_dcache_dirty, &page->flags);
        else {
index cf5ee9b0b393c4249e7a2d66ba22e30ecc7cd459..84ee232278a6aaec3d7fbfefdcf9c13b40d6f7a3 100644 (file)
@@ -72,7 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 #endif
        case 4: return __cmpxchg_u32((unsigned int *)ptr,
                                     (unsigned int)old, (unsigned int)new_);
-       case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
+       case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
index 11ece0d07374fc8e20cff3dabb3387c478622f46..b5fbcd2c17808b773725118ec795fbe9b70d147d 100644 (file)
@@ -272,7 +272,6 @@ on downward growing arches, it looks like this:
        regs->gr[23] = 0;                               \
 } while(0)
 
-struct task_struct;
 struct mm_struct;
 
 /* Free all resources held by a thread. */
index 853c19c03828e377adb686630937488771e23c76..dec951d40286fda29c799ce104aedc0aee96c20d 100644 (file)
@@ -5,34 +5,10 @@
  * Floating-point emulation code
  *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
  */
-/*
- * BEGIN_DESC
- * 
- *  File: 
- *      @(#)   pa/fp/fpu.h             $Revision: 1.1 $
- * 
- *  Purpose:
- *      <<please update with a synopis of the functionality provided by this file>>
- * 
- * 
- * END_DESC  
-*/
-
-#ifdef __NO_PA_HDRS
-    PA header file -- do not include this header file for non-PA builds.
-#endif
-
 
 #ifndef _MACHINE_FPU_INCLUDED /* allows multiple inclusion */
 #define _MACHINE_FPU_INCLUDED
 
-#if 0
-#ifndef _SYS_STDSYMS_INCLUDED
-#    include <sys/stdsyms.h>
-#endif   /* _SYS_STDSYMS_INCLUDED  */
-#include  <machine/pdc/pdc_rqsts.h>
-#endif
-
 #define PA83_FPU_FLAG    0x00000001
 #define PA89_FPU_FLAG    0x00000002
 #define PA2_0_FPU_FLAG   0x00000010
 #define COPR_FP        0x00000080      /* Floating point -- Coprocessor 0 */
 #define SFU_MPY_DIVIDE 0x00008000      /* Multiply/Divide __ SFU 0 */
 
-
 #define EM_FPU_TYPE_OFFSET 272
 
 /* version of EMULATION software for COPR,0,0 instruction */
 #define EMULATION_VERSION 4
 
 /*
- * The only was to differeniate between TIMEX and ROLEX (or PCX-S and PCX-T)
- * is thorough the potential type field from the PDC_MODEL call.  The 
- * following flags are used at assist this differeniation.
+ * The only way to differentiate between TIMEX and ROLEX (or PCX-S and PCX-T)
+ * is through the potential type field from the PDC_MODEL call.
+ * The following flags are used to assist this differentiation.
  */
 
 #define ROLEX_POTENTIAL_KEY_FLAGS      PDC_MODEL_CPU_KEY_WORD_TO_IO
 #define TIMEX_POTENTIAL_KEY_FLAGS      (PDC_MODEL_CPU_KEY_QUAD_STORE | \
                                         PDC_MODEL_CPU_KEY_RECIP_SQRT)
 
-
 #endif /* ! _MACHINE_FPU_INCLUDED */
index b1e577cbf00ca73c4460818722f8ab5f54c928f6..88e8ea73bfa714d22a3790b64f42b55b57ced936 100644 (file)
@@ -107,7 +107,7 @@ static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
 
                src += bytes;
                len -= bytes;
-       };
+       }
 
        memcpy((char *)sctx->buffer, src, len);
        return 0;
index 6084fa499aa356851ea0d387d5374fb6c7aa5636..f66b63e81c3bc218e37a99cafa18808fb062a85e 100644 (file)
@@ -191,3 +191,7 @@ $(obj)/prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o FORCE
 targets += prom_init_check
 
 clean-files := vmlinux.lds
+
+# Force dependency (incbin is bad)
+$(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg
+$(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg
index 8ebc11d1168d8a5b4ac95a420b4a0b1b13988290..77abd1a5a508d0a6c6a7a0b0cd37a867e9ce2731 100644 (file)
@@ -6,11 +6,11 @@
 CFLAGS_ptrace-view.o           += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 obj-y                          += ptrace.o ptrace-view.o
-obj-$(CONFIG_PPC_FPU_REGS)     += ptrace-fpu.o
+obj-y                          += ptrace-fpu.o
 obj-$(CONFIG_COMPAT)           += ptrace32.o
 obj-$(CONFIG_VSX)              += ptrace-vsx.o
 ifneq ($(CONFIG_VSX),y)
-obj-$(CONFIG_PPC_FPU_REGS)     += ptrace-novsx.o
+obj-y                          += ptrace-novsx.o
 endif
 obj-$(CONFIG_ALTIVEC)          += ptrace-altivec.o
 obj-$(CONFIG_SPE)              += ptrace-spe.o
index 3487f2c9735c65d649738a73f7eccf1b3675b292..eafe5f0f62898cc07f0e761111b2039ed5692133 100644 (file)
@@ -165,22 +165,8 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data);
 extern const struct user_regset_view user_ppc_native_view;
 
 /* ptrace-fpu */
-#ifdef CONFIG_PPC_FPU_REGS
 int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data);
 int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data);
-#else
-static inline int
-ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
-{
-       return -EIO;
-}
-
-static inline int
-ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
-{
-       return -EIO;
-}
-#endif
 
 /* ptrace-(no)adv */
 void ppc_gethwdinfo(struct ppc_debug_info *dbginfo);
index 8301cb52dd99236840231e60ccd38b9a6fd35d10..5dca19361316e497d676d113ea74684b37cbda37 100644 (file)
@@ -8,32 +8,42 @@
 
 int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
 {
+#ifdef CONFIG_PPC_FPU_REGS
        unsigned int fpidx = index - PT_FPR0;
+#endif
 
        if (index > PT_FPSCR)
                return -EIO;
 
+#ifdef CONFIG_PPC_FPU_REGS
        flush_fp_to_thread(child);
        if (fpidx < (PT_FPSCR - PT_FPR0))
                memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
        else
                *data = child->thread.fp_state.fpscr;
+#else
+       *data = 0;
+#endif
 
        return 0;
 }
 
 int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
 {
+#ifdef CONFIG_PPC_FPU_REGS
        unsigned int fpidx = index - PT_FPR0;
+#endif
 
        if (index > PT_FPSCR)
                return -EIO;
 
+#ifdef CONFIG_PPC_FPU_REGS
        flush_fp_to_thread(child);
        if (fpidx < (PT_FPSCR - PT_FPR0))
                memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
        else
                child->thread.fp_state.fpscr = data;
+#endif
 
        return 0;
 }
index b3b36835658afc9ebebb7bccecd3579e3805b6a4..7433f3db979ac18a5e3611c13769080783d4a93c 100644 (file)
 int fpr_get(struct task_struct *target, const struct user_regset *regset,
            struct membuf to)
 {
+#ifdef CONFIG_PPC_FPU_REGS
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
                     offsetof(struct thread_fp_state, fpr[32]));
 
        flush_fp_to_thread(target);
 
        return membuf_write(&to, &target->thread.fp_state, 33 * sizeof(u64));
+#else
+       return membuf_write(&to, &empty_zero_page, 33 * sizeof(u64));
+#endif
 }
 
 /*
@@ -46,6 +50,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
            unsigned int pos, unsigned int count,
            const void *kbuf, const void __user *ubuf)
 {
+#ifdef CONFIG_PPC_FPU_REGS
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
                     offsetof(struct thread_fp_state, fpr[32]));
 
@@ -53,4 +58,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
 
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fp_state, 0, -1);
+#else
+       return 0;
+#endif
 }
index 2bad8068f598c628d4188405c91240e3b079750e..6ccffc65ac97ee797a110517a009f3504ba508e5 100644 (file)
@@ -522,13 +522,11 @@ static const struct user_regset native_regsets[] = {
                .size = sizeof(long), .align = sizeof(long),
                .regset_get = gpr_get, .set = gpr_set
        },
-#ifdef CONFIG_PPC_FPU_REGS
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
                .size = sizeof(double), .align = sizeof(double),
                .regset_get = fpr_get, .set = fpr_set
        },
-#endif
 #ifdef CONFIG_ALTIVEC
        [REGSET_VMX] = {
                .core_note_type = NT_PPC_VMX, .n = 34,
index 75ee918a120a5e67f499e53e0c1f7df5aabeba13..f651b992fe016ffe78a7442522ac2f432323df61 100644 (file)
@@ -775,7 +775,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
        else
                prepare_save_user_regs(1);
 
-       if (!user_write_access_begin(frame, sizeof(*frame)))
+       if (!user_access_begin(frame, sizeof(*frame)))
                goto badframe;
 
        /* Put the siginfo & fill in most of the ucontext */
@@ -809,17 +809,15 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
                unsafe_put_user(PPC_INST_ADDI + __NR_rt_sigreturn, &mctx->mc_pad[0],
                                failed);
                unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
+               asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
        }
        unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);
 
-       user_write_access_end();
+       user_access_end();
 
        if (copy_siginfo_to_user(&frame->info, &ksig->info))
                goto badframe;
 
-       if (tramp == (unsigned long)mctx->mc_pad)
-               flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));
-
        regs->link = tramp;
 
 #ifdef CONFIG_PPC_FPU_REGS
@@ -844,7 +842,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
        return 0;
 
 failed:
-       user_write_access_end();
+       user_access_end();
 
 badframe:
        signal_fault(tsk, regs, "handle_rt_signal32", frame);
@@ -879,7 +877,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
        else
                prepare_save_user_regs(1);
 
-       if (!user_write_access_begin(frame, sizeof(*frame)))
+       if (!user_access_begin(frame, sizeof(*frame)))
                goto badframe;
        sc = (struct sigcontext __user *) &frame->sctx;
 
@@ -908,11 +906,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
                /* Set up the sigreturn trampoline: li r0,sigret; sc */
                unsafe_put_user(PPC_INST_ADDI + __NR_sigreturn, &mctx->mc_pad[0], failed);
                unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
+               asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
        }
-       user_write_access_end();
-
-       if (tramp == (unsigned long)mctx->mc_pad)
-               flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));
+       user_access_end();
 
        regs->link = tramp;
 
@@ -935,7 +931,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
        return 0;
 
 failed:
-       user_write_access_end();
+       user_access_end();
 
 badframe:
        signal_fault(tsk, regs, "handle_signal32", frame);
index 0d0cf67359cba6e13fe9c66056f9cec7d1918e5e..4515a10c5d222e18774d4cc545501b3e09f88464 100644 (file)
@@ -153,7 +153,7 @@ config ARCH_FLATMEM_ENABLE
 config ARCH_SPARSEMEM_ENABLE
        def_bool y
        depends on MMU
-       select SPARSEMEM_STATIC if 32BIT && SPARSMEM
+       select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
        select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
 
 config ARCH_SELECT_MEMORY_MODEL
index 76274a4a1d8e6596dbf3069df999847a94238a7e..83095faa680e1626a0ff162030bff63989dc79e1 100644 (file)
@@ -130,6 +130,9 @@ skip_context_tracking:
         */
        andi t0, s1, SR_PIE
        beqz t0, 1f
+       /* kprobes, entered via ebreak, must have interrupts disabled. */
+       li t0, EXC_BREAKPOINT
+       beq s4, t0, 1f
 #ifdef CONFIG_TRACE_IRQFLAGS
        call trace_hardirqs_on
 #endif
index 17ca5e923bb0dfbef53cbf4fcc5d33f05e61b9ec..aab85a82f4199da898e841f23c7df8fbe499dbaf 100644 (file)
@@ -9,10 +9,16 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
        struct kprobe *p;
        struct pt_regs *regs;
        struct kprobe_ctlblk *kcb;
+       int bit;
 
+       bit = ftrace_test_recursion_trylock(ip, parent_ip);
+       if (bit < 0)
+               return;
+
+       preempt_disable_notrace();
        p = get_kprobe((kprobe_opcode_t *)ip);
        if (unlikely(!p) || kprobe_disabled(p))
-               return;
+               goto out;
 
        regs = ftrace_get_regs(fregs);
        kcb = get_kprobe_ctlblk();
@@ -45,6 +51,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                 */
                __this_cpu_write(current_kprobe, NULL);
        }
+out:
+       preempt_enable_notrace();
+       ftrace_test_recursion_unlock(bit);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
 
index 0879b5df11b92c21a10e5b796618a72ba2934fdd..1357abf79570a07fe67a57c8890e08c3a4db4a91 100644 (file)
@@ -178,6 +178,7 @@ asmlinkage __visible void do_trap_break(struct pt_regs *regs)
        else
                die(regs, "Kernel BUG");
 }
+NOKPROBE_SYMBOL(do_trap_break);
 
 #ifdef CONFIG_GENERIC_BUG
 int is_valid_bugaddr(unsigned long pc)
index 8f17519208c756b7a08bf089ec53004c8a5b0c89..c5dbd55cbf7cdfc0638080981f1e7487fcee99c8 100644 (file)
@@ -328,3 +328,4 @@ good_area:
        }
        return;
 }
+NOKPROBE_SYMBOL(do_page_fault);
index ee056f4a4fa3061b45df8a97a0d10703c7952aab..2b543163d90a02b2d050e7fc8f578020f9107973 100644 (file)
@@ -12,6 +12,7 @@ enum stack_type {
        STACK_TYPE_IRQ,
        STACK_TYPE_NODAT,
        STACK_TYPE_RESTART,
+       STACK_TYPE_MCCK,
 };
 
 struct stack_info {
index af013b4244d346598a29db32b49af71f20e785fe..2da0273597989b88bf99159e231bcb0d7d66700f 100644 (file)
@@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen)
 
 static int diag8_response(int cmdlen, char *response, int *rlen)
 {
+       unsigned long _cmdlen = cmdlen | 0x40000000L;
+       unsigned long _rlen = *rlen;
        register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
        register unsigned long reg3 asm ("3") = (addr_t) response;
-       register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
-       register unsigned long reg5 asm ("5") = *rlen;
+       register unsigned long reg4 asm ("4") = _cmdlen;
+       register unsigned long reg5 asm ("5") = _rlen;
 
        asm volatile(
                "       diag    %2,%0,0x8\n"
index 0dc4b258b98d51c1c80c2a1eefea64c46ea7e1da..db1bc00229caf20f04d8ee8275598536c390f70b 100644 (file)
@@ -79,6 +79,15 @@ static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
        return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
 }
 
+static bool in_mcck_stack(unsigned long sp, struct stack_info *info)
+{
+       unsigned long frame_size, top;
+
+       frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+       top = S390_lowcore.mcck_stack + frame_size;
+       return in_stack(sp, info, STACK_TYPE_MCCK, top - THREAD_SIZE, top);
+}
+
 static bool in_restart_stack(unsigned long sp, struct stack_info *info)
 {
        unsigned long frame_size, top;
@@ -108,7 +117,8 @@ int get_stack_info(unsigned long sp, struct task_struct *task,
        /* Check per-cpu stacks */
        if (!in_irq_stack(sp, info) &&
            !in_nodat_stack(sp, info) &&
-           !in_restart_stack(sp, info))
+           !in_restart_stack(sp, info) &&
+           !in_mcck_stack(sp, info))
                goto unknown;
 
 recursion_check:
index c10b9f31eef7993fde313814cb3d3b68db724a90..12de7a9c85b3583b98559b9d0b31377e05fb8c8b 100644 (file)
@@ -401,15 +401,13 @@ ENTRY(\name)
        brasl   %r14,.Lcleanup_sie_int
 #endif
 0:     CHECK_STACK __LC_SAVE_AREA_ASYNC
-       lgr     %r11,%r15
        aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-       stg     %r11,__SF_BACKCHAIN(%r15)
        j       2f
 1:     BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
        lctlg   %c1,%c1,__LC_KERNEL_ASCE
        lg      %r15,__LC_KERNEL_STACK
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-2:     la      %r11,STACK_FRAME_OVERHEAD(%r15)
+2:     xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       la      %r11,STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r7,__PT_R0(%r11)
        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
@@ -445,6 +443,7 @@ INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
  * Load idle PSW.
  */
 ENTRY(psw_idle)
+       stg     %r14,(__SF_GPRS+8*8)(%r15)
        stg     %r3,__SF_EMPTY(%r15)
        larl    %r1,psw_idle_exit
        stg     %r1,__SF_EMPTY+8(%r15)
index 601c2179133847d76db62b4f283522b1d76124e6..714269e10eec54ff8e47c5a9ea550931d6eb2882 100644 (file)
@@ -174,7 +174,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
 
        memcpy(&regs->int_code, &S390_lowcore.ext_cpu_addr, 4);
        regs->int_parm = S390_lowcore.ext_params;
-       regs->int_parm_long = *(unsigned long *)S390_lowcore.ext_params2;
+       regs->int_parm_long = S390_lowcore.ext_params2;
 
        from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit;
        if (from_idle)
index 60da976eee6ff4b61d7a7ddf977732035a5fba42..72134f9f6ff5274c90bffe5e37ff146ac00b46e5 100644 (file)
@@ -354,7 +354,7 @@ static int __init stack_realloc(void)
        if (!new)
                panic("Couldn't allocate machine check stack");
        WRITE_ONCE(S390_lowcore.mcck_stack, new + STACK_INIT_OFFSET);
-       memblock_free(old, THREAD_SIZE);
+       memblock_free_late(old, THREAD_SIZE);
        return 0;
 }
 early_initcall(stack_realloc);
index 7f1266c24f6bc1af2a4351b8f1210ec5f8b05128..101477b3e26339d59428330da708fc06b03c1c4b 100644 (file)
@@ -24,12 +24,6 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
        }
 }
 
-/*
- * This function returns an error if it detects any unreliable features of the
- * stack.  Otherwise it guarantees that the stack trace is reliable.
- *
- * If the task is not 'current', the caller *must* ensure the task is inactive.
- */
 int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
                             void *cookie, struct task_struct *task)
 {
index 2792879d398ee41fa0d4858815b33e18664297ae..f3db131be563f2235e7dd77f3c68d4c75fffc920 100644 (file)
@@ -165,6 +165,7 @@ config X86
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
        select HAVE_ARCH_USERFAULTFD_WP         if X86_64 && USERFAULTFD
        select HAVE_ARCH_VMAP_STACK             if X86_64
+       select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
        select HAVE_ARCH_WITHIN_STACK_FRAMES
        select HAVE_ASM_MODVERSIONS
        select HAVE_CMPXCHG_DOUBLE
@@ -571,6 +572,7 @@ config X86_UV
        depends on X86_EXTENDED_PLATFORM
        depends on NUMA
        depends on EFI
+       depends on KEXEC_CORE
        depends on X86_X2APIC
        depends on PCI
        help
@@ -777,6 +779,7 @@ if HYPERVISOR_GUEST
 
 config PARAVIRT
        bool "Enable paravirtualization code"
+       depends on HAVE_STATIC_CALL
        help
          This changes the kernel so it can modify itself when it is run
          under a hypervisor, potentially improving performance significantly
@@ -1406,7 +1409,7 @@ config HIGHMEM4G
 
 config HIGHMEM64G
        bool "64GB"
-       depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
+       depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
        select X86_PAE
        help
          Select this if you have a 32-bit processor and more than 4
@@ -1518,6 +1521,7 @@ config AMD_MEM_ENCRYPT
        select ARCH_USE_MEMREMAP_PROT
        select ARCH_HAS_FORCE_DMA_UNENCRYPTED
        select INSTRUCTION_DECODER
+       select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
        help
          Say yes to enable support for the encryption of system memory.
          This requires an AMD processor that supports Secure Memory
@@ -1931,6 +1935,7 @@ config X86_SGX
        depends on CRYPTO_SHA256=y
        select SRCU
        select MMU_NOTIFIER
+       select NUMA_KEEP_MEMINFO if NUMA
        help
          Intel(R) Software Guard eXtensions (SGX) is a set of CPU instructions
          that can be used by applications to set aside private regions of code
index 9a85eae37b17357e8d1b9ee5058b44e6bad2c09d..78faf9c7e3aed4825c4efcc00dce87495dde8fcc 100644 (file)
@@ -33,6 +33,7 @@ REALMODE_CFLAGS += -ffreestanding
 REALMODE_CFLAGS += -fno-stack-protector
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
+REALMODE_CFLAGS += $(CLANG_FLAGS)
 export REALMODE_CFLAGS
 
 # BITS is used as extension for files which are available in a 32 bit
index e0bc3988c3faa823e5d4be99abe327044035b8ef..6e5522aebbbd467e17df99a91513caff232056ad 100644 (file)
@@ -46,6 +46,7 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
 # Disable relocation relaxation in case the link is not PIE.
 KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
 KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
+KBUILD_CFLAGS += $(CLANG_FLAGS)
 
 # sev-es.c indirectly inludes inat-table.h which is generated during
 # compilation and stored in $(objtree). Add the directory to the includes so
index c4bb0f9363f5ee6494f86780d2a93ff82cc40fb3..95a223b3e56a2b6b6b113867e6d9bbe49c90e419 100644 (file)
@@ -5,7 +5,7 @@
  * Early support for invoking 32-bit EFI services from a 64-bit kernel.
  *
  * Because this thunking occurs before ExitBootServices() we have to
- * restore the firmware's 32-bit GDT before we make EFI serivce calls,
+ * restore the firmware's 32-bit GDT before we make EFI service calls,
  * since the firmware's 32-bit IDT is still currently installed and it
  * needs to be able to service interrupts.
  *
index e94874f4bbc1d2e563ba76fd74145a8e137b2fc7..a2347ded77ea25fce37f80b32eb743254a7a480e 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/bootparam.h>
 #include <asm/desc_defs.h>
+#include <asm/trapnr.h>
 #include "pgtable.h"
 
 /*
@@ -107,9 +108,19 @@ SYM_FUNC_START(startup_32)
        movl    %eax, %gs
        movl    %eax, %ss
 
-/* setup a stack and make sure cpu supports long mode. */
+       /* Setup a stack and load CS from current GDT */
        leal    rva(boot_stack_end)(%ebp), %esp
 
+       pushl   $__KERNEL32_CS
+       leal    rva(1f)(%ebp), %eax
+       pushl   %eax
+       lretl
+1:
+
+       /* Setup Exception handling for SEV-ES */
+       call    startup32_load_idt
+
+       /* Make sure cpu supports long mode. */
        call    verify_cpu
        testl   %eax, %eax
        jnz     .Lno_longmode
@@ -172,11 +183,21 @@ SYM_FUNC_START(startup_32)
         */
        call    get_sev_encryption_bit
        xorl    %edx, %edx
+#ifdef CONFIG_AMD_MEM_ENCRYPT
        testl   %eax, %eax
        jz      1f
        subl    $32, %eax       /* Encryption bit is always above bit 31 */
        bts     %eax, %edx      /* Set encryption mask for page tables */
+       /*
+        * Mark SEV as active in sev_status so that startup32_check_sev_cbit()
+        * will do a check. The sev_status memory will be fully initialized
+        * with the contents of MSR_AMD_SEV_STATUS later in
+        * set_sev_encryption_mask(). For now it is sufficient to know that SEV
+        * is active.
+        */
+       movl    $1, rva(sev_status)(%ebp)
 1:
+#endif
 
        /* Initialize Page tables to 0 */
        leal    rva(pgtable)(%ebx), %edi
@@ -231,7 +252,7 @@ SYM_FUNC_START(startup_32)
        /*
         * Setup for the jump to 64bit mode
         *
-        * When the jump is performend we will be in long mode but
+        * When the jump is performed we will be in long mode but
         * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
         * (and in turn EFER.LMA = 1).  To jump into 64bit mode we use
         * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
@@ -261,6 +282,9 @@ SYM_FUNC_START(startup_32)
        movl    %esi, %edx
 1:
 #endif
+       /* Check if the C-bit position is correct when SEV is active */
+       call    startup32_check_sev_cbit
+
        pushl   $__KERNEL_CS
        pushl   %eax
 
@@ -694,6 +718,19 @@ SYM_DATA_START(boot_idt)
        .endr
 SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+SYM_DATA_START(boot32_idt_desc)
+       .word   boot32_idt_end - boot32_idt - 1
+       .long   0
+SYM_DATA_END(boot32_idt_desc)
+       .balign 8
+SYM_DATA_START(boot32_idt)
+       .rept 32
+       .quad 0
+       .endr
+SYM_DATA_END_LABEL(boot32_idt, SYM_L_GLOBAL, boot32_idt_end)
+#endif
+
 #ifdef CONFIG_EFI_STUB
 SYM_DATA(image_offset, .long 0)
 #endif
@@ -786,6 +823,137 @@ SYM_DATA_START_LOCAL(loaded_image_proto)
 SYM_DATA_END(loaded_image_proto)
 #endif
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+       __HEAD
+       .code32
+/*
+ * Write an IDT entry into boot32_idt
+ *
+ * Parameters:
+ *
+ * %eax:       Handler address
+ * %edx:       Vector number
+ *
+ * Physical offset is expected in %ebp
+ */
+SYM_FUNC_START(startup32_set_idt_entry)
+       push    %ebx
+       push    %ecx
+
+       /* IDT entry address to %ebx */
+       leal    rva(boot32_idt)(%ebp), %ebx
+       shl     $3, %edx
+       addl    %edx, %ebx
+
+       /* Build IDT entry, lower 4 bytes */
+       movl    %eax, %edx
+       andl    $0x0000ffff, %edx       # Target code segment offset [15:0]
+       movl    $__KERNEL32_CS, %ecx    # Target code segment selector
+       shl     $16, %ecx
+       orl     %ecx, %edx
+
+       /* Store lower 4 bytes to IDT */
+       movl    %edx, (%ebx)
+
+       /* Build IDT entry, upper 4 bytes */
+       movl    %eax, %edx
+       andl    $0xffff0000, %edx       # Target code segment offset [31:16]
+       orl     $0x00008e00, %edx       # Present, Type 32-bit Interrupt Gate
+
+       /* Store upper 4 bytes to IDT */
+       movl    %edx, 4(%ebx)
+
+       pop     %ecx
+       pop     %ebx
+       ret
+SYM_FUNC_END(startup32_set_idt_entry)
+#endif
+
+SYM_FUNC_START(startup32_load_idt)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+       /* #VC handler */
+       leal    rva(startup32_vc_handler)(%ebp), %eax
+       movl    $X86_TRAP_VC, %edx
+       call    startup32_set_idt_entry
+
+       /* Load IDT */
+       leal    rva(boot32_idt)(%ebp), %eax
+       movl    %eax, rva(boot32_idt_desc+2)(%ebp)
+       lidt    rva(boot32_idt_desc)(%ebp)
+#endif
+       ret
+SYM_FUNC_END(startup32_load_idt)
+
+/*
+ * Check for the correct C-bit position when the startup_32 boot-path is used.
+ *
+ * The check makes use of the fact that all memory is encrypted when paging is
+ * disabled. The function creates 64 bits of random data using the RDRAND
+ * instruction. RDRAND is mandatory for SEV guests, so always available. If the
+ * hypervisor violates that the kernel will crash right here.
+ *
+ * The 64 bits of random data are stored to a memory location and at the same
+ * time kept in the %eax and %ebx registers. Since encryption is always active
+ * when paging is off the random data will be stored encrypted in main memory.
+ *
+ * Then paging is enabled. When the C-bit position is correct all memory is
+ * still mapped encrypted and comparing the register values with memory will
+ * succeed. An incorrect C-bit position will map all memory unencrypted, so that
+ * the compare will use the encrypted random data and fail.
+ */
+SYM_FUNC_START(startup32_check_sev_cbit)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+       pushl   %eax
+       pushl   %ebx
+       pushl   %ecx
+       pushl   %edx
+
+       /* Check for non-zero sev_status */
+       movl    rva(sev_status)(%ebp), %eax
+       testl   %eax, %eax
+       jz      4f
+
+       /*
+        * Get two 32-bit random values - Don't bail out if RDRAND fails
+        * because it is better to prevent forward progress if no random value
+        * can be gathered.
+        */
+1:     rdrand  %eax
+       jnc     1b
+2:     rdrand  %ebx
+       jnc     2b
+
+       /* Store to memory and keep it in the registers */
+       movl    %eax, rva(sev_check_data)(%ebp)
+       movl    %ebx, rva(sev_check_data+4)(%ebp)
+
+       /* Enable paging to see if encryption is active */
+       movl    %cr0, %edx                       /* Backup %cr0 in %edx */
+       movl    $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
+       movl    %ecx, %cr0
+
+       cmpl    %eax, rva(sev_check_data)(%ebp)
+       jne     3f
+       cmpl    %ebx, rva(sev_check_data+4)(%ebp)
+       jne     3f
+
+       movl    %edx, %cr0      /* Restore previous %cr0 */
+
+       jmp     4f
+
+3:     /* Check failed - hlt the machine */
+       hlt
+       jmp     3b
+
+4:
+       popl    %edx
+       popl    %ecx
+       popl    %ebx
+       popl    %eax
+#endif
+       ret
+SYM_FUNC_END(startup32_check_sev_cbit)
+
 /*
  * Stack and heap for uncompression
  */
index 804a502ee0d28bae40d754a00d282a5aa8ffeb51..9b93567d663a9003d25a8d3f9a3ff6029da28458 100644 (file)
@@ -52,3 +52,17 @@ void load_stage2_idt(void)
 
        load_boot_idt(&boot_idt_desc);
 }
+
+void cleanup_exception_handling(void)
+{
+       /*
+        * Flush GHCB from cache and map it encrypted again when running as
+        * SEV-ES guest.
+        */
+       sev_es_shutdown_ghcb();
+
+       /* Set a null-idt, disabling #PF and #VC handling */
+       boot_idt_desc.size    = 0;
+       boot_idt_desc.address = 0;
+       load_boot_idt(&boot_idt_desc);
+}
index b92fffbe761fd5d38c86f68770bb71152c62d8c3..e36690778497f3ac02e4c7b0b3a1f3bcb7d4ec21 100644 (file)
@@ -639,9 +639,9 @@ static bool process_mem_region(struct mem_vector *region,
 
                if (slot_area_index == MAX_SLOT_AREA) {
                        debug_putstr("Aborted e820/efi memmap scan (slot_areas full)!\n");
-                       return 1;
+                       return true;
                }
-               return 0;
+               return false;
        }
 
 #if defined(CONFIG_MEMORY_HOTREMOVE) && defined(CONFIG_ACPI)
index aa561795efd16182fa185964045ec40571d0b71b..c1e81a848b2a50b7f1f1635e96119c312affab37 100644 (file)
@@ -23,12 +23,6 @@ SYM_FUNC_START(get_sev_encryption_bit)
        push    %ecx
        push    %edx
 
-       /* Check if running under a hypervisor */
-       movl    $1, %eax
-       cpuid
-       bt      $31, %ecx               /* Check the hypervisor bit */
-       jnc     .Lno_sev
-
        movl    $0x80000000, %eax       /* CPUID to check the highest leaf */
        cpuid
        cmpl    $0x8000001f, %eax       /* See if 0x8000001f is available */
@@ -67,10 +61,132 @@ SYM_FUNC_START(get_sev_encryption_bit)
        ret
 SYM_FUNC_END(get_sev_encryption_bit)
 
+/**
+ * sev_es_req_cpuid - Request a CPUID value from the Hypervisor using
+ *                   the GHCB MSR protocol
+ *
+ * @%eax:      Register to request (0=EAX, 1=EBX, 2=ECX, 3=EDX)
+ * @%edx:      CPUID Function
+ *
+ * Returns 0 in %eax on success, non-zero on failure
+ * %edx returns CPUID value on success
+ */
+SYM_CODE_START_LOCAL(sev_es_req_cpuid)
+       shll    $30, %eax
+       orl     $0x00000004, %eax
+       movl    $MSR_AMD64_SEV_ES_GHCB, %ecx
+       wrmsr
+       rep; vmmcall            # VMGEXIT
+       rdmsr
+
+       /* Check response */
+       movl    %eax, %ecx
+       andl    $0x3ffff000, %ecx       # Bits [12-29] MBZ
+       jnz     2f
+
+       /* Check return code */
+       andl    $0xfff, %eax
+       cmpl    $5, %eax
+       jne     2f
+
+       /* All good - return success */
+       xorl    %eax, %eax
+1:
+       ret
+2:
+       movl    $-1, %eax
+       jmp     1b
+SYM_CODE_END(sev_es_req_cpuid)
+
+SYM_CODE_START(startup32_vc_handler)
+       pushl   %eax
+       pushl   %ebx
+       pushl   %ecx
+       pushl   %edx
+
+       /* Keep CPUID function in %ebx */
+       movl    %eax, %ebx
+
+       /* Check if error-code == SVM_EXIT_CPUID */
+       cmpl    $0x72, 16(%esp)
+       jne     .Lfail
+
+       movl    $0, %eax                # Request CPUID[fn].EAX
+       movl    %ebx, %edx              # CPUID fn
+       call    sev_es_req_cpuid        # Call helper
+       testl   %eax, %eax              # Check return code
+       jnz     .Lfail
+       movl    %edx, 12(%esp)          # Store result
+
+       movl    $1, %eax                # Request CPUID[fn].EBX
+       movl    %ebx, %edx              # CPUID fn
+       call    sev_es_req_cpuid        # Call helper
+       testl   %eax, %eax              # Check return code
+       jnz     .Lfail
+       movl    %edx, 8(%esp)           # Store result
+
+       movl    $2, %eax                # Request CPUID[fn].ECX
+       movl    %ebx, %edx              # CPUID fn
+       call    sev_es_req_cpuid        # Call helper
+       testl   %eax, %eax              # Check return code
+       jnz     .Lfail
+       movl    %edx, 4(%esp)           # Store result
+
+       movl    $3, %eax                # Request CPUID[fn].EDX
+       movl    %ebx, %edx              # CPUID fn
+       call    sev_es_req_cpuid        # Call helper
+       testl   %eax, %eax              # Check return code
+       jnz     .Lfail
+       movl    %edx, 0(%esp)           # Store result
+
+       /*
+        * Sanity check CPUID results from the Hypervisor. See comment in
+        * do_vc_no_ghcb() for more details on why this is necessary.
+        */
+
+       /* Fail if SEV leaf not available in CPUID[0x80000000].EAX */
+       cmpl    $0x80000000, %ebx
+       jne     .Lcheck_sev
+       cmpl    $0x8000001f, 12(%esp)
+       jb      .Lfail
+       jmp     .Ldone
+
+.Lcheck_sev:
+       /* Fail if SEV bit not set in CPUID[0x8000001f].EAX[1] */
+       cmpl    $0x8000001f, %ebx
+       jne     .Ldone
+       btl     $1, 12(%esp)
+       jnc     .Lfail
+
+.Ldone:
+       popl    %edx
+       popl    %ecx
+       popl    %ebx
+       popl    %eax
+
+       /* Remove error code */
+       addl    $4, %esp
+
+       /* Jump over CPUID instruction */
+       addl    $2, (%esp)
+
+       iret
+.Lfail:
+       /* Send terminate request to Hypervisor */
+       movl    $0x100, %eax
+       xorl    %edx, %edx
+       movl    $MSR_AMD64_SEV_ES_GHCB, %ecx
+       wrmsr
+       rep; vmmcall
+
+       /* If request fails, go to hlt loop */
+       hlt
+       jmp .Lfail
+SYM_CODE_END(startup32_vc_handler)
+
        .code64
 
 #include "../../kernel/sev_verify_cbit.S"
-
 SYM_FUNC_START(set_sev_encryption_mask)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
        push    %rbp
index 267e7f93050ef322cec5e29a742d685c6661b75c..dde042f64ccaa4b41fe983d4d6ad20416392a731 100644 (file)
@@ -430,8 +430,6 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
                error("Destination address too large");
 #endif
 #ifndef CONFIG_RELOCATABLE
-       if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
-               error("Destination address does not match LOAD_PHYSICAL_ADDR");
        if (virt_addr != LOAD_PHYSICAL_ADDR)
                error("Destination virtual address changed when not relocatable");
 #endif
@@ -443,11 +441,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
        handle_relocations(output, output_len, virt_addr);
        debug_putstr("done.\nBooting the kernel.\n");
 
-       /*
-        * Flush GHCB from cache and map it encrypted again when running as
-        * SEV-ES guest.
-        */
-       sev_es_shutdown_ghcb();
+       /* Disable exception handling before booting the kernel */
+       cleanup_exception_handling();
 
        return output;
 }
index 901ea5ebec22a7b9e599b2e2fee990447993544e..e5612f035498c15d2762911807ad4c518638e390 100644 (file)
@@ -155,6 +155,12 @@ extern pteval_t __default_kernel_pte_mask;
 extern gate_desc boot_idt[BOOT_IDT_ENTRIES];
 extern struct desc_ptr boot_idt_desc;
 
+#ifdef CONFIG_X86_64
+void cleanup_exception_handling(void);
+#else
+static inline void cleanup_exception_handling(void) { }
+#endif
+
 /* IDT Entry Points */
 void boot_page_fault(void);
 void boot_stage1_vc(void);
index 27826c265aab484770a3b82b7cde24116cb66d7b..d904bd56b3e33778c92b445c3fed12e148fc4ae5 100644 (file)
@@ -200,14 +200,8 @@ void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
        }
 
 finish:
-       if (result == ES_OK) {
+       if (result == ES_OK)
                vc_finish_insn(&ctxt);
-       } else if (result != ES_RETRY) {
-               /*
-                * For now, just halt the machine. That makes debugging easier,
-                * later we just call sev_es_terminate() here.
-                */
-               while (true)
-                       asm volatile("hlt\n");
-       }
+       else if (result != ES_RETRY)
+               sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
 }
index 7c4c7b2fbf0599ce3dd85c497946f7c13489adfd..98cf3b4e4c9fecfe8b2a479640f9f84a8215a046 100644 (file)
@@ -24,7 +24,7 @@
 /*
  * Copyright 2012 Xyratex Technology Limited
  *
- * Wrappers for kernel crypto shash api to pclmulqdq crc32 imlementation.
+ * Wrappers for kernel crypto shash api to pclmulqdq crc32 implementation.
  */
 #include <linux/init.h>
 #include <linux/module.h>
index 5af8021b98cea4e8d10b7b1ebc5a264c89ab7bd4..6706b6cb1d0fc133c614431a0ff8cea4b83b836a 100644 (file)
@@ -114,11 +114,11 @@ static inline void fadd(u64 *out, const u64 *f1, const u64 *f2)
        );
 }
 
-/* Computes the field substraction of two field elements */
+/* Computes the field subtraction of two field elements */
 static inline void fsub(u64 *out, const u64 *f1, const u64 *f2)
 {
        asm volatile(
-               /* Compute the raw substraction of f1-f2 */
+               /* Compute the raw subtraction of f1-f2 */
                "  movq 0(%1), %%r8;"
                "  subq 0(%2), %%r8;"
                "  movq 8(%1), %%r9;"
@@ -135,7 +135,7 @@ static inline void fsub(u64 *out, const u64 *f1, const u64 *f2)
                "  mov $38, %%rcx;"
                "  cmovc %%rcx, %%rax;"
 
-               /* Step 2: Substract carry*38 from the original difference */
+               /* Step 2: Subtract carry*38 from the original difference */
                "  sub %%rax, %%r8;"
                "  sbb $0, %%r9;"
                "  sbb $0, %%r10;"
index 646da46e8d1042c2ee426185bbc346b5a328cd27..1dfb8af48a3caa7ae7a406b8dbf30ccabc840a20 100644 (file)
@@ -16,7 +16,7 @@
 #include <asm/simd.h>
 
 asmlinkage void poly1305_init_x86_64(void *ctx,
-                                    const u8 key[POLY1305_KEY_SIZE]);
+                                    const u8 key[POLY1305_BLOCK_SIZE]);
 asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
                                       const size_t len, const u32 padbit);
 asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
@@ -81,7 +81,7 @@ static void convert_to_base2_64(void *ctx)
        state->is_base2_26 = 0;
 }
 
-static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
+static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
 {
        poly1305_init_x86_64(ctx, key);
 }
@@ -129,7 +129,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
                poly1305_emit_avx(ctx, mac, nonce);
 }
 
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
 {
        poly1305_simd_init(&dctx->h, key);
        dctx->s[0] = get_unaligned_le32(&key[16]);
index fc23552afe37d767ba484155bf99dd843ed7e9b1..bca4cea757ce245b7044a86924263c90c50206f4 100644 (file)
@@ -88,7 +88,7 @@
 
 /*
  * Combined G1 & G2 function. Reordered with help of rotates to have moves
- * at begining.
+ * at beginning.
  */
 #define g1g2_3(ab, cd, Tx0, Tx1, Tx2, Tx3, Ty0, Ty1, Ty2, Ty3, x, y) \
        /* G1,1 && G2,1 */ \
index 03725696397c8e32b80f4735900c5a0355a5bb21..3507cf2064f107bd2363aabe56c17210db41d9eb 100644 (file)
@@ -117,7 +117,7 @@ static bool is_blacklisted_cpu(void)
                 * storing blocks in 64bit registers to allow three blocks to
                 * be processed parallel. Parallel operation then allows gaining
                 * more performance than was trade off, on out-of-order CPUs.
-                * However Atom does not benefit from this parallellism and
+                * However Atom does not benefit from this parallelism and
                 * should be blacklisted.
                 */
                return true;
index 4efd39aacb9f28366f3bbafb17f59a3ec181cbed..7b2542b13ebd9683748677df37604adb911c4b40 100644 (file)
@@ -38,6 +38,7 @@
 #ifdef CONFIG_X86_64
 __visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
 {
+       add_random_kstack_offset();
        nr = syscall_enter_from_user_mode(regs, nr);
 
        instrumentation_begin();
@@ -83,6 +84,7 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
 {
        unsigned int nr = syscall_32_enter(regs);
 
+       add_random_kstack_offset();
        /*
         * Subtlety here: if ptrace pokes something larger than 2^32-1 into
         * orig_ax, the unsigned int return value truncates it.  This may
@@ -102,6 +104,7 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
        unsigned int nr = syscall_32_enter(regs);
        int res;
 
+       add_random_kstack_offset();
        /*
         * This cannot use syscall_enter_from_user_mode() as it has to
         * fetch EBP before invoking any of the syscall entry work
index df8c017e616119850d7df71a72a7021bdf715241..ff00347409007193d6b11590ae210dfa43dbcdaf 100644 (file)
@@ -40,7 +40,7 @@
 #include <asm/processor-flags.h>
 #include <asm/irq_vectors.h>
 #include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
 #include <asm/frame.h>
         *
         * Lets build a 5 entry IRET frame after that, such that struct pt_regs
         * is complete and in particular regs->sp is correct. This gives us
-        * the original 6 enties as gap:
+        * the original 6 entries as gap:
         *
         * 14*4(%esp) - <previous context>
         * 13*4(%esp) - gap / flags
         * will soon execute iret and the tracer was already set to
         * the irqstate after the IRET:
         */
-       DISABLE_INTERRUPTS(CLBR_ANY)
+       cli
        lss     (%esp), %esp                    /* switch to espfix segment */
 .Lend_\@:
 #endif /* CONFIG_X86_ESPFIX32 */
@@ -1077,7 +1077,7 @@ restore_all_switch_stack:
         * when returning from IPI handler and when returning from
         * scheduler to user-space.
         */
-       INTERRUPT_RETURN
+       iret
 
 .section .fixup, "ax"
 SYM_CODE_START(asm_iret_error)
index 400908dff42ebfebf0f0dda3063b9b41ff7ffd2c..a16a5294d55f655a70566a1dc4f92b1fa2574798 100644 (file)
@@ -305,7 +305,7 @@ SYM_CODE_END(ret_from_fork)
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
        pushq %rax
-       SAVE_FLAGS(CLBR_RAX)
+       SAVE_FLAGS
        testl $X86_EFLAGS_IF, %eax
        jz .Lokay_\@
        ud2
@@ -511,7 +511,7 @@ SYM_CODE_START(\asmsym)
        /*
         * No need to switch back to the IST stack. The current stack is either
         * identical to the stack in the IRET frame or the VC fall-back stack,
-        * so it is definitly mapped even with PTI enabled.
+        * so it is definitely mapped even with PTI enabled.
         */
        jmp     paranoid_exit
 
index 2d0f3d8bcc257cc421a4df652e6909d6f2f44c11..edfe9780f6d1962b0d57c91475b025d0cc024936 100644 (file)
@@ -218,7 +218,7 @@ int main(int argc, char **argv)
 
        /*
         * Figure out the struct name.  If we're writing to a .so file,
-        * generate raw output insted.
+        * generate raw output instead.
         */
        name = strdup(argv[3]);
        namelen = strlen(name);
index 1c7cfac7e64ace62a4db3fb148baec1ee9e0a592..5264daa8859f5be103a8ad61d4f675631c9ae791 100644 (file)
@@ -35,7 +35,7 @@ static void BITSFUNC(extract)(const unsigned char *data, size_t data_len,
        if (offset + len > data_len)
                fail("section to extract overruns input data");
 
-       fprintf(outfile, "static const unsigned char %s[%lu] = {", name, len);
+       fprintf(outfile, "static const unsigned char %s[%zu] = {", name, len);
        BITSFUNC(copy)(outfile, data + offset, len);
        fprintf(outfile, "\n};\n\n");
 }
index de1fff7188aad1a5074f0823372ac97fb9f54b14..6ddd7a937b3e3001c0d7c3736736dcb90d5c610d 100644 (file)
@@ -6,7 +6,7 @@
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
 #include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 
        .text
        .globl __kernel_vsyscall
@@ -29,7 +29,7 @@ __kernel_vsyscall:
         * anyone with an AMD CPU, for example).  Nonetheless, we try to keep
         * it working approximately as well as it ever worked.
         *
-        * This link may eludicate some of the history:
+        * This link may elucidate some of the history:
         *   https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7
         * personally, I find it hard to understand what's going on there.
         *
index 825e829ffff19e245adde1855af2ffc675a12219..235a5794296acbef611d65d2de8cbec7de9f4ed3 100644 (file)
@@ -358,7 +358,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
        mmap_write_lock(mm);
        /*
         * Check if we have already mapped vdso blob - fail to prevent
-        * abusing from userspace install_speciall_mapping, which may
+        * abusing from userspace install_special_mapping, which may
         * not do accounting and rlimit right.
         * We could search vma near context.vdso, but it's a slowpath,
         * so let's explicitly check all VMAs to be completely sure.
index 86a0e94f68dff84f3bbe139300fa0d55df350e75..99dafac992e2cecc3a9cb9c311e012afc971ffd5 100644 (file)
@@ -137,7 +137,7 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave)
 
        /*
         * If the return from callback is zero or negative, return immediately,
-        * else re-execute ENCLU with the postive return value interpreted as
+        * else re-execute ENCLU with the positive return value interpreted as
         * the requested ENCLU function.
         */
        cmp     $0, %eax
index 2c1791c4a518f2ad70bec87d1000445ff7e08cf1..9687a8aef01c5d48533d020aba872fe58f6b41f8 100644 (file)
@@ -623,7 +623,7 @@ static void amd_pmu_disable_all(void)
        /*
         * Check each counter for overflow and wait for it to be reset by the
         * NMI if it has overflowed. This relies on the fact that all active
-        * counters are always enabled when this function is caled and
+        * counters are always enabled when this function is called and
         * ARCH_PERFMON_EVENTSEL_INT is always set.
         */
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
index 0e5c036fd7bea1b2b0801b7bcc7122670095463f..e6493a67f1c6b3615c775c9a706955e81cf4b11b 100644 (file)
@@ -17,7 +17,7 @@
 #define IOMMU_PC_DEVID_MATCH_REG               0x20
 #define IOMMU_PC_COUNTER_REPORT_REG            0x28
 
-/* maximun specified bank/counters */
+/* maximum specified bank/counters */
 #define PC_MAX_SPEC_BNKS                       64
 #define PC_MAX_SPEC_CNTRS                      16
 
index 18df1712969554ce3d58b6e65c311cfc283bfa00..4c31cae4707ee6c0bf8519fee7d8b1048d473a9f 100644 (file)
@@ -765,7 +765,7 @@ struct perf_sched {
 };
 
 /*
- * Initialize interator that runs through all events and counters.
+ * Initialize iterator that runs through all events and counters.
  */
 static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
                            int num, int wmin, int wmax, int gpmax)
index 731dd8d0dbb11cc538899b181e32c92aa916900e..6320d2cfd9d3e12617babf3b2fa06906166df87a 100644 (file)
@@ -594,7 +594,7 @@ static __init int bts_init(void)
                 * we cannot use the user mapping since it will not be available
                 * if we're not running the owning process.
                 *
-                * With PTI we can't use the kernal map either, because its not
+                * With PTI we can't use the kernel map either, because its not
                 * there when we run userspace.
                 *
                 * For now, disable this driver when using PTI.
index 37ce38403cb8d8bd2b741337d0badd0ebaaf4542..3fd69bd5fa6ea00040335397a572c71a55772688 100644 (file)
@@ -137,7 +137,7 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
        INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
-       INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMTPY */
+       INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
        INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
        INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
@@ -2186,7 +2186,7 @@ static void intel_pmu_enable_all(int added)
  *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
  *   in sequence on the same PMC or on different PMCs.
  *
- * In practise it appears some of these events do in fact count, and
+ * In practice it appears some of these events do in fact count, and
  * we need to program all 4 events.
  */
 static void intel_pmu_nhm_workaround(void)
@@ -2435,7 +2435,7 @@ static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
 
        /*
         * The metric is reported as an 8bit integer fraction
-        * suming up to 0xff.
+        * summing up to 0xff.
         * slots-in-metric = (Metric / 0xff) * slots
         */
        val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
@@ -2776,7 +2776,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
         * processing loop coming after that the function, otherwise
         * phony regular samples may be generated in the sampling buffer
         * not marked with the EXACT tag. Another possibility is to have
-        * one PEBS event and at least one non-PEBS event whic hoverflows
+        * one PEBS event and at least one non-PEBS event which overflows
         * while PEBS has armed. In this case, bit 62 of GLOBAL_STATUS will
         * not be set, yet the overflow status bit for the PEBS counter will
         * be on Skylake.
@@ -2824,7 +2824,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
        }
 
        /*
-        * Intel Perf mertrics
+        * Intel Perf metrics
         */
        if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
                handled++;
@@ -4516,7 +4516,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,           3, 0x07000009),
        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,           4, 0x0f000009),
        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,           5, 0x0e000002),
-       INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X,           2, 0x0b000014),
+       INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X,           1, 0x0b000014),
        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             3, 0x00000021),
        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             4, 0x00000000),
        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             5, 0x00000000),
@@ -4594,7 +4594,7 @@ static bool check_msr(unsigned long msr, u64 mask)
 
        /*
         * Disable the check for real HW, so we don't
-        * mess with potentionaly enabled registers:
+        * mess with potentially enabled registers:
         */
        if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return true;
@@ -4659,7 +4659,7 @@ static __init void intel_arch_events_quirk(void)
 {
        int bit;
 
-       /* disable event that reported as not presend by cpuid */
+       /* disable event that reported as not present by cpuid */
        for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
                intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
                pr_warn("CPUID marked event: \'%s\' unavailable\n",
index d32b302719fe56dc7302fa49049bca7b2e06ae6e..5aabb0e2964a87a9794616ea0342d6682463656e 100644 (file)
@@ -1805,7 +1805,7 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
         *
         *   [-period, 0]
         *
-        * the difference between two consequtive reads is:
+        * the difference between two consecutive reads is:
         *
         *   A) value2 - value1;
         *      when no overflows have happened in between,
index 21890dacfcfee5f7406e5ac5b72980188ccb3997..acb04ef3da3f12cc12c47eed65bf8e2db9c45a7e 100644 (file)
@@ -1198,7 +1198,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
                /*
                 * The LBR logs any address in the IP, even if the IP just
                 * faulted. This means userspace can control the from address.
-                * Ensure we don't blindy read any address by validating it is
+                * Ensure we don't blindly read any address by validating it is
                 * a known text address.
                 */
                if (kernel_text_address(from)) {
index a4cc66005ce85004174f076150b7d7aa34ae5b62..971dffe0b77d475511f8416d607c86b6d4ae5be5 100644 (file)
@@ -24,7 +24,7 @@ struct p4_event_bind {
        unsigned int escr_msr[2];               /* ESCR MSR for this event */
        unsigned int escr_emask;                /* valid ESCR EventMask bits */
        unsigned int shared;                    /* event is shared across threads */
-       char cntr[2][P4_CNTR_LIMIT];            /* counter index (offset), -1 on abscence */
+       char cntr[2][P4_CNTR_LIMIT];            /* counter index (offset), -1 on absence */
 };
 
 struct p4_pebs_bind {
@@ -45,7 +45,7 @@ struct p4_pebs_bind {
  * it's needed for mapping P4_PEBS_CONFIG_METRIC_MASK bits of
  * event configuration to find out which values are to be
  * written into MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT
- * resgisters
+ * registers
  */
 static struct p4_pebs_bind p4_pebs_bind_map[] = {
        P4_GEN_PEBS_BIND(1stl_cache_load_miss_retired,  0x0000001, 0x0000001),
@@ -1313,7 +1313,7 @@ static __initconst const struct x86_pmu p4_pmu = {
        .get_event_constraints  = x86_get_event_constraints,
        /*
         * IF HT disabled we may need to use all
-        * ARCH_P4_MAX_CCCR counters simulaneously
+        * ARCH_P4_MAX_CCCR counters simultaneously
         * though leave it restricted at moment assuming
         * HT is on
         */
index e94af4a54d0d89fb385a61472d3c91708101fd1f..915847655c0652820ef9083faae49d63e85007d1 100644 (file)
@@ -362,7 +362,7 @@ static bool pt_event_valid(struct perf_event *event)
 
        /*
         * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config
-        * clears the assomption that BranchEn must always be enabled,
+        * clears the assumption that BranchEn must always be enabled,
         * as was the case with the first implementation of PT.
         * If this bit is not set, the legacy behavior is preserved
         * for compatibility with the older userspace.
index b79951d0707c237b815b4c9d78755c891dbe3e43..4bba0491068c869059ac234fb709599789d4cbe8 100644 (file)
  * | [63]  |    00h    | VALID - When set, indicates the CPU bus
  *                       numbers have been initialized. (RO)
  * |[62:48]|    ---    | Reserved
- * |[47:40]|    00h    | BUS_NUM_5  Return the bus number BIOS assigned
+ * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
  *                       CPUBUSNO(5). (RO)
- * |[39:32]|    00h    | BUS_NUM_4  Return the bus number BIOS assigned
+ * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
  *                       CPUBUSNO(4). (RO)
- * |[31:24]|    00h    | BUS_NUM_3  Return the bus number BIOS assigned
+ * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
  *                       CPUBUSNO(3). (RO)
- * |[23:16]|    00h    | BUS_NUM_2  Return the bus number BIOS assigned
+ * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
  *                       CPUBUSNO(2). (RO)
- * |[15:8] |    00h    | BUS_NUM_1  Return the bus number BIOS assigned
+ * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
  *                       CPUBUSNO(1). (RO)
- * | [7:0] |    00h    | BUS_NUM_0  Return the bus number BIOS assigned
+ * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
  *                       CPUBUSNO(0). (RO)
  */
 #define SKX_MSR_CPU_BUS_NUMBER         0x300
@@ -1159,7 +1159,6 @@ enum {
        SNBEP_PCI_QPI_PORT0_FILTER,
        SNBEP_PCI_QPI_PORT1_FILTER,
        BDX_PCI_QPI_PORT2_FILTER,
-       HSWEP_PCI_PCU_3,
 };
 
 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@@ -2857,22 +2856,33 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
        NULL,
 };
 
-void hswep_uncore_cpu_init(void)
+#define HSWEP_PCU_DID                  0x2fc0
+#define HSWEP_PCU_CAPID4_OFFET         0x94
+#define hswep_get_chop(_cap)           (((_cap) >> 6) & 0x3)
+
+static bool hswep_has_limit_sbox(unsigned int device)
 {
-       int pkg = boot_cpu_data.logical_proc_id;
+       struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+       u32 capid4;
+
+       if (!dev)
+               return false;
+
+       pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
+       if (!hswep_get_chop(capid4))
+               return true;
 
+       return false;
+}
+
+void hswep_uncore_cpu_init(void)
+{
        if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
 
        /* Detect 6-8 core systems with only two SBOXes */
-       if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
-               u32 capid4;
-
-               pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
-                                     0x94, &capid4);
-               if (((capid4 >> 6) & 0x3) == 0)
-                       hswep_uncore_sbox.num_boxes = 2;
-       }
+       if (hswep_has_limit_sbox(HSWEP_PCU_DID))
+               hswep_uncore_sbox.num_boxes = 2;
 
        uncore_msr_uncores = hswep_msr_uncores;
 }
@@ -3135,11 +3145,6 @@ static const struct pci_device_id hswep_uncore_pci_ids[] = {
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
-       { /* PCU.3 (for Capability registers) */
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
-               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
-                                                  HSWEP_PCI_PCU_3),
-       },
        { /* end: all zeroes */ }
 };
 
@@ -3231,27 +3236,18 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
        EVENT_CONSTRAINT_END
 };
 
+#define BDX_PCU_DID                    0x6fc0
+
 void bdx_uncore_cpu_init(void)
 {
-       int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
-
        if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
        uncore_msr_uncores = bdx_msr_uncores;
 
-       /* BDX-DE doesn't have SBOX */
-       if (boot_cpu_data.x86_model == 86) {
-               uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
        /* Detect systems with no SBOXes */
-       } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
-               struct pci_dev *pdev;
-               u32 capid4;
-
-               pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
-               pci_read_config_dword(pdev, 0x94, &capid4);
-               if (((capid4 >> 6) & 0x3) == 0)
-                       bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
-       }
+       if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
+               uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
+
        hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
 }
 
@@ -3472,11 +3468,6 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   BDX_PCI_QPI_PORT2_FILTER),
        },
-       { /* PCU.3 (for Capability registers) */
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
-               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
-                                                  HSWEP_PCI_PCU_3),
-       },
        { /* end: all zeroes */ }
 };
 
index e68827e604ad1bb82af185e2e8db6e8ce77024c4..949d845c922b48282d0414462b47159fca57206d 100644 (file)
@@ -494,7 +494,7 @@ static __init void zhaoxin_arch_events_quirk(void)
 {
        int bit;
 
-       /* disable event that reported as not presend by cpuid */
+       /* disable event that reported as not present by cpuid */
        for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(zx_arch_events_map)) {
                zx_pmon_event_map[zx_arch_events_map[bit].id] = 0;
                pr_warn("CPUID marked event: \'%s\' unavailable\n",
index 284e73661a18bde5d17ba6b03efd78d9be1cd309..90e682a92820dea099bc5cbb75a703581d828c0e 100644 (file)
@@ -60,9 +60,11 @@ static u32 hv_apic_read(u32 reg)
        switch (reg) {
        case APIC_EOI:
                rdmsr(HV_X64_MSR_EOI, reg_val, hi);
+               (void)hi;
                return reg_val;
        case APIC_TASKPRI:
                rdmsr(HV_X64_MSR_TPR, reg_val, hi);
+               (void)hi;
                return reg_val;
 
        default:
@@ -103,7 +105,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
        struct hv_send_ipi_ex *ipi_arg;
        unsigned long flags;
        int nr_bank = 0;
-       int ret = 1;
+       u64 status = HV_STATUS_INVALID_PARAMETER;
 
        if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
                return false;
@@ -128,19 +130,19 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
        if (!nr_bank)
                ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
 
-       ret = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
+       status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
                              ipi_arg, NULL);
 
 ipi_mask_ex_done:
        local_irq_restore(flags);
-       return ((ret == 0) ? true : false);
+       return hv_result_success(status);
 }
 
 static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 {
        int cur_cpu, vcpu;
        struct hv_send_ipi ipi_arg;
-       int ret = 1;
+       u64 status;
 
        trace_hyperv_send_ipi_mask(mask, vector);
 
@@ -184,9 +186,9 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
                __set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
        }
 
-       ret = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
+       status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
                                     ipi_arg.cpu_mask);
-       return ((ret == 0) ? true : false);
+       return hv_result_success(status);
 
 do_ex_hypercall:
        return __send_ipi_mask_ex(mask, vector);
@@ -195,6 +197,7 @@ do_ex_hypercall:
 static bool __send_ipi_one(int cpu, int vector)
 {
        int vp = hv_cpu_number_to_vp_number(cpu);
+       u64 status;
 
        trace_hyperv_send_ipi_one(cpu, vector);
 
@@ -207,7 +210,8 @@ static bool __send_ipi_one(int cpu, int vector)
        if (vp >= 64)
                return __send_ipi_mask_ex(cpumask_of(cpu), vector);
 
-       return !hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
+       status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
+       return hv_result_success(status);
 }
 
 static void hv_send_ipi(int cpu, int vector)
index b81047dec1da323860eb3fc70cb935335d3d7939..bb0ae4b5c00f1bf6654142ddbb69f870610a240c 100644 (file)
@@ -54,28 +54,6 @@ EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg);
 u32 hv_max_vp_index;
 EXPORT_SYMBOL_GPL(hv_max_vp_index);
 
-void *hv_alloc_hyperv_page(void)
-{
-       BUILD_BUG_ON(PAGE_SIZE != HV_HYP_PAGE_SIZE);
-
-       return (void *)__get_free_page(GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page);
-
-void *hv_alloc_hyperv_zeroed_page(void)
-{
-        BUILD_BUG_ON(PAGE_SIZE != HV_HYP_PAGE_SIZE);
-
-        return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-}
-EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);
-
-void hv_free_hyperv_page(unsigned long addr)
-{
-       free_page(addr);
-}
-EXPORT_SYMBOL_GPL(hv_free_hyperv_page);
-
 static int hv_cpu_init(unsigned int cpu)
 {
        u64 msr_vp_index;
@@ -97,7 +75,7 @@ static int hv_cpu_init(unsigned int cpu)
                *output_arg = page_address(pg + 1);
        }
 
-       hv_get_vp_index(msr_vp_index);
+       msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);
 
        hv_vp_index[smp_processor_id()] = msr_vp_index;
 
@@ -162,7 +140,7 @@ EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);
 static inline bool hv_reenlightenment_available(void)
 {
        /*
-        * Check for required features and priviliges to make TSC frequency
+        * Check for required features and privileges to make TSC frequency
         * change notifications work.
         */
        return ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
@@ -292,7 +270,7 @@ static int hv_suspend(void)
 
        /*
         * Reset the hypercall page as it is going to be invalidated
-        * accross hibernation. Setting hv_hypercall_pg to NULL ensures
+        * across hibernation. Setting hv_hypercall_pg to NULL ensures
         * that any subsequent hypercall operation fails safely instead of
         * crashing due to an access of an invalid page. The hypercall page
         * pointer is restored on resume.
@@ -349,7 +327,7 @@ static void __init hv_stimer_setup_percpu_clockev(void)
         * Ignore any errors in setting up stimer clockevents
         * as we can run with the LAPIC timer as a fallback.
         */
-       (void)hv_stimer_alloc();
+       (void)hv_stimer_alloc(false);
 
        /*
         * Still register the LAPIC timer, because the direct-mode STIMER is
@@ -369,7 +347,7 @@ static void __init hv_get_partition_id(void)
        local_irq_save(flags);
        output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
        status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output_page);
-       if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) {
+       if (!hv_result_success(status)) {
                /* No point in proceeding if this failed */
                pr_err("Failed to get partition ID: %lld\n", status);
                BUG();
@@ -520,6 +498,8 @@ void __init hyperv_init(void)
                x86_init.irqs.create_pci_msi_domain = hv_create_pci_msi_domain;
 #endif
 
+       /* Query the VM's extended capabilities once, so that they can be cached. */
+       hv_query_ext_cap(0);
        return;
 
 remove_cpuhp_state:
@@ -593,33 +573,6 @@ void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
 }
 EXPORT_SYMBOL_GPL(hyperv_report_panic);
 
-/**
- * hyperv_report_panic_msg - report panic message to Hyper-V
- * @pa: physical address of the panic page containing the message
- * @size: size of the message in the page
- */
-void hyperv_report_panic_msg(phys_addr_t pa, size_t size)
-{
-       /*
-        * P3 to contain the physical address of the panic page & P4 to
-        * contain the size of the panic data in that page. Rest of the
-        * registers are no-op when the NOTIFY_MSG flag is set.
-        */
-       wrmsrl(HV_X64_MSR_CRASH_P0, 0);
-       wrmsrl(HV_X64_MSR_CRASH_P1, 0);
-       wrmsrl(HV_X64_MSR_CRASH_P2, 0);
-       wrmsrl(HV_X64_MSR_CRASH_P3, pa);
-       wrmsrl(HV_X64_MSR_CRASH_P4, size);
-
-       /*
-        * Let Hyper-V know there is crash data available along with
-        * the panic message.
-        */
-       wrmsrl(HV_X64_MSR_CRASH_CTL,
-              (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
-}
-EXPORT_SYMBOL_GPL(hyperv_report_panic_msg);
-
 bool hv_is_hyperv_initialized(void)
 {
        union hv_x64_msr_hypercall_contents hypercall_msr;
@@ -650,7 +603,7 @@ EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);
 
 enum hv_isolation_type hv_get_isolation_type(void)
 {
-       if (!(ms_hyperv.features_b & HV_ISOLATION))
+       if (!(ms_hyperv.priv_high & HV_ISOLATION))
                return HV_ISOLATION_TYPE_NONE;
        return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
 }
@@ -661,3 +614,50 @@ bool hv_is_isolation_supported(void)
        return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
 }
 EXPORT_SYMBOL_GPL(hv_is_isolation_supported);
+
+/* Bit mask of the extended capability to query: see HV_EXT_CAPABILITY_xxx */
+bool hv_query_ext_cap(u64 cap_query)
+{
+       /*
+        * The address of the 'hv_extended_cap' variable will be used as an
+        * output parameter to the hypercall below and so it should be
+        * compatible with 'virt_to_phys'. Which means, its address should be
+        * directly mapped. Use 'static' to keep it compatible; stack variables
+        * can be virtually mapped, making them incompatible with
+        * 'virt_to_phys'.
+        * Hypercall input/output addresses should also be 8-byte aligned.
+        */
+       static u64 hv_extended_cap __aligned(8);
+       static bool hv_extended_cap_queried;
+       u64 status;
+
+       /*
+        * Querying extended capabilities is an extended hypercall. Check if the
+        * partition supports extended hypercall, first.
+        */
+       if (!(ms_hyperv.priv_high & HV_ENABLE_EXTENDED_HYPERCALLS))
+               return false;
+
+       /* Extended capabilities do not change at runtime. */
+       if (hv_extended_cap_queried)
+               return hv_extended_cap & cap_query;
+
+       status = hv_do_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, NULL,
+                                &hv_extended_cap);
+
+       /*
+        * The query extended capabilities hypercall should not fail under
+        * any normal circumstances. Avoid repeatedly making the hypercall, on
+        * error.
+        */
+       hv_extended_cap_queried = true;
+       status &= HV_HYPERCALL_RESULT_MASK;
+       if (status != HV_STATUS_SUCCESS) {
+               pr_err("Hyper-V: Extended query capabilities hypercall failed 0x%llx\n",
+                      status);
+               return false;
+       }
+
+       return hv_extended_cap & cap_query;
+}
+EXPORT_SYMBOL_GPL(hv_query_ext_cap);
index 60461e598239127f85dcb4bfb77868e5692437d3..68a0843d4750f765b50dd303c82bc445f442646e 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/types.h>
-#include <linux/version.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <linux/clockchips.h>
@@ -93,10 +92,9 @@ int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
        status = hv_do_rep_hypercall(HVCALL_DEPOSIT_MEMORY,
                                     page_count, 0, input_page, NULL);
        local_irq_restore(flags);
-
-       if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) {
+       if (!hv_result_success(status)) {
                pr_err("Failed to deposit pages: %lld\n", status);
-               ret = status;
+               ret = hv_result(status);
                goto err_free_allocations;
        }
 
@@ -122,7 +120,7 @@ int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
        struct hv_add_logical_processor_out *output;
        u64 status;
        unsigned long flags;
-       int ret = 0;
+       int ret = HV_STATUS_SUCCESS;
        int pxm = node_to_pxm(node);
 
        /*
@@ -148,13 +146,11 @@ int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
                                         input, output);
                local_irq_restore(flags);
 
-               status &= HV_HYPERCALL_RESULT_MASK;
-
-               if (status != HV_STATUS_INSUFFICIENT_MEMORY) {
-                       if (status != HV_STATUS_SUCCESS) {
+               if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+                       if (!hv_result_success(status)) {
                                pr_err("%s: cpu %u apic ID %u, %lld\n", __func__,
                                       lp_index, apic_id, status);
-                               ret = status;
+                               ret = hv_result(status);
                        }
                        break;
                }
@@ -169,7 +165,7 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
        struct hv_create_vp *input;
        u64 status;
        unsigned long irq_flags;
-       int ret = 0;
+       int ret = HV_STATUS_SUCCESS;
        int pxm = node_to_pxm(node);
 
        /* Root VPs don't seem to need pages deposited */
@@ -200,13 +196,11 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
                status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
                local_irq_restore(irq_flags);
 
-               status &= HV_HYPERCALL_RESULT_MASK;
-
-               if (status != HV_STATUS_INSUFFICIENT_MEMORY) {
-                       if (status != HV_STATUS_SUCCESS) {
+               if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+                       if (!hv_result_success(status)) {
                                pr_err("%s: vcpu %u, lp %u, %lld\n", __func__,
                                       vp_index, flags, status);
-                               ret = status;
+                               ret = hv_result(status);
                        }
                        break;
                }
index f3270c1fc48c1dba158a8af7cac7a4ad30405e45..91cfe698bde0c531a388df586c00f65c35d34aa5 100644 (file)
@@ -25,7 +25,6 @@ static void hv_qlock_kick(int cpu)
 
 static void hv_qlock_wait(u8 *byte, u8 val)
 {
-       unsigned long msr_val;
        unsigned long flags;
 
        if (in_nmi())
@@ -48,8 +47,13 @@ static void hv_qlock_wait(u8 *byte, u8 val)
        /*
         * Only issue the rdmsrl() when the lock state has not changed.
         */
-       if (READ_ONCE(*byte) == val)
+       if (READ_ONCE(*byte) == val) {
+               unsigned long msr_val;
+
                rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);
+
+               (void)msr_val;
+       }
        local_irq_restore(flags);
 }
 
index 4421a8d92e2307a7021261a5ac3aca5d68c071d8..514fc64e23d5b49964fed6c690ae30c6876b826f 100644 (file)
@@ -63,10 +63,10 @@ static int hv_map_interrupt(union hv_device_id device_id, bool level,
 
        local_irq_restore(flags);
 
-       if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
+       if (!hv_result_success(status))
                pr_err("%s: hypercall failed, status %lld\n", __func__, status);
 
-       return status & HV_HYPERCALL_RESULT_MASK;
+       return hv_result(status);
 }
 
 static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry)
@@ -88,7 +88,7 @@ static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry)
        status = hv_do_hypercall(HVCALL_UNMAP_DEVICE_INTERRUPT, input, NULL);
        local_irq_restore(flags);
 
-       return status & HV_HYPERCALL_RESULT_MASK;
+       return hv_result(status);
 }
 
 #ifdef CONFIG_PCI_MSI
index 2c87350c1fb095149a06709cbc0b11f2427f7acc..c0ba8874d9cb833d4f25d5b0395e386e386270af 100644 (file)
@@ -58,7 +58,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
        int cpu, vcpu, gva_n, max_gvas;
        struct hv_tlb_flush **flush_pcpu;
        struct hv_tlb_flush *flush;
-       u64 status = U64_MAX;
+       u64 status;
        unsigned long flags;
 
        trace_hyperv_mmu_flush_tlb_others(cpus, info);
@@ -161,7 +161,7 @@ do_ex_hypercall:
 check_status:
        local_irq_restore(flags);
 
-       if (!(status & HV_HYPERCALL_RESULT_MASK))
+       if (hv_result_success(status))
                return;
 do_native:
        native_flush_tlb_others(cpus, info);
@@ -176,7 +176,7 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
        u64 status;
 
        if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
-               return U64_MAX;
+               return HV_STATUS_INVALID_PARAMETER;
 
        flush_pcpu = (struct hv_tlb_flush_ex **)
                     this_cpu_ptr(hyperv_pcpu_input_arg);
@@ -201,7 +201,7 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
        flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
        nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
        if (nr_bank < 0)
-               return U64_MAX;
+               return HV_STATUS_INVALID_PARAMETER;
 
        /*
         * We can flush not more than max_gvas with one hypercall. Flush the
index dd0a843f766d2ae2d3792613a7cdcb84ab5b64df..5d70968c85382560f6d7112b778a935d7a3466fa 100644 (file)
@@ -47,7 +47,7 @@ int hyperv_flush_guest_mapping(u64 as)
                                 flush, NULL);
        local_irq_restore(flags);
 
-       if (!(status & HV_HYPERCALL_RESULT_MASK))
+       if (hv_result_success(status))
                ret = 0;
 
 fault:
@@ -92,7 +92,7 @@ int hyperv_flush_guest_mapping_range(u64 as,
 {
        struct hv_guest_mapping_flush_list **flush_pcpu;
        struct hv_guest_mapping_flush_list *flush;
-       u64 status = 0;
+       u64 status;
        unsigned long flags;
        int ret = -ENOTSUPP;
        int gpa_n = 0;
@@ -125,10 +125,10 @@ int hyperv_flush_guest_mapping_range(u64 as,
 
        local_irq_restore(flags);
 
-       if (!(status & HV_HYPERCALL_RESULT_MASK))
+       if (hv_result_success(status))
                ret = 0;
        else
-               ret = status;
+               ret = hv_result(status);
 fault:
        trace_hyperv_nested_flush_guest_mapping_range(as, ret);
        return ret;
index 62da760d6d5a6cb177df16f75a3e0e5975db1e1c..cd7b14322035ebbf2853c312164f7aef869f5b33 100644 (file)
@@ -9,7 +9,7 @@
  * Functions to keep the agpgart mappings coherent with the MMU. The
  * GART gives the CPU a physical alias of pages in memory. The alias
  * region is mapped uncacheable. Make sure there are no conflicting
- * mappings with different cachability attributes for the same
+ * mappings with different cacheability attributes for the same
  * page. This avoids data corruption on some CPUs.
  */
 
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
deleted file mode 100644 (file)
index 464034d..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_ALTERNATIVE_ASM_H
-#define _ASM_X86_ALTERNATIVE_ASM_H
-
-#ifdef __ASSEMBLY__
-
-#include <asm/asm.h>
-
-#ifdef CONFIG_SMP
-       .macro LOCK_PREFIX
-672:   lock
-       .pushsection .smp_locks,"a"
-       .balign 4
-       .long 672b - .
-       .popsection
-       .endm
-#else
-       .macro LOCK_PREFIX
-       .endm
-#endif
-
-/*
- * objtool annotation to ignore the alternatives and only consider the original
- * instruction(s).
- */
-.macro ANNOTATE_IGNORE_ALTERNATIVE
-       .Lannotate_\@:
-       .pushsection .discard.ignore_alts
-       .long .Lannotate_\@ - .
-       .popsection
-.endm
-
-/*
- * Issue one struct alt_instr descriptor entry (need to put it into
- * the section .altinstructions, see below). This entry contains
- * enough information for the alternatives patching code to patch an
- * instruction. See apply_alternatives().
- */
-.macro altinstruction_entry orig alt feature orig_len alt_len pad_len
-       .long \orig - .
-       .long \alt - .
-       .word \feature
-       .byte \orig_len
-       .byte \alt_len
-       .byte \pad_len
-.endm
-
-/*
- * Define an alternative between two instructions. If @feature is
- * present, early code in apply_alternatives() replaces @oldinstr with
- * @newinstr. ".skip" directive takes care of proper instruction padding
- * in case @newinstr is longer than @oldinstr.
- */
-.macro ALTERNATIVE oldinstr, newinstr, feature
-140:
-       \oldinstr
-141:
-       .skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90
-142:
-
-       .pushsection .altinstructions,"a"
-       altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
-       .popsection
-
-       .pushsection .altinstr_replacement,"ax"
-143:
-       \newinstr
-144:
-       .popsection
-.endm
-
-#define old_len                        141b-140b
-#define new_len1               144f-143f
-#define new_len2               145f-144f
-
-/*
- * gas compatible max based on the idea from:
- * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
- *
- * The additional "-" is needed because gas uses a "true" value of -1.
- */
-#define alt_max_short(a, b)    ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
-
-
-/*
- * Same as ALTERNATIVE macro above but for two alternatives. If CPU
- * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
- * @feature2, it replaces @oldinstr with @feature2.
- */
-.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
-140:
-       \oldinstr
-141:
-       .skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
-               (alt_max_short(new_len1, new_len2) - (old_len)),0x90
-142:
-
-       .pushsection .altinstructions,"a"
-       altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f,142b-141b
-       altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
-       .popsection
-
-       .pushsection .altinstr_replacement,"ax"
-143:
-       \newinstr1
-144:
-       \newinstr2
-145:
-       .popsection
-.endm
-
-#endif  /*  __ASSEMBLY__  */
-
-#endif /* _ASM_X86_ALTERNATIVE_ASM_H */
index 13adca37c99a36a3a8282e22283b130b4a5dd0ad..17b36090d448500634bf41f1bae46160651ae29d 100644 (file)
@@ -2,13 +2,17 @@
 #ifndef _ASM_X86_ALTERNATIVE_H
 #define _ASM_X86_ALTERNATIVE_H
 
-#ifndef __ASSEMBLY__
-
 #include <linux/types.h>
-#include <linux/stddef.h>
 #include <linux/stringify.h>
 #include <asm/asm.h>
 
+#define ALTINSTR_FLAG_INV      (1 << 15)
+#define ALT_NOT(feat)          ((feat) | ALTINSTR_FLAG_INV)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stddef.h>
+
 /*
  * Alternative inline assembly for SMP.
  *
@@ -150,7 +154,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
        " .byte " alt_rlen(num) "\n"                    /* replacement len */ \
        " .byte " alt_pad_len "\n"                      /* pad len */
 
-#define ALTINSTR_REPLACEMENT(newinstr, feature, num)   /* replacement */       \
+#define ALTINSTR_REPLACEMENT(newinstr, num)            /* replacement */       \
        "# ALT: replacement " #num "\n"                                         \
        b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n"
 
@@ -161,7 +165,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
        ALTINSTR_ENTRY(feature, 1)                                      \
        ".popsection\n"                                                 \
        ".pushsection .altinstr_replacement, \"ax\"\n"                  \
-       ALTINSTR_REPLACEMENT(newinstr, feature, 1)                      \
+       ALTINSTR_REPLACEMENT(newinstr, 1)                               \
        ".popsection\n"
 
 #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
@@ -171,10 +175,15 @@ static inline int alternatives_text_reserved(void *start, void *end)
        ALTINSTR_ENTRY(feature2, 2)                                     \
        ".popsection\n"                                                 \
        ".pushsection .altinstr_replacement, \"ax\"\n"                  \
-       ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)                    \
-       ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)                    \
+       ALTINSTR_REPLACEMENT(newinstr1, 1)                              \
+       ALTINSTR_REPLACEMENT(newinstr2, 2)                              \
        ".popsection\n"
 
+/* If @feature is set, patch in @newinstr_yes, otherwise @newinstr_no. */
+#define ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) \
+       ALTERNATIVE_2(oldinstr, newinstr_no, X86_FEATURE_ALWAYS,        \
+                     newinstr_yes, feature)
+
 #define ALTERNATIVE_3(oldinsn, newinsn1, feat1, newinsn2, feat2, newinsn3, feat3) \
        OLDINSTR_3(oldinsn, 1, 2, 3)                                            \
        ".pushsection .altinstructions,\"a\"\n"                                 \
@@ -183,9 +192,9 @@ static inline int alternatives_text_reserved(void *start, void *end)
        ALTINSTR_ENTRY(feat3, 3)                                                \
        ".popsection\n"                                                         \
        ".pushsection .altinstr_replacement, \"ax\"\n"                          \
-       ALTINSTR_REPLACEMENT(newinsn1, feat1, 1)                                \
-       ALTINSTR_REPLACEMENT(newinsn2, feat2, 2)                                \
-       ALTINSTR_REPLACEMENT(newinsn3, feat3, 3)                                \
+       ALTINSTR_REPLACEMENT(newinsn1, 1)                                       \
+       ALTINSTR_REPLACEMENT(newinsn2, 2)                                       \
+       ALTINSTR_REPLACEMENT(newinsn3, 3)                                       \
        ".popsection\n"
 
 /*
@@ -206,6 +215,9 @@ static inline int alternatives_text_reserved(void *start, void *end)
 #define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
        asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")
 
+#define alternative_ternary(oldinstr, feature, newinstr_yes, newinstr_no) \
+       asm_inline volatile(ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) ::: "memory")
+
 /*
  * Alternative inline assembly with input.
  *
@@ -271,6 +283,116 @@ static inline int alternatives_text_reserved(void *start, void *end)
  */
 #define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
 
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_SMP
+       .macro LOCK_PREFIX
+672:   lock
+       .pushsection .smp_locks,"a"
+       .balign 4
+       .long 672b - .
+       .popsection
+       .endm
+#else
+       .macro LOCK_PREFIX
+       .endm
+#endif
+
+/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+.macro ANNOTATE_IGNORE_ALTERNATIVE
+       .Lannotate_\@:
+       .pushsection .discard.ignore_alts
+       .long .Lannotate_\@ - .
+       .popsection
+.endm
+
+/*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
+.macro altinstruction_entry orig alt feature orig_len alt_len pad_len
+       .long \orig - .
+       .long \alt - .
+       .word \feature
+       .byte \orig_len
+       .byte \alt_len
+       .byte \pad_len
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr. ".skip" directive takes care of proper instruction padding
+ * in case @newinstr is longer than @oldinstr.
+ */
+.macro ALTERNATIVE oldinstr, newinstr, feature
+140:
+       \oldinstr
+141:
+       .skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90
+142:
+
+       .pushsection .altinstructions,"a"
+       altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
+       .popsection
+
+       .pushsection .altinstr_replacement,"ax"
+143:
+       \newinstr
+144:
+       .popsection
+.endm
+
+#define old_len                        141b-140b
+#define new_len1               144f-143f
+#define new_len2               145f-144f
+
+/*
+ * gas compatible max based on the idea from:
+ * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ *
+ * The additional "-" is needed because gas uses a "true" value of -1.
+ */
+#define alt_max_short(a, b)    ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
+
+
+/*
+ * Same as ALTERNATIVE macro above but for two alternatives. If CPU
+ * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
+ * @feature2, it replaces @oldinstr with @newinstr2.
+ */
+.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
+140:
+       \oldinstr
+141:
+       .skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
+               (alt_max_short(new_len1, new_len2) - (old_len)),0x90
+142:
+
+       .pushsection .altinstructions,"a"
+       altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f,142b-141b
+       altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
+       .popsection
+
+       .pushsection .altinstr_replacement,"ax"
+143:
+       \newinstr1
+144:
+       \newinstr2
+145:
+       .popsection
+.endm
+
+/* If @feature is set, patch in @newinstr_yes, otherwise @newinstr_no. */
+#define ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) \
+       ALTERNATIVE_2 oldinstr, newinstr_no, X86_FEATURE_ALWAYS,        \
+       newinstr_yes, feature
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_X86_ALTERNATIVE_H */
index 4d4ec5cbdc51c3a6e75de4d37a1ea44ddfb9a8a3..94fbe6ae74313d397429a0992ba729aedb0ee2ba 100644 (file)
@@ -22,7 +22,7 @@ extern void __add_wrong_size(void)
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
  * -1 because sizeof will never return -1, thereby making those switch
- * case statements guaranteeed dead code which the compiler will
+ * case statements guaranteed dead code which the compiler will
  * eliminate, and allowing the "missing symbol in the default case" to
  * indicate a usage error.
  */
index da78ccbd493b77e30c0b56a2fa8c7267835ffdfb..0d7fc0e2bfc9e6403f87436faa421bfdfec97e0a 100644 (file)
@@ -41,12 +41,13 @@ unsigned int x86_family(unsigned int sig);
 unsigned int x86_model(unsigned int sig);
 unsigned int x86_stepping(unsigned int sig);
 #ifdef CONFIG_CPU_SUP_INTEL
-extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c);
+extern void __init sld_setup(struct cpuinfo_x86 *c);
 extern void switch_to_sld(unsigned long tifn);
 extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
 extern bool handle_guest_split_lock(unsigned long ip);
+extern void handle_bus_lock(struct pt_regs *regs);
 #else
-static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {}
+static inline void __init sld_setup(struct cpuinfo_x86 *c) {}
 static inline void switch_to_sld(unsigned long tifn) {}
 static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
 {
@@ -57,6 +58,8 @@ static inline bool handle_guest_split_lock(unsigned long ip)
 {
        return false;
 }
+
+static inline void handle_bus_lock(struct pt_regs *regs) {}
 #endif
 #ifdef CONFIG_IA32_FEAT_CTL
 void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
index 1728d4ce5730b64fa475db780d9c483418d1aa43..16a51e7288d581805701cbd21aebb08c4c63441c 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <asm/asm.h>
 #include <linux/bitops.h>
+#include <asm/alternative.h>
 
 enum cpuid_leafs
 {
@@ -175,39 +176,15 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
  */
 static __always_inline bool _static_cpu_has(u16 bit)
 {
-       asm_volatile_goto("1: jmp 6f\n"
-                "2:\n"
-                ".skip -(((5f-4f) - (2b-1b)) > 0) * "
-                        "((5f-4f) - (2b-1b)),0x90\n"
-                "3:\n"
-                ".section .altinstructions,\"a\"\n"
-                " .long 1b - .\n"              /* src offset */
-                " .long 4f - .\n"              /* repl offset */
-                " .word %P[always]\n"          /* always replace */
-                " .byte 3b - 1b\n"             /* src len */
-                " .byte 5f - 4f\n"             /* repl len */
-                " .byte 3b - 2b\n"             /* pad len */
-                ".previous\n"
-                ".section .altinstr_replacement,\"ax\"\n"
-                "4: jmp %l[t_no]\n"
-                "5:\n"
-                ".previous\n"
-                ".section .altinstructions,\"a\"\n"
-                " .long 1b - .\n"              /* src offset */
-                " .long 0\n"                   /* no replacement */
-                " .word %P[feature]\n"         /* feature bit */
-                " .byte 3b - 1b\n"             /* src len */
-                " .byte 0\n"                   /* repl len */
-                " .byte 0\n"                   /* pad len */
-                ".previous\n"
-                ".section .altinstr_aux,\"ax\"\n"
-                "6:\n"
-                " testb %[bitnum],%[cap_byte]\n"
-                " jnz %l[t_yes]\n"
-                " jmp %l[t_no]\n"
-                ".previous\n"
+       asm_volatile_goto(
+               ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
+               ".section .altinstr_aux,\"ax\"\n"
+               "6:\n"
+               " testb %[bitnum],%[cap_byte]\n"
+               " jnz %l[t_yes]\n"
+               " jmp %l[t_no]\n"
+               ".previous\n"
                 : : [feature]  "i" (bit),
-                    [always]   "i" (X86_FEATURE_ALWAYS),
                     [bitnum]   "i" (1 << (bit & 7)),
                     [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
                 : : t_yes, t_no);
index cc96e26d69f7ae9fa04ccdbc35d1f72f93d735a2..6241165620867e11e755b73e24ffd4a83976d5d2 100644 (file)
 #define X86_FEATURE_EPT_AD             ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
 #define X86_FEATURE_VMCALL             ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
 #define X86_FEATURE_VMW_VMMCALL                ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
+#define X86_FEATURE_PVUNLOCK           ( 8*32+20) /* "" PV unlock function */
+#define X86_FEATURE_VCPUPREEMPT                ( 8*32+21) /* "" PV vcpu_is_preempted function */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
 #define X86_FEATURE_FSGSBASE           ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
 #define X86_FEATURE_FENCE_SWAPGS_KERNEL        (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
 #define X86_FEATURE_SPLIT_LOCK_DETECT  (11*32+ 6) /* #AC for split lock */
 #define X86_FEATURE_PER_THREAD_MBA     (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
+#define X86_FEATURE_SGX1               (11*32+ 8) /* "" Basic SGX */
+#define X86_FEATURE_SGX2               (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI           (12*32+ 4) /* AVX VNNI instructions */
 #define X86_FEATURE_AVX512_VPOPCNTDQ   (16*32+14) /* POPCNT for vectors of DW/QW */
 #define X86_FEATURE_LA57               (16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID              (16*32+22) /* RDPID instruction */
+#define X86_FEATURE_BUS_LOCK_DETECT    (16*32+24) /* Bus Lock detect */
 #define X86_FEATURE_CLDEMOTE           (16*32+25) /* CLDEMOTE instruction */
 #define X86_FEATURE_MOVDIRI            (16*32+27) /* MOVDIRI instruction */
 #define X86_FEATURE_MOVDIR64B          (16*32+28) /* MOVDIR64B instruction */
index 9224d40cdefee7f60f8dde09b42b71c0f5a9f4fd..7d7500806af8a900694e5a5fe5b8440b70a5c522 100644 (file)
@@ -283,12 +283,12 @@ extern u32 elf_hwcap2;
  *
  * The decision process for determining the results are:
  *
- *                 CPU: | lacks NX*  | has NX, ia32     | has NX, x86_64 |
- * ELF:                 |            |                  |                |
+ *                 CPU: | lacks NX*  | has NX, ia32     | has NX, x86_64 |
+ * ELF:                 |            |                  |                |
  * ---------------------|------------|------------------|----------------|
- * missing PT_GNU_STACK | exec-all   | exec-all         | exec-none      |
- * PT_GNU_STACK == RWX  | exec-stack | exec-stack       | exec-stack     |
- * PT_GNU_STACK == RW   | exec-none  | exec-none        | exec-none      |
+ * missing PT_GNU_STACK | exec-all   | exec-all         | exec-none      |
+ * PT_GNU_STACK == RWX  | exec-stack | exec-stack       | exec-stack     |
+ * PT_GNU_STACK == RW   | exec-none  | exec-none        | exec-none      |
  *
  *  exec-all  : all PROT_READ user mappings are executable, except when
  *              backed by files on a noexec-filesystem.
index 2b87b191b3b8470cf80ad8d5004a7baf4ab4d6da..14ebd21965691d17ea763e5f62494cf780946cae 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _ASM_X86_ENTRY_COMMON_H
 #define _ASM_X86_ENTRY_COMMON_H
 
+#include <linux/randomize_kstack.h>
 #include <linux/user-return-notifier.h>
 
 #include <asm/nospec-branch.h>
@@ -70,6 +71,21 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
         */
        current_thread_info()->status &= ~(TS_COMPAT | TS_I386_REGS_POKED);
 #endif
+
+       /*
+        * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+        * but not enough for x86 stack utilization comfort. To keep
+        * reasonable stack head room, reduce the maximum offset to 8 bits.
+        *
+        * The actual entropy will be further reduced by the compiler when
+        * applying stack alignment constraints (see cc_stack_align4/8 in
+        * arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32)
+        * low bits from any entropy chosen here.
+        *
+        * Therefore, final stack offset entropy will be 5 (x86_64) or
+        * 6 (ia32) bits.
+        */
+       choose_random_kstack_offset(rdtsc() & 0xFF);
 }
 #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
 
index e6cd3fee562bff1704b2c4fcfeec9a8f156e509e..606f5cc579b2bd52f678b57e06409b108c1fd803 100644 (file)
@@ -156,7 +156,7 @@ enum hv_isolation_type {
 #define HV_X64_MSR_HYPERCALL                   0x40000001
 
 /* MSR used to provide vcpu index */
-#define HV_X64_MSR_VP_INDEX                    0x40000002
+#define HV_REGISTER_VP_INDEX                   0x40000002
 
 /* MSR used to reset the guest OS. */
 #define HV_X64_MSR_RESET                       0x40000003
@@ -165,10 +165,10 @@ enum hv_isolation_type {
 #define HV_X64_MSR_VP_RUNTIME                  0x40000010
 
 /* MSR used to read the per-partition time reference counter */
-#define HV_X64_MSR_TIME_REF_COUNT              0x40000020
+#define HV_REGISTER_TIME_REF_COUNT             0x40000020
 
 /* A partition's reference time stamp counter (TSC) page */
-#define HV_X64_MSR_REFERENCE_TSC               0x40000021
+#define HV_REGISTER_REFERENCE_TSC              0x40000021
 
 /* MSR used to retrieve the TSC frequency */
 #define HV_X64_MSR_TSC_FREQUENCY               0x40000022
@@ -183,50 +183,50 @@ enum hv_isolation_type {
 #define HV_X64_MSR_VP_ASSIST_PAGE              0x40000073
 
 /* Define synthetic interrupt controller model specific registers. */
-#define HV_X64_MSR_SCONTROL                    0x40000080
-#define HV_X64_MSR_SVERSION                    0x40000081
-#define HV_X64_MSR_SIEFP                       0x40000082
-#define HV_X64_MSR_SIMP                                0x40000083
-#define HV_X64_MSR_EOM                         0x40000084
-#define HV_X64_MSR_SINT0                       0x40000090
-#define HV_X64_MSR_SINT1                       0x40000091
-#define HV_X64_MSR_SINT2                       0x40000092
-#define HV_X64_MSR_SINT3                       0x40000093
-#define HV_X64_MSR_SINT4                       0x40000094
-#define HV_X64_MSR_SINT5                       0x40000095
-#define HV_X64_MSR_SINT6                       0x40000096
-#define HV_X64_MSR_SINT7                       0x40000097
-#define HV_X64_MSR_SINT8                       0x40000098
-#define HV_X64_MSR_SINT9                       0x40000099
-#define HV_X64_MSR_SINT10                      0x4000009A
-#define HV_X64_MSR_SINT11                      0x4000009B
-#define HV_X64_MSR_SINT12                      0x4000009C
-#define HV_X64_MSR_SINT13                      0x4000009D
-#define HV_X64_MSR_SINT14                      0x4000009E
-#define HV_X64_MSR_SINT15                      0x4000009F
+#define HV_REGISTER_SCONTROL                   0x40000080
+#define HV_REGISTER_SVERSION                   0x40000081
+#define HV_REGISTER_SIEFP                      0x40000082
+#define HV_REGISTER_SIMP                       0x40000083
+#define HV_REGISTER_EOM                                0x40000084
+#define HV_REGISTER_SINT0                      0x40000090
+#define HV_REGISTER_SINT1                      0x40000091
+#define HV_REGISTER_SINT2                      0x40000092
+#define HV_REGISTER_SINT3                      0x40000093
+#define HV_REGISTER_SINT4                      0x40000094
+#define HV_REGISTER_SINT5                      0x40000095
+#define HV_REGISTER_SINT6                      0x40000096
+#define HV_REGISTER_SINT7                      0x40000097
+#define HV_REGISTER_SINT8                      0x40000098
+#define HV_REGISTER_SINT9                      0x40000099
+#define HV_REGISTER_SINT10                     0x4000009A
+#define HV_REGISTER_SINT11                     0x4000009B
+#define HV_REGISTER_SINT12                     0x4000009C
+#define HV_REGISTER_SINT13                     0x4000009D
+#define HV_REGISTER_SINT14                     0x4000009E
+#define HV_REGISTER_SINT15                     0x4000009F
 
 /*
  * Synthetic Timer MSRs. Four timers per vcpu.
  */
-#define HV_X64_MSR_STIMER0_CONFIG              0x400000B0
-#define HV_X64_MSR_STIMER0_COUNT               0x400000B1
-#define HV_X64_MSR_STIMER1_CONFIG              0x400000B2
-#define HV_X64_MSR_STIMER1_COUNT               0x400000B3
-#define HV_X64_MSR_STIMER2_CONFIG              0x400000B4
-#define HV_X64_MSR_STIMER2_COUNT               0x400000B5
-#define HV_X64_MSR_STIMER3_CONFIG              0x400000B6
-#define HV_X64_MSR_STIMER3_COUNT               0x400000B7
+#define HV_REGISTER_STIMER0_CONFIG             0x400000B0
+#define HV_REGISTER_STIMER0_COUNT              0x400000B1
+#define HV_REGISTER_STIMER1_CONFIG             0x400000B2
+#define HV_REGISTER_STIMER1_COUNT              0x400000B3
+#define HV_REGISTER_STIMER2_CONFIG             0x400000B4
+#define HV_REGISTER_STIMER2_COUNT              0x400000B5
+#define HV_REGISTER_STIMER3_CONFIG             0x400000B6
+#define HV_REGISTER_STIMER3_COUNT              0x400000B7
 
 /* Hyper-V guest idle MSR */
 #define HV_X64_MSR_GUEST_IDLE                  0x400000F0
 
 /* Hyper-V guest crash notification MSR's */
-#define HV_X64_MSR_CRASH_P0                    0x40000100
-#define HV_X64_MSR_CRASH_P1                    0x40000101
-#define HV_X64_MSR_CRASH_P2                    0x40000102
-#define HV_X64_MSR_CRASH_P3                    0x40000103
-#define HV_X64_MSR_CRASH_P4                    0x40000104
-#define HV_X64_MSR_CRASH_CTL                   0x40000105
+#define HV_REGISTER_CRASH_P0                   0x40000100
+#define HV_REGISTER_CRASH_P1                   0x40000101
+#define HV_REGISTER_CRASH_P2                   0x40000102
+#define HV_REGISTER_CRASH_P3                   0x40000103
+#define HV_REGISTER_CRASH_P4                   0x40000104
+#define HV_REGISTER_CRASH_CTL                  0x40000105
 
 /* TSC emulation after migration */
 #define HV_X64_MSR_REENLIGHTENMENT_CONTROL     0x40000106
@@ -236,6 +236,32 @@ enum hv_isolation_type {
 /* TSC invariant control */
 #define HV_X64_MSR_TSC_INVARIANT_CONTROL       0x40000118
 
+/* Register name aliases for temporary compatibility */
+#define HV_X64_MSR_STIMER0_COUNT       HV_REGISTER_STIMER0_COUNT
+#define HV_X64_MSR_STIMER0_CONFIG      HV_REGISTER_STIMER0_CONFIG
+#define HV_X64_MSR_STIMER1_COUNT       HV_REGISTER_STIMER1_COUNT
+#define HV_X64_MSR_STIMER1_CONFIG      HV_REGISTER_STIMER1_CONFIG
+#define HV_X64_MSR_STIMER2_COUNT       HV_REGISTER_STIMER2_COUNT
+#define HV_X64_MSR_STIMER2_CONFIG      HV_REGISTER_STIMER2_CONFIG
+#define HV_X64_MSR_STIMER3_COUNT       HV_REGISTER_STIMER3_COUNT
+#define HV_X64_MSR_STIMER3_CONFIG      HV_REGISTER_STIMER3_CONFIG
+#define HV_X64_MSR_SCONTROL            HV_REGISTER_SCONTROL
+#define HV_X64_MSR_SVERSION            HV_REGISTER_SVERSION
+#define HV_X64_MSR_SIMP                        HV_REGISTER_SIMP
+#define HV_X64_MSR_SIEFP               HV_REGISTER_SIEFP
+#define HV_X64_MSR_VP_INDEX            HV_REGISTER_VP_INDEX
+#define HV_X64_MSR_EOM                 HV_REGISTER_EOM
+#define HV_X64_MSR_SINT0               HV_REGISTER_SINT0
+#define HV_X64_MSR_SINT15              HV_REGISTER_SINT15
+#define HV_X64_MSR_CRASH_P0            HV_REGISTER_CRASH_P0
+#define HV_X64_MSR_CRASH_P1            HV_REGISTER_CRASH_P1
+#define HV_X64_MSR_CRASH_P2            HV_REGISTER_CRASH_P2
+#define HV_X64_MSR_CRASH_P3            HV_REGISTER_CRASH_P3
+#define HV_X64_MSR_CRASH_P4            HV_REGISTER_CRASH_P4
+#define HV_X64_MSR_CRASH_CTL           HV_REGISTER_CRASH_CTL
+#define HV_X64_MSR_TIME_REF_COUNT      HV_REGISTER_TIME_REF_COUNT
+#define HV_X64_MSR_REFERENCE_TSC       HV_REGISTER_REFERENCE_TSC
+
 /*
  * Declare the MSR used to setup pages used to communicate with the hypervisor.
  */
@@ -288,35 +314,6 @@ struct hv_tsc_emulation_status {
 #define HV_X64_MSR_TSC_REFERENCE_ENABLE                0x00000001
 #define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12
 
-
-/* Define hypervisor message types. */
-enum hv_message_type {
-       HVMSG_NONE                      = 0x00000000,
-
-       /* Memory access messages. */
-       HVMSG_UNMAPPED_GPA              = 0x80000000,
-       HVMSG_GPA_INTERCEPT             = 0x80000001,
-
-       /* Timer notification messages. */
-       HVMSG_TIMER_EXPIRED             = 0x80000010,
-
-       /* Error messages. */
-       HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
-       HVMSG_UNRECOVERABLE_EXCEPTION   = 0x80000021,
-       HVMSG_UNSUPPORTED_FEATURE       = 0x80000022,
-
-       /* Trace buffer complete messages. */
-       HVMSG_EVENTLOG_BUFFERCOMPLETE   = 0x80000040,
-
-       /* Platform-specific processor intercept messages. */
-       HVMSG_X64_IOPORT_INTERCEPT      = 0x80010000,
-       HVMSG_X64_MSR_INTERCEPT         = 0x80010001,
-       HVMSG_X64_CPUID_INTERCEPT       = 0x80010002,
-       HVMSG_X64_EXCEPTION_INTERCEPT   = 0x80010003,
-       HVMSG_X64_APIC_EOI              = 0x80010004,
-       HVMSG_X64_LEGACY_FP_ERROR       = 0x80010005
-};
-
 struct hv_nested_enlightenments_control {
        struct {
                __u32 directhypercall:1;
index 5eb3bdf36a41984811cd4da8c3f1c9370912b2c6..e35e342673c72675cfbdd2b4a6acfac999b058a8 100644 (file)
@@ -547,7 +547,7 @@ SYM_CODE_END(spurious_entries_start)
 /*
  * Dummy trap number so the low level ASM macro vector number checks do not
  * match which results in emitting plain IDTENTRY stubs without bells and
- * whistels.
+ * whistles.
  */
 #define X86_TRAP_OTHER         0xFFFF
 
index 3cb002b1d0f930b3751d9a34b9e6720db5780415..994638ef171bec62782bdee78fe01061fa2acba6 100644 (file)
@@ -38,7 +38,7 @@ enum pconfig_leaf {
 #define MKTME_INVALID_ENC_ALG  4
 #define MKTME_DEVICE_BUSY      5
 
-/* Hardware requires the structure to be 256 byte alinged. Otherwise #GP(0). */
+/* Hardware requires the structure to be 256 byte aligned. Otherwise #GP(0). */
 struct mktme_key_program {
        u16 keyid;
        u32 keyid_ctrl;
index 423b788f495e92ce8b059509c84feedbb66d9e37..ebe8d2ea44fe0973076f369273bf94394fcabf09 100644 (file)
@@ -3,7 +3,7 @@
 #define _ASM_X86_INTEL_PT_H
 
 #define PT_CPUID_LEAVES                2
-#define PT_CPUID_REGS_NUM      4 /* number of regsters (eax, ebx, ecx, edx) */
+#define PT_CPUID_REGS_NUM      4 /* number of registers (eax, ebx, ecx, edx) */
 
 enum pt_capabilities {
        PT_CAP_max_subleaf = 0,
index d726459d08e5a1f8eecb83bd0857e9fef793355f..841a5d104afaaca1ff4b3151b459c2f74ccef70d 100644 (file)
@@ -159,7 +159,7 @@ static inline void *phys_to_virt(phys_addr_t address)
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
  * However, we truncate the address to unsigned int to avoid undesirable
- * promitions in legacy drivers.
+ * promotions in legacy drivers.
  */
 static inline unsigned int isa_virt_to_bus(volatile void *address)
 {
index 9b2a0ff76c73e215ca788300835f74462ded371e..562854c60808263798413e3b61b2cbaf230dfd11 100644 (file)
 
 /*
  * Macro to invoke __do_softirq on the irq stack. This is only called from
- * task context when bottom halfs are about to be reenabled and soft
+ * task context when bottom halves are about to be reenabled and soft
  * interrupts are pending to be processed. The interrupt stack cannot be in
  * use here.
  */
index 144d70ea43936b226787148b3c1fb7c917b30f6d..c5ce9845c999841797b0b479d3c5b85aa8638963 100644 (file)
@@ -109,18 +109,13 @@ static __always_inline unsigned long arch_local_irq_save(void)
 }
 #else
 
-#define ENABLE_INTERRUPTS(x)   sti
-#define DISABLE_INTERRUPTS(x)  cli
-
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_DEBUG_ENTRY
-#define SAVE_FLAGS(x)          pushfq; popq %rax
+#define SAVE_FLAGS             pushfq; popq %rax
 #endif
 
 #define INTERRUPT_RETURN       jmp native_iret
 
-#else
-#define INTERRUPT_RETURN               iret
 #endif
 
 #endif /* __ASSEMBLY__ */
index 97bbb4a9083a76cce625d5255d2e25ce86aeb2a5..05b48b33baf0b811b2788750d4539f18c9bd2255 100644 (file)
@@ -56,8 +56,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
        else
                set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
 
-       /* Flush this CPU's TLB. */
+       /*
+        * Flush this CPU's TLB, assuming whoever did the allocation/free is
+        * likely to continue running on this CPU.
+        */
+       preempt_disable();
        flush_tlb_one_kernel(addr);
+       preempt_enable();
        return true;
 }
 
index 3768819693e5c8a59da539b8f2907c1c75c729db..10eca9e8f7f668e4457e8c95cf22fd0c068550fb 100644 (file)
@@ -1488,7 +1488,7 @@ extern u64 kvm_mce_cap_supported;
 /*
  * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
  *                     userspace I/O) to indicate that the emulation context
- *                     should be resued as is, i.e. skip initialization of
+ *                     should be reused as is, i.e. skip initialization of
  *                     emulation context, instruction fetch and decode.
  *
  * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
@@ -1513,7 +1513,7 @@ extern u64 kvm_mce_cap_supported;
  *
  * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
  *                     backdoor emulation, which is opt in via module param.
- *                     VMware backoor emulation handles select instructions
+ *                     VMware backdoor emulation handles select instructions
  *                     and reinjects the #GP for all other cases.
  *
  * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
index ccf60a809a17957c744aeaac25cbd69570b6a666..67ff0d637e554129454977d71ca03fc98badbd27 100644 (file)
@@ -9,70 +9,29 @@
 #include <asm/hyperv-tlfs.h>
 #include <asm/nospec-branch.h>
 #include <asm/paravirt.h>
+#include <asm/mshyperv.h>
 
 typedef int (*hyperv_fill_flush_list_func)(
                struct hv_guest_mapping_flush_list *flush,
                void *data);
 
-#define hv_init_timer(timer, tick) \
-       wrmsrl(HV_X64_MSR_STIMER0_COUNT + (2*timer), tick)
-#define hv_init_timer_config(timer, val) \
-       wrmsrl(HV_X64_MSR_STIMER0_CONFIG + (2*timer), val)
-
-#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
-#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)
-
-#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
-#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)
-
-#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
-#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)
-
-#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)
-
-#define hv_signal_eom() wrmsrl(HV_X64_MSR_EOM, 0)
-
-#define hv_get_synint_state(int_num, val) \
-       rdmsrl(HV_X64_MSR_SINT0 + int_num, val)
-#define hv_set_synint_state(int_num, val) \
-       wrmsrl(HV_X64_MSR_SINT0 + int_num, val)
-#define hv_recommend_using_aeoi() \
-       (!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED))
+static inline void hv_set_register(unsigned int reg, u64 value)
+{
+       wrmsrl(reg, value);
+}
 
-#define hv_get_crash_ctl(val) \
-       rdmsrl(HV_X64_MSR_CRASH_CTL, val)
+static inline u64 hv_get_register(unsigned int reg)
+{
+       u64 value;
 
-#define hv_get_time_ref_count(val) \
-       rdmsrl(HV_X64_MSR_TIME_REF_COUNT, val)
+       rdmsrl(reg, value);
+       return value;
+}
 
-#define hv_get_reference_tsc(val) \
-       rdmsrl(HV_X64_MSR_REFERENCE_TSC, val)
-#define hv_set_reference_tsc(val) \
-       wrmsrl(HV_X64_MSR_REFERENCE_TSC, val)
-#define hv_set_clocksource_vdso(val) \
-       ((val).vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK)
-#define hv_enable_vdso_clocksource() \
-       vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
 #define hv_get_raw_timer() rdtsc_ordered()
-#define hv_get_vector() HYPERVISOR_CALLBACK_VECTOR
-
-/*
- * Reference to pv_ops must be inline so objtool
- * detection of noinstr violations can work correctly.
- */
-static __always_inline void hv_setup_sched_clock(void *sched_clock)
-{
-#ifdef CONFIG_PARAVIRT
-       pv_ops.time.sched_clock = sched_clock;
-#endif
-}
 
 void hyperv_vector_handler(struct pt_regs *regs);
 
-static inline void hv_enable_stimer0_percpu_irq(int irq) {}
-static inline void hv_disable_stimer0_percpu_irq(int irq) {}
-
-
 #if IS_ENABLED(CONFIG_HYPERV)
 extern int hyperv_init_cpuhp;
 
@@ -189,38 +148,6 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
        return hv_status;
 }
 
-/*
- * Rep hypercalls. Callers of this functions are supposed to ensure that
- * rep_count and varhead_size comply with Hyper-V hypercall definition.
- */
-static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
-                                     void *input, void *output)
-{
-       u64 control = code;
-       u64 status;
-       u16 rep_comp;
-
-       control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
-       control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
-
-       do {
-               status = hv_do_hypercall(control, input, output);
-               if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
-                       return status;
-
-               /* Bits 32-43 of status have 'Reps completed' data. */
-               rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
-                       HV_HYPERCALL_REP_COMP_OFFSET;
-
-               control &= ~HV_HYPERCALL_REP_START_MASK;
-               control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;
-
-               touch_nmi_watchdog();
-       } while (rep_comp < rep_count);
-
-       return status;
-}
-
 extern struct hv_vp_assist_page **hv_vp_assist_page;
 
 static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
@@ -233,9 +160,6 @@ static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
 
 void __init hyperv_init(void);
 void hyperv_setup_mmu_ops(void);
-void *hv_alloc_hyperv_page(void);
-void *hv_alloc_hyperv_zeroed_page(void);
-void hv_free_hyperv_page(unsigned long addr);
 void set_hv_tscchange_cb(void (*cb)(void));
 void clear_hv_tscchange_cb(void);
 void hyperv_stop_tsc_emulation(void);
@@ -272,8 +196,6 @@ int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
 #else /* CONFIG_HYPERV */
 static inline void hyperv_init(void) {}
 static inline void hyperv_setup_mmu_ops(void) {}
-static inline void *hv_alloc_hyperv_page(void) { return NULL; }
-static inline void hv_free_hyperv_page(unsigned long addr) {}
 static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
 static inline void clear_hv_tscchange_cb(void) {}
 static inline void hyperv_stop_tsc_emulation(void) {};
index 546d6ecf0a35b52a58196f07676058e59c301b62..fe335d8c1676fdc3e09e56d333d5da40cfe3c3ac 100644 (file)
 #define DEBUGCTLMSR_LBR                        (1UL <<  0) /* last branch recording */
 #define DEBUGCTLMSR_BTF_SHIFT          1
 #define DEBUGCTLMSR_BTF                        (1UL <<  1) /* single-step on branches */
+#define DEBUGCTLMSR_BUS_LOCK_DETECT    (1UL <<  2)
 #define DEBUGCTLMSR_TR                 (1UL <<  6)
 #define DEBUGCTLMSR_BTS                        (1UL <<  7)
 #define DEBUGCTLMSR_BTINT              (1UL <<  8)
 #define MSR_IA32_APICBASE_ENABLE       (1<<11)
 #define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
 
-#define MSR_IA32_TSCDEADLINE           0x000006e0
-
 #define MSR_IA32_UCODE_WRITE           0x00000079
 #define MSR_IA32_UCODE_REV             0x0000008b
 
index cb9ad6b739737e4e36f47f3e93e0604f5a4c7713..c14fb80b9a07a47db41883ad37acef172ede8787 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/objtool.h>
 
 #include <asm/alternative.h>
-#include <asm/alternative-asm.h>
 #include <asm/cpufeatures.h>
 #include <asm/msr-index.h>
 #include <asm/unwind_hints.h>
@@ -33,7 +32,7 @@
 
 /*
  * Google experimented with loop-unrolling and this turned out to be
- * the optimal version  two calls, each with their own speculation
+ * the optimal version - two calls, each with their own speculation
  * trap should their return address end up getting used, in a loop.
  */
 #define __FILL_RETURN_BUFFER(reg, nr, sp)      \
index 4abf110e224380b2514626c8753eedabb7e4d905..43992e5c52c243d8d400c471d0808887fcba9a8b 100644 (file)
 #include <linux/bug.h>
 #include <linux/types.h>
 #include <linux/cpumask.h>
+#include <linux/static_call_types.h>
 #include <asm/frame.h>
 
-static inline unsigned long long paravirt_sched_clock(void)
+u64 dummy_steal_clock(int cpu);
+u64 dummy_sched_clock(void);
+
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
+DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);
+
+void paravirt_set_sched_clock(u64 (*func)(void));
+
+static inline u64 paravirt_sched_clock(void)
 {
-       return PVOP_CALL0(unsigned long long, time.sched_clock);
+       return static_call(pv_sched_clock)();
 }
 
 struct static_key;
@@ -33,9 +42,13 @@ bool pv_is_native_vcpu_is_preempted(void);
 
 static inline u64 paravirt_steal_clock(int cpu)
 {
-       return PVOP_CALL1(u64, time.steal_clock, cpu);
+       return static_call(pv_steal_clock)(cpu);
 }
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init paravirt_set_cap(void);
+#endif
+
 /* The paravirtualized I/O functions */
 static inline void slow_down_io(void)
 {
@@ -122,7 +135,9 @@ static inline void write_cr0(unsigned long x)
 
 static inline unsigned long read_cr2(void)
 {
-       return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
+       return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
+                               "mov %%cr2, %%rax;",
+                               ALT_NOT(X86_FEATURE_XENPV));
 }
 
 static inline void write_cr2(unsigned long x)
@@ -132,12 +147,14 @@ static inline void write_cr2(unsigned long x)
 
 static inline unsigned long __read_cr3(void)
 {
-       return PVOP_CALL0(unsigned long, mmu.read_cr3);
+       return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3,
+                             "mov %%cr3, %%rax;", ALT_NOT(X86_FEATURE_XENPV));
 }
 
 static inline void write_cr3(unsigned long x)
 {
-       PVOP_VCALL1(mmu.write_cr3, x);
+       PVOP_ALT_VCALL1(mmu.write_cr3, x,
+                       "mov %%rdi, %%cr3", ALT_NOT(X86_FEATURE_XENPV));
 }
 
 static inline void __write_cr4(unsigned long x)
@@ -157,7 +174,7 @@ static inline void halt(void)
 
 static inline void wbinvd(void)
 {
-       PVOP_VCALL0(cpu.wbinvd);
+       PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV));
 }
 
 static inline u64 paravirt_read_msr(unsigned msr)
@@ -371,22 +388,28 @@ static inline void paravirt_release_p4d(unsigned long pfn)
 
 static inline pte_t __pte(pteval_t val)
 {
-       return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) };
+       return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val,
+                                         "mov %%rdi, %%rax",
+                                         ALT_NOT(X86_FEATURE_XENPV)) };
 }
 
 static inline pteval_t pte_val(pte_t pte)
 {
-       return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
+       return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte,
+                               "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
 }
 
 static inline pgd_t __pgd(pgdval_t val)
 {
-       return (pgd_t) { PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val) };
+       return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val,
+                                         "mov %%rdi, %%rax",
+                                         ALT_NOT(X86_FEATURE_XENPV)) };
 }
 
 static inline pgdval_t pgd_val(pgd_t pgd)
 {
-       return PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
+       return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd,
+                               "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
 }
 
 #define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
@@ -419,12 +442,15 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 
 static inline pmd_t __pmd(pmdval_t val)
 {
-       return (pmd_t) { PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val) };
+       return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val,
+                                         "mov %%rdi, %%rax",
+                                         ALT_NOT(X86_FEATURE_XENPV)) };
 }
 
 static inline pmdval_t pmd_val(pmd_t pmd)
 {
-       return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
+       return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd,
+                               "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
 }
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
@@ -436,14 +462,16 @@ static inline pud_t __pud(pudval_t val)
 {
        pudval_t ret;
 
-       ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);
+       ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val,
+                              "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
 
        return (pud_t) { ret };
 }
 
 static inline pudval_t pud_val(pud_t pud)
 {
-       return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
+       return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud,
+                               "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
 }
 
 static inline void pud_clear(pud_t *pudp)
@@ -462,14 +490,17 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
 
 static inline p4d_t __p4d(p4dval_t val)
 {
-       p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);
+       p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val,
+                                       "mov %%rdi, %%rax",
+                                       ALT_NOT(X86_FEATURE_XENPV));
 
        return (p4d_t) { ret };
 }
 
 static inline p4dval_t p4d_val(p4d_t p4d)
 {
-       return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
+       return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d,
+                               "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
 }
 
 static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
@@ -556,7 +587,9 @@ static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
 
 static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
 {
-       PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
+       PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock,
+                         "movb $0, (%%" _ASM_ARG1 ");",
+                         ALT_NOT(X86_FEATURE_PVUNLOCK));
 }
 
 static __always_inline void pv_wait(u8 *ptr, u8 val)
@@ -571,7 +604,9 @@ static __always_inline void pv_kick(int cpu)
 
 static __always_inline bool pv_vcpu_is_preempted(long cpu)
 {
-       return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
+       return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu,
+                               "xor %%" _ASM_AX ", %%" _ASM_AX ";",
+                               ALT_NOT(X86_FEATURE_VCPUPREEMPT));
 }
 
 void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
@@ -645,17 +680,18 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
 #ifdef CONFIG_PARAVIRT_XXL
 static inline notrace unsigned long arch_local_save_flags(void)
 {
-       return PVOP_CALLEE0(unsigned long, irq.save_fl);
+       return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
+                               ALT_NOT(X86_FEATURE_XENPV));
 }
 
 static inline notrace void arch_local_irq_disable(void)
 {
-       PVOP_VCALLEE0(irq.irq_disable);
+       PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT(X86_FEATURE_XENPV));
 }
 
 static inline notrace void arch_local_irq_enable(void)
 {
-       PVOP_VCALLEE0(irq.irq_enable);
+       PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT(X86_FEATURE_XENPV));
 }
 
 static inline notrace unsigned long arch_local_irq_save(void)
@@ -700,84 +736,27 @@ extern void default_banner(void);
        .popsection
 
 
-#define COND_PUSH(set, mask, reg)                      \
-       .if ((~(set)) & mask); push %reg; .endif
-#define COND_POP(set, mask, reg)                       \
-       .if ((~(set)) & mask); pop %reg; .endif
-
 #ifdef CONFIG_X86_64
-
-#define PV_SAVE_REGS(set)                      \
-       COND_PUSH(set, CLBR_RAX, rax);          \
-       COND_PUSH(set, CLBR_RCX, rcx);          \
-       COND_PUSH(set, CLBR_RDX, rdx);          \
-       COND_PUSH(set, CLBR_RSI, rsi);          \
-       COND_PUSH(set, CLBR_RDI, rdi);          \
-       COND_PUSH(set, CLBR_R8, r8);            \
-       COND_PUSH(set, CLBR_R9, r9);            \
-       COND_PUSH(set, CLBR_R10, r10);          \
-       COND_PUSH(set, CLBR_R11, r11)
-#define PV_RESTORE_REGS(set)                   \
-       COND_POP(set, CLBR_R11, r11);           \
-       COND_POP(set, CLBR_R10, r10);           \
-       COND_POP(set, CLBR_R9, r9);             \
-       COND_POP(set, CLBR_R8, r8);             \
-       COND_POP(set, CLBR_RDI, rdi);           \
-       COND_POP(set, CLBR_RSI, rsi);           \
-       COND_POP(set, CLBR_RDX, rdx);           \
-       COND_POP(set, CLBR_RCX, rcx);           \
-       COND_POP(set, CLBR_RAX, rax)
+#ifdef CONFIG_PARAVIRT_XXL
 
 #define PARA_PATCH(off)                ((off) / 8)
 #define PARA_SITE(ptype, ops)  _PVSITE(ptype, ops, .quad, 8)
 #define PARA_INDIRECT(addr)    *addr(%rip)
-#else
-#define PV_SAVE_REGS(set)                      \
-       COND_PUSH(set, CLBR_EAX, eax);          \
-       COND_PUSH(set, CLBR_EDI, edi);          \
-       COND_PUSH(set, CLBR_ECX, ecx);          \
-       COND_PUSH(set, CLBR_EDX, edx)
-#define PV_RESTORE_REGS(set)                   \
-       COND_POP(set, CLBR_EDX, edx);           \
-       COND_POP(set, CLBR_ECX, ecx);           \
-       COND_POP(set, CLBR_EDI, edi);           \
-       COND_POP(set, CLBR_EAX, eax)
-
-#define PARA_PATCH(off)                ((off) / 4)
-#define PARA_SITE(ptype, ops)  _PVSITE(ptype, ops, .long, 4)
-#define PARA_INDIRECT(addr)    *%cs:addr
-#endif
 
-#ifdef CONFIG_PARAVIRT_XXL
 #define INTERRUPT_RETURN                                               \
-       PARA_SITE(PARA_PATCH(PV_CPU_iret),                              \
-                 ANNOTATE_RETPOLINE_SAFE;                              \
-                 jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)
-
-#define DISABLE_INTERRUPTS(clobbers)                                   \
-       PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),                       \
-                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
-                 ANNOTATE_RETPOLINE_SAFE;                              \
-                 call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);        \
-                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
-
-#define ENABLE_INTERRUPTS(clobbers)                                    \
-       PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),                        \
-                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
-                 ANNOTATE_RETPOLINE_SAFE;                              \
-                 call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);         \
-                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
-#endif
+       ANNOTATE_RETPOLINE_SAFE;                                        \
+       ALTERNATIVE_TERNARY("jmp *paravirt_iret(%rip);",                \
+               X86_FEATURE_XENPV, "jmp xen_iret;", "jmp native_iret;")
 
-#ifdef CONFIG_X86_64
-#ifdef CONFIG_PARAVIRT_XXL
 #ifdef CONFIG_DEBUG_ENTRY
-#define SAVE_FLAGS(clobbers)                                        \
-       PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),                       \
-                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
-                 ANNOTATE_RETPOLINE_SAFE;                          \
-                 call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);        \
-                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+.macro PARA_IRQ_save_fl
+       PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),
+                 ANNOTATE_RETPOLINE_SAFE;
+                 call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);)
+.endm
+
+#define SAVE_FLAGS     ALTERNATIVE "PARA_IRQ_save_fl;", "pushf; pop %rax;", \
+                                   ALT_NOT(X86_FEATURE_XENPV)
 #endif
 #endif /* CONFIG_PARAVIRT_XXL */
 #endif /* CONFIG_X86_64 */
@@ -800,5 +779,11 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 {
 }
 #endif
+
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+static inline void paravirt_set_cap(void)
+{
+}
+#endif
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PARAVIRT_H */
index de87087d3bde16ac6a062536ebad122541acc7ad..ae692c3194e9e5e9758d171649a2ee5496f6a7cb 100644 (file)
@@ -3,7 +3,6 @@
 #define _ASM_X86_PARAVIRT_TYPES_H
 
 /* Bitmask of what can be clobbered: usually at least eax. */
-#define CLBR_NONE 0
 #define CLBR_EAX  (1 << 0)
 #define CLBR_ECX  (1 << 1)
 #define CLBR_EDX  (1 << 2)
@@ -15,7 +14,6 @@
 
 #define CLBR_ARG_REGS  (CLBR_EAX | CLBR_EDX | CLBR_ECX)
 #define CLBR_RET_REG   (CLBR_EAX | CLBR_EDX)
-#define CLBR_SCRATCH   (0)
 #else
 #define CLBR_RAX  CLBR_EAX
 #define CLBR_RCX  CLBR_ECX
 #define CLBR_ARG_REGS  (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
                         CLBR_RCX | CLBR_R8 | CLBR_R9)
 #define CLBR_RET_REG   (CLBR_RAX)
-#define CLBR_SCRATCH   (CLBR_R10 | CLBR_R11)
 
 #endif /* X86_64 */
 
-#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
-
 #ifndef __ASSEMBLY__
 
 #include <asm/desc_defs.h>
@@ -73,19 +68,6 @@ struct pv_info {
        const char *name;
 };
 
-struct pv_init_ops {
-       /*
-        * Patch may replace one of the defined code sequences with
-        * arbitrary code, subject to the same register constraints.
-        * This generally means the code is not free to clobber any
-        * registers other than EAX.  The patch function should return
-        * the number of bytes of code generated, as we nop pad the
-        * rest in generic code.
-        */
-       unsigned (*patch)(u8 type, void *insn_buff,
-                         unsigned long addr, unsigned len);
-} __no_randomize_layout;
-
 #ifdef CONFIG_PARAVIRT_XXL
 struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
@@ -95,11 +77,6 @@ struct pv_lazy_ops {
 } __no_randomize_layout;
 #endif
 
-struct pv_time_ops {
-       unsigned long long (*sched_clock)(void);
-       unsigned long long (*steal_clock)(int cpu);
-} __no_randomize_layout;
-
 struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        void (*io_delay)(void);
@@ -156,10 +133,6 @@ struct pv_cpu_ops {
 
        u64 (*read_pmc)(int counter);
 
-       /* Normal iret.  Jump to this with the standard iret stack
-          frame set up. */
-       void (*iret)(void);
-
        void (*start_context_switch)(struct task_struct *prev);
        void (*end_context_switch)(struct task_struct *next);
 #endif
@@ -290,8 +263,6 @@ struct pv_lock_ops {
  * number for each function using the offset which we use to indicate
  * what to patch. */
 struct paravirt_patch_template {
-       struct pv_init_ops      init;
-       struct pv_time_ops      time;
        struct pv_cpu_ops       cpu;
        struct pv_irq_ops       irq;
        struct pv_mmu_ops       mmu;
@@ -300,6 +271,7 @@ struct paravirt_patch_template {
 
 extern struct pv_info pv_info;
 extern struct paravirt_patch_template pv_ops;
+extern void (*paravirt_iret)(void);
 
 #define PARAVIRT_PATCH(x)                                      \
        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
@@ -331,11 +303,7 @@ extern struct paravirt_patch_template pv_ops;
 /* Simple instruction patching code. */
 #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
 
-unsigned paravirt_patch_ident_64(void *insn_buff, unsigned len);
-unsigned paravirt_patch_default(u8 type, void *insn_buff, unsigned long addr, unsigned len);
-unsigned paravirt_patch_insns(void *insn_buff, unsigned len, const char *start, const char *end);
-
-unsigned native_patch(u8 type, void *insn_buff, unsigned long addr, unsigned len);
+unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr, unsigned int len);
 
 int paravirt_disable_iospace(void);
 
@@ -371,7 +339,7 @@ int paravirt_disable_iospace(void);
  * on the stack.  All caller-save registers (eax,edx,ecx) are expected
  * to be modified (either clobbered or used for return values).
  * X86_64, on the other hand, already specifies a register-based calling
- * conventions, returning at %rax, with parameteres going on %rdi, %rsi,
+ * conventions, returning at %rax, with parameters going on %rdi, %rsi,
  * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
  * special handling for dealing with 4 arguments, unlike i386.
  * However, x86_64 also have to clobber all caller saved registers, which
@@ -414,11 +382,9 @@ int paravirt_disable_iospace(void);
  * makes sure the incoming and outgoing types are always correct.
  */
 #ifdef CONFIG_X86_32
-#define PVOP_VCALL_ARGS                                                        \
+#define PVOP_CALL_ARGS                                                 \
        unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;
 
-#define PVOP_CALL_ARGS                 PVOP_VCALL_ARGS
-
 #define PVOP_CALL_ARG1(x)              "a" ((unsigned long)(x))
 #define PVOP_CALL_ARG2(x)              "d" ((unsigned long)(x))
 #define PVOP_CALL_ARG3(x)              "c" ((unsigned long)(x))
@@ -434,12 +400,10 @@ int paravirt_disable_iospace(void);
 #define VEXTRA_CLOBBERS
 #else  /* CONFIG_X86_64 */
 /* [re]ax isn't an arg, but the return val */
-#define PVOP_VCALL_ARGS                                                \
+#define PVOP_CALL_ARGS                                         \
        unsigned long __edi = __edi, __esi = __esi,             \
                __edx = __edx, __ecx = __ecx, __eax = __eax;
 
-#define PVOP_CALL_ARGS         PVOP_VCALL_ARGS
-
 #define PVOP_CALL_ARG1(x)              "D" ((unsigned long)(x))
 #define PVOP_CALL_ARG2(x)              "S" ((unsigned long)(x))
 #define PVOP_CALL_ARG3(x)              "d" ((unsigned long)(x))
@@ -464,152 +428,138 @@ int paravirt_disable_iospace(void);
 #define PVOP_TEST_NULL(op)     ((void)pv_ops.op)
 #endif
 
-#define PVOP_RETMASK(rettype)                                          \
+#define PVOP_RETVAL(rettype)                                           \
        ({      unsigned long __mask = ~0UL;                            \
+               BUILD_BUG_ON(sizeof(rettype) > sizeof(unsigned long));  \
                switch (sizeof(rettype)) {                              \
                case 1: __mask =       0xffUL; break;                   \
                case 2: __mask =     0xffffUL; break;                   \
                case 4: __mask = 0xffffffffUL; break;                   \
                default: break;                                         \
                }                                                       \
-               __mask;                                                 \
+               __mask & __eax;                                         \
        })
 
 
-#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,                \
-                     pre, post, ...)                                   \
+#define ____PVOP_CALL(ret, op, clbr, call_clbr, extra_clbr, ...)       \
        ({                                                              \
-               rettype __ret;                                          \
                PVOP_CALL_ARGS;                                         \
                PVOP_TEST_NULL(op);                                     \
-               /* This is 32-bit specific, but is okay in 64-bit */    \
-               /* since this condition will never hold */              \
-               if (sizeof(rettype) > sizeof(unsigned long)) {          \
-                       asm volatile(pre                                \
-                                    paravirt_alt(PARAVIRT_CALL)        \
-                                    post                               \
-                                    : call_clbr, ASM_CALL_CONSTRAINT   \
-                                    : paravirt_type(op),               \
-                                      paravirt_clobber(clbr),          \
-                                      ##__VA_ARGS__                    \
-                                    : "memory", "cc" extra_clbr);      \
-                       __ret = (rettype)((((u64)__edx) << 32) | __eax); \
-               } else {                                                \
-                       asm volatile(pre                                \
-                                    paravirt_alt(PARAVIRT_CALL)        \
-                                    post                               \
-                                    : call_clbr, ASM_CALL_CONSTRAINT   \
-                                    : paravirt_type(op),               \
-                                      paravirt_clobber(clbr),          \
-                                      ##__VA_ARGS__                    \
-                                    : "memory", "cc" extra_clbr);      \
-                       __ret = (rettype)(__eax & PVOP_RETMASK(rettype));       \
-               }                                                       \
-               __ret;                                                  \
+               asm volatile(paravirt_alt(PARAVIRT_CALL)                \
+                            : call_clbr, ASM_CALL_CONSTRAINT           \
+                            : paravirt_type(op),                       \
+                              paravirt_clobber(clbr),                  \
+                              ##__VA_ARGS__                            \
+                            : "memory", "cc" extra_clbr);              \
+               ret;                                                    \
        })
 
-#define __PVOP_CALL(rettype, op, pre, post, ...)                       \
-       ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,        \
-                     EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
-
-#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)                 \
-       ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
-                     PVOP_CALLEE_CLOBBERS, ,                           \
-                     pre, post, ##__VA_ARGS__)
-
-
-#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)        \
+#define ____PVOP_ALT_CALL(ret, op, alt, cond, clbr, call_clbr,         \
+                         extra_clbr, ...)                              \
        ({                                                              \
-               PVOP_VCALL_ARGS;                                        \
+               PVOP_CALL_ARGS;                                         \
                PVOP_TEST_NULL(op);                                     \
-               asm volatile(pre                                        \
-                            paravirt_alt(PARAVIRT_CALL)                \
-                            post                                       \
+               asm volatile(ALTERNATIVE(paravirt_alt(PARAVIRT_CALL),   \
+                                        alt, cond)                     \
                             : call_clbr, ASM_CALL_CONSTRAINT           \
                             : paravirt_type(op),                       \
                               paravirt_clobber(clbr),                  \
                               ##__VA_ARGS__                            \
                             : "memory", "cc" extra_clbr);              \
+               ret;                                                    \
        })
 
-#define __PVOP_VCALL(op, pre, post, ...)                               \
-       ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,               \
-                      VEXTRA_CLOBBERS,                                 \
-                      pre, post, ##__VA_ARGS__)
+#define __PVOP_CALL(rettype, op, ...)                                  \
+       ____PVOP_CALL(PVOP_RETVAL(rettype), op, CLBR_ANY,               \
+                     PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, ##__VA_ARGS__)
+
+#define __PVOP_ALT_CALL(rettype, op, alt, cond, ...)                   \
+       ____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op, alt, cond, CLBR_ANY,\
+                         PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS,           \
+                         ##__VA_ARGS__)
+
+#define __PVOP_CALLEESAVE(rettype, op, ...)                            \
+       ____PVOP_CALL(PVOP_RETVAL(rettype), op.func, CLBR_RET_REG,      \
+                     PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)
+
+#define __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, ...)             \
+       ____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op.func, alt, cond,     \
+                         CLBR_RET_REG, PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)
+
+
+#define __PVOP_VCALL(op, ...)                                          \
+       (void)____PVOP_CALL(, op, CLBR_ANY, PVOP_VCALL_CLOBBERS,        \
+                      VEXTRA_CLOBBERS, ##__VA_ARGS__)
+
+#define __PVOP_ALT_VCALL(op, alt, cond, ...)                           \
+       (void)____PVOP_ALT_CALL(, op, alt, cond, CLBR_ANY,              \
+                               PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS,   \
+                               ##__VA_ARGS__)
 
-#define __PVOP_VCALLEESAVE(op, pre, post, ...)                         \
-       ____PVOP_VCALL(op.func, CLBR_RET_REG,                           \
-                     PVOP_VCALLEE_CLOBBERS, ,                          \
-                     pre, post, ##__VA_ARGS__)
+#define __PVOP_VCALLEESAVE(op, ...)                                    \
+       (void)____PVOP_CALL(, op.func, CLBR_RET_REG,                    \
+                           PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)
 
+#define __PVOP_ALT_VCALLEESAVE(op, alt, cond, ...)                     \
+       (void)____PVOP_ALT_CALL(, op.func, alt, cond, CLBR_RET_REG,     \
+                               PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)
 
 
 #define PVOP_CALL0(rettype, op)                                                \
-       __PVOP_CALL(rettype, op, "", "")
+       __PVOP_CALL(rettype, op)
 #define PVOP_VCALL0(op)                                                        \
-       __PVOP_VCALL(op, "", "")
+       __PVOP_VCALL(op)
+#define PVOP_ALT_CALL0(rettype, op, alt, cond)                         \
+       __PVOP_ALT_CALL(rettype, op, alt, cond)
+#define PVOP_ALT_VCALL0(op, alt, cond)                                 \
+       __PVOP_ALT_VCALL(op, alt, cond)
 
 #define PVOP_CALLEE0(rettype, op)                                      \
-       __PVOP_CALLEESAVE(rettype, op, "", "")
+       __PVOP_CALLEESAVE(rettype, op)
 #define PVOP_VCALLEE0(op)                                              \
-       __PVOP_VCALLEESAVE(op, "", "")
+       __PVOP_VCALLEESAVE(op)
+#define PVOP_ALT_CALLEE0(rettype, op, alt, cond)                       \
+       __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond)
+#define PVOP_ALT_VCALLEE0(op, alt, cond)                               \
+       __PVOP_ALT_VCALLEESAVE(op, alt, cond)
 
 
 #define PVOP_CALL1(rettype, op, arg1)                                  \
-       __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+       __PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1))
 #define PVOP_VCALL1(op, arg1)                                          \
-       __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
+       __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1))
+#define PVOP_ALT_VCALL1(op, arg1, alt, cond)                           \
+       __PVOP_ALT_VCALL(op, alt, cond, PVOP_CALL_ARG1(arg1))
 
 #define PVOP_CALLEE1(rettype, op, arg1)                                        \
-       __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+       __PVOP_CALLEESAVE(rettype, op, PVOP_CALL_ARG1(arg1))
 #define PVOP_VCALLEE1(op, arg1)                                                \
-       __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
+       __PVOP_VCALLEESAVE(op, PVOP_CALL_ARG1(arg1))
+#define PVOP_ALT_CALLEE1(rettype, op, arg1, alt, cond)                 \
+       __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, PVOP_CALL_ARG1(arg1))
+#define PVOP_ALT_VCALLEE1(op, arg1, alt, cond)                         \
+       __PVOP_ALT_VCALLEESAVE(op, alt, cond, PVOP_CALL_ARG1(arg1))
 
 
 #define PVOP_CALL2(rettype, op, arg1, arg2)                            \
-       __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
-                   PVOP_CALL_ARG2(arg2))
+       __PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
 #define PVOP_VCALL2(op, arg1, arg2)                                    \
-       __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
-                    PVOP_CALL_ARG2(arg2))
-
-#define PVOP_CALLEE2(rettype, op, arg1, arg2)                          \
-       __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),    \
-                         PVOP_CALL_ARG2(arg2))
-#define PVOP_VCALLEE2(op, arg1, arg2)                                  \
-       __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),            \
-                          PVOP_CALL_ARG2(arg2))
-
+       __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
 
 #define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                      \
-       __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
+       __PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1),                  \
                    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
 #define PVOP_VCALL3(op, arg1, arg2, arg3)                              \
-       __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
+       __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1),                          \
                     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
 
-/* This is the only difference in x86_64. We can make it much simpler */
-#ifdef CONFIG_X86_32
 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                        \
        __PVOP_CALL(rettype, op,                                        \
-                   "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
-                   PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
-                   PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
-#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                                \
-       __PVOP_VCALL(op,                                                \
-                   "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
-                   "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
-                   "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
-#else
-#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                        \
-       __PVOP_CALL(rettype, op, "", "",                                \
                    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
                    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                                \
-       __PVOP_VCALL(op, "", "",                                        \
-                    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),        \
+       __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),    \
                     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
-#endif
 
 /* Lazy mode for batching updates / context switch */
 enum paravirt_lazy_mode {
index a02c67291cfcbdf69048030323e1c4d4f2aee907..b1099f2d9800c7e2fe7873bbda7172fd72fdde90 100644 (file)
@@ -1244,7 +1244,7 @@ static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
- *  dst - pointer to pgd range anwhere on a pgd page
+ *  dst - pointer to pgd range anywhere on a pgd page
  *  src - ""
  *  count - the number of pgds to copy.
  *
index f1b9ed5efaa9016275cbd520e310a8f74e1801d9..185142b84ebe8fa96d289aa794d0509c1cc3c4ce 100644 (file)
@@ -314,11 +314,6 @@ struct x86_hw_tss {
 struct x86_hw_tss {
        u32                     reserved1;
        u64                     sp0;
-
-       /*
-        * We store cpu_current_top_of_stack in sp1 so it's always accessible.
-        * Linux does not use ring 1, so sp1 is not otherwise needed.
-        */
        u64                     sp1;
 
        /*
@@ -426,12 +421,7 @@ struct irq_stack {
        char            stack[IRQ_STACK_SIZE];
 } __aligned(IRQ_STACK_SIZE);
 
-#ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
-#else
-/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
-#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
-#endif
 
 #ifdef CONFIG_X86_64
 struct fixed_percpu_data {
@@ -527,7 +517,7 @@ struct thread_struct {
        struct io_bitmap        *io_bitmap;
 
        /*
-        * IOPL. Priviledge level dependent I/O permission which is
+        * IOPL. Privilege level dependent I/O permission which is
         * emulated via the I/O bitmap to prevent user space from disabling
         * interrupts.
         */
index b6a9d51d1d7911894fa6b831b2f01a05f1daf57f..8c5d1910a848f25091711eb675f400dc6f8270f7 100644 (file)
@@ -4,6 +4,8 @@
 
 #include <asm/ldt.h>
 
+struct task_struct;
+
 /* misc architecture specific prototypes */
 
 void syscall_init(void);
index 4352f08bfbb54b405a0c18d945362211b94cda15..43fa081a1adb236daf9c2f4de4e89616285ed40f 100644 (file)
@@ -8,8 +8,8 @@
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
  * address range. The attributes include:
- * Cachability   : UnCached, WriteCombining, WriteThrough, WriteBack
- * Executability : eXeutable, NoteXecutable
+ * Cacheability  : UnCached, WriteCombining, WriteThrough, WriteBack
+ * Executability : eXecutable, NoteXecutable
  * Read/Write    : ReadOnly, ReadWrite
  * Presence      : NotPresent
  * Encryption    : Encrypted, Decrypted
index 389d851a02c4fd55cab18a10b4d277bb12b093ab..a12458a7a8d4af15b9380a0d4cc336a6d1c80723 100644 (file)
@@ -130,11 +130,6 @@ void *extend_brk(size_t size, size_t align);
                        : : "i" (sz));                                  \
        }
 
-/* Helper for reserving space for arrays of things */
-#define RESERVE_BRK_ARRAY(type, name, entries)         \
-       type *name;                                     \
-       RESERVE_BRK(name, sizeof(type) * entries)
-
 extern void probe_roms(void);
 #ifdef __i386__
 
diff --git a/arch/x86/include/asm/sgx.h b/arch/x86/include/asm/sgx.h
new file mode 100644 (file)
index 0000000..9c31e0e
--- /dev/null
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright(c) 2016-20 Intel Corporation.
+ *
+ * Intel Software Guard Extensions (SGX) support.
+ */
+#ifndef _ASM_X86_SGX_H
+#define _ASM_X86_SGX_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/*
+ * This file contains both data structures defined by SGX architecture and Linux
+ * defined software data structures and functions.  The two should not be mixed
+ * together for better readability.  The architectural definitions come first.
+ */
+
+/* The SGX specific CPUID function. */
+#define SGX_CPUID              0x12
+/* EPC enumeration. */
+#define SGX_CPUID_EPC          2
+/* An invalid EPC section, i.e. the end marker. */
+#define SGX_CPUID_EPC_INVALID  0x0
+/* A valid EPC section. */
+#define SGX_CPUID_EPC_SECTION  0x1
+/* The bitmask for the EPC section type. */
+#define SGX_CPUID_EPC_MASK     GENMASK(3, 0)
+
+enum sgx_encls_function {
+       ECREATE = 0x00,
+       EADD    = 0x01,
+       EINIT   = 0x02,
+       EREMOVE = 0x03,
+       EDGBRD  = 0x04,
+       EDGBWR  = 0x05,
+       EEXTEND = 0x06,
+       ELDU    = 0x08,
+       EBLOCK  = 0x09,
+       EPA     = 0x0A,
+       EWB     = 0x0B,
+       ETRACK  = 0x0C,
+       EAUG    = 0x0D,
+       EMODPR  = 0x0E,
+       EMODT   = 0x0F,
+};
+
+/**
+ * enum sgx_return_code - The return code type for ENCLS, ENCLU and ENCLV
+ * %SGX_NOT_TRACKED:           Previous ETRACK's shootdown sequence has not
+ *                             been completed yet.
+ * %SGX_CHILD_PRESENT:         SECS has child pages present in the EPC.
+ * %SGX_INVALID_EINITTOKEN:    EINITTOKEN is invalid and enclave signer's
+ *                             public key does not match IA32_SGXLEPUBKEYHASH.
+ * %SGX_UNMASKED_EVENT:                An unmasked event, e.g. INTR, was received
+ */
+enum sgx_return_code {
+       SGX_NOT_TRACKED                 = 11,
+       SGX_CHILD_PRESENT               = 13,
+       SGX_INVALID_EINITTOKEN          = 16,
+       SGX_UNMASKED_EVENT              = 128,
+};
+
+/* The modulus size for 3072-bit RSA keys. */
+#define SGX_MODULUS_SIZE 384
+
+/**
+ * enum sgx_miscselect - additional information to an SSA frame
+ * %SGX_MISC_EXINFO:   Report #PF or #GP to the SSA frame.
+ *
+ * Save State Area (SSA) is a stack inside the enclave used to store processor
+ * state when an exception or interrupt occurs. This enum defines additional
+ * information stored to an SSA frame.
+ */
+enum sgx_miscselect {
+       SGX_MISC_EXINFO         = BIT(0),
+};
+
+#define SGX_MISC_RESERVED_MASK GENMASK_ULL(63, 1)
+
+#define SGX_SSA_GPRS_SIZE              184
+#define SGX_SSA_MISC_EXINFO_SIZE       16
+
+/**
+ * enum sgx_attribute - the attributes field in &struct sgx_secs
+ * %SGX_ATTR_INIT:             Enclave can be entered (is initialized).
+ * %SGX_ATTR_DEBUG:            Allow ENCLS(EDBGRD) and ENCLS(EDBGWR).
+ * %SGX_ATTR_MODE64BIT:                Tell that this is a 64-bit enclave.
+ * %SGX_ATTR_PROVISIONKEY:      Allow to use provisioning keys for remote
+ *                             attestation.
+ * %SGX_ATTR_KSS:              Allow to use key separation and sharing (KSS).
+ * %SGX_ATTR_EINITTOKENKEY:    Allow to use token signing key that is used to
+ *                             sign cryptographic tokens that can be passed to
+ *                             EINIT as an authorization to run an enclave.
+ */
+enum sgx_attribute {
+       SGX_ATTR_INIT           = BIT(0),
+       SGX_ATTR_DEBUG          = BIT(1),
+       SGX_ATTR_MODE64BIT      = BIT(2),
+       SGX_ATTR_PROVISIONKEY   = BIT(4),
+       SGX_ATTR_EINITTOKENKEY  = BIT(5),
+       SGX_ATTR_KSS            = BIT(7),
+};
+
+#define SGX_ATTR_RESERVED_MASK (BIT_ULL(3) | BIT_ULL(6) | GENMASK_ULL(63, 8))
+
+/**
+ * struct sgx_secs - SGX Enclave Control Structure (SECS)
+ * @size:              size of the address space
+ * @base:              base address of the  address space
+ * @ssa_frame_size:    size of an SSA frame
+ * @miscselect:                additional information stored to an SSA frame
+ * @attributes:                attributes for enclave
+ * @xfrm:              XSave-Feature Request Mask (subset of XCR0)
+ * @mrenclave:         SHA256-hash of the enclave contents
+ * @mrsigner:          SHA256-hash of the public key used to sign the SIGSTRUCT
+ * @config_id:         a user-defined value that is used in key derivation
+ * @isv_prod_id:       a user-defined value that is used in key derivation
+ * @isv_svn:           a user-defined value that is used in key derivation
+ * @config_svn:                a user-defined value that is used in key derivation
+ *
+ * SGX Enclave Control Structure (SECS) is a special enclave page that is not
+ * visible in the address space. In fact, this structure defines the address
+ * range and other global attributes for the enclave and it is the first EPC
+ * page created for any enclave. It is moved from a temporary buffer to an EPC
+ * by the means of ENCLS[ECREATE] function.
+ */
+struct sgx_secs {
+       u64 size;
+       u64 base;
+       u32 ssa_frame_size;
+       u32 miscselect;
+       u8  reserved1[24];
+       u64 attributes;
+       u64 xfrm;
+       u32 mrenclave[8];
+       u8  reserved2[32];
+       u32 mrsigner[8];
+       u8  reserved3[32];
+       u32 config_id[16];
+       u16 isv_prod_id;
+       u16 isv_svn;
+       u16 config_svn;
+       u8  reserved4[3834];
+} __packed;
+
+/**
+ * enum sgx_tcs_flags - execution flags for TCS
+ * %SGX_TCS_DBGOPTIN:  If enabled allows single-stepping and breakpoints
+ *                     inside an enclave. It is cleared by EADD but can
+ *                     be set later with EDBGWR.
+ */
+enum sgx_tcs_flags {
+       SGX_TCS_DBGOPTIN        = 0x01,
+};
+
+#define SGX_TCS_RESERVED_MASK  GENMASK_ULL(63, 1)
+#define SGX_TCS_RESERVED_SIZE  4024
+
+/**
+ * struct sgx_tcs - Thread Control Structure (TCS)
+ * @state:             used to mark an entered TCS
+ * @flags:             execution flags (cleared by EADD)
+ * @ssa_offset:                SSA stack offset relative to the enclave base
+ * @ssa_index:         the current SSA frame index (cleared by EADD)
+ * @nr_ssa_frames:     the number of frames in the SSA stack
+ * @entry_offset:      entry point offset relative to the enclave base
+ * @exit_addr:         address outside the enclave to exit on an exception or
+ *                     interrupt
+ * @fs_offset:         offset relative to the enclave base to become FS
+ *                     segment inside the enclave
+ * @gs_offset:         offset relative to the enclave base to become GS
+ *                     segment inside the enclave
+ * @fs_limit:          size to become a new FS-limit (only 32-bit enclaves)
+ * @gs_limit:          size to become a new GS-limit (only 32-bit enclaves)
+ *
+ * Thread Control Structure (TCS) is an enclave page visible in its address
+ * space that defines an entry point inside the enclave. A thread enters inside
+ * an enclave by supplying address of TCS to ENCLU(EENTER). A TCS can be entered
+ * by only one thread at a time.
+ */
+struct sgx_tcs {
+       u64 state;
+       u64 flags;
+       u64 ssa_offset;
+       u32 ssa_index;
+       u32 nr_ssa_frames;
+       u64 entry_offset;
+       u64 exit_addr;
+       u64 fs_offset;
+       u64 gs_offset;
+       u32 fs_limit;
+       u32 gs_limit;
+       u8  reserved[SGX_TCS_RESERVED_SIZE];
+} __packed;
+
+/**
+ * struct sgx_pageinfo - an enclave page descriptor
+ * @addr:      address of the enclave page
+ * @contents:  pointer to the page contents
+ * @metadata:  pointer either to a SECINFO or PCMD instance
+ * @secs:      address of the SECS page
+ */
+struct sgx_pageinfo {
+       u64 addr;
+       u64 contents;
+       u64 metadata;
+       u64 secs;
+} __packed __aligned(32);
+
+
+/**
+ * enum sgx_page_type - bits in the SECINFO flags defining the page type
+ * %SGX_PAGE_TYPE_SECS:        a SECS page
+ * %SGX_PAGE_TYPE_TCS: a TCS page
+ * %SGX_PAGE_TYPE_REG: a regular page
+ * %SGX_PAGE_TYPE_VA:  a VA page
+ * %SGX_PAGE_TYPE_TRIM:        a page in trimmed state
+ */
+enum sgx_page_type {
+       SGX_PAGE_TYPE_SECS,
+       SGX_PAGE_TYPE_TCS,
+       SGX_PAGE_TYPE_REG,
+       SGX_PAGE_TYPE_VA,
+       SGX_PAGE_TYPE_TRIM,
+};
+
+#define SGX_NR_PAGE_TYPES      5
+#define SGX_PAGE_TYPE_MASK     GENMASK(7, 0)
+
+/**
+ * enum sgx_secinfo_flags - the flags field in &struct sgx_secinfo
+ * %SGX_SECINFO_R:     allow read
+ * %SGX_SECINFO_W:     allow write
+ * %SGX_SECINFO_X:     allow execution
+ * %SGX_SECINFO_SECS:  a SECS page
+ * %SGX_SECINFO_TCS:   a TCS page
+ * %SGX_SECINFO_REG:   a regular page
+ * %SGX_SECINFO_VA:    a VA page
+ * %SGX_SECINFO_TRIM:  a page in trimmed state
+ */
+enum sgx_secinfo_flags {
+       SGX_SECINFO_R                   = BIT(0),
+       SGX_SECINFO_W                   = BIT(1),
+       SGX_SECINFO_X                   = BIT(2),
+       SGX_SECINFO_SECS                = (SGX_PAGE_TYPE_SECS << 8),
+       SGX_SECINFO_TCS                 = (SGX_PAGE_TYPE_TCS << 8),
+       SGX_SECINFO_REG                 = (SGX_PAGE_TYPE_REG << 8),
+       SGX_SECINFO_VA                  = (SGX_PAGE_TYPE_VA << 8),
+       SGX_SECINFO_TRIM                = (SGX_PAGE_TYPE_TRIM << 8),
+};
+
+#define SGX_SECINFO_PERMISSION_MASK    GENMASK_ULL(2, 0)
+#define SGX_SECINFO_PAGE_TYPE_MASK     (SGX_PAGE_TYPE_MASK << 8)
+#define SGX_SECINFO_RESERVED_MASK      ~(SGX_SECINFO_PERMISSION_MASK | \
+                                         SGX_SECINFO_PAGE_TYPE_MASK)
+
+/**
+ * struct sgx_secinfo - describes attributes of an EPC page
+ * @flags:     permissions and type
+ *
+ * Used together with ENCLS leaves that add or modify an EPC page to an
+ * enclave to define page permissions and type.
+ */
+struct sgx_secinfo {
+       u64 flags;
+       u8  reserved[56];
+} __packed __aligned(64);
+
+#define SGX_PCMD_RESERVED_SIZE 40
+
+/**
+ * struct sgx_pcmd - Paging Crypto Metadata (PCMD)
+ * @enclave_id:        enclave identifier
+ * @mac:       MAC over PCMD, page contents and isvsvn
+ *
+ * PCMD is stored for every swapped page to the regular memory. When ELDU loads
+ * the page back it recalculates the MAC by using an isvsvn number stored in a
+ * VA page. Together these two structures bring integrity and rollback
+ * protection.
+ */
+struct sgx_pcmd {
+       struct sgx_secinfo secinfo;
+       u64 enclave_id;
+       u8  reserved[SGX_PCMD_RESERVED_SIZE];
+       u8  mac[16];
+} __packed __aligned(128);
+
+#define SGX_SIGSTRUCT_RESERVED1_SIZE 84
+#define SGX_SIGSTRUCT_RESERVED2_SIZE 20
+#define SGX_SIGSTRUCT_RESERVED3_SIZE 32
+#define SGX_SIGSTRUCT_RESERVED4_SIZE 12
+
+/**
+ * struct sgx_sigstruct_header -  defines author of the enclave
+ * @header1:           constant byte string
+ * @vendor:            must be either 0x0000 or 0x8086
+ * @date:              YYYYMMDD in BCD
+ * @header2:           constant byte string
+ * @swdefined:         software defined value
+ */
+struct sgx_sigstruct_header {
+       u64 header1[2];
+       u32 vendor;
+       u32 date;
+       u64 header2[2];
+       u32 swdefined;
+       u8  reserved1[84];
+} __packed;
+
+/**
+ * struct sgx_sigstruct_body - defines contents of the enclave
+ * @miscselect:                additional information stored to an SSA frame
+ * @misc_mask:         required miscselect in SECS
+ * @attributes:                attributes for enclave
+ * @xfrm:              XSave-Feature Request Mask (subset of XCR0)
+ * @attributes_mask:   required attributes in SECS
+ * @xfrm_mask:         required XFRM in SECS
+ * @mrenclave:         SHA256-hash of the enclave contents
+ * @isvprodid:         a user-defined value that is used in key derivation
+ * @isvsvn:            a user-defined value that is used in key derivation
+ */
+struct sgx_sigstruct_body {
+       u32 miscselect;
+       u32 misc_mask;
+       u8  reserved2[20];
+       u64 attributes;
+       u64 xfrm;
+       u64 attributes_mask;
+       u64 xfrm_mask;
+       u8  mrenclave[32];
+       u8  reserved3[32];
+       u16 isvprodid;
+       u16 isvsvn;
+} __packed;
+
+/**
+ * struct sgx_sigstruct - an enclave signature
+ * @header:            defines author of the enclave
+ * @modulus:           the modulus of the public key
+ * @exponent:          the exponent of the public key
+ * @signature:         the signature calculated over the fields except modulus,
+ * @body:              defines contents of the enclave
+ * @q1:                        a value used in RSA signature verification
+ * @q2:                        a value used in RSA signature verification
+ *
+ * Header and body are the parts that are actually signed. The remaining fields
+ * define the signature of the enclave.
+ */
+struct sgx_sigstruct {
+       struct sgx_sigstruct_header header;
+       u8  modulus[SGX_MODULUS_SIZE];
+       u32 exponent;
+       u8  signature[SGX_MODULUS_SIZE];
+       struct sgx_sigstruct_body body;
+       u8  reserved4[12];
+       u8  q1[SGX_MODULUS_SIZE];
+       u8  q2[SGX_MODULUS_SIZE];
+} __packed;
+
+#define SGX_LAUNCH_TOKEN_SIZE 304
+
+/*
+ * Do not put any hardware-defined SGX structure representations below this
+ * comment!
+ */
+
+#ifdef CONFIG_X86_SGX_KVM
+int sgx_virt_ecreate(struct sgx_pageinfo *pageinfo, void __user *secs,
+                    int *trapnr);
+int sgx_virt_einit(void __user *sigstruct, void __user *token,
+                  void __user *secs, u64 *lepubkeyhash, int *trapnr);
+#endif
+
+int sgx_set_attribute(unsigned long *allowed_attributes,
+                     unsigned int attribute_fd);
+
+#endif /* _ASM_X86_SGX_H */
index 0bc9b0895f33e5a65c9f4304ae6d92fafade2394..d17b39893b7973073f21814e41d841aed5fec11a 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <asm/nops.h>
 #include <asm/cpufeatures.h>
+#include <asm/alternative.h>
 
 /* "Raw" instruction opcodes */
 #define __ASM_CLAC     ".byte 0x0f,0x01,0xca"
@@ -18,8 +19,6 @@
 
 #ifdef __ASSEMBLY__
 
-#include <asm/alternative-asm.h>
-
 #ifdef CONFIG_X86_SMAP
 
 #define ASM_CLAC \
@@ -37,8 +36,6 @@
 
 #else /* __ASSEMBLY__ */
 
-#include <asm/alternative.h>
-
 #ifdef CONFIG_X86_SMAP
 
 static __always_inline void clac(void)
index 57ef2094af93ede71e6ad6283fd699160d21de5d..630ff08532be828f9a908e78b329589bc600f216 100644 (file)
@@ -132,7 +132,7 @@ void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
-bool wakeup_cpu0(void);
+void cond_wakeup_cpu0(void);
 
 void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);
index 9f69cc497f4b68f3f49029f5b2b7a3209d8d890e..b5f0d2ff47e47366d0df6f0987cd1e4e2ea82adf 100644 (file)
@@ -71,12 +71,7 @@ static inline void update_task_stack(struct task_struct *task)
        else
                this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
 #else
-       /*
-        * x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack. That
-        * doesn't work on x86-32 because sp1 and
-        * cpu_current_top_of_stack have different values (because of
-        * the non-zero stack-padding on 32bit).
-        */
+       /* Xen PV enters the kernel on the thread stack. */
        if (static_cpu_has(X86_FEATURE_XENPV))
                load_sp0(task_top_of_stack(task));
 #endif
index a84333adeef232aa4d38f10d8ec1af7c1155ddb7..80c08c7d5e72e44d850cfa206598287e28df8150 100644 (file)
@@ -80,6 +80,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
        }
 
 #define __COND_SYSCALL(abi, name)                                      \
+       __weak long __##abi##_##name(const struct pt_regs *__unused);   \
        __weak long __##abi##_##name(const struct pt_regs *__unused)    \
        {                                                               \
                return sys_ni_syscall();                                \
index 06b740bae431d2a0b4a163553e548dc5d64d7c0d..de406d93b515dbc7b93f31f17ece2334ea3b4443 100644 (file)
@@ -197,13 +197,7 @@ static inline int arch_within_stack_frames(const void * const stack,
 #endif
 }
 
-#else /* !__ASSEMBLY__ */
-
-#ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
-#endif
-
-#endif
+#endif  /* !__ASSEMBLY__ */
 
 /*
  * Thread-synchronous status.
index f241451035fb2b4c9a721198312255dafe3139be..027a9258dbca482ae69ce099274c097734648149 100644 (file)
@@ -10,7 +10,7 @@
 #ifndef _ASM_UV_GEO_H
 #define _ASM_UV_GEO_H
 
-/* Type declaractions */
+/* Type declarations */
 
 /* Size of a geoid_s structure (must be before decl. of geoid_u) */
 #define GEOID_SIZE     8
index 5002f52be3328418a429bb3217ade8a5cfce5808..d3e3197917be89f9bb7d9d488bf5a9772ccb9090 100644 (file)
@@ -353,7 +353,7 @@ union uvh_apicid {
  *
  * Note there are NO leds on a UV system.  This register is only
  * used by the system controller to monitor system-wide operation.
- * There are 64 regs per node.  With Nahelem cpus (2 cores per node,
+ * There are 64 regs per node.  With Nehalem cpus (2 cores per node,
  * 8 cpus per core, 2 threads per cpu) there are 32 cpu threads on
  * a node.
  *
index 600a141c88050ffe05ed716d767e5b4eae84e53b..b25d3f82c2f36a9a210863d0540f2adc5744238a 100644 (file)
@@ -234,7 +234,7 @@ struct boot_params {
  * handling of page tables.
  *
  * These enums should only ever be used by x86 code, and the code that uses
- * it should be well contained and compartamentalized.
+ * it should be well contained and compartmentalized.
  *
  * KVM and Xen HVM do not have a subarch as these are expected to follow
  * standard x86 boot entries. If there is a genuine need for "hypervisor" type
@@ -252,7 +252,7 @@ struct boot_params {
  * @X86_SUBARCH_XEN: Used for Xen guest types which follow the PV boot path,
  *     which start at asm startup_xen() entry point and later jump to the C
  *     xen_start_kernel() entry point. Both domU and dom0 type of guests are
- *     currently supportd through this PV boot path.
+ *     currently supported through this PV boot path.
  * @X86_SUBARCH_INTEL_MID: Used for Intel MID (Mobile Internet Device) platform
  *     systems which do not have the PCI legacy interfaces.
  * @X86_SUBARCH_CE4100: Used for Intel CE media processor (CE4100) SoC
index d95d080b30e3ea833a5ddbe47f95e1195c6870a1..0007ba077c0c2beac746d7d51e9e00809cfb4fed 100644 (file)
@@ -24,6 +24,7 @@
 #define DR_TRAP3       (0x8)           /* db3 */
 #define DR_TRAP_BITS   (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)
 
+#define DR_BUS_LOCK    (0x800)         /* bus_lock */
 #define DR_STEP                (0x4000)        /* single-step */
 #define DR_SWITCH      (0x8000)        /* task switch */
 
index b3d0664fadc9cc8055f1f84e5978341491d0b4ee..ac83e25bbf37b5c17a1bb79485aa57b9e0a8d1eb 100644 (file)
@@ -12,7 +12,7 @@
  * The msqid64_ds structure for x86 architecture with x32 ABI.
  *
  * On x86-32 and x86-64 we can just use the generic definition, but
- * x32 uses the same binary layout as x86_64, which is differnet
+ * x32 uses the same binary layout as x86_64, which is different
  * from other 32-bit architectures.
  */
 
index 9034f3007c4efc2e59aa9e0df97c9d34d0e29223..9690d6899ad98093fda3ab6909de8eac3093af77 100644 (file)
@@ -152,7 +152,7 @@ struct sgx_enclave_run {
  * Most exceptions reported on ENCLU, including those that occur within the
  * enclave, are fixed up and reported synchronously instead of being delivered
  * via a standard signal. Debug Exceptions (#DB) and Breakpoints (#BP) are
- * never fixed up and are always delivered via standard signals. On synchrously
+ * never fixed up and are always delivered via standard signals. On synchronously
  * reported exceptions, -EFAULT is returned and details about the exception are
  * recorded in @run.exception, the optional sgx_enclave_exception struct.
  *
index f0305dc660c9a92338272dbfe40712519b123682..fce18eaa070c1b39b2293a8acb747d41f79519c0 100644 (file)
@@ -9,7 +9,7 @@
  * The shmid64_ds structure for x86 architecture with x32 ABI.
  *
  * On x86-32 and x86-64 we can just use the generic definition, but
- * x32 uses the same binary layout as x86_64, which is differnet
+ * x32 uses the same binary layout as x86_64, which is different
  * from other 32-bit architectures.
  */
 
index 844d60eb1882755f3653ddfdf3a130285a4fbe3a..d0d9b331d3a13864a886e9c7ecfac285454fe5c0 100644 (file)
@@ -139,7 +139,7 @@ struct _fpstate_32 {
  * The 64-bit FPU frame. (FXSAVE format and later)
  *
  * Note1: If sw_reserved.magic1 == FP_XSTATE_MAGIC1 then the structure is
- *        larger: 'struct _xstate'. Note that 'struct _xstate' embedds
+ *        larger: 'struct _xstate'. Note that 'struct _xstate' embeds
  *        'struct _fpstate' so that you can always assume the _fpstate portion
  *        exists so that you can check the magic value.
  *
index 2ddf08351f0bca0b5f3911a1855e7fa91d748ef0..0704c2a94272c0b30b052441eb5070cb53b39430 100644 (file)
@@ -35,7 +35,6 @@ KASAN_SANITIZE_sev-es.o                                       := n
 KCSAN_SANITIZE := n
 
 OBJECT_FILES_NON_STANDARD_test_nx.o                    := y
-OBJECT_FILES_NON_STANDARD_paravirt_patch.o             := y
 
 ifdef CONFIG_FRAME_POINTER
 OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o             := y
@@ -121,7 +120,7 @@ obj-$(CONFIG_AMD_NB)                += amd_nb.o
 obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o
 
 obj-$(CONFIG_KVM_GUEST)                += kvm.o kvmclock.o
-obj-$(CONFIG_PARAVIRT)         += paravirt.o paravirt_patch.o
+obj-$(CONFIG_PARAVIRT)         += paravirt.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK)   += pvclock.o
 obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
index 14cd3186dc77dc7a929a47026bfa8c3152f89308..e90310cbe73ac2fac72f21ab469e139130703544 100644 (file)
@@ -830,7 +830,7 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
 EXPORT_SYMBOL(acpi_unregister_ioapic);
 
 /**
- * acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base
+ * acpi_ioapic_registered - Check whether IOAPIC associated with @gsi_base
  *                         has been registered
  * @handle:    ACPI handle of the IOAPIC device
  * @gsi_base:  GSI base associated with the IOAPIC
@@ -1656,7 +1656,7 @@ static int __init parse_acpi(char *arg)
        else if (strcmp(arg, "noirq") == 0) {
                acpi_noirq_set();
        }
-       /* "acpi=copy_dsdt" copys DSDT */
+       /* "acpi=copy_dsdt" copies DSDT */
        else if (strcmp(arg, "copy_dsdt") == 0) {
                acpi_gbl_copy_dsdt_locally = 1;
        }
index cc1fea76aab05dfabb69ca59e3bff20f0643914d..3f85fcae450ce2fb89b8c466d6872815491f493f 100644 (file)
@@ -41,7 +41,7 @@ unsigned long acpi_get_wakeup_address(void)
  * x86_acpi_enter_sleep_state - enter sleep state
  * @state: Sleep state to enter.
  *
- * Wrapper around acpi_enter_sleep_state() to be called by assmebly.
+ * Wrapper around acpi_enter_sleep_state() to be called by assembly.
  */
 asmlinkage acpi_status __visible x86_acpi_enter_sleep_state(u8 state)
 {
index 56b6865afb2ac68cadf7798209c875576cf6f90b..d5d8a352eafaa1b0bf77bd61a1f4edf444d9b23b 100644 (file)
@@ -115,7 +115,7 @@ SYM_FUNC_START(do_suspend_lowlevel)
        movq    pt_regs_r14(%rax), %r14
        movq    pt_regs_r15(%rax), %r15
 
-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
        /*
         * The suspend path may have poisoned some areas deeper in the stack,
         * which we now need to unpoison.
index 8d778e46725d293ed0659742b636e70c81985d73..f810e6fececd1fedeec621d7f7883975410e614e 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/insn.h>
 #include <asm/io.h>
 #include <asm/fixmap.h>
+#include <asm/paravirt.h>
 
 int __read_mostly alternatives_patched;
 
@@ -388,21 +389,31 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
         */
        for (a = start; a < end; a++) {
                int insn_buff_sz = 0;
+               /* Mask away "NOT" flag bit for feature to test. */
+               u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;
 
                instr = (u8 *)&a->instr_offset + a->instr_offset;
                replacement = (u8 *)&a->repl_offset + a->repl_offset;
                BUG_ON(a->instrlen > sizeof(insn_buff));
-               BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
-               if (!boot_cpu_has(a->cpuid)) {
+               BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32);
+
+               /*
+                * Patch if either:
+                * - feature is present
+                * - feature not present but ALTINSTR_FLAG_INV is set to mean,
+                *   patch if feature is *NOT* present.
+                */
+               if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV)) {
                        if (a->padlen > 1)
                                optimize_nops(a, instr);
 
                        continue;
                }
 
-               DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
-                       a->cpuid >> 5,
-                       a->cpuid & 0x1f,
+               DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
+                       (a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
+                       feature >> 5,
+                       feature & 0x1f,
                        instr, instr, a->instrlen,
                        replacement, a->replacementlen, a->padlen);
 
@@ -605,7 +616,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
                BUG_ON(p->len > MAX_PATCH_LEN);
                /* prep the buffer with the original instructions */
                memcpy(insn_buff, p->instr, p->len);
-               used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
+               used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
 
                BUG_ON(used > p->len);
 
@@ -723,6 +734,33 @@ void __init alternative_instructions(void)
         * patching.
         */
 
+       /*
+        * Paravirt patching and alternative patching can be combined to
+        * replace a function call with a short direct code sequence (e.g.
+        * by setting a constant return value instead of doing that in an
+        * external function).
+        * In order to make this work the following sequence is required:
+        * 1. set (artificial) features depending on used paravirt
+        *    functions which can later influence alternative patching
+        * 2. apply paravirt patching (generally replacing an indirect
+        *    function call with a direct one)
+        * 3. apply alternative patching (e.g. replacing a direct function
+        *    call with a custom code sequence)
+        * Doing paravirt patching after alternative patching would clobber
+        * the optimization of the custom code with a function call again.
+        */
+       paravirt_set_cap();
+
+       /*
+        * First patch paravirt functions, such that we overwrite the indirect
+        * call with the direct call.
+        */
+       apply_paravirt(__parainstructions, __parainstructions_end);
+
+       /*
+        * Then patch alternatives, such that those paravirt calls that are in
+        * alternatives can be overwritten by their immediate fragments.
+        */
        apply_alternatives(__alt_instructions, __alt_instructions_end);
 
 #ifdef CONFIG_SMP
@@ -741,8 +779,6 @@ void __init alternative_instructions(void)
        }
 #endif
 
-       apply_paravirt(__parainstructions, __parainstructions_end);
-
        restart_nmi();
        alternatives_patched = 1;
 }
index b4396952c9a6ce287a4d8e0587b62a617a484f92..09083094eb575c656beceaa5298f53c9d6ffb206 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Shared support code for AMD K8 northbridges and derivates.
+ * Shared support code for AMD K8 northbridges and derivatives.
  * Copyright 2006 Andi Kleen, SUSE Labs.
  */
 
index 4f26700f314d9ad48c4c57af17831b7f4fdb9d9b..4a39fb429f15b427f796b95f97ce60db56d0f029 100644 (file)
@@ -619,7 +619,7 @@ static void setup_APIC_timer(void)
 
        if (this_cpu_has(X86_FEATURE_ARAT)) {
                lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
-               /* Make LAPIC timer preferrable over percpu HPET */
+               /* Make LAPIC timer preferable over percpu HPET */
                lapic_clockevent.rating = 150;
        }
 
@@ -666,7 +666,7 @@ void lapic_update_tsc_freq(void)
  * In this functions we calibrate APIC bus clocks to the external timer.
  *
  * We want to do the calibration only once since we want to have local timer
- * irqs syncron. CPUs connected by the same APIC bus have the very same bus
+ * irqs synchronous. CPUs connected by the same APIC bus have the very same bus
  * frequency.
  *
  * This was previously done by reading the PIT/HPET and waiting for a wrap
@@ -1532,7 +1532,7 @@ static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
  * Most probably by now the CPU has serviced that pending interrupt and it
  * might not have done the ack_APIC_irq() because it thought, interrupt
  * came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear
- * the ISR bit and cpu thinks it has already serivced the interrupt. Hence
+ * the ISR bit and cpu thinks it has already serviced the interrupt. Hence
  * a vector might get locked. It was noticed for timer irq (vector
  * 0x31). Issue an extra EOI to clear ISR.
  *
@@ -1657,7 +1657,7 @@ static void setup_local_APIC(void)
         */
        /*
         * Actually disabling the focus CPU check just makes the hang less
-        * frequent as it makes the interrupt distributon model be more
+        * frequent as it makes the interrupt distribution model be more
         * like LRU than MRU (the short-term load is more even across CPUs).
         */
 
@@ -1875,7 +1875,7 @@ static __init void try_to_enable_x2apic(int remap_mode)
 
                /*
                 * Without IR, all CPUs can be addressed by IOAPIC/MSI only
-                * in physical mode, and CPUs with an APIC ID that cannnot
+                * in physical mode, and CPUs with an APIC ID that cannot
                 * be addressed must not be brought online.
                 */
                x2apic_set_max_apicid(apic_limit);
index 73ff4dd426a8341bcd38654c2856b5d465bed6ba..d5c691a3208b69d7aba5b2321074d6175dab2dd4 100644 (file)
@@ -928,7 +928,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
 
        /*
         * setup_IO_APIC_irqs() programs all legacy IRQs with default trigger
-        * and polarity attirbutes. So allow the first user to reprogram the
+        * and polarity attributes. So allow the first user to reprogram the
         * pin with real trigger and polarity attributes.
         */
        if (irq < nr_legacy_irqs() && data->count == 1) {
@@ -994,7 +994,7 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain,
 
        /*
         * Legacy ISA IRQ has already been allocated, just add pin to
-        * the pin list assoicated with this IRQ and program the IOAPIC
+        * the pin list associated with this IRQ and program the IOAPIC
         * entry. The IOAPIC entry
         */
        if (irq_data && irq_data->parent_data) {
@@ -1752,7 +1752,7 @@ static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
                 * with masking the ioapic entry and then polling until
                 * Remote IRR was clear before reprogramming the
                 * ioapic I don't trust the Remote IRR bit to be
-                * completey accurate.
+                * completely accurate.
                 *
                 * However there appears to be no other way to plug
                 * this race, so if the Remote IRR bit is not
@@ -1830,7 +1830,7 @@ static void ioapic_ack_level(struct irq_data *irq_data)
        /*
         * Tail end of clearing remote IRR bit (either by delivering the EOI
         * message via io-apic EOI register write or simulating it using
-        * mask+edge followed by unnask+level logic) manually when the
+        * mask+edge followed by unmask+level logic) manually when the
         * level triggered interrupt is seen as the edge triggered interrupt
         * at the cpu.
         */
index 3c9c7492252f827207a8638e53318835f79b48dc..6dbdc7c22bb751126cb70f9ea5766e120235f4eb 100644 (file)
@@ -543,6 +543,14 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
        if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
                return -ENOSYS;
 
+       /*
+        * Catch any attempt to touch the cascade interrupt on a PIC
+        * equipped system.
+        */
+       if (WARN_ON_ONCE(info->flags & X86_IRQ_ALLOC_LEGACY &&
+                        virq == PIC_CASCADE_IR))
+               return -EINVAL;
+
        for (i = 0; i < nr_irqs; i++) {
                irqd = irq_domain_get_irq_data(domain, virq + i);
                BUG_ON(!irqd);
@@ -745,6 +753,11 @@ void __init lapic_assign_system_vectors(void)
 
        /* Mark the preallocated legacy interrupts */
        for (i = 0; i < nr_legacy_irqs(); i++) {
+               /*
+                * Don't touch the cascade interrupt. It's unusable
+                * on PIC equipped machines. See the large comment
+                * in the IO/APIC code.
+                */
                if (i != PIC_CASCADE_IR)
                        irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
        }
@@ -1045,7 +1058,7 @@ void irq_force_complete_move(struct irq_desc *desc)
                 *
                 * But in case of cpu hotplug this should be a non issue
                 * because if the affinity update happens right before all
-                * cpus rendevouz in stop machine, there is no way that the
+                * cpus rendezvous in stop machine, there is no way that the
                 * interrupt can be blocked on the target cpu because all cpus
                 * loops first with interrupts enabled in stop machine, so the
                 * old vector is not yet cleaned up when the interrupt fires.
@@ -1054,7 +1067,7 @@ void irq_force_complete_move(struct irq_desc *desc)
                 * of the interrupt on the apic/system bus would be delayed
                 * beyond the point where the target cpu disables interrupts
                 * in stop machine. I doubt that it can happen, but at least
-                * there is a theroretical chance. Virtualization might be
+                * there is a theoretical chance. Virtualization might be
                 * able to expose this, but AFAICT the IOAPIC emulation is not
                 * as stupid as the real hardware.
                 *
index 52bc217ca8c32a805947071ae28884cebe560ad6..f5a48e66e4f5462f83c4a76e89554377372964c6 100644 (file)
@@ -369,6 +369,15 @@ static int __init early_get_arch_type(void)
        return ret;
 }
 
+/* UV system found, check which APIC MODE BIOS already selected */
+static void __init early_set_apic_mode(void)
+{
+       if (x2apic_enabled())
+               uv_system_type = UV_X2APIC;
+       else
+               uv_system_type = UV_LEGACY_APIC;
+}
+
 static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id)
 {
        /* Save OEM_ID passed from ACPI MADT */
@@ -404,11 +413,12 @@ static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id)
                else
                        uv_hubless_system |= 0x8;
 
-               /* Copy APIC type */
+               /* Copy OEM Table ID */
                uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id);
 
                pr_info("UV: OEM IDs %s/%s, SystemType %d, HUBLESS ID %x\n",
                        oem_id, oem_table_id, uv_system_type, uv_hubless_system);
+
                return 0;
        }
 
@@ -453,6 +463,7 @@ static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id)
        early_set_hub_type();
 
        /* Other UV setup functions */
+       early_set_apic_mode();
        early_get_pnodeid();
        early_get_apic_socketid_shift();
        x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
@@ -472,29 +483,14 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
        if (uv_set_system_type(_oem_id, _oem_table_id) == 0)
                return 0;
 
-       /* Save and Decode OEM Table ID */
+       /* Save for display of the OEM Table ID */
        uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id);
 
-       /* This is the most common hardware variant, x2apic mode */
-       if (!strcmp(oem_table_id, "UVX"))
-               uv_system_type = UV_X2APIC;
-
-       /* Only used for very small systems, usually 1 chassis, legacy mode  */
-       else if (!strcmp(oem_table_id, "UVL"))
-               uv_system_type = UV_LEGACY_APIC;
-
-       else
-               goto badbios;
-
        pr_info("UV: OEM IDs %s/%s, System/UVType %d/0x%x, HUB RevID %d\n",
                oem_id, oem_table_id, uv_system_type, is_uv(UV_ANY),
                uv_min_hub_revision_id);
 
        return 0;
-
-badbios:
-       pr_err("UV: UVarchtype:%s not supported\n", uv_archtype);
-       BUG();
 }
 
 enum uv_system_type get_uv_system_type(void)
@@ -1671,6 +1667,9 @@ static __init int uv_system_init_hubless(void)
        if (rc < 0)
                return rc;
 
+       /* Set section block size for current node memory */
+       set_block_size();
+
        /* Create user access node */
        if (rc >= 0)
                uv_setup_proc_files(1);
index 660270359d39355f0e37d17a718c1d62ab6a4e2d..241dda687eb9f684e933efffee5f9228323f4e37 100644 (file)
@@ -94,7 +94,7 @@
  *         Remove APM dependencies in arch/i386/kernel/process.c
  *         Remove APM dependencies in drivers/char/sysrq.c
  *         Reset time across standby.
- *         Allow more inititialisation on SMP.
+ *         Allow more initialisation on SMP.
  *         Remove CONFIG_APM_POWER_OFF and make it boot time
  *         configurable (default on).
  *         Make debug only a boot time parameter (remove APM_DEBUG).
@@ -766,7 +766,7 @@ static int apm_driver_version(u_short *val)
  *     not cleared until it is acknowledged.
  *
  *     Additional information is returned in the info pointer, providing
- *     that APM 1.2 is in use. If no messges are pending the value 0x80
+ *     that APM 1.2 is in use. If no messages are pending the value 0x80
  *     is returned (No power management events pending).
  */
 static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
@@ -1025,7 +1025,7 @@ static int apm_enable_power_management(int enable)
  *     status which gives the rough battery status, and current power
  *     source. The bat value returned give an estimate as a percentage
  *     of life and a status value for the battery. The estimated life
- *     if reported is a lifetime in secodnds/minutes at current powwer
+ *     if reported is a lifetime in seconds/minutes at current power
  *     consumption.
  */
 
index 60b9f42ce3c152720425961eb52ca3983b53868f..ecd3fd6993d1a5e4f257405681c70eef77532bb6 100644 (file)
@@ -61,13 +61,6 @@ static void __used common(void)
        OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
 #endif
 
-#ifdef CONFIG_PARAVIRT_XXL
-       BLANK();
-       OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable);
-       OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable);
-       OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret);
-#endif
-
 #ifdef CONFIG_XEN
        BLANK();
        OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
index 3ca9be482a9e83b4c6545d6ccbe00e83cb131da7..d66af2950e06e7ccfecc8e0c549a572ef8db2b93 100644 (file)
@@ -877,7 +877,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
 static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
                                    struct _cpuid4_info_regs *base)
 {
-       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+       struct cpu_cacheinfo *this_cpu_ci;
        struct cacheinfo *this_leaf;
        int i, sibling;
 
index ab640abe26b686a57e89f95dafa6b7a158db1fd9..99e1656b326eb853bf8d06fee9bb9da9c7faf970 100644 (file)
@@ -482,7 +482,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
        if (pk)
                pk->pkru = init_pkru_value;
        /*
-        * Seting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
+        * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
         * cpuid bit to be set.  We need to ensure that we
         * update that bit in this CPU's "cpu_info".
         */
@@ -1330,7 +1330,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
        cpu_set_bug_bits(c);
 
-       cpu_set_core_cap_bits(c);
+       sld_setup(c);
 
        fpu__init_system(c);
 
@@ -1404,7 +1404,7 @@ static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
         * where GS is unused by the prev and next threads.
         *
         * Since neither vendor documents this anywhere that I can see,
-        * detect it directly instead of hardcoding the choice by
+        * detect it directly instead of hard-coding the choice by
         * vendor.
         *
         * I've designated AMD's behavior as the "bug" because it's
@@ -1748,6 +1748,8 @@ DEFINE_PER_CPU(bool, hardirq_stack_inuse);
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
+DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;
+
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
index 42af31b64c2ceb3f96cebd2bb54827b0f084b445..defda61f372df532dadf52dd452949135ee0881b 100644 (file)
@@ -72,6 +72,9 @@ static const struct cpuid_dep cpuid_deps[] = {
        { X86_FEATURE_AVX512_FP16,              X86_FEATURE_AVX512BW  },
        { X86_FEATURE_ENQCMD,                   X86_FEATURE_XSAVES    },
        { X86_FEATURE_PER_THREAD_MBA,           X86_FEATURE_MBA       },
+       { X86_FEATURE_SGX_LC,                   X86_FEATURE_SGX       },
+       { X86_FEATURE_SGX1,                     X86_FEATURE_SGX       },
+       { X86_FEATURE_SGX2,                     X86_FEATURE_SGX1      },
        {}
 };
 
index 1d9b8aaea06c8c9c7d14b0c30d51ded3bac83d7d..7227c15299d0b8a50ce44459e7b5d8889704db5e 100644 (file)
@@ -291,7 +291,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
                        mark_tsc_unstable("cyrix 5510/5520 detected");
        }
 #endif
-               c->x86_cache_size = 16; /* Yep 16K integrated cache thats it */
+               c->x86_cache_size = 16; /* Yep 16K integrated cache that's it */
 
                /* GXm supports extended cpuid levels 'ala' AMD */
                if (c->cpuid_level == 2) {
index 3b1b01f2b248a57cd0dee06fbd35077e42ed1a76..da696eb4821a0b159e14ea0246beea0bcaf22cfc 100644 (file)
@@ -93,15 +93,9 @@ static void init_vmx_capabilities(struct cpuinfo_x86 *c)
 }
 #endif /* CONFIG_X86_VMX_FEATURE_NAMES */
 
-static void clear_sgx_caps(void)
-{
-       setup_clear_cpu_cap(X86_FEATURE_SGX);
-       setup_clear_cpu_cap(X86_FEATURE_SGX_LC);
-}
-
 static int __init nosgx(char *str)
 {
-       clear_sgx_caps();
+       setup_clear_cpu_cap(X86_FEATURE_SGX);
 
        return 0;
 }
@@ -110,23 +104,30 @@ early_param("nosgx", nosgx);
 
 void init_ia32_feat_ctl(struct cpuinfo_x86 *c)
 {
+       bool enable_sgx_kvm = false, enable_sgx_driver = false;
        bool tboot = tboot_enabled();
-       bool enable_sgx;
+       bool enable_vmx;
        u64 msr;
 
        if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) {
                clear_cpu_cap(c, X86_FEATURE_VMX);
-               clear_sgx_caps();
+               clear_cpu_cap(c, X86_FEATURE_SGX);
                return;
        }
 
-       /*
-        * Enable SGX if and only if the kernel supports SGX and Launch Control
-        * is supported, i.e. disable SGX if the LE hash MSRs can't be written.
-        */
-       enable_sgx = cpu_has(c, X86_FEATURE_SGX) &&
-                    cpu_has(c, X86_FEATURE_SGX_LC) &&
-                    IS_ENABLED(CONFIG_X86_SGX);
+       enable_vmx = cpu_has(c, X86_FEATURE_VMX) &&
+                    IS_ENABLED(CONFIG_KVM_INTEL);
+
+       if (cpu_has(c, X86_FEATURE_SGX) && IS_ENABLED(CONFIG_X86_SGX)) {
+               /*
+                * Separate out SGX driver enabling from KVM.  This allows KVM
+                * guests to use SGX even if the kernel SGX driver refuses to
+                * use it.  This happens if flexible Launch Control is not
+                * available.
+                */
+               enable_sgx_driver = cpu_has(c, X86_FEATURE_SGX_LC);
+               enable_sgx_kvm = enable_vmx && IS_ENABLED(CONFIG_X86_SGX_KVM);
+       }
 
        if (msr & FEAT_CTL_LOCKED)
                goto update_caps;
@@ -142,15 +143,18 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c)
         * i.e. KVM is enabled, to avoid unnecessarily adding an attack vector
         * for the kernel, e.g. using VMX to hide malicious code.
         */
-       if (cpu_has(c, X86_FEATURE_VMX) && IS_ENABLED(CONFIG_KVM_INTEL)) {
+       if (enable_vmx) {
                msr |= FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
 
                if (tboot)
                        msr |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX;
        }
 
-       if (enable_sgx)
-               msr |= FEAT_CTL_SGX_ENABLED | FEAT_CTL_SGX_LC_ENABLED;
+       if (enable_sgx_kvm || enable_sgx_driver) {
+               msr |= FEAT_CTL_SGX_ENABLED;
+               if (enable_sgx_driver)
+                       msr |= FEAT_CTL_SGX_LC_ENABLED;
+       }
 
        wrmsrl(MSR_IA32_FEAT_CTL, msr);
 
@@ -173,10 +177,29 @@ update_caps:
        }
 
 update_sgx:
-       if (!(msr & FEAT_CTL_SGX_ENABLED) ||
-           !(msr & FEAT_CTL_SGX_LC_ENABLED) || !enable_sgx) {
-               if (enable_sgx)
-                       pr_err_once("SGX disabled by BIOS\n");
-               clear_sgx_caps();
+       if (!(msr & FEAT_CTL_SGX_ENABLED)) {
+               if (enable_sgx_kvm || enable_sgx_driver)
+                       pr_err_once("SGX disabled by BIOS.\n");
+               clear_cpu_cap(c, X86_FEATURE_SGX);
+               return;
+       }
+
+       /*
+        * VMX feature bit may be cleared due to being disabled in BIOS,
+        * in which case SGX virtualization cannot be supported either.
+        */
+       if (!cpu_has(c, X86_FEATURE_VMX) && enable_sgx_kvm) {
+               pr_err_once("SGX virtualization disabled due to lack of VMX.\n");
+               enable_sgx_kvm = 0;
+       }
+
+       if (!(msr & FEAT_CTL_SGX_LC_ENABLED) && enable_sgx_driver) {
+               if (!enable_sgx_kvm) {
+                       pr_err_once("SGX Launch Control is locked. Disable SGX.\n");
+                       clear_cpu_cap(c, X86_FEATURE_SGX);
+               } else {
+                       pr_err_once("SGX Launch Control is locked. Support SGX virtualization only.\n");
+                       clear_cpu_cap(c, X86_FEATURE_SGX_LC);
+               }
        }
 }
index 0e422a5448351c6d418a3bc40f40b39e5d7c2e1d..fe0bec14d7ece81f8cc9f5430d21ffdba6cc5a5f 100644 (file)
@@ -44,9 +44,9 @@ enum split_lock_detect_state {
 };
 
 /*
- * Default to sld_off because most systems do not support split lock detection
- * split_lock_setup() will switch this to sld_warn on systems that support
- * split lock detect, unless there is a command line override.
+ * Default to sld_off because most systems do not support split lock detection.
+ * sld_state_setup() will switch this to sld_warn on systems that support
+ * split lock/bus lock detect, unless there is a command line override.
  */
 static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
 static u64 msr_test_ctrl_cache __ro_after_init;
@@ -301,7 +301,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
         *  The operating system must reload CR3 to cause the TLB to be flushed"
         *
         * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
-        * should be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
+        * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
         * to be modified.
         */
        if (c->x86 == 5 && c->x86_model == 9) {
@@ -603,6 +603,7 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c)
 }
 
 static void split_lock_init(void);
+static void bus_lock_init(void);
 
 static void init_intel(struct cpuinfo_x86 *c)
 {
@@ -720,6 +721,7 @@ static void init_intel(struct cpuinfo_x86 *c)
                tsx_disable();
 
        split_lock_init();
+       bus_lock_init();
 
        intel_init_thermal(c);
 }
@@ -1020,16 +1022,15 @@ static bool split_lock_verify_msr(bool on)
        return ctrl == tmp;
 }
 
-static void __init split_lock_setup(void)
+static void __init sld_state_setup(void)
 {
        enum split_lock_detect_state state = sld_warn;
        char arg[20];
        int i, ret;
 
-       if (!split_lock_verify_msr(false)) {
-               pr_info("MSR access failed: Disabled\n");
+       if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
+           !boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
                return;
-       }
 
        ret = cmdline_find_option(boot_command_line, "split_lock_detect",
                                  arg, sizeof(arg));
@@ -1041,17 +1042,14 @@ static void __init split_lock_setup(void)
                        }
                }
        }
+       sld_state = state;
+}
 
-       switch (state) {
-       case sld_off:
-               pr_info("disabled\n");
+static void __init __split_lock_setup(void)
+{
+       if (!split_lock_verify_msr(false)) {
+               pr_info("MSR access failed: Disabled\n");
                return;
-       case sld_warn:
-               pr_info("warning about user-space split_locks\n");
-               break;
-       case sld_fatal:
-               pr_info("sending SIGBUS on user-space split_locks\n");
-               break;
        }
 
        rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);
@@ -1061,7 +1059,9 @@ static void __init split_lock_setup(void)
                return;
        }
 
-       sld_state = state;
+       /* Restore the MSR to its cached value. */
+       wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);
+
        setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
 }
 
@@ -1118,6 +1118,29 @@ bool handle_guest_split_lock(unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(handle_guest_split_lock);
 
+static void bus_lock_init(void)
+{
+       u64 val;
+
+       /*
+        * Warn and fatal are handled by #AC for split lock if #AC for
+        * split lock is supported.
+        */
+       if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) ||
+           (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
+           (sld_state == sld_warn || sld_state == sld_fatal)) ||
+           sld_state == sld_off)
+               return;
+
+       /*
+        * Enable #DB for bus lock. All bus locks are handled in #DB except
+        * split locks are handled in #AC in the fatal case.
+        */
+       rdmsrl(MSR_IA32_DEBUGCTLMSR, val);
+       val |= DEBUGCTLMSR_BUS_LOCK_DETECT;
+       wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
+}
+
 bool handle_user_split_lock(struct pt_regs *regs, long error_code)
 {
        if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
@@ -1126,6 +1149,21 @@ bool handle_user_split_lock(struct pt_regs *regs, long error_code)
        return true;
 }
 
+void handle_bus_lock(struct pt_regs *regs)
+{
+       switch (sld_state) {
+       case sld_off:
+               break;
+       case sld_warn:
+               pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n",
+                                   current->comm, current->pid, regs->ip);
+               break;
+       case sld_fatal:
+               force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
+               break;
+       }
+}
+
 /*
  * This function is called only when switching between tasks with
  * different split-lock detection modes. It sets the MSR for the
@@ -1166,7 +1204,7 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
        {}
 };
 
-void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
+static void __init split_lock_setup(struct cpuinfo_x86 *c)
 {
        const struct x86_cpu_id *m;
        u64 ia32_core_caps;
@@ -1193,5 +1231,40 @@ void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
        }
 
        cpu_model_supports_sld = true;
-       split_lock_setup();
+       __split_lock_setup();
+}
+
+static void sld_state_show(void)
+{
+       if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
+           !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
+               return;
+
+       switch (sld_state) {
+       case sld_off:
+               pr_info("disabled\n");
+               break;
+       case sld_warn:
+               if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
+                       pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n");
+               else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
+                       pr_info("#DB: warning on user-space bus_locks\n");
+               break;
+       case sld_fatal:
+               if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
+                       pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n");
+               } else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
+                       pr_info("#DB: sending SIGBUS on user-space bus_locks%s\n",
+                               boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) ?
+                               " from non-WB" : "");
+               }
+               break;
+       }
+}
+
+void __init sld_setup(struct cpuinfo_x86 *c)
+{
+       split_lock_setup(c);
+       sld_state_setup();
+       sld_state_show();
 }
index 7962355436dacf3f74eea63f74e8e8d66df5d07f..bf7fe87a7e884d3006f20f76a113e8289618ebe9 100644 (file)
@@ -529,7 +529,7 @@ static void mce_irq_work_cb(struct irq_work *entry)
  * Check if the address reported by the CPU is in a format we can parse.
  * It would be possible to add code for most other cases, but all would
  * be somewhat complicated (e.g. segment offset would require an instruction
- * parser). So only support physical addresses up to page granuality for now.
+ * parser). So only support physical addresses up to page granularity for now.
  */
 int mce_usable_address(struct mce *m)
 {
index 7b360731fc2d058480f059cbd92cffd1213a7615..4e86d97f96530a92f1f7f6b8a889757559097f66 100644 (file)
@@ -74,6 +74,7 @@ MCE_INJECT_SET(status);
 MCE_INJECT_SET(misc);
 MCE_INJECT_SET(addr);
 MCE_INJECT_SET(synd);
+MCE_INJECT_SET(ipid);
 
 #define MCE_INJECT_GET(reg)                                            \
 static int inj_##reg##_get(void *data, u64 *val)                       \
@@ -88,11 +89,13 @@ MCE_INJECT_GET(status);
 MCE_INJECT_GET(misc);
 MCE_INJECT_GET(addr);
 MCE_INJECT_GET(synd);
+MCE_INJECT_GET(ipid);
 
 DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n");
 DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n");
 DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n");
 DEFINE_SIMPLE_ATTRIBUTE(synd_fops, inj_synd_get, inj_synd_set, "%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(ipid_fops, inj_ipid_get, inj_ipid_set, "%llx\n");
 
 static void setup_inj_struct(struct mce *m)
 {
@@ -629,6 +632,8 @@ static const char readme_msg[] =
 "\t    is present in hardware. \n"
 "\t  - \"th\": Trigger APIC interrupt for Threshold errors. Causes threshold \n"
 "\t    APIC interrupt handler to handle the error. \n"
+"\n"
+"ipid:\t IPID (AMD-specific)\n"
 "\n";
 
 static ssize_t
@@ -652,6 +657,7 @@ static struct dfs_node {
        { .name = "misc",       .fops = &misc_fops,   .perm = S_IRUSR | S_IWUSR },
        { .name = "addr",       .fops = &addr_fops,   .perm = S_IRUSR | S_IWUSR },
        { .name = "synd",       .fops = &synd_fops,   .perm = S_IRUSR | S_IWUSR },
+       { .name = "ipid",       .fops = &ipid_fops,   .perm = S_IRUSR | S_IWUSR },
        { .name = "bank",       .fops = &bank_fops,   .perm = S_IRUSR | S_IWUSR },
        { .name = "flags",      .fops = &flags_fops,  .perm = S_IRUSR | S_IWUSR },
        { .name = "cpu",        .fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR },
index 83df991314c531bea4c1b90b98d2f8f61fab66ab..55ffa84d30d6949e4cf260afaac23a9219ebfcb1 100644 (file)
@@ -142,7 +142,7 @@ static struct severity {
                MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
                ),
        MCESEV(
-               KEEP, "Non signalled machine check",
+               KEEP, "Non signaled machine check",
                SER, BITCLR(MCI_STATUS_S)
                ),
 
index b935e1b5f115e797de342b9d73a9346aacdaa3b0..6a6318e9590c89f0903be4c304238329e5895ac3 100644 (file)
@@ -629,16 +629,16 @@ static ssize_t reload_store(struct device *dev,
        if (val != 1)
                return size;
 
-       tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
-       if (tmp_ret != UCODE_NEW)
-               return size;
-
        get_online_cpus();
 
        ret = check_online_cpus();
        if (ret)
                goto put;
 
+       tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+       if (tmp_ret != UCODE_NEW)
+               goto put;
+
        mutex_lock(&microcode_mutex);
        ret = microcode_reload_late();
        mutex_unlock(&microcode_mutex);
index e88bc296afca0ba9f513b37ceb336396b0431a92..22f13343b5da808f5045195a93522130dc8165d0 100644 (file)
@@ -60,23 +60,18 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback)
        set_irq_regs(old_regs);
 }
 
-int hv_setup_vmbus_irq(int irq, void (*handler)(void))
+void hv_setup_vmbus_handler(void (*handler)(void))
 {
-       /*
-        * The 'irq' argument is ignored on x86/x64 because a hard-coded
-        * interrupt vector is used for Hyper-V interrupts.
-        */
        vmbus_handler = handler;
-       return 0;
 }
+EXPORT_SYMBOL_GPL(hv_setup_vmbus_handler);
 
-void hv_remove_vmbus_irq(void)
+void hv_remove_vmbus_handler(void)
 {
        /* We have no way to deallocate the interrupt gate */
        vmbus_handler = NULL;
 }
-EXPORT_SYMBOL_GPL(hv_setup_vmbus_irq);
-EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq);
+EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler);
 
 /*
  * Routines to do per-architecture handling of stimer0
@@ -95,21 +90,17 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
        set_irq_regs(old_regs);
 }
 
-int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void))
+/* For x86/x64, override weak placeholders in hyperv_timer.c */
+void hv_setup_stimer0_handler(void (*handler)(void))
 {
-       *vector = HYPERV_STIMER0_VECTOR;
-       *irq = -1;   /* Unused on x86/x64 */
        hv_stimer0_handler = handler;
-       return 0;
 }
-EXPORT_SYMBOL_GPL(hv_setup_stimer0_irq);
 
-void hv_remove_stimer0_irq(int irq)
+void hv_remove_stimer0_handler(void)
 {
        /* We have no way to deallocate the interrupt gate */
        hv_stimer0_handler = NULL;
 }
-EXPORT_SYMBOL_GPL(hv_remove_stimer0_irq);
 
 void hv_setup_kexec_handler(void (*handler)(void))
 {
@@ -197,7 +188,7 @@ static unsigned char hv_get_nmi_reason(void)
 #ifdef CONFIG_X86_LOCAL_APIC
 /*
  * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes
- * it dificult to process CHANNELMSG_UNLOAD in case of crash. Handle
+ * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle
  * unknown NMI on the first CPU which gets it.
  */
 static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
@@ -274,12 +265,13 @@ static void __init ms_hyperv_init_platform(void)
         * Extract the features and hints
         */
        ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
-       ms_hyperv.features_b = cpuid_ebx(HYPERV_CPUID_FEATURES);
+       ms_hyperv.priv_high = cpuid_ebx(HYPERV_CPUID_FEATURES);
        ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
        ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
 
-       pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n",
-               ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features);
+       pr_info("Hyper-V: privilege flags low 0x%x, high 0x%x, hints 0x%x, misc 0x%x\n",
+               ms_hyperv.features, ms_hyperv.priv_high, ms_hyperv.hints,
+               ms_hyperv.misc_features);
 
        ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
        ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS);
@@ -325,7 +317,7 @@ static void __init ms_hyperv_init_platform(void)
                x86_platform.calibrate_cpu = hv_get_tsc_khz;
        }
 
-       if (ms_hyperv.features_b & HV_ISOLATION) {
+       if (ms_hyperv.priv_high & HV_ISOLATION) {
                ms_hyperv.isolation_config_a = cpuid_eax(HYPERV_CPUID_ISOLATION_CONFIG);
                ms_hyperv.isolation_config_b = cpuid_ebx(HYPERV_CPUID_ISOLATION_CONFIG);
 
@@ -428,7 +420,7 @@ static void __init ms_hyperv_init_platform(void)
 
        /*
         * Hyper-V doesn't provide irq remapping for IO-APIC. To enable x2apic,
-        * set x2apic destination mode to physcial mode when x2apic is available
+        * set x2apic destination mode to physical mode when x2apic is available
         * and Hyper-V IOMMU driver makes sure cpus assigned with IO-APIC irqs
         * have 8-bit APIC id.
         */
index 9231640782fa23fdb4974e72ec4699b4b5491029..0c3b372318b70776dc915279c005130a0454e7ff 100644 (file)
@@ -434,7 +434,7 @@ set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
        state->range_sizek  = sizek - second_sizek;
 }
 
-/* Mininum size of mtrr block that can take hole: */
+/* Minimum size of mtrr block that can take hole: */
 static u64 mtrr_chunk_size __initdata = (256ULL<<20);
 
 static int __init parse_mtrr_chunk_size_opt(char *p)
index 28c8a23aa42ee9b0fc253f6f34e883a334f4d8ba..a76694bffe86a43c60e92079f41e1134049d04ed 100644 (file)
@@ -799,7 +799,7 @@ void mtrr_ap_init(void)
         *
         * This routine is called in two cases:
         *
-        *   1. very earily time of software resume, when there absolutely
+        *   1. very early time of software resume, when there absolutely
         *      isn't mtrr entry changes;
         *
         *   2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
index 698bb26aeb6e95ed06599b11bcf3f72f105fc842..23001ae03e82b924d66ed56e70194d533bbff5a0 100644 (file)
@@ -192,7 +192,7 @@ static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
  *     Intel(R) Xeon(R)  CPU E5-2608L v3  @  2.00GHz
  *     Intel(R) Xeon(R)  CPU E5-2658A v3  @  2.20GHz
  *
- * Probe by trying to write the first of the L3 cach mask registers
+ * Probe by trying to write the first of the L3 cache mask registers
  * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
  * is always 20 on hsw server parts. The minimum cache bitmask length
  * allowed for HSW server is always 2 bits. Hardcode all of them.
index 7ac31210e452abfa4d4e8af9ff2daba6039c2a9b..dbeaa8409313492396bbe9a1dbf0f67c0b4de4d0 100644 (file)
@@ -387,7 +387,7 @@ void mon_event_count(void *info)
  * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
  * that:
  *
- *   current bandwdith(cur_bw) < user specified bandwidth(user_bw)
+ *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
  *
  * This uses the MBM counters to measure the bandwidth and MBA throttle
  * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
@@ -397,7 +397,7 @@ void mon_event_count(void *info)
  * timer. Having 1s interval makes the calculation of bandwidth simpler.
  *
  * Although MBA's goal is to restrict the bandwidth to a maximum, there may
- * be a need to increase the bandwidth to avoid uncecessarily restricting
+ * be a need to increase the bandwidth to avoid unnecessarily restricting
  * the L2 <-> L3 traffic.
  *
  * Since MBA controls the L2 external bandwidth where as MBM measures the
@@ -480,7 +480,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 
        /*
         * Delta values are updated dynamically package wise for each
-        * rdtgrp everytime the throttle MSR changes value.
+        * rdtgrp every time the throttle MSR changes value.
         *
         * This is because (1)the increase in bandwidth is not perfectly
         * linear and only "approximately" linear even when the hardware
index e916646adc69f8f5293b5ca1ed4ca1b34b7f8a00..935af2ac6b1af15c677d6d9fffb75b0e0eec830e 100644 (file)
@@ -1307,7 +1307,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
                 * If the thread does not get on the CPU for whatever
                 * reason and the process which sets up the region is
                 * interrupted then this will leave the thread in runnable
-                * state and once it gets on the CPU it will derefence
+                * state and once it gets on the CPU it will dereference
                 * the cleared, but not freed, plr struct resulting in an
                 * empty pseudo-locking loop.
                 */
@@ -1391,7 +1391,7 @@ out:
  * group is removed from user space via a "rmdir" from userspace or the
  * unmount of the resctrl filesystem. On removal the resource group does
  * not go back to pseudo-locksetup mode before it is removed, instead it is
- * removed directly. There is thus assymmetry with the creation where the
+ * removed directly. There is thus asymmetry with the creation where the
  * &struct pseudo_lock_region is removed here while it was not created in
  * rdtgroup_pseudo_lock_create().
  *
index f9190adc52cb9beda0db99c07e805dc0f7c26d21..01fd30e7829dcf8accb31891e734ea770dd68b8b 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * User interface for Resource Alloction in Resource Director Technology(RDT)
+ * User interface for Resource Allocation in Resource Director Technology(RDT)
  *
  * Copyright (C) 2016 Intel Corporation
  *
@@ -294,7 +294,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
 /*
  * This is safe against resctrl_sched_in() called from __switch_to()
  * because __switch_to() is executed with interrupts disabled. A local call
- * from update_closid_rmid() is proteced against __switch_to() because
+ * from update_closid_rmid() is protected against __switch_to() because
  * preemption is disabled.
  */
 static void update_cpu_closid_rmid(void *info)
@@ -2555,7 +2555,7 @@ static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
 /*
  * This creates a directory mon_data which contains the monitored data.
  *
- * mon_data has one directory for each domain whic are named
+ * mon_data has one directory for each domain which are named
  * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data
  * with L3 domain looks as below:
  * ./mon_data:
index 972ec3bfa9c0cfc0a743d30f3fc01df7150b9b87..21d1f062895a8dfdf4c66ecbec8d91da3feadb5c 100644 (file)
@@ -36,6 +36,8 @@ static const struct cpuid_bit cpuid_bits[] = {
        { X86_FEATURE_CDP_L2,           CPUID_ECX,  2, 0x00000010, 2 },
        { X86_FEATURE_MBA,              CPUID_EBX,  3, 0x00000010, 0 },
        { X86_FEATURE_PER_THREAD_MBA,   CPUID_ECX,  0, 0x00000010, 3 },
+       { X86_FEATURE_SGX1,             CPUID_EAX,  0, 0x00000012, 0 },
+       { X86_FEATURE_SGX2,             CPUID_EAX,  1, 0x00000012, 0 },
        { X86_FEATURE_HW_PSTATE,        CPUID_EDX,  7, 0x80000007, 0 },
        { X86_FEATURE_CPB,              CPUID_EDX,  9, 0x80000007, 0 },
        { X86_FEATURE_PROC_FEEDBACK,    CPUID_EDX, 11, 0x80000007, 0 },
index 91d3dc784a2925ec39d8a2251822df36d41ec017..9c1656779b2a058ff6f15d30bbac00ca557fd148 100644 (file)
@@ -3,3 +3,4 @@ obj-y += \
        encl.o \
        ioctl.o \
        main.o
+obj-$(CONFIG_X86_SGX_KVM)      += virt.o
diff --git a/arch/x86/kernel/cpu/sgx/arch.h b/arch/x86/kernel/cpu/sgx/arch.h
deleted file mode 100644 (file)
index dd7602c..0000000
+++ /dev/null
@@ -1,338 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/**
- * Copyright(c) 2016-20 Intel Corporation.
- *
- * Contains data structures defined by the SGX architecture.  Data structures
- * defined by the Linux software stack should not be placed here.
- */
-#ifndef _ASM_X86_SGX_ARCH_H
-#define _ASM_X86_SGX_ARCH_H
-
-#include <linux/bits.h>
-#include <linux/types.h>
-
-/* The SGX specific CPUID function. */
-#define SGX_CPUID              0x12
-/* EPC enumeration. */
-#define SGX_CPUID_EPC          2
-/* An invalid EPC section, i.e. the end marker. */
-#define SGX_CPUID_EPC_INVALID  0x0
-/* A valid EPC section. */
-#define SGX_CPUID_EPC_SECTION  0x1
-/* The bitmask for the EPC section type. */
-#define SGX_CPUID_EPC_MASK     GENMASK(3, 0)
-
-/**
- * enum sgx_return_code - The return code type for ENCLS, ENCLU and ENCLV
- * %SGX_NOT_TRACKED:           Previous ETRACK's shootdown sequence has not
- *                             been completed yet.
- * %SGX_INVALID_EINITTOKEN:    EINITTOKEN is invalid and enclave signer's
- *                             public key does not match IA32_SGXLEPUBKEYHASH.
- * %SGX_UNMASKED_EVENT:                An unmasked event, e.g. INTR, was received
- */
-enum sgx_return_code {
-       SGX_NOT_TRACKED                 = 11,
-       SGX_INVALID_EINITTOKEN          = 16,
-       SGX_UNMASKED_EVENT              = 128,
-};
-
-/* The modulus size for 3072-bit RSA keys. */
-#define SGX_MODULUS_SIZE 384
-
-/**
- * enum sgx_miscselect - additional information to an SSA frame
- * %SGX_MISC_EXINFO:   Report #PF or #GP to the SSA frame.
- *
- * Save State Area (SSA) is a stack inside the enclave used to store processor
- * state when an exception or interrupt occurs. This enum defines additional
- * information stored to an SSA frame.
- */
-enum sgx_miscselect {
-       SGX_MISC_EXINFO         = BIT(0),
-};
-
-#define SGX_MISC_RESERVED_MASK GENMASK_ULL(63, 1)
-
-#define SGX_SSA_GPRS_SIZE              184
-#define SGX_SSA_MISC_EXINFO_SIZE       16
-
-/**
- * enum sgx_attributes - the attributes field in &struct sgx_secs
- * %SGX_ATTR_INIT:             Enclave can be entered (is initialized).
- * %SGX_ATTR_DEBUG:            Allow ENCLS(EDBGRD) and ENCLS(EDBGWR).
- * %SGX_ATTR_MODE64BIT:                Tell that this a 64-bit enclave.
- * %SGX_ATTR_PROVISIONKEY:      Allow to use provisioning keys for remote
- *                             attestation.
- * %SGX_ATTR_KSS:              Allow to use key separation and sharing (KSS).
- * %SGX_ATTR_EINITTOKENKEY:    Allow to use token signing key that is used to
- *                             sign cryptographic tokens that can be passed to
- *                             EINIT as an authorization to run an enclave.
- */
-enum sgx_attribute {
-       SGX_ATTR_INIT           = BIT(0),
-       SGX_ATTR_DEBUG          = BIT(1),
-       SGX_ATTR_MODE64BIT      = BIT(2),
-       SGX_ATTR_PROVISIONKEY   = BIT(4),
-       SGX_ATTR_EINITTOKENKEY  = BIT(5),
-       SGX_ATTR_KSS            = BIT(7),
-};
-
-#define SGX_ATTR_RESERVED_MASK (BIT_ULL(3) | BIT_ULL(6) | GENMASK_ULL(63, 8))
-
-/**
- * struct sgx_secs - SGX Enclave Control Structure (SECS)
- * @size:              size of the address space
- * @base:              base address of the  address space
- * @ssa_frame_size:    size of an SSA frame
- * @miscselect:                additional information stored to an SSA frame
- * @attributes:                attributes for enclave
- * @xfrm:              XSave-Feature Request Mask (subset of XCR0)
- * @mrenclave:         SHA256-hash of the enclave contents
- * @mrsigner:          SHA256-hash of the public key used to sign the SIGSTRUCT
- * @config_id:         a user-defined value that is used in key derivation
- * @isv_prod_id:       a user-defined value that is used in key derivation
- * @isv_svn:           a user-defined value that is used in key derivation
- * @config_svn:                a user-defined value that is used in key derivation
- *
- * SGX Enclave Control Structure (SECS) is a special enclave page that is not
- * visible in the address space. In fact, this structure defines the address
- * range and other global attributes for the enclave and it is the first EPC
- * page created for any enclave. It is moved from a temporary buffer to an EPC
- * by the means of ENCLS[ECREATE] function.
- */
-struct sgx_secs {
-       u64 size;
-       u64 base;
-       u32 ssa_frame_size;
-       u32 miscselect;
-       u8  reserved1[24];
-       u64 attributes;
-       u64 xfrm;
-       u32 mrenclave[8];
-       u8  reserved2[32];
-       u32 mrsigner[8];
-       u8  reserved3[32];
-       u32 config_id[16];
-       u16 isv_prod_id;
-       u16 isv_svn;
-       u16 config_svn;
-       u8  reserved4[3834];
-} __packed;
-
-/**
- * enum sgx_tcs_flags - execution flags for TCS
- * %SGX_TCS_DBGOPTIN:  If enabled allows single-stepping and breakpoints
- *                     inside an enclave. It is cleared by EADD but can
- *                     be set later with EDBGWR.
- */
-enum sgx_tcs_flags {
-       SGX_TCS_DBGOPTIN        = 0x01,
-};
-
-#define SGX_TCS_RESERVED_MASK  GENMASK_ULL(63, 1)
-#define SGX_TCS_RESERVED_SIZE  4024
-
-/**
- * struct sgx_tcs - Thread Control Structure (TCS)
- * @state:             used to mark an entered TCS
- * @flags:             execution flags (cleared by EADD)
- * @ssa_offset:                SSA stack offset relative to the enclave base
- * @ssa_index:         the current SSA frame index (cleard by EADD)
- * @nr_ssa_frames:     the number of frame in the SSA stack
- * @entry_offset:      entry point offset relative to the enclave base
- * @exit_addr:         address outside the enclave to exit on an exception or
- *                     interrupt
- * @fs_offset:         offset relative to the enclave base to become FS
- *                     segment inside the enclave
- * @gs_offset:         offset relative to the enclave base to become GS
- *                     segment inside the enclave
- * @fs_limit:          size to become a new FS-limit (only 32-bit enclaves)
- * @gs_limit:          size to become a new GS-limit (only 32-bit enclaves)
- *
- * Thread Control Structure (TCS) is an enclave page visible in its address
- * space that defines an entry point inside the enclave. A thread enters inside
- * an enclave by supplying address of TCS to ENCLU(EENTER). A TCS can be entered
- * by only one thread at a time.
- */
-struct sgx_tcs {
-       u64 state;
-       u64 flags;
-       u64 ssa_offset;
-       u32 ssa_index;
-       u32 nr_ssa_frames;
-       u64 entry_offset;
-       u64 exit_addr;
-       u64 fs_offset;
-       u64 gs_offset;
-       u32 fs_limit;
-       u32 gs_limit;
-       u8  reserved[SGX_TCS_RESERVED_SIZE];
-} __packed;
-
-/**
- * struct sgx_pageinfo - an enclave page descriptor
- * @addr:      address of the enclave page
- * @contents:  pointer to the page contents
- * @metadata:  pointer either to a SECINFO or PCMD instance
- * @secs:      address of the SECS page
- */
-struct sgx_pageinfo {
-       u64 addr;
-       u64 contents;
-       u64 metadata;
-       u64 secs;
-} __packed __aligned(32);
-
-
-/**
- * enum sgx_page_type - bits in the SECINFO flags defining the page type
- * %SGX_PAGE_TYPE_SECS:        a SECS page
- * %SGX_PAGE_TYPE_TCS: a TCS page
- * %SGX_PAGE_TYPE_REG: a regular page
- * %SGX_PAGE_TYPE_VA:  a VA page
- * %SGX_PAGE_TYPE_TRIM:        a page in trimmed state
- */
-enum sgx_page_type {
-       SGX_PAGE_TYPE_SECS,
-       SGX_PAGE_TYPE_TCS,
-       SGX_PAGE_TYPE_REG,
-       SGX_PAGE_TYPE_VA,
-       SGX_PAGE_TYPE_TRIM,
-};
-
-#define SGX_NR_PAGE_TYPES      5
-#define SGX_PAGE_TYPE_MASK     GENMASK(7, 0)
-
-/**
- * enum sgx_secinfo_flags - the flags field in &struct sgx_secinfo
- * %SGX_SECINFO_R:     allow read
- * %SGX_SECINFO_W:     allow write
- * %SGX_SECINFO_X:     allow execution
- * %SGX_SECINFO_SECS:  a SECS page
- * %SGX_SECINFO_TCS:   a TCS page
- * %SGX_SECINFO_REG:   a regular page
- * %SGX_SECINFO_VA:    a VA page
- * %SGX_SECINFO_TRIM:  a page in trimmed state
- */
-enum sgx_secinfo_flags {
-       SGX_SECINFO_R                   = BIT(0),
-       SGX_SECINFO_W                   = BIT(1),
-       SGX_SECINFO_X                   = BIT(2),
-       SGX_SECINFO_SECS                = (SGX_PAGE_TYPE_SECS << 8),
-       SGX_SECINFO_TCS                 = (SGX_PAGE_TYPE_TCS << 8),
-       SGX_SECINFO_REG                 = (SGX_PAGE_TYPE_REG << 8),
-       SGX_SECINFO_VA                  = (SGX_PAGE_TYPE_VA << 8),
-       SGX_SECINFO_TRIM                = (SGX_PAGE_TYPE_TRIM << 8),
-};
-
-#define SGX_SECINFO_PERMISSION_MASK    GENMASK_ULL(2, 0)
-#define SGX_SECINFO_PAGE_TYPE_MASK     (SGX_PAGE_TYPE_MASK << 8)
-#define SGX_SECINFO_RESERVED_MASK      ~(SGX_SECINFO_PERMISSION_MASK | \
-                                         SGX_SECINFO_PAGE_TYPE_MASK)
-
-/**
- * struct sgx_secinfo - describes attributes of an EPC page
- * @flags:     permissions and type
- *
- * Used together with ENCLS leaves that add or modify an EPC page to an
- * enclave to define page permissions and type.
- */
-struct sgx_secinfo {
-       u64 flags;
-       u8  reserved[56];
-} __packed __aligned(64);
-
-#define SGX_PCMD_RESERVED_SIZE 40
-
-/**
- * struct sgx_pcmd - Paging Crypto Metadata (PCMD)
- * @enclave_id:        enclave identifier
- * @mac:       MAC over PCMD, page contents and isvsvn
- *
- * PCMD is stored for every swapped page to the regular memory. When ELDU loads
- * the page back it recalculates the MAC by using a isvsvn number stored in a
- * VA page. Together these two structures bring integrity and rollback
- * protection.
- */
-struct sgx_pcmd {
-       struct sgx_secinfo secinfo;
-       u64 enclave_id;
-       u8  reserved[SGX_PCMD_RESERVED_SIZE];
-       u8  mac[16];
-} __packed __aligned(128);
-
-#define SGX_SIGSTRUCT_RESERVED1_SIZE 84
-#define SGX_SIGSTRUCT_RESERVED2_SIZE 20
-#define SGX_SIGSTRUCT_RESERVED3_SIZE 32
-#define SGX_SIGSTRUCT_RESERVED4_SIZE 12
-
-/**
- * struct sgx_sigstruct_header -  defines author of the enclave
- * @header1:           constant byte string
- * @vendor:            must be either 0x0000 or 0x8086
- * @date:              YYYYMMDD in BCD
- * @header2:           costant byte string
- * @swdefined:         software defined value
- */
-struct sgx_sigstruct_header {
-       u64 header1[2];
-       u32 vendor;
-       u32 date;
-       u64 header2[2];
-       u32 swdefined;
-       u8  reserved1[84];
-} __packed;
-
-/**
- * struct sgx_sigstruct_body - defines contents of the enclave
- * @miscselect:                additional information stored to an SSA frame
- * @misc_mask:         required miscselect in SECS
- * @attributes:                attributes for enclave
- * @xfrm:              XSave-Feature Request Mask (subset of XCR0)
- * @attributes_mask:   required attributes in SECS
- * @xfrm_mask:         required XFRM in SECS
- * @mrenclave:         SHA256-hash of the enclave contents
- * @isvprodid:         a user-defined value that is used in key derivation
- * @isvsvn:            a user-defined value that is used in key derivation
- */
-struct sgx_sigstruct_body {
-       u32 miscselect;
-       u32 misc_mask;
-       u8  reserved2[20];
-       u64 attributes;
-       u64 xfrm;
-       u64 attributes_mask;
-       u64 xfrm_mask;
-       u8  mrenclave[32];
-       u8  reserved3[32];
-       u16 isvprodid;
-       u16 isvsvn;
-} __packed;
-
-/**
- * struct sgx_sigstruct - an enclave signature
- * @header:            defines author of the enclave
- * @modulus:           the modulus of the public key
- * @exponent:          the exponent of the public key
- * @signature:         the signature calculated over the fields except modulus,
- * @body:              defines contents of the enclave
- * @q1:                        a value used in RSA signature verification
- * @q2:                        a value used in RSA signature verification
- *
- * Header and body are the parts that are actual signed. The remaining fields
- * define the signature of the enclave.
- */
-struct sgx_sigstruct {
-       struct sgx_sigstruct_header header;
-       u8  modulus[SGX_MODULUS_SIZE];
-       u32 exponent;
-       u8  signature[SGX_MODULUS_SIZE];
-       struct sgx_sigstruct_body body;
-       u8  reserved4[12];
-       u8  q1[SGX_MODULUS_SIZE];
-       u8  q2[SGX_MODULUS_SIZE];
-} __packed;
-
-#define SGX_LAUNCH_TOKEN_SIZE 304
-
-#endif /* _ASM_X86_SGX_ARCH_H */
index 8ce6d8371cfbf74eba9a4eba624220350d43f1c9..aa9b8b8688676fc66ebc6fdd605fe4467991af13 100644 (file)
@@ -136,10 +136,6 @@ static const struct file_operations sgx_encl_fops = {
        .get_unmapped_area      = sgx_get_unmapped_area,
 };
 
-const struct file_operations sgx_provision_fops = {
-       .owner                  = THIS_MODULE,
-};
-
 static struct miscdevice sgx_dev_enclave = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "sgx_enclave",
@@ -147,13 +143,6 @@ static struct miscdevice sgx_dev_enclave = {
        .fops = &sgx_encl_fops,
 };
 
-static struct miscdevice sgx_dev_provision = {
-       .minor = MISC_DYNAMIC_MINOR,
-       .name = "sgx_provision",
-       .nodename = "sgx_provision",
-       .fops = &sgx_provision_fops,
-};
-
 int __init sgx_drv_init(void)
 {
        unsigned int eax, ebx, ecx, edx;
@@ -187,11 +176,5 @@ int __init sgx_drv_init(void)
        if (ret)
                return ret;
 
-       ret = misc_register(&sgx_dev_provision);
-       if (ret) {
-               misc_deregister(&sgx_dev_enclave);
-               return ret;
-       }
-
        return 0;
 }
index 7449ef33f0819db43aae1283ac07dddd35ffc636..3be203297988f09e627388f2f69d4fb8263eaf46 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/suspend.h>
 #include <linux/sched/mm.h>
-#include "arch.h"
+#include <asm/sgx.h>
 #include "encl.h"
 #include "encls.h"
 #include "sgx.h"
@@ -78,7 +78,7 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
 
        ret = __sgx_encl_eldu(encl_page, epc_page, secs_page);
        if (ret) {
-               sgx_free_epc_page(epc_page);
+               sgx_encl_free_epc_page(epc_page);
                return ERR_PTR(ret);
        }
 
@@ -404,7 +404,7 @@ void sgx_encl_release(struct kref *ref)
                        if (sgx_unmark_page_reclaimable(entry->epc_page))
                                continue;
 
-                       sgx_free_epc_page(entry->epc_page);
+                       sgx_encl_free_epc_page(entry->epc_page);
                        encl->secs_child_cnt--;
                        entry->epc_page = NULL;
                }
@@ -415,7 +415,7 @@ void sgx_encl_release(struct kref *ref)
        xa_destroy(&encl->page_array);
 
        if (!encl->secs_child_cnt && encl->secs.epc_page) {
-               sgx_free_epc_page(encl->secs.epc_page);
+               sgx_encl_free_epc_page(encl->secs.epc_page);
                encl->secs.epc_page = NULL;
        }
 
@@ -423,7 +423,7 @@ void sgx_encl_release(struct kref *ref)
                va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
                                           list);
                list_del(&va_page->list);
-               sgx_free_epc_page(va_page->epc_page);
+               sgx_encl_free_epc_page(va_page->epc_page);
                kfree(va_page);
        }
 
@@ -686,7 +686,7 @@ struct sgx_epc_page *sgx_alloc_va_page(void)
        ret = __epa(sgx_get_epc_virt_addr(epc_page));
        if (ret) {
                WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret);
-               sgx_free_epc_page(epc_page);
+               sgx_encl_free_epc_page(epc_page);
                return ERR_PTR(-EFAULT);
        }
 
@@ -735,3 +735,24 @@ bool sgx_va_page_full(struct sgx_va_page *va_page)
 
        return slot == SGX_VA_SLOT_COUNT;
 }
+
+/**
+ * sgx_encl_free_epc_page - free an EPC page assigned to an enclave
+ * @page:      EPC page to be freed
+ *
+ * Free an EPC page assigned to an enclave. It does EREMOVE for the page, and
+ * only upon success, it puts the page back to free page list.  Otherwise, it
+ * gives a WARNING to indicate page is leaked.
+ */
+void sgx_encl_free_epc_page(struct sgx_epc_page *page)
+{
+       int ret;
+
+       WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED);
+
+       ret = __eremove(sgx_get_epc_virt_addr(page));
+       if (WARN_ONCE(ret, EREMOVE_ERROR_MESSAGE, ret, ret))
+               return;
+
+       sgx_free_epc_page(page);
+}
index d8d30ccbef4c95017bec5926f1fab6d04fb12185..6e74f85b626451752b0ec8a0b02f64bd77e232c4 100644 (file)
@@ -115,5 +115,6 @@ struct sgx_epc_page *sgx_alloc_va_page(void);
 unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page);
 void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset);
 bool sgx_va_page_full(struct sgx_va_page *va_page);
+void sgx_encl_free_epc_page(struct sgx_epc_page *page);
 
 #endif /* _X86_ENCL_H */
index 443188fe7e7057bb0dba10bda49acfdbf42757e4..9b204843b78d3ec7ff3903548358d39c4665dee8 100644 (file)
 #include <asm/traps.h>
 #include "sgx.h"
 
-enum sgx_encls_function {
-       ECREATE = 0x00,
-       EADD    = 0x01,
-       EINIT   = 0x02,
-       EREMOVE = 0x03,
-       EDGBRD  = 0x04,
-       EDGBWR  = 0x05,
-       EEXTEND = 0x06,
-       ELDU    = 0x08,
-       EBLOCK  = 0x09,
-       EPA     = 0x0A,
-       EWB     = 0x0B,
-       ETRACK  = 0x0C,
-};
-
 /**
  * ENCLS_FAULT_FLAG - flag signifying an ENCLS return code is a trapnr
  *
@@ -55,6 +40,19 @@ enum sgx_encls_function {
        } while (0);                                                      \
 }
 
+/*
+ * encls_faulted() - Check if an ENCLS leaf faulted given an error code
+ * @ret:       the return value of an ENCLS leaf function call
+ *
+ * Return:
+ * - true:     ENCLS leaf faulted.
+ * - false:    Otherwise.
+ */
+static inline bool encls_faulted(int ret)
+{
+       return ret & ENCLS_FAULT_FLAG;
+}
+
 /**
  * encls_failed() - Check if an ENCLS function failed
  * @ret:       the return value of an ENCLS function call
@@ -65,7 +63,7 @@ enum sgx_encls_function {
  */
 static inline bool encls_failed(int ret)
 {
-       if (ret & ENCLS_FAULT_FLAG)
+       if (encls_faulted(ret))
                return ENCLS_TRAPNR(ret) != X86_TRAP_PF;
 
        return !!ret;
index 90a5caf76939a7bc71423ea98b5bc7254c1c1261..83df20e3e633353ca0b707cda6cae51cb6f09714 100644 (file)
@@ -2,6 +2,7 @@
 /*  Copyright(c) 2016-20 Intel Corporation. */
 
 #include <asm/mman.h>
+#include <asm/sgx.h>
 #include <linux/mman.h>
 #include <linux/delay.h>
 #include <linux/file.h>
@@ -47,7 +48,7 @@ static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
        encl->page_cnt--;
 
        if (va_page) {
-               sgx_free_epc_page(va_page->epc_page);
+               sgx_encl_free_epc_page(va_page->epc_page);
                list_del(&va_page->list);
                kfree(va_page);
        }
@@ -117,7 +118,7 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
        return 0;
 
 err_out:
-       sgx_free_epc_page(encl->secs.epc_page);
+       sgx_encl_free_epc_page(encl->secs.epc_page);
        encl->secs.epc_page = NULL;
 
 err_out_backing:
@@ -365,7 +366,7 @@ err_out_unlock:
        mmap_read_unlock(current->mm);
 
 err_out_free:
-       sgx_free_epc_page(epc_page);
+       sgx_encl_free_epc_page(epc_page);
        kfree(encl_page);
 
        return ret;
@@ -495,7 +496,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
                         void *token)
 {
        u64 mrsigner[4];
-       int i, j, k;
+       int i, j;
        void *addr;
        int ret;
 
@@ -544,8 +545,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
 
                        preempt_disable();
 
-                       for (k = 0; k < 4; k++)
-                               wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + k, mrsigner[k]);
+                       sgx_update_lepubkeyhash(mrsigner);
 
                        ret = __einit(sigstruct, token, addr);
 
@@ -568,7 +568,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
                }
        }
 
-       if (ret & ENCLS_FAULT_FLAG) {
+       if (encls_faulted(ret)) {
                if (encls_failed(ret))
                        ENCLS_WARN(ret, "EINIT");
 
@@ -604,7 +604,6 @@ static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
 {
        struct sgx_sigstruct *sigstruct;
        struct sgx_enclave_init init_arg;
-       struct page *initp_page;
        void *token;
        int ret;
 
@@ -615,11 +614,15 @@ static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
        if (copy_from_user(&init_arg, arg, sizeof(init_arg)))
                return -EFAULT;
 
-       initp_page = alloc_page(GFP_KERNEL);
-       if (!initp_page)
+       /*
+        * 'sigstruct' must be on a page boundary and 'token' on a 512 byte
+        * boundary.  kmalloc() will give this alignment when allocating
+        * PAGE_SIZE bytes.
+        */
+       sigstruct = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!sigstruct)
                return -ENOMEM;
 
-       sigstruct = kmap(initp_page);
        token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2);
        memset(token, 0, SGX_LAUNCH_TOKEN_SIZE);
 
@@ -645,8 +648,7 @@ static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
        ret = sgx_encl_init(encl, sigstruct, token);
 
 out:
-       kunmap(initp_page);
-       __free_page(initp_page);
+       kfree(sigstruct);
        return ret;
 }
 
@@ -665,24 +667,11 @@ out:
 static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg)
 {
        struct sgx_enclave_provision params;
-       struct file *file;
 
        if (copy_from_user(&params, arg, sizeof(params)))
                return -EFAULT;
 
-       file = fget(params.fd);
-       if (!file)
-               return -EINVAL;
-
-       if (file->f_op != &sgx_provision_fops) {
-               fput(file);
-               return -EINVAL;
-       }
-
-       encl->attributes_mask |= SGX_ATTR_PROVISIONKEY;
-
-       fput(file);
-       return 0;
+       return sgx_set_attribute(&encl->attributes_mask, params.fd);
 }
 
 long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
index 8df81a3ed945738e5da7440ade5424bf53f17a11..63d3de02bbccb50ba7ab015227ac4cc680954877 100644 (file)
@@ -1,14 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0
 /*  Copyright(c) 2016-20 Intel Corporation. */
 
+#include <linux/file.h>
 #include <linux/freezer.h>
 #include <linux/highmem.h>
 #include <linux/kthread.h>
+#include <linux/miscdevice.h>
 #include <linux/pagemap.h>
 #include <linux/ratelimit.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/slab.h>
+#include <asm/sgx.h>
 #include "driver.h"
 #include "encl.h"
 #include "encls.h"
@@ -23,42 +26,58 @@ static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);
  * with sgx_reclaimer_lock acquired.
  */
 static LIST_HEAD(sgx_active_page_list);
-
 static DEFINE_SPINLOCK(sgx_reclaimer_lock);
 
+/* Variables protected by the free page list lock are placed before the lock. */
+static unsigned long sgx_nr_free_pages;
+
+/* Nodes with one or more EPC sections. */
+static nodemask_t sgx_numa_mask;
+
+/*
+ * Array with one list_head for each possible NUMA node.  Each
+ * list contains all the sgx_epc_section's which are on that
+ * node.
+ */
+static struct sgx_numa_node *sgx_numa_nodes;
+
+static LIST_HEAD(sgx_dirty_page_list);
+
 /*
- * Reset dirty EPC pages to uninitialized state. Laundry can be left with SECS
- * pages whose child pages blocked EREMOVE.
+ * Reset post-kexec EPC pages to the uninitialized state. The pages are removed
+ * from the input list, and made available for the page allocator. SECS pages
+ * preceding their children in the input list are left intact.
  */
-static void sgx_sanitize_section(struct sgx_epc_section *section)
+static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
 {
        struct sgx_epc_page *page;
        LIST_HEAD(dirty);
        int ret;
 
-       /* init_laundry_list is thread-local, no need for a lock: */
-       while (!list_empty(&section->init_laundry_list)) {
+       /* dirty_page_list is thread-local, no need for a lock: */
+       while (!list_empty(dirty_page_list)) {
                if (kthread_should_stop())
                        return;
 
-               /* needed for access to ->page_list: */
-               spin_lock(&section->lock);
-
-               page = list_first_entry(&section->init_laundry_list,
-                                       struct sgx_epc_page, list);
+               page = list_first_entry(dirty_page_list, struct sgx_epc_page, list);
 
                ret = __eremove(sgx_get_epc_virt_addr(page));
-               if (!ret)
-                       list_move(&page->list, &section->page_list);
-               else
+               if (!ret) {
+                       /*
+                        * page is now sanitized.  Make it available via the SGX
+                        * page allocator:
+                        */
+                       list_del(&page->list);
+                       sgx_free_epc_page(page);
+               } else {
+                       /* The page is not yet clean - move to the dirty list. */
                        list_move_tail(&page->list, &dirty);
-
-               spin_unlock(&section->lock);
+               }
 
                cond_resched();
        }
 
-       list_splice(&dirty, &section->init_laundry_list);
+       list_splice(&dirty, dirty_page_list);
 }
 
 static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
@@ -195,10 +214,10 @@ static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
 
 /*
  * Swap page to the regular memory transformed to the blocked state by using
- * EBLOCK, which means that it can no loger be referenced (no new TLB entries).
+ * EBLOCK, which means that it can no longer be referenced (no new TLB entries).
  *
  * The first trial just tries to write the page assuming that some other thread
- * has reset the count for threads inside the enlave by using ETRACK, and
+ * has reset the count for threads inside the enclave by using ETRACK, and
  * previous thread count has been zeroed out. The second trial calls ETRACK
  * before EWB. If that fails we kick all the HW threads out, and then do EWB,
  * which should be guaranteed the succeed.
@@ -278,7 +297,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
 
                sgx_encl_ewb(encl->secs.epc_page, &secs_backing);
 
-               sgx_free_epc_page(encl->secs.epc_page);
+               sgx_encl_free_epc_page(encl->secs.epc_page);
                encl->secs.epc_page = NULL;
 
                sgx_encl_put_backing(&secs_backing, true);
@@ -308,6 +327,7 @@ static void sgx_reclaim_pages(void)
        struct sgx_epc_section *section;
        struct sgx_encl_page *encl_page;
        struct sgx_epc_page *epc_page;
+       struct sgx_numa_node *node;
        pgoff_t page_index;
        int cnt = 0;
        int ret;
@@ -379,50 +399,33 @@ skip:
                epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
 
                section = &sgx_epc_sections[epc_page->section];
-               spin_lock(&section->lock);
-               list_add_tail(&epc_page->list, &section->page_list);
-               section->free_cnt++;
-               spin_unlock(&section->lock);
-       }
-}
-
-static unsigned long sgx_nr_free_pages(void)
-{
-       unsigned long cnt = 0;
-       int i;
-
-       for (i = 0; i < sgx_nr_epc_sections; i++)
-               cnt += sgx_epc_sections[i].free_cnt;
+               node = section->node;
 
-       return cnt;
+               spin_lock(&node->lock);
+               list_add_tail(&epc_page->list, &node->free_page_list);
+               sgx_nr_free_pages++;
+               spin_unlock(&node->lock);
+       }
 }
 
 static bool sgx_should_reclaim(unsigned long watermark)
 {
-       return sgx_nr_free_pages() < watermark &&
-              !list_empty(&sgx_active_page_list);
+       return sgx_nr_free_pages < watermark && !list_empty(&sgx_active_page_list);
 }
 
 static int ksgxd(void *p)
 {
-       int i;
-
        set_freezable();
 
        /*
         * Sanitize pages in order to recover from kexec(). The 2nd pass is
         * required for SECS pages, whose child pages blocked EREMOVE.
         */
-       for (i = 0; i < sgx_nr_epc_sections; i++)
-               sgx_sanitize_section(&sgx_epc_sections[i]);
-
-       for (i = 0; i < sgx_nr_epc_sections; i++) {
-               sgx_sanitize_section(&sgx_epc_sections[i]);
+       __sgx_sanitize_pages(&sgx_dirty_page_list);
+       __sgx_sanitize_pages(&sgx_dirty_page_list);
 
-               /* Should never happen. */
-               if (!list_empty(&sgx_epc_sections[i].init_laundry_list))
-                       WARN(1, "EPC section %d has unsanitized pages.\n", i);
-       }
+       /* sanity check: */
+       WARN_ON(!list_empty(&sgx_dirty_page_list));
 
        while (!kthread_should_stop()) {
                if (try_to_freeze())
@@ -454,45 +457,56 @@ static bool __init sgx_page_reclaimer_init(void)
        return true;
 }
 
-static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_section *section)
+static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)
 {
-       struct sgx_epc_page *page;
+       struct sgx_numa_node *node = &sgx_numa_nodes[nid];
+       struct sgx_epc_page *page = NULL;
 
-       spin_lock(&section->lock);
+       spin_lock(&node->lock);
 
-       if (list_empty(&section->page_list)) {
-               spin_unlock(&section->lock);
+       if (list_empty(&node->free_page_list)) {
+               spin_unlock(&node->lock);
                return NULL;
        }
 
-       page = list_first_entry(&section->page_list, struct sgx_epc_page, list);
+       page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list);
        list_del_init(&page->list);
-       section->free_cnt--;
+       sgx_nr_free_pages--;
+
+       spin_unlock(&node->lock);
 
-       spin_unlock(&section->lock);
        return page;
 }
 
 /**
  * __sgx_alloc_epc_page() - Allocate an EPC page
  *
- * Iterate through EPC sections and borrow a free EPC page to the caller. When a
- * page is no longer needed it must be released with sgx_free_epc_page().
+ * Iterate through NUMA nodes and reserve a free EPC page for the caller. Start
+ * from the NUMA node where the caller is executing.
  *
  * Return:
- *   an EPC page,
- *   -errno on error
+ * - an EPC page:      A borrowed EPC page was available.
+ * - NULL:             Out of EPC pages.
  */
 struct sgx_epc_page *__sgx_alloc_epc_page(void)
 {
-       struct sgx_epc_section *section;
        struct sgx_epc_page *page;
-       int i;
+       int nid_of_current = numa_node_id();
+       int nid = nid_of_current;
 
-       for (i = 0; i < sgx_nr_epc_sections; i++) {
-               section = &sgx_epc_sections[i];
+       if (node_isset(nid_of_current, sgx_numa_mask)) {
+               page = __sgx_alloc_epc_page_from_node(nid_of_current);
+               if (page)
+                       return page;
+       }
+
+       /* Fall back to the non-local NUMA nodes: */
+       while (true) {
+               nid = next_node_in(nid, sgx_numa_mask);
+               if (nid == nid_of_current)
+                       break;
 
-               page = __sgx_alloc_epc_page_from_section(section);
+               page = __sgx_alloc_epc_page_from_node(nid);
                if (page)
                        return page;
        }
@@ -598,23 +612,22 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
  * sgx_free_epc_page() - Free an EPC page
  * @page:      an EPC page
  *
- * Call EREMOVE for an EPC page and insert it back to the list of free pages.
+ * Put the EPC page back to the list of free pages. It's the caller's
+ * responsibility to make sure that the page is in uninitialized state. In other
+ * words, do EREMOVE, EWB or whatever operation is necessary before calling
+ * this function.
  */
 void sgx_free_epc_page(struct sgx_epc_page *page)
 {
        struct sgx_epc_section *section = &sgx_epc_sections[page->section];
-       int ret;
+       struct sgx_numa_node *node = section->node;
 
-       WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED);
+       spin_lock(&node->lock);
 
-       ret = __eremove(sgx_get_epc_virt_addr(page));
-       if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret))
-               return;
+       list_add_tail(&page->list, &node->free_page_list);
+       sgx_nr_free_pages++;
 
-       spin_lock(&section->lock);
-       list_add_tail(&page->list, &section->page_list);
-       section->free_cnt++;
-       spin_unlock(&section->lock);
+       spin_unlock(&node->lock);
 }
 
 static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
@@ -635,18 +648,14 @@ static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
        }
 
        section->phys_addr = phys_addr;
-       spin_lock_init(&section->lock);
-       INIT_LIST_HEAD(&section->page_list);
-       INIT_LIST_HEAD(&section->init_laundry_list);
 
        for (i = 0; i < nr_pages; i++) {
                section->pages[i].section = index;
                section->pages[i].flags = 0;
                section->pages[i].owner = NULL;
-               list_add_tail(&section->pages[i].list, &section->init_laundry_list);
+               list_add_tail(&section->pages[i].list, &sgx_dirty_page_list);
        }
 
-       section->free_cnt = nr_pages;
        return true;
 }
 
@@ -665,8 +674,13 @@ static bool __init sgx_page_cache_init(void)
 {
        u32 eax, ebx, ecx, edx, type;
        u64 pa, size;
+       int nid;
        int i;
 
+       sgx_numa_nodes = kmalloc_array(num_possible_nodes(), sizeof(*sgx_numa_nodes), GFP_KERNEL);
+       if (!sgx_numa_nodes)
+               return false;
+
        for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
                cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx);
 
@@ -689,6 +703,21 @@ static bool __init sgx_page_cache_init(void)
                        break;
                }
 
+               nid = numa_map_to_online_node(phys_to_target_node(pa));
+               if (nid == NUMA_NO_NODE) {
+                       /* The physical address is already printed above. */
+                       pr_warn(FW_BUG "Unable to map EPC section to online node. Fallback to the NUMA node 0.\n");
+                       nid = 0;
+               }
+
+               if (!node_isset(nid, sgx_numa_mask)) {
+                       spin_lock_init(&sgx_numa_nodes[nid].lock);
+                       INIT_LIST_HEAD(&sgx_numa_nodes[nid].free_page_list);
+                       node_set(nid, sgx_numa_mask);
+               }
+
+               sgx_epc_sections[i].node =  &sgx_numa_nodes[nid];
+
                sgx_nr_epc_sections++;
        }
 
@@ -700,6 +729,67 @@ static bool __init sgx_page_cache_init(void)
        return true;
 }
 
+/*
+ * Update the SGX_LEPUBKEYHASH MSRs to the values specified by caller.
+ * Bare-metal driver requires to update them to hash of enclave's signer
+ * before EINIT. KVM needs to update them to guest's virtual MSR values
+ * before doing EINIT from guest.
+ */
+void sgx_update_lepubkeyhash(u64 *lepubkeyhash)
+{
+       int i;
+
+       WARN_ON_ONCE(preemptible());
+
+       for (i = 0; i < 4; i++)
+               wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
+}
+
+const struct file_operations sgx_provision_fops = {
+       .owner                  = THIS_MODULE,
+};
+
+static struct miscdevice sgx_dev_provision = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "sgx_provision",
+       .nodename = "sgx_provision",
+       .fops = &sgx_provision_fops,
+};
+
+/**
+ * sgx_set_attribute() - Update allowed attributes given file descriptor
+ * @allowed_attributes:                Pointer to allowed enclave attributes
+ * @attribute_fd:              File descriptor for specific attribute
+ *
+ * Append enclave attribute indicated by file descriptor to allowed
+ * attributes. Currently only SGX_ATTR_PROVISIONKEY indicated by
+ * /dev/sgx_provision is supported.
+ *
+ * Return:
+ * -0:         SGX_ATTR_PROVISIONKEY is appended to allowed_attributes
+ * -EINVAL:    Invalid, or not supported file descriptor
+ */
+int sgx_set_attribute(unsigned long *allowed_attributes,
+                     unsigned int attribute_fd)
+{
+       struct file *file;
+
+       file = fget(attribute_fd);
+       if (!file)
+               return -EINVAL;
+
+       if (file->f_op != &sgx_provision_fops) {
+               fput(file);
+               return -EINVAL;
+       }
+
+       *allowed_attributes |= SGX_ATTR_PROVISIONKEY;
+
+       fput(file);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(sgx_set_attribute);
+
 static int __init sgx_init(void)
 {
        int ret;
@@ -716,12 +806,28 @@ static int __init sgx_init(void)
                goto err_page_cache;
        }
 
-       ret = sgx_drv_init();
+       ret = misc_register(&sgx_dev_provision);
        if (ret)
                goto err_kthread;
 
+       /*
+        * Always try to initialize the native *and* KVM drivers.
+        * The KVM driver is less picky than the native one and
+        * can function if the native one is not supported on the
+        * current system or fails to initialize.
+        *
+        * Error out only if both fail to initialize.
+        */
+       ret = sgx_drv_init();
+
+       if (sgx_vepc_init() && ret)
+               goto err_provision;
+
        return 0;
 
+err_provision:
+       misc_deregister(&sgx_dev_provision);
+
 err_kthread:
        kthread_stop(ksgxd_tsk);
 
index 5fa42d143feb869a1443ede7f5eb990bac184e0f..4628acec000915e2d2b41895088c3edbe5033c14 100644 (file)
@@ -8,11 +8,15 @@
 #include <linux/rwsem.h>
 #include <linux/types.h>
 #include <asm/asm.h>
-#include "arch.h"
+#include <asm/sgx.h>
 
 #undef pr_fmt
 #define pr_fmt(fmt) "sgx: " fmt
 
+#define EREMOVE_ERROR_MESSAGE \
+       "EREMOVE returned %d (0x%x) and an EPC page was leaked. SGX may become unusable. " \
+       "Refer to Documentation/x86/sgx.rst for more information."
+
 #define SGX_MAX_EPC_SECTIONS           8
 #define SGX_EEXTEND_BLOCK_SIZE         256
 #define SGX_NR_TO_SCAN                 16
@@ -29,29 +33,26 @@ struct sgx_epc_page {
        struct list_head list;
 };
 
+/*
+ * Contains the tracking data for NUMA nodes having EPC pages. Most importantly,
+ * the free page list local to the node is stored here.
+ */
+struct sgx_numa_node {
+       struct list_head free_page_list;
+       spinlock_t lock;
+};
+
 /*
  * The firmware can define multiple chunks of EPC to the different areas of the
  * physical memory e.g. for memory areas of the each node. This structure is
  * used to store EPC pages for one EPC section and virtual memory area where
  * the pages have been mapped.
- *
- * 'lock' must be held before accessing 'page_list' or 'free_cnt'.
  */
 struct sgx_epc_section {
        unsigned long phys_addr;
        void *virt_addr;
        struct sgx_epc_page *pages;
-
-       spinlock_t lock;
-       struct list_head page_list;
-       unsigned long free_cnt;
-
-       /*
-        * Pages which need EREMOVE run on them before they can be
-        * used.  Only safe to be accessed in ksgxd and init code.
-        * Not protected by locks.
-        */
-       struct list_head init_laundry_list;
+       struct sgx_numa_node *node;
 };
 
 extern struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
@@ -83,4 +84,15 @@ void sgx_mark_page_reclaimable(struct sgx_epc_page *page);
 int sgx_unmark_page_reclaimable(struct sgx_epc_page *page);
 struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim);
 
+#ifdef CONFIG_X86_SGX_KVM
+int __init sgx_vepc_init(void);
+#else
+static inline int __init sgx_vepc_init(void)
+{
+       return -ENODEV;
+}
+#endif
+
+void sgx_update_lepubkeyhash(u64 *lepubkeyhash);
+
 #endif /* _X86_SGX_H */
diff --git a/arch/x86/kernel/cpu/sgx/virt.c b/arch/x86/kernel/cpu/sgx/virt.c
new file mode 100644 (file)
index 0000000..6ad165a
--- /dev/null
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device driver to expose SGX enclave memory to KVM guests.
+ *
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+#include <asm/sgx.h>
+#include <uapi/asm/sgx.h>
+
+#include "encls.h"
+#include "sgx.h"
+
+struct sgx_vepc {
+       struct xarray page_array;
+       struct mutex lock;
+};
+
+/*
 + * Temporary SECS pages that cannot be EREMOVE'd due to having children in other
+ * virtual EPC instances, and the lock to protect it.
+ */
+static struct mutex zombie_secs_pages_lock;
+static struct list_head zombie_secs_pages;
+
+static int __sgx_vepc_fault(struct sgx_vepc *vepc,
+                           struct vm_area_struct *vma, unsigned long addr)
+{
+       struct sgx_epc_page *epc_page;
+       unsigned long index, pfn;
+       int ret;
+
+       WARN_ON(!mutex_is_locked(&vepc->lock));
+
+       /* Calculate index of EPC page in virtual EPC's page_array */
+       index = vma->vm_pgoff + PFN_DOWN(addr - vma->vm_start);
+
+       epc_page = xa_load(&vepc->page_array, index);
+       if (epc_page)
+               return 0;
+
+       epc_page = sgx_alloc_epc_page(vepc, false);
+       if (IS_ERR(epc_page))
+               return PTR_ERR(epc_page);
+
+       ret = xa_err(xa_store(&vepc->page_array, index, epc_page, GFP_KERNEL));
+       if (ret)
+               goto err_free;
+
+       pfn = PFN_DOWN(sgx_get_epc_phys_addr(epc_page));
+
+       ret = vmf_insert_pfn(vma, addr, pfn);
+       if (ret != VM_FAULT_NOPAGE) {
+               ret = -EFAULT;
+               goto err_delete;
+       }
+
+       return 0;
+
+err_delete:
+       xa_erase(&vepc->page_array, index);
+err_free:
+       sgx_free_epc_page(epc_page);
+       return ret;
+}
+
+static vm_fault_t sgx_vepc_fault(struct vm_fault *vmf)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       struct sgx_vepc *vepc = vma->vm_private_data;
+       int ret;
+
+       mutex_lock(&vepc->lock);
+       ret = __sgx_vepc_fault(vepc, vma, vmf->address);
+       mutex_unlock(&vepc->lock);
+
+       if (!ret)
+               return VM_FAULT_NOPAGE;
+
+       if (ret == -EBUSY && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) {
+               mmap_read_unlock(vma->vm_mm);
+               return VM_FAULT_RETRY;
+       }
+
+       return VM_FAULT_SIGBUS;
+}
+
+static const struct vm_operations_struct sgx_vepc_vm_ops = {
+       .fault = sgx_vepc_fault,
+};
+
+static int sgx_vepc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct sgx_vepc *vepc = file->private_data;
+
+       if (!(vma->vm_flags & VM_SHARED))
+               return -EINVAL;
+
+       vma->vm_ops = &sgx_vepc_vm_ops;
+       /* Don't copy VMA in fork() */
+       vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY;
+       vma->vm_private_data = vepc;
+
+       return 0;
+}
+
+static int sgx_vepc_free_page(struct sgx_epc_page *epc_page)
+{
+       int ret;
+
+       /*
+        * Take a previously guest-owned EPC page and return it to the
+        * general EPC page pool.
+        *
+        * Guests can not be trusted to have left this page in a good
+        * state, so run EREMOVE on the page unconditionally.  In the
+        * case that a guest properly EREMOVE'd this page, a superfluous
+        * EREMOVE is harmless.
+        */
+       ret = __eremove(sgx_get_epc_virt_addr(epc_page));
+       if (ret) {
+               /*
+                * Only SGX_CHILD_PRESENT is expected, which is because of
+                * EREMOVE'ing an SECS still with child, in which case it can
+                * be handled by EREMOVE'ing the SECS again after all pages in
 +                * virtual EPC have been EREMOVE'd. See comments below in
+                * sgx_vepc_release().
+                *
 +                * The user of virtual EPC (KVM) needs to guarantee there is no
 +                * logical processor still running in the enclave in the guest,
+                * otherwise EREMOVE will get SGX_ENCLAVE_ACT which cannot be
+                * handled here.
+                */
+               WARN_ONCE(ret != SGX_CHILD_PRESENT, EREMOVE_ERROR_MESSAGE,
+                         ret, ret);
+               return ret;
+       }
+
+       sgx_free_epc_page(epc_page);
+
+       return 0;
+}
+
+static int sgx_vepc_release(struct inode *inode, struct file *file)
+{
+       struct sgx_vepc *vepc = file->private_data;
+       struct sgx_epc_page *epc_page, *tmp, *entry;
+       unsigned long index;
+
+       LIST_HEAD(secs_pages);
+
+       xa_for_each(&vepc->page_array, index, entry) {
+               /*
+                * Remove all normal, child pages.  sgx_vepc_free_page()
+                * will fail if EREMOVE fails, but this is OK and expected on
+                * SECS pages.  Those can only be EREMOVE'd *after* all their
+                * child pages. Retries below will clean them up.
+                */
+               if (sgx_vepc_free_page(entry))
+                       continue;
+
+               xa_erase(&vepc->page_array, index);
+       }
+
+       /*
+        * Retry EREMOVE'ing pages.  This will clean up any SECS pages that
+        * only had children in this 'epc' area.
+        */
+       xa_for_each(&vepc->page_array, index, entry) {
+               epc_page = entry;
+               /*
+                * An EREMOVE failure here means that the SECS page still
+                * has children.  But, since all children in this 'sgx_vepc'
+                * have been removed, the SECS page must have a child on
+                * another instance.
+                */
+               if (sgx_vepc_free_page(epc_page))
+                       list_add_tail(&epc_page->list, &secs_pages);
+
+               xa_erase(&vepc->page_array, index);
+       }
+
+       /*
+        * SECS pages are "pinned" by child pages, and "unpinned" once all
+        * children have been EREMOVE'd.  A child page in this instance
+        * may have pinned an SECS page encountered in an earlier release(),
+        * creating a zombie.  Since some children were EREMOVE'd above,
+        * try to EREMOVE all zombies in the hopes that one was unpinned.
+        */
+       mutex_lock(&zombie_secs_pages_lock);
+       list_for_each_entry_safe(epc_page, tmp, &zombie_secs_pages, list) {
+               /*
+                * Speculatively remove the page from the list of zombies,
+                * if the page is successfully EREMOVE'd it will be added to
+                * the list of free pages.  If EREMOVE fails, throw the page
+                * on the local list, which will be spliced on at the end.
+                */
+               list_del(&epc_page->list);
+
+               if (sgx_vepc_free_page(epc_page))
+                       list_add_tail(&epc_page->list, &secs_pages);
+       }
+
+       if (!list_empty(&secs_pages))
+               list_splice_tail(&secs_pages, &zombie_secs_pages);
+       mutex_unlock(&zombie_secs_pages_lock);
+
+       kfree(vepc);
+
+       return 0;
+}
+
+static int sgx_vepc_open(struct inode *inode, struct file *file)
+{
+       struct sgx_vepc *vepc;
+
+       vepc = kzalloc(sizeof(struct sgx_vepc), GFP_KERNEL);
+       if (!vepc)
+               return -ENOMEM;
+       mutex_init(&vepc->lock);
+       xa_init(&vepc->page_array);
+
+       file->private_data = vepc;
+
+       return 0;
+}
+
+static const struct file_operations sgx_vepc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = sgx_vepc_open,
+       .release        = sgx_vepc_release,
+       .mmap           = sgx_vepc_mmap,
+};
+
+static struct miscdevice sgx_vepc_dev = {
+       .minor          = MISC_DYNAMIC_MINOR,
+       .name           = "sgx_vepc",
+       .nodename       = "sgx_vepc",
+       .fops           = &sgx_vepc_fops,
+};
+
+int __init sgx_vepc_init(void)
+{
+       /* SGX virtualization requires KVM to work */
+       if (!cpu_feature_enabled(X86_FEATURE_VMX))
+               return -ENODEV;
+
+       INIT_LIST_HEAD(&zombie_secs_pages);
+       mutex_init(&zombie_secs_pages_lock);
+
+       return misc_register(&sgx_vepc_dev);
+}
+
+/**
+ * sgx_virt_ecreate() - Run ECREATE on behalf of guest
+ * @pageinfo:  Pointer to PAGEINFO structure
+ * @secs:      Userspace pointer to SECS page
+ * @trapnr:    trap number injected to guest in case of ECREATE error
+ *
+ * Run ECREATE on behalf of guest after KVM traps ECREATE for the purpose
+ * of enforcing policies of guest's enclaves, and return the trap number
+ * which should be injected to guest in case of any ECREATE error.
+ *
+ * Return:
+ * -  0:       ECREATE was successful.
+ * - <0:       on error.
+ */
+int sgx_virt_ecreate(struct sgx_pageinfo *pageinfo, void __user *secs,
+                    int *trapnr)
+{
+       int ret;
+
+       /*
+        * @secs is an untrusted, userspace-provided address.  It comes from
+        * KVM and is assumed to be a valid pointer which points somewhere in
+        * userspace.  This can fault and call SGX or other fault handlers when
+        * userspace mapping @secs doesn't exist.
+        *
+        * Add a WARN() to make sure @secs is already valid userspace pointer
+        * from caller (KVM), who should already have handled invalid pointer
+        * case (for instance, made by malicious guest).  All other checks,
+        * such as alignment of @secs, are deferred to ENCLS itself.
+        */
+       if (WARN_ON_ONCE(!access_ok(secs, PAGE_SIZE)))
+               return -EINVAL;
+
+       __uaccess_begin();
+       ret = __ecreate(pageinfo, (void *)secs);
+       __uaccess_end();
+
+       if (encls_faulted(ret)) {
+               *trapnr = ENCLS_TRAPNR(ret);
+               return -EFAULT;
+       }
+
+       /* ECREATE doesn't return an error code, it faults or succeeds. */
+       WARN_ON_ONCE(ret);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(sgx_virt_ecreate);
+
+static int __sgx_virt_einit(void __user *sigstruct, void __user *token,
+                           void __user *secs)
+{
+       int ret;
+
+       /*
+        * Make sure all userspace pointers from caller (KVM) are valid.
+        * All other checks deferred to ENCLS itself.  Also see comment
+        * for @secs in sgx_virt_ecreate().
+        */
+#define SGX_EINITTOKEN_SIZE    304
+       if (WARN_ON_ONCE(!access_ok(sigstruct, sizeof(struct sgx_sigstruct)) ||
+                        !access_ok(token, SGX_EINITTOKEN_SIZE) ||
+                        !access_ok(secs, PAGE_SIZE)))
+               return -EINVAL;
+
+       __uaccess_begin();
+       ret = __einit((void *)sigstruct, (void *)token, (void *)secs);
+       __uaccess_end();
+
+       return ret;
+}
+
+/**
+ * sgx_virt_einit() - Run EINIT on behalf of guest
+ * @sigstruct:         Userspace pointer to SIGSTRUCT structure
+ * @token:             Userspace pointer to EINITTOKEN structure
+ * @secs:              Userspace pointer to SECS page
+ * @lepubkeyhash:      Pointer to guest's *virtual* SGX_LEPUBKEYHASH MSR values
+ * @trapnr:            trap number injected to guest in case of EINIT error
+ *
+ * Run EINIT on behalf of guest after KVM traps EINIT. If SGX_LC is available
 + * in host, the SGX driver may rewrite the hardware values at will, therefore KVM
+ * needs to update hardware values to guest's virtual MSR values in order to
+ * ensure EINIT is executed with expected hardware values.
+ *
+ * Return:
+ * -  0:       EINIT was successful.
+ * - <0:       on error.
+ */
+int sgx_virt_einit(void __user *sigstruct, void __user *token,
+                  void __user *secs, u64 *lepubkeyhash, int *trapnr)
+{
+       int ret;
+
+       if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) {
+               ret = __sgx_virt_einit(sigstruct, token, secs);
+       } else {
+               preempt_disable();
+
+               sgx_update_lepubkeyhash(lepubkeyhash);
+
+               ret = __sgx_virt_einit(sigstruct, token, secs);
+               preempt_enable();
+       }
+
+       /* Propagate up the error from the WARN_ON_ONCE in __sgx_virt_einit() */
+       if (ret == -EINVAL)
+               return ret;
+
+       if (encls_faulted(ret)) {
+               *trapnr = ENCLS_TRAPNR(ret);
+               return -EFAULT;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(sgx_virt_einit);
index 8678864ce7123c35cbb5d0ab09e9ffcd91095e16..132a2de44d2fe4d6906516d0c0b776d42a0ed74b 100644 (file)
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(__max_die_per_package);
 
 #ifdef CONFIG_SMP
 /*
- * Check if given CPUID extended toplogy "leaf" is implemented
+ * Check if given CPUID extended topology "leaf" is implemented
  */
 static int check_extended_topology_leaf(int leaf)
 {
@@ -44,7 +44,7 @@ static int check_extended_topology_leaf(int leaf)
        return 0;
 }
 /*
- * Return best CPUID Extended Toplogy Leaf supported
+ * Return best CPUID Extended Topology Leaf supported
  */
 static int detect_extended_topology_leaf(struct cpuinfo_x86 *c)
 {
index c6ede3b3d302d3fb28222f0f398e5afb6a3ca336..c04b933f48d38257e1ca241875a10581b1c92121 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/clocksource.h>
 #include <linux/cpu.h>
 #include <linux/reboot.h>
+#include <linux/static_call.h>
 #include <asm/div64.h>
 #include <asm/x86_init.h>
 #include <asm/hypervisor.h>
@@ -336,11 +337,11 @@ static void __init vmware_paravirt_ops_setup(void)
        vmware_cyc2ns_setup();
 
        if (vmw_sched_clock)
-               pv_ops.time.sched_clock = vmware_sched_clock;
+               paravirt_set_sched_clock(vmware_sched_clock);
 
        if (vmware_is_stealclock_available()) {
                has_steal_clock = true;
-               pv_ops.time.steal_clock = vmware_steal_clock;
+               static_call_update(pv_steal_clock, vmware_steal_clock);
 
                /* We use reboot notifier only to disable steal clock */
                register_reboot_notifier(&vmware_pv_reboot_nb);
@@ -378,6 +379,8 @@ static void __init vmware_set_capabilities(void)
 {
        setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC);
        setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+       if (vmware_tsc_khz)
+               setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
        if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMCALL)
                setup_force_cpu_cap(X86_FEATURE_VMCALL);
        else if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMMCALL)
index a8f3af257e26ca1caae108998f2ecf8d9d571df3..b1deacbeb266934b76c7d46b556784d00ae8cb91 100644 (file)
@@ -337,7 +337,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
        struct crash_memmap_data cmd;
        struct crash_mem *cmem;
 
-       cmem = vzalloc(sizeof(struct crash_mem));
+       cmem = vzalloc(struct_size(cmem, ranges, 1));
        if (!cmem)
                return -ENOMEM;
 
index 22aad412f965e60d0c41265cd2a544eb412d1b5b..f74cb7da9557e5a54e2a4d20dd9cbe7f7b4ab259 100644 (file)
@@ -793,7 +793,7 @@ core_initcall(e820__register_nvs_regions);
 #endif
 
 /*
- * Allocate the requested number of bytes with the requsted alignment
+ * Allocate the requested number of bytes with the requested alignment
  * and return (the physical address) to the caller. Also register this
  * range in the 'kexec' E820 table as a reserved range.
  *
index 683749b80ae2829c0a3f56734c4e0da0bc3e4e0e..a85c64000218968bc30854ccb583845b6790175d 100644 (file)
@@ -253,7 +253,7 @@ static bool xfeature_enabled(enum xfeature xfeature)
 static void __init setup_xstate_features(void)
 {
        u32 eax, ebx, ecx, edx, i;
-       /* start at the beginnning of the "extended state" */
+       /* start at the beginning of the "extended state" */
        unsigned int last_good_offset = offsetof(struct xregs_state,
                                                 extended_state_area);
        /*
index 5e9beb77cafd43895794295824135ecb3dea3b4e..18be44163a50fb4ec9d06f3094fad3b4a0bdb732 100644 (file)
@@ -104,7 +104,7 @@ static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
 static bool __head check_la57_support(unsigned long physaddr)
 {
        /*
-        * 5-level paging is detected and enabled at kernel decomression
+        * 5-level paging is detected and enabled at kernel decompression
         * stage. Only check if it has been enabled there.
         */
        if (!(native_read_cr4() & X86_CR4_LA57))
index ee1a283f8e966655d76ab52b0b3ac8d61b78e8ef..d552f177eca0e927c7a56efcff2c7fcbe7af21e3 100644 (file)
@@ -245,7 +245,7 @@ static const __initconst struct idt_data ist_idts[] = {
  * after that.
  *
  * Note, that X86_64 cannot install the real #PF handler in
- * idt_setup_early_traps() because the memory intialization needs the #PF
+ * idt_setup_early_traps() because the memory initialization needs the #PF
  * handler from the early_idt_handler_array to initialize the early page
  * tables.
  */
index 58aa712973ac80608652378785a3255c9a88dfb7..e28f6a5d14f1b2d14ca4c4e9aaae2ec2b0f4a3dd 100644 (file)
@@ -338,7 +338,7 @@ void fixup_irqs(void)
        irq_migrate_all_off_this_cpu();
 
        /*
-        * We can remove mdelay() and then send spuriuous interrupts to
+        * We can remove mdelay() and then send spurious interrupts to
         * new cpu targets for all the irqs that were handled previously by
         * this cpu. While it works, I have seen spurious interrupt messages
         * (nothing wrong but still...).
index ff7878df96b4606cc307c95902d7ebcbb046d806..3a43a2dee658189764c440240c0621225e75c4fb 100644 (file)
@@ -17,7 +17,7 @@
  *  Updated by:             Tom Rini <trini@kernel.crashing.org>
  *  Updated by:             Jason Wessel <jason.wessel@windriver.com>
  *  Modified for 386 by Jim Kingdon, Cygnus Support.
- *  Origianl kgdb, compatibility with 2.1.xx kernel by
+ *  Original kgdb, compatibility with 2.1.xx kernel by
  *  David Grothe <dave@gcom.com>
  *  Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com>
  *  X86_64 changes from Andi Kleen's patch merged by Jim Houston
@@ -642,7 +642,7 @@ void kgdb_arch_late(void)
        struct perf_event **pevent;
 
        /*
-        * Pre-allocate the hw breakpoint structions in the non-atomic
+        * Pre-allocate the hw breakpoint instructions in the non-atomic
         * portion of kgdb because this operation requires mutexs to
         * complete.
         */
index 51c7f5271aee47abf14b83733fa0e0d34c9a9926..596de2f6d3a57f522f1fa30496a5ec75977e63a7 100644 (file)
@@ -12,7 +12,7 @@
 
 #include "common.h"
 
-/* Ftrace callback handler for kprobes -- called under preepmt disabled */
+/* Ftrace callback handler for kprobes -- called under preempt disabled */
 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                           struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
index 78bb0fae39826cff94ac2081f327c91e7d87d8c4..172c947240b991ff40eb78a2faf8529679ae6264 100644 (file)
@@ -650,7 +650,7 @@ static void __init kvm_guest_init(void)
 
        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
-               pv_ops.time.steal_clock = kvm_steal_clock;
+               static_call_update(pv_steal_clock, kvm_steal_clock);
        }
 
        if (pv_tlb_flush_supported()) {
index 1fc0962c89c082b5b638a79efc040e4940de9991..d37ed4e1d0338c8b21ad461f5d2672cead15629a 100644 (file)
@@ -106,7 +106,7 @@ static inline void kvm_sched_clock_init(bool stable)
        if (!stable)
                clear_sched_clock_stable();
        kvm_sched_clock_offset = kvm_clock_read();
-       pv_ops.time.sched_clock = kvm_sched_clock_read;
+       paravirt_set_sched_clock(kvm_sched_clock_read);
 
        pr_info("kvm-clock: using sched offset of %llu cycles",
                kvm_sched_clock_offset);
index a29a44a98e5bef10751af769bd198d783e23b9fd..f01cd9a08155795e62e0a550817ca6e9ca5c1fdf 100644 (file)
@@ -260,7 +260,7 @@ static void set_idt(void *newidt, u16 limit)
 {
        struct desc_ptr curidt;
 
-       /* x86-64 supports unaliged loads & stores */
+       /* x86-64 supports unaligned loads & stores */
        curidt.size    = limit;
        curidt.address = (unsigned long)newidt;
 
index 4f75d0cf6305f04221bb9df16ac40e843193707b..9e1ea99ad9df4beca4771705b32b46444e7c925e 100644 (file)
@@ -32,3 +32,12 @@ bool pv_is_native_vcpu_is_preempted(void)
        return pv_ops.lock.vcpu_is_preempted.func ==
                __raw_callee_save___native_vcpu_is_preempted;
 }
+
+void __init paravirt_set_cap(void)
+{
+       if (!pv_is_native_spin_unlock())
+               setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);
+
+       if (!pv_is_native_vcpu_is_preempted())
+               setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
+}
index c60222ab8ab9b8ae2dded7687609b3165020c544..d0730264786b2175e409cc1396edc8668afe8ecf 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/highmem.h>
 #include <linux/kprobes.h>
 #include <linux/pgtable.h>
+#include <linux/static_call.h>
 
 #include <asm/bug.h>
 #include <asm/paravirt.h>
@@ -52,7 +53,10 @@ void __init default_banner(void)
 }
 
 /* Undefined instruction for dealing with missing ops pointers. */
-static const unsigned char ud2a[] = { 0x0f, 0x0b };
+static void paravirt_BUG(void)
+{
+       BUG();
+}
 
 struct branch {
        unsigned char opcode;
@@ -85,25 +89,6 @@ u64 notrace _paravirt_ident_64(u64 x)
 {
        return x;
 }
-
-static unsigned paravirt_patch_jmp(void *insn_buff, const void *target,
-                                  unsigned long addr, unsigned len)
-{
-       struct branch *b = insn_buff;
-       unsigned long delta = (unsigned long)target - (addr+5);
-
-       if (len < 5) {
-#ifdef CONFIG_RETPOLINE
-               WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
-#endif
-               return len;     /* call too long for patch site */
-       }
-
-       b->opcode = 0xe9;       /* jmp */
-       b->delta = delta;
-
-       return 5;
-}
 #endif
 
 DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
@@ -114,8 +99,8 @@ void __init native_pv_lock_init(void)
                static_branch_disable(&virt_spin_lock_key);
 }
 
-unsigned paravirt_patch_default(u8 type, void *insn_buff,
-                               unsigned long addr, unsigned len)
+unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr,
+                           unsigned int len)
 {
        /*
         * Neat trick to map patch type back to the call within the
@@ -125,20 +110,10 @@ unsigned paravirt_patch_default(u8 type, void *insn_buff,
        unsigned ret;
 
        if (opfunc == NULL)
-               /* If there's no function, patch it with a ud2a (BUG) */
-               ret = paravirt_patch_insns(insn_buff, len, ud2a, ud2a+sizeof(ud2a));
+               /* If there's no function, patch it with paravirt_BUG() */
+               ret = paravirt_patch_call(insn_buff, paravirt_BUG, addr, len);
        else if (opfunc == _paravirt_nop)
                ret = 0;
-
-#ifdef CONFIG_PARAVIRT_XXL
-       /* identity functions just return their single argument */
-       else if (opfunc == _paravirt_ident_64)
-               ret = paravirt_patch_ident_64(insn_buff, len);
-
-       else if (type == PARAVIRT_PATCH(cpu.iret))
-               /* If operation requires a jmp, then jmp */
-               ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len);
-#endif
        else
                /* Otherwise call the function. */
                ret = paravirt_patch_call(insn_buff, opfunc, addr, len);
@@ -146,19 +121,6 @@ unsigned paravirt_patch_default(u8 type, void *insn_buff,
        return ret;
 }
 
-unsigned paravirt_patch_insns(void *insn_buff, unsigned len,
-                             const char *start, const char *end)
-{
-       unsigned insn_len = end - start;
-
-       /* Alternative instruction is too large for the patch site and we cannot continue: */
-       BUG_ON(insn_len > len || start == NULL);
-
-       memcpy(insn_buff, start, insn_len);
-
-       return insn_len;
-}
-
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
 
@@ -167,6 +129,14 @@ static u64 native_steal_clock(int cpu)
        return 0;
 }
 
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
+DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);
+
+void paravirt_set_sched_clock(u64 (*func)(void))
+{
+       static_call_update(pv_sched_clock, func);
+}
+
 /* These are in entry.S */
 extern void native_iret(void);
 
@@ -269,13 +239,6 @@ struct pv_info pv_info = {
 #define PTE_IDENT      __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
 
 struct paravirt_patch_template pv_ops = {
-       /* Init ops. */
-       .init.patch             = native_patch,
-
-       /* Time ops. */
-       .time.sched_clock       = native_sched_clock,
-       .time.steal_clock       = native_steal_clock,
-
        /* Cpu ops. */
        .cpu.io_delay           = native_io_delay,
 
@@ -308,8 +271,6 @@ struct paravirt_patch_template pv_ops = {
 
        .cpu.load_sp0           = native_load_sp0,
 
-       .cpu.iret               = native_iret,
-
 #ifdef CONFIG_X86_IOPL_IOPERM
        .cpu.invalidate_io_bitmap       = native_tss_invalidate_io_bitmap,
        .cpu.update_io_bitmap           = native_tss_update_io_bitmap,
@@ -414,6 +375,8 @@ struct paravirt_patch_template pv_ops = {
 NOKPROBE_SYMBOL(native_get_debugreg);
 NOKPROBE_SYMBOL(native_set_debugreg);
 NOKPROBE_SYMBOL(native_load_idt);
+
+void (*paravirt_iret)(void) = native_iret;
 #endif
 
 EXPORT_SYMBOL(pv_ops);
diff --git a/arch/x86/kernel/paravirt_patch.c b/arch/x86/kernel/paravirt_patch.c
deleted file mode 100644 (file)
index abd27ec..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/stringify.h>
-
-#include <asm/paravirt.h>
-#include <asm/asm-offsets.h>
-
-#define PSTART(d, m)                                                   \
-       patch_data_##d.m
-
-#define PEND(d, m)                                                     \
-       (PSTART(d, m) + sizeof(patch_data_##d.m))
-
-#define PATCH(d, m, insn_buff, len)                                            \
-       paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m))
-
-#define PATCH_CASE(ops, m, data, insn_buff, len)                               \
-       case PARAVIRT_PATCH(ops.m):                                     \
-               return PATCH(data, ops##_##m, insn_buff, len)
-
-#ifdef CONFIG_PARAVIRT_XXL
-struct patch_xxl {
-       const unsigned char     irq_irq_disable[1];
-       const unsigned char     irq_irq_enable[1];
-       const unsigned char     irq_save_fl[2];
-       const unsigned char     mmu_read_cr2[3];
-       const unsigned char     mmu_read_cr3[3];
-       const unsigned char     mmu_write_cr3[3];
-       const unsigned char     cpu_wbinvd[2];
-       const unsigned char     mov64[3];
-};
-
-static const struct patch_xxl patch_data_xxl = {
-       .irq_irq_disable        = { 0xfa },             // cli
-       .irq_irq_enable         = { 0xfb },             // sti
-       .irq_save_fl            = { 0x9c, 0x58 },       // pushf; pop %[re]ax
-       .mmu_read_cr2           = { 0x0f, 0x20, 0xd0 }, // mov %cr2, %[re]ax
-       .mmu_read_cr3           = { 0x0f, 0x20, 0xd8 }, // mov %cr3, %[re]ax
-       .mmu_write_cr3          = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3
-       .cpu_wbinvd             = { 0x0f, 0x09 },       // wbinvd
-       .mov64                  = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax
-};
-
-unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
-{
-       return PATCH(xxl, mov64, insn_buff, len);
-}
-# endif /* CONFIG_PARAVIRT_XXL */
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-struct patch_lock {
-       unsigned char queued_spin_unlock[3];
-       unsigned char vcpu_is_preempted[2];
-};
-
-static const struct patch_lock patch_data_lock = {
-       .vcpu_is_preempted      = { 0x31, 0xc0 },       // xor %eax, %eax
-
-# ifdef CONFIG_X86_64
-       .queued_spin_unlock     = { 0xc6, 0x07, 0x00 }, // movb $0, (%rdi)
-# else
-       .queued_spin_unlock     = { 0xc6, 0x00, 0x00 }, // movb $0, (%eax)
-# endif
-};
-#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-
-unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
-                         unsigned int len)
-{
-       switch (type) {
-
-#ifdef CONFIG_PARAVIRT_XXL
-       PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
-       PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
-       PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);
-
-       PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len);
-       PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
-       PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);
-
-       PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
-#endif
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-       case PARAVIRT_PATCH(lock.queued_spin_unlock):
-               if (pv_is_native_spin_unlock())
-                       return PATCH(lock, queued_spin_unlock, insn_buff, len);
-               break;
-
-       case PARAVIRT_PATCH(lock.vcpu_is_preempted):
-               if (pv_is_native_vcpu_is_preempted())
-                       return PATCH(lock, vcpu_is_preempted, insn_buff, len);
-               break;
-#endif
-       default:
-               break;
-       }
-
-       return paravirt_patch_default(type, insn_buff, addr, len);
-}
index 9c214d7085a489a2c18c093619e6ef6b1210b01d..43cbfc84153ae2c6ba4bcff93fe719e3902a0d1b 100644 (file)
@@ -63,14 +63,9 @@ __visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
                 */
                .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
 
-               /*
-                * .sp1 is cpu_current_top_of_stack.  The init task never
-                * runs user code, but cpu_current_top_of_stack should still
-                * be well defined before the first context switch.
-                */
+#ifdef CONFIG_X86_32
                .sp1 = TOP_OF_INIT_STACK,
 
-#ifdef CONFIG_X86_32
                .ss0 = __KERNEL_DS,
                .ss1 = __KERNEL_CS,
 #endif
@@ -451,7 +446,7 @@ void speculative_store_bypass_ht_init(void)
         * First HT sibling to come up on the core.  Link shared state of
         * the first HT sibling to itself. The siblings on the same core
         * which come up later will see the shared state pointer and link
-        * themself to the state of this CPU.
+        * themselves to the state of this CPU.
         */
        st->shared_state = st;
 }
index 11065dc03f5bc83c5eda66309047ccff34620507..eda37df016f02905e4ca6e2155efbbbb4180d744 100644 (file)
@@ -89,7 +89,7 @@ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
        /*
         * Assumption here is that last_value, a global accumulator, always goes
         * forward. If we are less than that, we should not be much smaller.
-        * We assume there is an error marging we're inside, and then the correction
+        * We assume there is an error margin we're inside, and then the correction
         * does not sacrifice accuracy.
         *
         * For reads: global may have changed between test and return,
index 94b33885f8d20463c3038392a36373e266357a9d..f469153eca8ab305a873d4ac667b031a64e453ee 100644 (file)
@@ -107,7 +107,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
         *  - Write protect disabled
         *  - No task switch
         *  - Don't do FP software emulation.
-        *  - Proctected mode enabled
+        *  - Protected mode enabled
         */
        movl    %cr0, %eax
        andl    $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax
index a4d9a261425b0f48da767884068112a053823fdb..c53271aebb64d9b6298f034206a9a130bc067c92 100644 (file)
@@ -121,7 +121,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
         *  - Write protect disabled
         *  - No task switch
         *  - Don't do FP software emulation.
-        *  - Proctected mode enabled
+        *  - Protected mode enabled
         */
        movq    %cr0, %rax
        andq    $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax
index 5ecd69a48393d29c4aaac22f9e95ccb073a2b1b4..69757fac7462a9d48ac6cbc39fd1eea6e51083a3 100644 (file)
@@ -65,7 +65,7 @@ RESERVE_BRK(dmi_alloc, 65536);
 
 /*
  * Range of the BSS area. The size of the BSS area is determined
- * at link time, with RESERVE_BRK*() facility reserving additional
+ * at link time, with RESERVE_BRK() facility reserving additional
  * chunks.
  */
 unsigned long _brk_start = (unsigned long)__brk_base;
@@ -633,11 +633,16 @@ static void __init trim_snb_memory(void)
        printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
 
        /*
-        * Reserve all memory below the 1 MB mark that has not
-        * already been reserved.
+        * SandyBridge integrated graphics devices have a bug that prevents
+        * them from accessing certain memory ranges, namely anything below
+        * 1M and in the pages listed in bad_pages[] above.
+        *
+        * To avoid these pages being ever accessed by SNB gfx devices
+        * reserve all memory below the 1 MB mark and bad_pages that have
+        * not already been reserved at boot time.
         */
        memblock_reserve(0, 1<<20);
-       
+
        for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
                if (memblock_reserve(bad_pages[i], PAGE_SIZE))
                        printk(KERN_WARNING "failed to reserve 0x%08lx\n",
@@ -645,18 +650,6 @@ static void __init trim_snb_memory(void)
        }
 }
 
-/*
- * Here we put platform-specific memory range workarounds, i.e.
- * memory known to be corrupt or otherwise in need to be reserved on
- * specific platforms.
- *
- * If this gets used more widely it could use a real dispatch mechanism.
- */
-static void __init trim_platform_memory_ranges(void)
-{
-       trim_snb_memory();
-}
-
 static void __init trim_bios_range(void)
 {
        /*
@@ -725,11 +718,41 @@ static int __init parse_reservelow(char *p)
 
 early_param("reservelow", parse_reservelow);
 
-static void __init trim_low_memory_range(void)
+static void __init early_reserve_memory(void)
 {
+       /*
+        * Reserve the memory occupied by the kernel between _text and
+        * __end_of_kernel_reserve symbols. Any kernel sections after the
+        * __end_of_kernel_reserve symbol must be explicitly reserved with a
+        * separate memblock_reserve() or they will be discarded.
+        */
+       memblock_reserve(__pa_symbol(_text),
+                        (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
+
+       /*
+        * The first 4Kb of memory is a BIOS owned area, but generally it is
+        * not listed as such in the E820 table.
+        *
+        * Reserve the first memory page and typically some additional
+        * memory (64KiB by default) since some BIOSes are known to corrupt
+        * low memory. See the Kconfig help text for X86_RESERVE_LOW.
+        *
+        * In addition, make sure page 0 is always reserved because on
+        * systems with L1TF its contents can be leaked to user processes.
+        */
        memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
+
+       early_reserve_initrd();
+
+       if (efi_enabled(EFI_BOOT))
+               efi_memblock_x86_reserve_range();
+
+       memblock_x86_reserve_range_setup_data();
+
+       reserve_ibft_region();
+       reserve_bios_regions();
 }
-       
+
 /*
  * Dump out kernel offset information on panic.
  */
@@ -764,29 +787,6 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 
 void __init setup_arch(char **cmdline_p)
 {
-       /*
-        * Reserve the memory occupied by the kernel between _text and
-        * __end_of_kernel_reserve symbols. Any kernel sections after the
-        * __end_of_kernel_reserve symbol must be explicitly reserved with a
-        * separate memblock_reserve() or they will be discarded.
-        */
-       memblock_reserve(__pa_symbol(_text),
-                        (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
-
-       /*
-        * Make sure page 0 is always reserved because on systems with
-        * L1TF its contents can be leaked to user processes.
-        */
-       memblock_reserve(0, PAGE_SIZE);
-
-       early_reserve_initrd();
-
-       /*
-        * At this point everything still needed from the boot loader
-        * or BIOS or kernel text should be early reserved or marked not
-        * RAM in e820. All other memory is free game.
-        */
-
 #ifdef CONFIG_X86_32
        memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
 
@@ -910,8 +910,18 @@ void __init setup_arch(char **cmdline_p)
 
        parse_early_param();
 
-       if (efi_enabled(EFI_BOOT))
-               efi_memblock_x86_reserve_range();
+       /*
+        * Do some memory reservations *before* memory is added to
+        * memblock, so memblock allocations won't overwrite it.
+        * Do it after early param, so we could get (unlikely) panic from
+        * serial.
+        *
+        * After this point everything still needed from the boot loader or
+        * firmware or kernel text should be early reserved or marked not
+        * RAM in e820. All other memory is free game.
+        */
+       early_reserve_memory();
+
 #ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Memory used by the kernel cannot be hot-removed because Linux
@@ -938,9 +948,6 @@ void __init setup_arch(char **cmdline_p)
 
        x86_report_nx();
 
-       /* after early param, so could get panic from serial */
-       memblock_x86_reserve_range_setup_data();
-
        if (acpi_mps_check()) {
 #ifdef CONFIG_X86_LOCAL_APIC
                disable_apic = 1;
@@ -1032,22 +1039,17 @@ void __init setup_arch(char **cmdline_p)
         */
        find_smp_config();
 
-       reserve_ibft_region();
-
        early_alloc_pgt_buf();
 
        /*
         * Need to conclude brk, before e820__memblock_setup()
-        *  it could use memblock_find_in_range, could overlap with
-        *  brk area.
+        * it could use memblock_find_in_range, could overlap with
+        * brk area.
         */
        reserve_brk();
 
        cleanup_highmap();
 
-       /* Look for ACPI tables and reserve memory occupied by them. */
-       acpi_boot_table_init();
-
        memblock_set_current_limit(ISA_END_ADDRESS);
        e820__memblock_setup();
 
@@ -1057,8 +1059,6 @@ void __init setup_arch(char **cmdline_p)
         */
        sev_setup_arch();
 
-       reserve_bios_regions();
-
        efi_fake_memmap();
        efi_find_mirror();
        efi_esrt_init();
@@ -1084,8 +1084,12 @@ void __init setup_arch(char **cmdline_p)
 
        reserve_real_mode();
 
-       trim_platform_memory_ranges();
-       trim_low_memory_range();
+       /*
+        * Reserving memory causing GPU hangs on Sandy Bridge integrated
+        * graphics devices should be done after we allocated memory under
+        * 1M for the real mode trampoline.
+        */
+       trim_snb_memory();
 
        init_mem_mapping();
 
@@ -1132,6 +1136,8 @@ void __init setup_arch(char **cmdline_p)
        reserve_initrd();
 
        acpi_table_upgrade();
+       /* Look for ACPI tables and reserve memory occupied by them. */
+       acpi_boot_table_init();
 
        vsmp_init();
 
index cdc04d09124233d0c90c3def9a2dc6fcaadaade9..0aa9f13efd57230b4690059878cd0253ed8c2e01 100644 (file)
@@ -24,7 +24,7 @@ static bool __init sev_es_check_cpu_features(void)
        return true;
 }
 
-static void sev_es_terminate(unsigned int reason)
+static void __noreturn sev_es_terminate(unsigned int reason)
 {
        u64 val = GHCB_SEV_TERMINATE;
 
@@ -186,7 +186,6 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
         * make it accessible to the hypervisor.
         *
         * In particular, check for:
-        *      - Hypervisor CPUID bit
         *      - Availability of CPUID leaf 0x8000001f
         *      - SEV CPUID bit.
         *
@@ -194,10 +193,7 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
         * can't be checked here.
         */
 
-       if ((fn == 1 && !(regs->cx & BIT(31))))
-               /* Hypervisor bit */
-               goto fail;
-       else if (fn == 0x80000000 && (regs->ax < 0x8000001f))
+       if (fn == 0x80000000 && (regs->ax < 0x8000001f))
                /* SEV leaf check */
                goto fail;
        else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
@@ -210,12 +206,8 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
        return;
 
 fail:
-       sev_es_wr_ghcb_msr(GHCB_SEV_TERMINATE);
-       VMGEXIT();
-
-       /* Shouldn't get here - if we do halt the machine */
-       while (true)
-               asm volatile("hlt\n");
+       /* Terminate the guest */
+       sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
 }
 
 static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
index 04a780abb512d01eb246067eff2b27f6e8501c51..26f5479a97a8bbab1d55eedcc689c8186aed57cc 100644 (file)
@@ -137,29 +137,41 @@ static __always_inline bool on_vc_stack(struct pt_regs *regs)
 }
 
 /*
- * This function handles the case when an NMI is raised in the #VC exception
- * handler entry code. In this case, the IST entry for #VC must be adjusted, so
- * that any subsequent #VC exception will not overwrite the stack contents of the
- * interrupted #VC handler.
+ * This function handles the case when an NMI is raised in the #VC
+ * exception handler entry code, before the #VC handler has switched off
+ * its IST stack. In this case, the IST entry for #VC must be adjusted,
+ * so that any nested #VC exception will not overwrite the stack
+ * contents of the interrupted #VC handler.
  *
  * The IST entry is adjusted unconditionally so that it can be also be
- * unconditionally adjusted back in sev_es_ist_exit(). Otherwise a nested
- * sev_es_ist_exit() call may adjust back the IST entry too early.
+ * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
+ * nested sev_es_ist_exit() call may adjust back the IST entry too
+ * early.
+ *
+ * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
+ * on the NMI IST stack, as they are only called from NMI handling code
+ * right now.
  */
 void noinstr __sev_es_ist_enter(struct pt_regs *regs)
 {
        unsigned long old_ist, new_ist;
 
        /* Read old IST entry */
-       old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+       new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
 
-       /* Make room on the IST stack */
+       /*
+        * If NMI happened while on the #VC IST stack, set the new IST
+        * value below regs->sp, so that the interrupted stack frame is
+        * not overwritten by subsequent #VC exceptions.
+        */
        if (on_vc_stack(regs))
-               new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
-       else
-               new_ist = old_ist - sizeof(old_ist);
+               new_ist = regs->sp;
 
-       /* Store old IST entry */
+       /*
+        * Reserve additional 8 bytes and store old IST value so this
+        * adjustment can be unrolled in __sev_es_ist_exit().
+        */
+       new_ist -= sizeof(old_ist);
        *(unsigned long *)new_ist = old_ist;
 
        /* Set new IST entry */
@@ -277,7 +289,7 @@ static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
                        return ES_EXCEPTION;
                }
 
-               insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE - res, 1);
+               insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE, 1);
                insn_get_length(&ctxt->insn);
        }
 
index f306e85a08a641883dc98fb6156df4857480205e..a06cb107c0e88e8621cd514d3dd1546979f4bfb7 100644 (file)
@@ -492,7 +492,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
         * SS descriptor, but we do need SS to be valid.  It's possible
         * that the old SS is entirely bogus -- this can happen if the
         * signal we're trying to deliver is #GP or #SS caused by a bad
-        * SS value.  We also have a compatbility issue here: DOSEMU
+        * SS value.  We also have a compatibility issue here: DOSEMU
         * relies on the contents of the SS register indicating the
         * SS value at the time of the signal, even though that code in
         * DOSEMU predates sigreturn's ability to restore SS.  (DOSEMU
index eff4ce3b10da7185cb770e35b6006516e72830db..06db901fabe8e0e9d61403b754080fe2f9b46ac3 100644 (file)
@@ -67,7 +67,7 @@
  *     5AP.    symmetric IO mode (normal Linux operation) not affected.
  *             'noapic' mode has vector 0xf filled out properly.
  *     6AP.    'noapic' mode might be affected - fixed in later steppings
- *     7AP.    We do not assume writes to the LVT deassering IRQs
+ *     7AP.    We do not assume writes to the LVT deasserting IRQs
  *     8AP.    We do not enable low power mode (deep sleep) during MP bootup
  *     9AP.    We do not use mixed mode
  *
@@ -204,7 +204,7 @@ static void native_stop_other_cpus(int wait)
                }
                /*
                 * Don't wait longer than 10 ms if the caller didn't
-                * reqeust it. If wait is true, the machine hangs here if
+                * request it. If wait is true, the machine hangs here if
                 * one or more CPUs do not reach shutdown state.
                 */
                timeout = USEC_PER_MSEC * 10;
index f877150a91da11ade0e910473691693530bac64c..1e2050c4f94a6eb7213308bb79f318d647be8c1d 100644 (file)
@@ -1407,7 +1407,7 @@ void __init calculate_max_logical_packages(void)
        int ncpus;
 
        /*
-        * Today neither Intel nor AMD support heterogenous systems so
+        * Today neither Intel nor AMD support heterogeneous systems so
         * extrapolate the boot cpu's data to all packages.
         */
        ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
@@ -1659,13 +1659,17 @@ void play_dead_common(void)
        local_irq_disable();
 }
 
-bool wakeup_cpu0(void)
+/**
+ * cond_wakeup_cpu0 - Wake up CPU0 if needed.
+ *
+ * If NMI wants to wake up CPU0, start CPU0.
+ */
+void cond_wakeup_cpu0(void)
 {
        if (smp_processor_id() == 0 && enable_start_cpu0)
-               return true;
-
-       return false;
+               start_cpu0();
 }
+EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);
 
 /*
  * We need to flush the caches before going to sleep, lest we have
@@ -1734,11 +1738,8 @@ static inline void mwait_play_dead(void)
                __monitor(mwait_ptr, 0, 0);
                mb();
                __mwait(eax, 0);
-               /*
-                * If NMI wants to wake up CPU0, start CPU0.
-                */
-               if (wakeup_cpu0())
-                       start_cpu0();
+
+               cond_wakeup_cpu0();
        }
 }
 
@@ -1749,11 +1750,8 @@ void hlt_play_dead(void)
 
        while (1) {
                native_halt();
-               /*
-                * If NMI wants to wake up CPU0, start CPU0.
-                */
-               if (wakeup_cpu0())
-                       start_cpu0();
+
+               cond_wakeup_cpu0();
        }
 }
 
index 8627fda8d9930dd553d8fef0576f01b581109fdf..15b058eefc4e7f9758982ed39c30822680e59d7d 100644 (file)
@@ -29,12 +29,6 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
        }
 }
 
-/*
- * This function returns an error if it detects any unreliable features of the
- * stack.  Otherwise it guarantees that the stack trace is reliable.
- *
- * If the task is not 'current', the caller *must* ensure the task is inactive.
- */
 int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
                             void *cookie, struct task_struct *task)
 {
index 653b7f617b61bfce19ccf0e0254462c00e18c5e5..8a56a6d80098b26a87ac3fc3f13d8d986a6db1d5 100644 (file)
@@ -10,7 +10,7 @@
  * EFI Quirks
  * Several EFI systems do not correctly advertise their boot framebuffers.
  * Hence, we use this static table of known broken machines and fix up the
- * information so framebuffer drivers can load corectly.
+ * information so framebuffer drivers can load correctly.
  */
 
 #include <linux/dmi.h>
index 4c09ba1102047c66db21e340c6a3f4bcf528c7b1..f9af561c3cd4fef9ff3b3bbeb53cb8ba03a1524b 100644 (file)
@@ -49,6 +49,30 @@ bool tboot_enabled(void)
        return tboot != NULL;
 }
 
+/* noinline to prevent gcc from warning about dereferencing constant fixaddr */
+static noinline __init bool check_tboot_version(void)
+{
+       if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) {
+               pr_warn("tboot at 0x%llx is invalid\n", boot_params.tboot_addr);
+               return false;
+       }
+
+       if (tboot->version < 5) {
+               pr_warn("tboot version is invalid: %u\n", tboot->version);
+               return false;
+       }
+
+       pr_info("found shared page at phys addr 0x%llx:\n",
+               boot_params.tboot_addr);
+       pr_debug("version: %d\n", tboot->version);
+       pr_debug("log_addr: 0x%08x\n", tboot->log_addr);
+       pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry);
+       pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base);
+       pr_debug("tboot_size: 0x%x\n", tboot->tboot_size);
+
+       return true;
+}
+
 void __init tboot_probe(void)
 {
        /* Look for valid page-aligned address for shared page. */
@@ -66,25 +90,9 @@ void __init tboot_probe(void)
 
        /* Map and check for tboot UUID. */
        set_fixmap(FIX_TBOOT_BASE, boot_params.tboot_addr);
-       tboot = (struct tboot *)fix_to_virt(FIX_TBOOT_BASE);
-       if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) {
-               pr_warn("tboot at 0x%llx is invalid\n", boot_params.tboot_addr);
+       tboot = (void *)fix_to_virt(FIX_TBOOT_BASE);
+       if (!check_tboot_version())
                tboot = NULL;
-               return;
-       }
-       if (tboot->version < 5) {
-               pr_warn("tboot version is invalid: %u\n", tboot->version);
-               tboot = NULL;
-               return;
-       }
-
-       pr_info("found shared page at phys addr 0x%llx:\n",
-               boot_params.tboot_addr);
-       pr_debug("version: %d\n", tboot->version);
-       pr_debug("log_addr: 0x%08x\n", tboot->log_addr);
-       pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry);
-       pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base);
-       pr_debug("tboot_size: 0x%x\n", tboot->tboot_size);
 }
 
 static pgd_t *tboot_pg_dir;
index f5477eab5692b3e308db251fd71fb0de1ef5c34d..bd83748e2bde3a7f1b95b94a90db5c0577f99b66 100644 (file)
@@ -113,7 +113,7 @@ int arch_register_cpu(int num)
         * Two known BSP/CPU0 dependencies: Resume from suspend/hibernate
         * depends on BSP. PIC interrupts depend on BSP.
         *
-        * If the BSP depencies are under control, one can tell kernel to
+        * If the BSP dependencies are under control, one can tell kernel to
         * enable BSP hotplug. This basically adds a control file and
         * one can attempt to offline BSP.
         */
index ac1874a2a70e8cc41654bc652a8922238192a8ff..f577d07fbd43a0bac91b0ec7fc8f1d7299fbf22c 100644 (file)
@@ -395,7 +395,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
                /*
                 * Adjust our frame so that we return straight to the #GP
                 * vector with the expected RSP value.  This is safe because
-                * we won't enable interupts or schedule before we invoke
+                * we won't enable interrupts or schedule before we invoke
                 * general_protection, so nothing will clobber the stack
                 * frame we just set up.
                 *
@@ -556,7 +556,7 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
                tsk->thread.trap_nr = X86_TRAP_GP;
 
                if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
-                       return;
+                       goto exit;
 
                show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
                force_sig(SIGSEGV);
@@ -978,6 +978,10 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
                goto out_irq;
        }
 
+       /* #DB for bus lock can only be triggered from userspace. */
+       if (dr6 & DR_BUS_LOCK)
+               handle_bus_lock(regs);
+
        /* Add the virtual_dr6 bits for signals. */
        dr6 |= current->thread.virtual_dr6;
        if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
@@ -1057,7 +1061,7 @@ static void math_error(struct pt_regs *regs, int trapnr)
                goto exit;
 
        if (fixup_vdso_exception(regs, trapnr, 0, 0))
-               return;
+               goto exit;
 
        force_sig_fault(SIGFPE, si_code,
                        (void __user *)uprobe_get_trap_addr(regs));
index f70dffc2771f53d5f2b924126150d244463c09ee..57ec01192180534964b60e11090313b749f250d8 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/percpu.h>
 #include <linux/timex.h>
 #include <linux/static_key.h>
+#include <linux/static_call.h>
 
 #include <asm/hpet.h>
 #include <asm/timer.h>
@@ -254,7 +255,7 @@ unsigned long long sched_clock(void)
 
 bool using_native_sched_clock(void)
 {
-       return pv_ops.time.sched_clock == native_sched_clock;
+       return static_call_query(pv_sched_clock) == native_sched_clock;
 }
 #else
 unsigned long long
@@ -739,7 +740,7 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
         * 2) Reference counter. If available we use the HPET or the
         * PMTIMER as a reference to check the sanity of that value.
         * We use separate TSC readouts and check inside of the
-        * reference read for any possible disturbance. We dicard
+        * reference read for any possible disturbance. We discard
         * disturbed values here as well. We do that around the PIT
         * calibration delay loop as we have to wait for a certain
         * amount of time anyway.
@@ -1079,7 +1080,7 @@ static void tsc_resume(struct clocksource *cs)
  * very small window right after one CPU updated cycle_last under
  * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
  * is smaller than the cycle_last reference value due to a TSC which
- * is slighty behind. This delta is nowhere else observable, but in
+ * is slightly behind. This delta is nowhere else observable, but in
  * that case it results in a forward time jump in the range of hours
  * due to the unsigned delta calculation of the time keeping core
  * code, which is necessary to support wrapping clocksources like pm
@@ -1264,7 +1265,7 @@ EXPORT_SYMBOL(convert_art_to_tsc);
  *     corresponding clocksource
  *     @cycles:        System counter value
  *     @cs:            Clocksource corresponding to system counter value. Used
- *                     by timekeeping code to verify comparibility of two cycle
+ *                     by timekeeping code to verify comparability of two cycle
  *                     values.
  */
 
index 3d3c761eb74a64a535f0d0c516d275e48a0a36b8..50a4515fe0ad15ec241c257735022287094a4514 100644 (file)
@@ -472,7 +472,7 @@ retry:
        /*
         * Add the result to the previous adjustment value.
         *
-        * The adjustement value is slightly off by the overhead of the
+        * The adjustment value is slightly off by the overhead of the
         * sync mechanism (observed values are ~200 TSC cycles), but this
         * really depends on CPU, node distance and frequency. So
         * compensating for this is hard to get right. Experiments show
index f6225bf22c02f60b66fb25abe9b068030a1f3c70..fac1daae7994a6d751f89d041d5c643231ce8f2c 100644 (file)
@@ -272,7 +272,7 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
                 * by whether the operand is a register or a memory location.
                 * If operand is a register, return as many bytes as the operand
                 * size. If operand is memory, return only the two least
-                * siginificant bytes.
+                * significant bytes.
                 */
                if (X86_MODRM_MOD(insn->modrm.value) == 3)
                        *data_size = insn->opnd_bytes;
index a788d5120d4d94abe25f9199709ae99fd0929e49..f6b93a35ce1455c7252d2fa491167ba187d32e7b 100644 (file)
@@ -84,6 +84,18 @@ config KVM_INTEL
          To compile this as a module, choose M here: the module
          will be called kvm-intel.
 
+config X86_SGX_KVM
+       bool "Software Guard eXtensions (SGX) Virtualization"
+       depends on X86_SGX && KVM_INTEL
+       help
+
+         Enables KVM guests to create SGX enclaves.
+
+         This includes support to expose "raw" unreclaimable enclave memory to
+         guests via a device node, e.g. /dev/sgx_vepc.
+
+         If unsure, say N.
+
 config KVM_AMD
        tristate "KVM for AMD processors support"
        depends on KVM
index 6bd2f8b830e49ff115ea9058bb6a2c51ebcdfaf5..c02466a1410b7588e9d261245433f0b4ce1edb64 100644 (file)
@@ -1033,7 +1033,7 @@ EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
  *  - Centaur:    0xc0000000 - 0xcfffffff
  *
  * The Hypervisor class is further subdivided into sub-classes that each act as
- * their own indepdent class associated with a 0x100 byte range.  E.g. if Qemu
+ * their own independent class associated with a 0x100 byte range.  E.g. if Qemu
  * is advertising support for both HyperV and KVM, the resulting Hypervisor
  * CPUID sub-classes are:
  *
index f7970ba6219fc52887a4b6b3757ef6de8686ce8d..cdd2a2b6550e71b488fe6a1e3ef5f7ad142daf40 100644 (file)
@@ -3222,7 +3222,7 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
        }
 
        /*
-        * Now load segment descriptors. If fault happenes at this stage
+        * Now load segment descriptors. If fault happens at this stage
         * it is handled in a context of new task
         */
        ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
index 8a4de3f12820d5f6afe34c2eb62753525896aa71..d5b72a08e566ca9faeff77d672d228669b47b452 100644 (file)
@@ -269,7 +269,7 @@ int kvm_set_routing_entry(struct kvm *kvm,
                          const struct kvm_irq_routing_entry *ue)
 {
        /* We can't check irqchip_in_kernel() here as some callers are
-        * currently inititalizing the irqchip. Other callers should therefore
+        * currently initializing the irqchip. Other callers should therefore
         * check kvm_arch_can_set_irq_routing() before calling this function.
         */
        switch (ue->type) {
index 486aa94ecf1da37b138fd90a0669fa0eb47ba068..62b1729277ef582dd596cc498516d83224b2e2b7 100644 (file)
@@ -4961,7 +4961,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
        /*
         * No need to care whether allocation memory is successful
-        * or not since pte prefetch is skiped if it does not have
+        * or not since pte prefetch is skipped if it does not have
         * enough objects in the cache.
         */
        mmu_topup_memory_caches(vcpu, true);
@@ -5906,7 +5906,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
                                      lpage_disallowed_link);
                WARN_ON_ONCE(!sp->lpage_disallowed);
                if (is_tdp_mmu_page(sp)) {
-                       flush = kvm_tdp_mmu_zap_sp(kvm, sp);
+                       flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
                } else {
                        kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
                        WARN_ON_ONCE(sp->lpage_disallowed);
index 1f6f98c76bdf10a064dbe41cbe5638af49554d93..360983865398925cec503e4a4c2648bf6da57d2c 100644 (file)
@@ -59,7 +59,7 @@ struct kvm_mmu_page {
 #ifdef CONFIG_X86_64
        bool tdp_mmu_page;
 
-       /* Used for freeing the page asyncronously if it is a TDP MMU page. */
+       /* Used for freeing the page asynchronously if it is a TDP MMU page. */
        struct rcu_head rcu_head;
 #endif
 };
index 018d82e73e3117d14b05adaa104f46569402b222..34207b8748861e188053c203945cb97b440166a1 100644 (file)
@@ -404,7 +404,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
         * If this warning were to trigger it would indicate that there was a
         * missing MMU notifier or a race with some notifier handler.
         * A present, leaf SPTE should never be directly replaced with another
-        * present leaf SPTE pointing to a differnt PFN. A notifier handler
+        * present leaf SPTE pointing to a different PFN. A notifier handler
         * should be zapping the SPTE before the main MM's page table is
         * changed, or the SPTE should be zeroed, and the TLBs flushed by the
         * thread before replacement.
@@ -418,7 +418,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 
                /*
                 * Crash the host to prevent error propagation and guest data
-                * courruption.
+                * corruption.
                 */
                BUG();
        }
@@ -529,7 +529,7 @@ static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
        /*
         * No other thread can overwrite the removed SPTE as they
         * must either wait on the MMU lock or use
-        * tdp_mmu_set_spte_atomic which will not overrite the
+        * tdp_mmu_set_spte_atomic which will not overwrite the
         * special removed SPTE value. No bookkeeping is needed
         * here since the SPTE is going from non-present
         * to non-present.
index 7b30bc967af38c2da2781cba6dbb0d1fb906ff15..67e753edfa225bafdd2a728e5c2d9939b0bc2db6 100644 (file)
@@ -103,7 +103,7 @@ static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
 
 /* returns general purpose PMC with the specified MSR. Note that it can be
  * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
- * paramenter to tell them apart.
+ * parameter to tell them apart.
  */
 static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
                                         u32 base)
index 78bdcfac4e400ac31647329e1d5b609278be4b02..3e55674098be08c7f983f2d1810d2908961da1b2 100644 (file)
@@ -727,7 +727,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
        struct amd_svm_iommu_ir *ir;
 
        /**
-        * In some cases, the existing irte is updaed and re-set,
+        * In some cases, the existing irte is updated and re-set,
         * so we need to check here if it's already been * added
         * to the ir_list.
         */
@@ -838,7 +838,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
                 * Here, we setup with legacy mode in the following cases:
                 * 1. When cannot target interrupt to a specific vcpu.
                 * 2. Unsetting posted interrupt.
-                * 3. APIC virtialization is disabled for the vcpu.
+                * 3. APIC virtualization is disabled for the vcpu.
                 * 4. IRQ has incompatible delivery mode (SMI, INIT, etc)
                 */
                if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
index 874ea309279f5f9e02698854099c54f0aab95f56..2b27a94524033d7dd4606c37668f60f48233d1a2 100644 (file)
@@ -2082,7 +2082,7 @@ void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
        hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
        hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 
-       /* PKRU is restored on VMEXIT, save the curent host value */
+       /* PKRU is restored on VMEXIT, save the current host value */
        hostsa->pkru = read_pkru();
 
        /* MSR_IA32_XSS is restored on VMEXIT, save the currnet host value */
index 58a45bb139f88ab78c4a01e370693379968f0984..6dad89248312192421e7542e5f6f70ae54125217 100644 (file)
@@ -4400,7 +4400,7 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i
         *
         * This happens because CPU microcode reading instruction bytes
         * uses a special opcode which attempts to read data using CPL=0
-        * priviledges. The microcode reads CS:RIP and if it hits a SMAP
+        * privileges. The microcode reads CS:RIP and if it hits a SMAP
         * fault, it gives up and returns no instruction bytes.
         *
         * Detection:
index bcca0b80e0d040be4ce9a4f9a2ee05d77e0c8257..1e069aac741004f1fbea664f813eaad430a997c3 100644 (file)
@@ -3537,7 +3537,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
         * snapshot restore (migration).
         *
         * In this flow, it is assumed that vmcs12 cache was
-        * trasferred as part of captured nVMX state and should
+        * transferred as part of captured nVMX state and should
         * therefore not be read from guest memory (which may not
         * exist on destination host yet).
         */
index 4831bc44ce66fc0e4773f74f4a213a7e48b8be01..459748680daf2b93e40a67c56e7306a85a65121b 100644 (file)
@@ -10,7 +10,7 @@
 #include "vmx.h"
 
 /*
- * We maintian a per-CPU linked-list of vCPU, so in wakeup_handler() we
+ * We maintain a per-CPU linked-list of vCPU, so in wakeup_handler() we
  * can find which vCPU should be waken up.
  */
 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
index 32cf8287d4a77306bb38da05ee6250441879e6f7..bcbf0d2139e9a527e04901ee17e351aad4cf9af0 100644 (file)
@@ -1529,7 +1529,7 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
 
        /*
         * MTCFreq, CycThresh and PSBFreq encodings check, any MSR write that
-        * utilize encodings marked reserved will casue a #GP fault.
+        * utilize encodings marked reserved will cause a #GP fault.
         */
        value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
        if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
@@ -2761,7 +2761,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
        /*
-        * Update real mode segment cache. It may be not up-to-date if sement
+        * Update real mode segment cache. It may be not up-to-date if segment
         * register was written while vcpu was in a guest mode.
         */
        vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
@@ -6027,19 +6027,19 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
             exit_reason.basic != EXIT_REASON_PML_FULL &&
             exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
             exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
+               int ndata = 3;
+
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
-               vcpu->run->internal.ndata = 3;
                vcpu->run->internal.data[0] = vectoring_info;
                vcpu->run->internal.data[1] = exit_reason.full;
                vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
                if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
-                       vcpu->run->internal.ndata++;
-                       vcpu->run->internal.data[3] =
+                       vcpu->run->internal.data[ndata++] =
                                vmcs_read64(GUEST_PHYSICAL_ADDRESS);
                }
-               vcpu->run->internal.data[vcpu->run->internal.ndata++] =
-                       vcpu->arch.last_vmentry_cpu;
+               vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
+               vcpu->run->internal.ndata = ndata;
                return 0;
        }
 
@@ -7252,7 +7252,7 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
        if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
                vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
 
-       /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabircEn can be set */
+       /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */
        if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
                vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
 
index eca63625aee4d826913d944bb98d2f39bd14cce2..efc7a82ab14015d64fc65c2f4ffdb6588ddd6024 100644 (file)
@@ -156,9 +156,9 @@ module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
 
 /*
  * lapic timer advance (tscdeadline mode only) in nanoseconds.  '-1' enables
- * adaptive tuning starting from default advancment of 1000ns.  '0' disables
+ * adaptive tuning starting from default advancement of 1000ns.  '0' disables
  * advancement entirely.  Any other value is used as-is and disables adaptive
- * tuning, i.e. allows priveleged userspace to set an exact advancement time.
+ * tuning, i.e. allows privileged userspace to set an exact advancement time.
  */
 static int __read_mostly lapic_timer_advance_ns = -1;
 module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);
@@ -1287,7 +1287,7 @@ static const u32 emulated_msrs_all[] = {
        MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
 
        MSR_IA32_TSC_ADJUST,
-       MSR_IA32_TSCDEADLINE,
+       MSR_IA32_TSC_DEADLINE,
        MSR_IA32_ARCH_CAPABILITIES,
        MSR_IA32_PERF_CAPABILITIES,
        MSR_IA32_MISC_ENABLE,
@@ -1372,7 +1372,7 @@ static u64 kvm_get_arch_capabilities(void)
        /*
         * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
         * the nested hypervisor runs with NX huge pages.  If it is not,
-        * L1 is anyway vulnerable to ITLB_MULTIHIT explots from other
+        * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
         * L1 guests, so it need not worry about its own (L2) guests.
         */
        data |= ARCH_CAP_PSCHANGE_MC_NO;
@@ -1849,7 +1849,7 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
                        ret = EXIT_FASTPATH_EXIT_HANDLED;
                }
                break;
-       case MSR_IA32_TSCDEADLINE:
+       case MSR_IA32_TSC_DEADLINE:
                data = kvm_read_edx_eax(vcpu);
                if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
                        kvm_skip_emulated_instruction(vcpu);
@@ -3087,7 +3087,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                return kvm_set_apic_base(vcpu, msr_info);
        case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
                return kvm_x2apic_msr_write(vcpu, msr, data);
-       case MSR_IA32_TSCDEADLINE:
+       case MSR_IA32_TSC_DEADLINE:
                kvm_set_lapic_tscdeadline_msr(vcpu, data);
                break;
        case MSR_IA32_TSC_ADJUST:
@@ -3449,7 +3449,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
                return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
-       case MSR_IA32_TSCDEADLINE:
+       case MSR_IA32_TSC_DEADLINE:
                msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
                break;
        case MSR_IA32_TSC_ADJUST:
@@ -4025,7 +4025,6 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
        struct kvm_host_map map;
        struct kvm_steal_time *st;
-       int idx;
 
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
@@ -4033,15 +4032,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
        if (vcpu->arch.st.preempted)
                return;
 
-       /*
-        * Take the srcu lock as memslots will be accessed to check the gfn
-        * cache generation against the memslots generation.
-        */
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
-
        if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
                        &vcpu->arch.st.cache, true))
-               goto out;
+               return;
 
        st = map.hva +
                offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
@@ -4049,20 +4042,25 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
        st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
 
        kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
-
-out:
-       srcu_read_unlock(&vcpu->kvm->srcu, idx);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       int idx;
+
        if (vcpu->preempted && !vcpu->arch.guest_state_protected)
                vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
 
+       /*
+        * Take the srcu lock as memslots will be accessed to check the gfn
+        * cache generation against the memslots generation.
+        */
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvm_xen_msr_enabled(vcpu->kvm))
                kvm_xen_runstate_set_preempted(vcpu);
        else
                kvm_steal_time_set_preempted(vcpu);
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
        static_call(kvm_x86_vcpu_put)(vcpu);
        vcpu->arch.last_host_tsc = rdtsc();
index 3b6544111ac92dc310f23e4919b699e541ea3c56..16bc9130e7a5e81192ed7a0ee9999a3276c0d8e7 100644 (file)
@@ -6,7 +6,7 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 
 /* if you want SMP support, implement these with real spinlocks */
 .macro LOCK reg
index 1c5c81c16b0668f17ae3cc11b05f95f52a153637..ce6935690766f41a6ac4262c966de4c9fcbcdcc1 100644 (file)
@@ -6,7 +6,7 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 
 .macro read64 reg
        movl %ebx, %eax
index 2402d4c489d298cf05e368ef4cb40d457ed5e574..db4b4f9197c7d99d6b945b6586da9274fdd6ce2a 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/export.h>
 
 /*
index 77b9b2a3b5c84dbf8418d7816e84412297fbc12b..57b79c577496d7700c8df7fe5c3a5c1b24370674 100644 (file)
@@ -11,7 +11,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
 #include <asm/export.h>
index bb0b3fe1e0a0251c716ec2568576d8e0ceb13900..2bf07e18e38cb3ffef9a0d0c476f000c46501ff5 100644 (file)
@@ -232,7 +232,7 @@ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off)
  * resolve_seg_reg() - obtain segment register index
  * @insn:      Instruction with operands
  * @regs:      Register values as seen when entering kernel mode
- * @regoff:    Operand offset, in pt_regs, used to deterimine segment register
+ * @regoff:    Operand offset, in pt_regs, used to determine segment register
  *
  * Determine the segment register associated with the operands and, if
  * applicable, prefixes and the instruction pointed by @insn.
@@ -517,7 +517,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
  * @insn:      Instruction containing ModRM byte
  * @regs:      Register values as seen when entering kernel mode
  * @offs1:     Offset of the first operand register
- * @offs2:     Offset of the second opeand register, if applicable
+ * @offs2:     Offset of the second operand register, if applicable
  *
  * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte
  * in @insn. This function is to be used with 16-bit address encodings. The
@@ -576,7 +576,7 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
         * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement-
         * only addressing. This means that no registers are involved in
         * computing the effective address. Thus, ensure that the first
-        * register offset is invalild. The second register offset is already
+        * register offset is invalid. The second register offset is already
         * invalid under the aforementioned conditions.
         */
        if ((X86_MODRM_MOD(insn->modrm.value) == 0) &&
index 1e299ac73c8698622c7e631a4571b5b88e8b32f7..1cc9da6e29c7926be24bea853952f115c4fa5a3f 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/export.h>
 
 .pushsection .noinstr.text, "ax"
index 41902fe8b85982e10409c1a68e86cf01ee1519fd..64801010d312d92820b3c3e86738ae99b5be9ec3 100644 (file)
@@ -8,7 +8,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/export.h>
 
 #undef memmove
index 0bfd26e4ca9e938af774c5b1e4fea753d47d156c..9827ae267f96e00660870f775fb8d2d297c20445 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/export.h>
 
 /*
index 419365c48b2ada2b40094552affd0e4cd4ba433c..cc5f4ea943d37ff4b87bde9a5baa7888db827c42 100644 (file)
@@ -14,7 +14,7 @@
  *     tested so far for any MMX solution figured.
  *
  *     22/09/2000 - Arjan van de Ven
- *             Improved for non-egineering-sample Athlons
+ *             Improved for non-engineering-sample Athlons
  *
  */
 #include <linux/hardirq.h>
index 75a0915b0d01d769a98484463be321c8585e5e20..40bbe56bde3256eadea3d68a01b6af713b943552 100644 (file)
@@ -252,7 +252,7 @@ static void __wrmsr_safe_regs_on_cpu(void *info)
        rv->err = wrmsr_safe_regs(rv->regs);
 }
 
-int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
 {
        int err;
        struct msr_regs_info rv;
@@ -265,7 +265,7 @@ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
 }
 EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
 
-int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
 {
        int err;
        struct msr_regs_info rv;
index 3bd905e10ee21bd532338740c5294ea75a8284ba..b09cd2ad426ccde84865b1d974f3dd35d5109c54 100644 (file)
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(msrs_free);
  * argument @m.
  *
  */
-int msr_read(u32 msr, struct msr *m)
+static int msr_read(u32 msr, struct msr *m)
 {
        int err;
        u64 val;
@@ -54,7 +54,7 @@ int msr_read(u32 msr, struct msr *m)
  * @msr: MSR to write
  * @m: value to write
  */
-int msr_write(u32 msr, struct msr *m)
+static int msr_write(u32 msr, struct msr *m)
 {
        return wrmsrl_safe(msr, m->q);
 }
index f6fb1d218dccf7fe84de55591c6ed22f5ddc528f..6bb74b5c238c61db1c23b8ec33152561bf1deaba 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
 #include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/export.h>
 #include <asm/nospec-branch.h>
 #include <asm/unwind_hints.h>
index 4a9887851ad8ab05e11fb427df846721bd66ff43..990d847ae902c6e032462e55914dbc680a3959bd 100644 (file)
@@ -547,7 +547,7 @@ static void frndint_(FPU_REG *st0_ptr, u_char st0_tag)
                single_arg_error(st0_ptr, st0_tag);
 }
 
-static int fsin(FPU_REG *st0_ptr, u_char tag)
+static int f_sin(FPU_REG *st0_ptr, u_char tag)
 {
        u_char arg_sign = getsign(st0_ptr);
 
@@ -608,6 +608,11 @@ static int fsin(FPU_REG *st0_ptr, u_char tag)
        }
 }
 
+static void fsin(FPU_REG *st0_ptr, u_char tag)
+{
+       f_sin(st0_ptr, tag);
+}
+
 static int f_cos(FPU_REG *st0_ptr, u_char tag)
 {
        u_char st0_sign;
@@ -724,7 +729,7 @@ static void fsincos(FPU_REG *st0_ptr, u_char st0_tag)
        }
 
        reg_copy(st0_ptr, &arg);
-       if (!fsin(st0_ptr, st0_tag)) {
+       if (!f_sin(st0_ptr, st0_tag)) {
                push();
                FPU_copy_to_reg0(&arg, st0_tag);
                f_cos(&st(0), st0_tag);
@@ -1635,7 +1640,7 @@ void FPU_triga(void)
 }
 
 static FUNC_ST0 const trig_table_b[] = {
-       fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, (FUNC_ST0) fsin, fcos
+       fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, fsin, fcos
 };
 
 void FPU_trigb(void)
index fe6246ff98870580ce11a2b2178b68b731c4036c..7ca6417c0c8d1727f78d4bb6df8f5d08f0197788 100644 (file)
@@ -964,7 +964,7 @@ int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d)
 /* The return value (in eax) is zero if the result is exact,
    if bits are changed due to rounding, truncation, etc, then
    a non-zero value is returned */
-/* Overflow is signalled by a non-zero return value (in eax).
+/* Overflow is signaled by a non-zero return value (in eax).
    In the case of overflow, the returned significand always has the
    largest possible value */
 int FPU_round_to_int(FPU_REG *r, u_char tag)
index 11a1f798451bd153822dbcc7babd41ffb6f174cd..4a9fc3cc5a4d4266adfd2ccc5911c4fa8a066476 100644 (file)
@@ -575,7 +575,7 @@ Normalise_result:
 #ifdef PECULIAR_486
        /*
         * This implements a special feature of 80486 behaviour.
-        * Underflow will be signalled even if the number is
+        * Underflow will be signaled even if the number is
         * not a denormal after rounding.
         * This difference occurs only for masked underflow, and not
         * in the unmasked case.
index a73347e2cdfc569e7ef2dbd6a3b12ea1af5f6702..1c548ad0075204e3d129913be2767b64d1edb2fc 100644 (file)
@@ -1497,7 +1497,7 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
         * userspace task is trying to access some valid (from guest's point of
         * view) memory which is not currently mapped by the host (e.g. the
         * memory is swapped out). Note, the corresponding "page ready" event
-        * which is injected when the memory becomes available, is delived via
+        * which is injected when the memory becomes available, is delivered via
         * an interrupt mechanism and not a #PF exception
         * (see arch/x86/kernel/kvm.c: sysvec_kvm_asyncpf_interrupt()).
         *
@@ -1523,7 +1523,7 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
         *
         * In case the fault hit a RCU idle region the conditional entry
         * code reenabled RCU to avoid subsequent wreckage which helps
-        * debugability.
+        * debuggability.
         */
        state = irqentry_enter(regs);
 
index dd694fb9391696e8cecfce2c5a8185786af423ba..fbf41dd142ca11f02b4fe2f524942f9dd08bcee0 100644 (file)
@@ -29,7 +29,7 @@
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
- * is only compied when SMP=y.
+ * is only compiled when SMP=y.
  */
 #define CREATE_TRACE_POINTS
 #include <trace/events/tlb.h>
@@ -756,7 +756,7 @@ void __init init_mem_mapping(void)
 
 #ifdef CONFIG_X86_64
        if (max_pfn > max_low_pfn) {
-               /* can we preseve max_low_pfn ?*/
+               /* can we preserve max_low_pfn ?*/
                max_low_pfn = max_pfn;
        }
 #else
@@ -939,7 +939,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
        /*
         * end could be not aligned, and We can not align that,
-        * decompresser could be confused by aligned initrd_end
+        * decompressor could be confused by aligned initrd_end
         * We already reserve the end partial page before in
         *   - i386_start_kernel()
         *   - x86_64_start_kernel()
index b5a3fa4033d38bf00fd4766683459892c9554ec8..55247451ba85bdc4b01ac6fd96395d0203ce8462 100644 (file)
@@ -172,7 +172,7 @@ static void sync_global_pgds_l4(unsigned long start, unsigned long end)
 
                /*
                 * With folded p4d, pgd_none() is always false, we need to
-                * handle synchonization on p4d level.
+                * handle synchronization on p4d level.
                 */
                MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
                p4d_ref = p4d_offset(pgd_ref, addr);
@@ -986,7 +986,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
                        /*
                         * Do not free direct mapping pages since they were
-                        * freed when offlining, or simplely not in use.
+                        * freed when offlining, or simply not in use.
                         */
                        if (!direct)
                                free_pagetable(pte_page(*pte), 0);
@@ -1004,7 +1004,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                         *
                         * If we are not removing the whole page, it means
                         * other page structs in this page are being used and
-                        * we canot remove them. So fill the unused page_structs
+                        * we cannot remove them. So fill the unused page_structs
                         * with 0xFD, and remove the page when it is wholly
                         * filled with 0xFD.
                         */
index 6e6b39710e5fa561d1aec5c7005b79b0a1290da8..557f0fe25dff47641f03a86e5166a03f19627901 100644 (file)
@@ -96,7 +96,7 @@ void __init kernel_randomize_memory(void)
        memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
                CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
 
-       /* Adapt phyiscal memory region size based on available memory */
+       /* Adapt physical memory region size based on available memory */
        if (memory_tb < kaslr_regions[0].size_tb)
                kaslr_regions[0].size_tb = memory_tb;
 
index be020a7bc41474d54fcd1619f81e244509cb2be5..d3efbc5b344982658393b4bca3c744d5977318ec 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Support for MMIO probes.
- * Benfit many code from kprobes
+ * Benefit many code from kprobes
  * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
  *     2007 Alexander Eichner
  *     2008 Pekka Paalanen <pq@iki.fi>
index ae78cef7998023e7d7520dab74e0153b543af8f4..f633f9e23b8fcc897b990a3c96aeccee34f0c10d 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/dma-mapping.h>
+#include <linux/virtio_config.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -484,3 +485,8 @@ void __init mem_encrypt_init(void)
        print_mem_encrypt_feature_info();
 }
 
+int arch_has_restricted_virtio_memory_access(void)
+{
+       return sev_active();
+}
+EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);
index 7a84fc8bc5c36f476ba0561eed47e7cb08014ef9..17d292b7072f1e6cce6d2f951f566f2727820d82 100644 (file)
@@ -27,7 +27,7 @@ SYM_FUNC_START(sme_encrypt_execute)
         *     - stack page (PAGE_SIZE)
         *     - encryption routine page (PAGE_SIZE)
         *     - intermediate copy buffer (PMD_PAGE_SIZE)
-        *    R8 - physcial address of the pagetables to use for encryption
+        *    R8 - physical address of the pagetables to use for encryption
         */
 
        push    %rbp
index 6c5eb6f3f14f40f9002e2afde0ebbccee1237e75..a19374d2610132ee94d78ee5f675d7004236aaa0 100644 (file)
@@ -503,14 +503,10 @@ void __init sme_enable(struct boot_params *bp)
 
 #define AMD_SME_BIT    BIT(0)
 #define AMD_SEV_BIT    BIT(1)
-       /*
-        * Set the feature mask (SME or SEV) based on whether we are
-        * running under a hypervisor.
-        */
-       eax = 1;
-       ecx = 0;
-       native_cpuid(&eax, &ebx, &ecx, &edx);
-       feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
+
+       /* Check the SEV MSR whether SEV or SME is enabled */
+       sev_status   = __rdmsr(MSR_AMD64_SEV);
+       feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
 
        /*
         * Check for the SME/SEV feature:
@@ -530,19 +526,26 @@ void __init sme_enable(struct boot_params *bp)
 
        /* Check if memory encryption is enabled */
        if (feature_mask == AMD_SME_BIT) {
+               /*
+                * No SME if Hypervisor bit is set. This check is here to
+                * prevent a guest from trying to enable SME. For running as a
+                * KVM guest the MSR_K8_SYSCFG will be sufficient, but there
+                * might be other hypervisors which emulate that MSR as non-zero
+                * or even pass it through to the guest.
+                * A malicious hypervisor can still trick a guest into this
+                * path, but there is no way to protect against that.
+                */
+               eax = 1;
+               ecx = 0;
+               native_cpuid(&eax, &ebx, &ecx, &edx);
+               if (ecx & BIT(31))
+                       return;
+
                /* For SME, check the SYSCFG MSR */
                msr = __rdmsr(MSR_K8_SYSCFG);
                if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
                        return;
        } else {
-               /* For SEV, check the SEV MSR */
-               msr = __rdmsr(MSR_AMD64_SEV);
-               if (!(msr & MSR_AMD64_SEV_ENABLED))
-                       return;
-
-               /* Save SEV_STATUS to avoid reading MSR again */
-               sev_status = msr;
-
                /* SEV state cannot be controlled by a command line option */
                sme_me_mask = me_mask;
                sev_enabled = true;
index ca311aaa67b889b6985eeefdad379569432b76c8..3112ca7786ed1462686fb8fbae96b621d3334195 100644 (file)
@@ -695,7 +695,7 @@ int memtype_free(u64 start, u64 end)
 
 
 /**
- * lookup_memtype - Looksup the memory type for a physical address
+ * lookup_memtype - Looks up the memory type for a physical address
  * @paddr: physical address of which memory type needs to be looked up
  *
  * Only to be called when PAT is enabled
@@ -800,6 +800,7 @@ void memtype_free_io(resource_size_t start, resource_size_t end)
        memtype_free(start, end);
 }
 
+#ifdef CONFIG_X86_PAT
 int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
 {
        enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
@@ -813,6 +814,7 @@ void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
        memtype_free_io(start, start + size);
 }
 EXPORT_SYMBOL(arch_io_free_memtype_wc);
+#endif
 
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t vma_prot)
index 16f878c26667933c5641069bb5b40c8f97221cc7..427980617557c173b533fe6f8b258f5dab29c014 100644 (file)
@@ -680,7 +680,7 @@ pmd_t *lookup_pmd_address(unsigned long address)
  * end up in this kind of memory, for instance.
  *
  * This could be optimized, but it is only intended to be
- * used at inititalization time, and keeping it
+ * used at initialization time, and keeping it
  * unoptimized should increase the testing coverage for
  * the more obscure platforms.
  */
index 8873ed1438a978ec9b7e959556bbc1450161c5ba..a2332eef66e9f8532e9e52a56925a2a04f95a53d 100644 (file)
@@ -128,7 +128,7 @@ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
 /*
  * Called from the FPU code when creating a fresh set of FPU
  * registers.  This is called from a very specific context where
- * we know the FPU regstiers are safe for use and we can use PKRU
+ * we know the FPU registers are safe for use and we can use PKRU
  * directly.
  */
 void copy_init_pkru_to_fpregs(void)
index 1aab92930569af4542db384dabd46a56a6890662..5d5c7bb50ce9e2e9646f783d26adb1e33ea094aa 100644 (file)
@@ -361,7 +361,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
                         * global, so set it as global in both copies.  Note:
                         * the X86_FEATURE_PGE check is not _required_ because
                         * the CPU ignores _PAGE_GLOBAL when PGE is not
-                        * supported.  The check keeps consistentency with
+                        * supported.  The check keeps consistency with
                         * code that only set this bit when supported.
                         */
                        if (boot_cpu_has(X86_FEATURE_PGE))
@@ -440,10 +440,9 @@ static void __init pti_clone_user_shared(void)
 
        for_each_possible_cpu(cpu) {
                /*
-                * The SYSCALL64 entry code needs to be able to find the
-                * thread stack and needs one word of scratch space in which
-                * to spill a register.  All of this lives in the TSS, in
-                * the sp1 and sp2 slots.
+                * The SYSCALL64 entry code needs one word of scratch space
+                * in which to spill a register.  It lives in the sp2 slot
+                * of the CPU's TSS.
                 *
                 * This is done for all possible CPUs during boot to ensure
                 * that it's propagated to all mms.
@@ -512,7 +511,7 @@ static void pti_clone_entry_text(void)
 static inline bool pti_kernel_image_global_ok(void)
 {
        /*
-        * Systems with PCIDs get litlle benefit from global
+        * Systems with PCIDs get little benefit from global
         * kernel text and are not worth the downsides.
         */
        if (cpu_feature_enabled(X86_FEATURE_PCID))
index 569ac1d57f55a318b6f7a7daf5cfee55be15e4c4..98f269560d4060fb1f93ecfc968bccbe6a2574de 100644 (file)
@@ -106,7 +106,7 @@ static inline u16 kern_pcid(u16 asid)
 
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
        /*
-        * Make sure that the dynamic ASID space does not confict with the
+        * Make sure that the dynamic ASID space does not conflict with the
         * bit we are using to switch between user and kernel ASIDs.
         */
        BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
@@ -736,7 +736,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
         *    3, we'd be break the invariant: we'd update local_tlb_gen above
         *    1 without the full flush that's needed for tlb_gen 2.
         *
-        * 2. f->new_tlb_gen == mm_tlb_gen.  This is purely an optimiation.
+        * 2. f->new_tlb_gen == mm_tlb_gen.  This is purely an optimization.
         *    Partial TLB flushes are not all that much cheaper than full TLB
         *    flushes, so it seems unlikely that it would be a performance win
         *    to do a partial flush if that won't bring our TLB fully up to
@@ -876,7 +876,7 @@ static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
 static inline void put_flush_tlb_info(void)
 {
 #ifdef CONFIG_DEBUG_VM
-       /* Complete reentrency prevention checks */
+       /* Complete reentrancy prevention checks */
        barrier();
        this_cpu_dec(flush_tlb_info_idx);
 #endif
index b35fc80238846611f2ba9a881639648d7c99f648..220e72434f3c0d3b920f787a387f371c63ef2ac1 100644 (file)
@@ -1556,7 +1556,7 @@ emit_cond_jmp:            /* Convert BPF opcode to x86 */
                        if (is_imm8(jmp_offset)) {
                                if (jmp_padding) {
                                        /* To keep the jmp_offset valid, the extra bytes are
-                                        * padded before the jump insn, so we substract the
+                                        * padded before the jump insn, so we subtract the
                                         * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
                                         *
                                         * If the previous pass already emits an imm8
@@ -1631,7 +1631,7 @@ emit_jmp:
                                if (jmp_padding) {
                                        /* To avoid breaking jmp_offset, the extra bytes
                                         * are padded before the actual jmp insn, so
-                                        * 2 bytes is substracted from INSN_SZ_DIFF.
+                                        * 2 bytes is subtracted from INSN_SZ_DIFF.
                                         *
                                         * If the previous pass already emits an imm8
                                         * jmp, there is nothing to pad (0 byte).
@@ -1689,7 +1689,16 @@ emit_jmp:
                }
 
                if (image) {
-                       if (unlikely(proglen + ilen > oldproglen)) {
+                       /*
+                        * When populating the image, assert that:
+                        *
+                        *  i) We do not write beyond the allocated space, and
+                        * ii) addrs[i] did not change from the prior run, in order
+                        *     to validate assumptions made for computing branch
+                        *     displacements.
+                        */
+                       if (unlikely(proglen + ilen > oldproglen ||
+                                    proglen + ilen != addrs[i])) {
                                pr_err("bpf_jit: fatal error\n");
                                return -EFAULT;
                        }
index d17b67c69f89ae7a4104942bdccf559031175b1e..6a99def7d315d0992a8716b82a3f021a899099ec 100644 (file)
@@ -2276,7 +2276,16 @@ notyet:
                }
 
                if (image) {
-                       if (unlikely(proglen + ilen > oldproglen)) {
+                       /*
+                        * When populating the image, assert that:
+                        *
+                        *  i) We do not write beyond the allocated space, and
+                        * ii) addrs[i] did not change from the prior run, in order
+                        *     to validate assumptions made for computing branch
+                        *     displacements.
+                        */
+                       if (unlikely(proglen + ilen > oldproglen ||
+                                    proglen + ilen != addrs[i])) {
                                pr_err("bpf_jit: fatal error\n");
                                return -EFAULT;
                        }
index 0a0e168be1cbec0da9bad7cf2266d11118eae977..02dc64625e64d2e599504d264afb605d5cff5ae4 100644 (file)
@@ -375,7 +375,7 @@ static const struct dmi_system_id msi_k8t_dmi_table[] = {
  * The BIOS only gives options "DISABLED" and "AUTO". This code sets
  * the corresponding register-value to enable the soundcard.
  *
- * The soundcard is only enabled, if the mainborad is identified
+ * The soundcard is only enabled, if the mainboard is identified
  * via DMI-tables and the soundcard is detected to be off.
  */
 static void pci_fixup_msi_k8t_onboard_sound(struct pci_dev *dev)
index 1b82d77019b17502edc7a5696543137cff23bc76..df7b5477fc4f2a0306a8103de7510e9affe4f4e3 100644 (file)
@@ -195,7 +195,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
        }
 
        /*
-        * Certain firmware versions are way too sentimential and still believe
+        * Certain firmware versions are way too sentimental and still believe
         * they are exclusive and unquestionable owners of the first physical page,
         * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
         * (but then write-access it later during SetVirtualAddressMap()).
@@ -457,7 +457,7 @@ void __init efi_dump_pagetable(void)
  * in a kernel thread and user context. Preemption needs to remain disabled
  * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm
  * can not change under us.
- * It should be ensured that there are no concurent calls to this function.
+ * It should be ensured that there are no concurrent calls to this function.
  */
 void efi_enter_mm(void)
 {
index 67d93a243c353c5aad59a65910f529928d10c5d8..7850111008a8b87e4c5a63ea984fcfec18af8184 100644 (file)
@@ -441,7 +441,7 @@ void __init efi_free_boot_services(void)
                 * 1.4.4 with SGX enabled booting Linux via Fedora 24's
                 * grub2-efi on a hard disk.  (And no, I don't know why
                 * this happened, but Linux should still try to boot rather
-                * panicing early.)
+                * panicking early.)
                 */
                rm_size = real_mode_size_needed();
                if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
@@ -726,7 +726,7 @@ void efi_crash_gracefully_on_page_fault(unsigned long phys_addr)
         * Buggy efi_reset_system() is handled differently from other EFI
         * Runtime Services as it doesn't use efi_rts_wq. Although,
         * native_machine_emergency_restart() says that machine_real_restart()
-        * could fail, it's better not to compilcate this fault handler
+        * could fail, it's better not to complicate this fault handler
         * because this case occurs *very* rarely and hence could be improved
         * on a need by basis.
         */
index 0286fe1b14b5cb57ca09f83ceb8a74d4f3b9f716..d3d456925b2a9388d245a940a4097a57360e59a6 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * imr.c -- Intel Isolated Memory Region driver
  *
  * Copyright(c) 2013 Intel Corporation.
@@ -551,7 +551,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 
        /*
         * Setup an unlocked IMR around the physical extent of the kernel
-        * from the beginning of the .text secton to the end of the
+        * from the beginning of the .text section to the end of the
         * .rodata section as one physically contiguous block.
         *
         * We don't round up @size since it is already PAGE_SIZE aligned.
index 570e3062faac267df713fa8d8b2a3ba74461c579..761f3689f60a6b4d8002bd312686ce9b5e26b553 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * imr_selftest.c -- Intel Isolated Memory Region self-test driver
  *
  * Copyright(c) 2013 Intel Corporation.
index 526f70f27c1c3571e96e91b27d1dc869cb97ef73..fdd49d70b43738be371da7efb5476339ca315ef7 100644 (file)
@@ -187,7 +187,7 @@ bool iosf_mbi_available(void)
 EXPORT_SYMBOL(iosf_mbi_available);
 
 /*
- **************** P-Unit/kernel shared I2C bus arbritration ****************
+ **************** P-Unit/kernel shared I2C bus arbitration ****************
  *
  * Some Bay Trail and Cherry Trail devices have the P-Unit and us (the kernel)
  * share a single I2C bus to the PMIC. Below are helpers to arbitrate the
@@ -493,7 +493,7 @@ static void iosf_sideband_debug_init(void)
        /* mcrx */
        debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx);
 
-       /* mcr - initiates mailbox tranaction */
+       /* mcr - initiates mailbox transaction */
        debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops);
 }
 
index 85f4638764d69b87d310702cd3453518f1bc547e..994a229cb79fe190fe6836dfb0c4aa7bc51fbd45 100644 (file)
@@ -27,7 +27,7 @@ static bool                           lid_wake_on_close;
  * wake-on-close. This is implemented as standard by the XO-1.5 DSDT.
  *
  * We provide here a sysfs attribute that will additionally enable
- * wake-on-close behavior. This is useful (e.g.) when we oportunistically
+ * wake-on-close behavior. This is useful (e.g.) when we opportunistically
  * suspend with the display running; if the lid is then closed, we want to
  * wake up to turn the display off.
  *
index 26d1f66937892eb9e4d3d78af9b3c1fa3993291b..75e3319e8bee5b22986473c5690cc4da8bc56e9c 100644 (file)
@@ -131,7 +131,7 @@ void * __init prom_early_alloc(unsigned long size)
                const size_t chunk_size = max(PAGE_SIZE, size);
 
                /*
-                * To mimimize the number of allocations, grab at least
+                * To minimize the number of allocations, grab at least
                 * PAGE_SIZE of memory (that's an arbitrary choice that's
                 * fast enough on the platforms we care about while minimizing
                 * wasted bootmem) and hand off chunks of it to callers.
index d2ccadc247e6f9a51abcf09184eafc2c8b59346a..66b317398b8a625be4bb2924bb04a967b89c7492 100644 (file)
  *          the boot start info structure.
  * - `cr0`: bit 0 (PE) must be set. All the other writeable bits are cleared.
  * - `cr4`: all bits are cleared.
- * - `cs `: must be a 32-bit read/execute code segment with a base of ‘0’
- *          and a limit of ‘0xFFFFFFFF’. The selector value is unspecified.
+ * - `cs `: must be a 32-bit read/execute code segment with a base of `0`
+ *          and a limit of `0xFFFFFFFF`. The selector value is unspecified.
  * - `ds`, `es`: must be a 32-bit read/write data segment with a base of
- *               ‘0’ and a limit of ‘0xFFFFFFFF’. The selector values are all
+ *               `0` and a limit of `0xFFFFFFFF`. The selector values are all
  *               unspecified.
  * - `tr`: must be a 32-bit TSS (active) with a base of '0' and a limit
  *         of '0x67'.
index eafc530c8767c4075377403fa89c88416b52a8a5..1e9ff28bc2e04c270ab594fa82f11dbc3590bc0d 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/kdebug.h>
 #include <asm/local64.h>
 #include <asm/nmi.h>
+#include <asm/reboot.h>
 #include <asm/traps.h>
 #include <asm/uv/uv.h>
 #include <asm/uv/uv_hub.h>
@@ -91,6 +92,8 @@ static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
 static atomic_t uv_nmi_slave_continue;
 static cpumask_var_t uv_nmi_cpu_mask;
 
+static atomic_t uv_nmi_kexec_failed;
+
 /* Values for uv_nmi_slave_continue */
 #define SLAVE_CLEAR    0
 #define SLAVE_CONTINUE 1
@@ -834,38 +837,35 @@ static void uv_nmi_touch_watchdogs(void)
        touch_nmi_watchdog();
 }
 
-static atomic_t uv_nmi_kexec_failed;
-
-#if defined(CONFIG_KEXEC_CORE)
-static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
+static void uv_nmi_kdump(int cpu, int main, struct pt_regs *regs)
 {
+       /* Check if kdump kernel loaded for both main and secondary CPUs */
+       if (!kexec_crash_image) {
+               if (main)
+                       pr_err("UV: NMI error: kdump kernel not loaded\n");
+               return;
+       }
+
        /* Call crash to dump system state */
-       if (master) {
+       if (main) {
                pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
                crash_kexec(regs);
 
-               pr_emerg("UV: crash_kexec unexpectedly returned");
+               pr_emerg("UV: crash_kexec unexpectedly returned\n");
                atomic_set(&uv_nmi_kexec_failed, 1);
-               if (!kexec_crash_image) {
-                       pr_cont("crash kernel not loaded\n");
-                       return;
-               }
-               pr_cont("kexec busy, stalling cpus while waiting\n");
-       }
 
-       /* If crash exec fails the slaves should return, otherwise stall */
-       while (atomic_read(&uv_nmi_kexec_failed) == 0)
-               mdelay(10);
-}
+       } else { /* secondary */
 
-#else /* !CONFIG_KEXEC_CORE */
-static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
-{
-       if (master)
-               pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
-       atomic_set(&uv_nmi_kexec_failed, 1);
+               /* If kdump kernel fails, secondaries will exit this loop */
+               while (atomic_read(&uv_nmi_kexec_failed) == 0) {
+
+                       /* Once shootdown cpus starts, they do not return */
+                       run_crash_ipi_callback(regs);
+
+                       mdelay(10);
+               }
+       }
 }
-#endif /* !CONFIG_KEXEC_CORE */
 
 #ifdef CONFIG_KGDB
 #ifdef CONFIG_KGDB_KDB
@@ -889,7 +889,7 @@ static inline int uv_nmi_kdb_reason(void)
  * Call KGDB/KDB from NMI handler
  *
  * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or
- * 'kdb' has no affect on which is used.  See the KGDB documention for further
+ * 'kdb' has no affect on which is used.  See the KGDB documentation for further
  * information.
  */
 static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
index db1378c6ff2621dcf5b5363424b79d63aea67991..c9908bcdb249d91e88f96210cd0d57854ee7c484 100644 (file)
@@ -321,7 +321,7 @@ int hibernate_resume_nonboot_cpu_disable(void)
 
 /*
  * When bsp_check() is called in hibernate and suspend, cpu hotplug
- * is disabled already. So it's unnessary to handle race condition between
+ * is disabled already. So it's unnecessary to handle race condition between
  * cpumask query and cpu hotplug.
  */
 static int bsp_check(void)
index 22fda7d991590504f956f943d4d4d7bfdcea67ee..1be71ef5e4c4ed52ed074a90bf7d6d55efc901fb 100644 (file)
@@ -103,7 +103,7 @@ static void __init setup_real_mode(void)
                *ptr += phys_base;
        }
 
-       /* Must be perfomed *after* relocation. */
+       /* Must be performed *after* relocation. */
        trampoline_header = (struct trampoline_header *)
                __va(real_mode_header->trampoline_header);
 
index dc0a337f985b66151efd295b1aeb3973086cb1a0..4f18cd9eacd8ecc60ad58a05d61fa92def7e63e8 100644 (file)
@@ -1070,8 +1070,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 
        .read_pmc = xen_read_pmc,
 
-       .iret = xen_iret,
-
        .load_tr_desc = paravirt_nop,
        .set_ldt = xen_set_ldt,
        .load_gdt = xen_load_gdt,
@@ -1233,8 +1231,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
 
        /* Install Xen paravirt ops */
        pv_info = xen_info;
-       pv_ops.init.patch = paravirt_patch_default;
        pv_ops.cpu = xen_cpu_ops;
+       paravirt_iret = xen_iret;
        xen_init_irq_ops();
 
        /*
index cf2ade864c3020be11927d71b67b636c75a0a3da..1e28c880f64262e4806cb134474939acf2f0a3b3 100644 (file)
@@ -2410,7 +2410,7 @@ int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
        rmd.prot = prot;
        /*
         * We use the err_ptr to indicate if there we are doing a contiguous
-        * mapping or a discontigious mapping.
+        * mapping or a discontiguous mapping.
         */
        rmd.contiguous = !err_ptr;
        rmd.no_translate = no_translate;
index 91f5b330dcc6db596b5ef19bb1c7f03d3abeeb3c..d9c945ee1100847bd04173efdb1aedf33b49e610 100644 (file)
@@ -379,11 +379,6 @@ void xen_timer_resume(void)
        }
 }
 
-static const struct pv_time_ops xen_time_ops __initconst = {
-       .sched_clock = xen_sched_clock,
-       .steal_clock = xen_steal_clock,
-};
-
 static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
 static u64 xen_clock_value_saved;
 
@@ -525,17 +520,24 @@ static void __init xen_time_init(void)
                pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
 }
 
-void __init xen_init_time_ops(void)
+static void __init xen_init_time_common(void)
 {
        xen_sched_clock_offset = xen_clocksource_read();
-       pv_ops.time = xen_time_ops;
+       static_call_update(pv_steal_clock, xen_steal_clock);
+       paravirt_set_sched_clock(xen_sched_clock);
+
+       x86_platform.calibrate_tsc = xen_tsc_khz;
+       x86_platform.get_wallclock = xen_get_wallclock;
+}
+
+void __init xen_init_time_ops(void)
+{
+       xen_init_time_common();
 
        x86_init.timers.timer_init = xen_time_init;
        x86_init.timers.setup_percpu_clockev = x86_init_noop;
        x86_cpuinit.setup_percpu_clockev = x86_init_noop;
 
-       x86_platform.calibrate_tsc = xen_tsc_khz;
-       x86_platform.get_wallclock = xen_get_wallclock;
        /* Dom0 uses the native method to set the hardware RTC. */
        if (!xen_initial_domain())
                x86_platform.set_wallclock = xen_set_wallclock;
@@ -569,13 +571,11 @@ void __init xen_hvm_init_time_ops(void)
                return;
        }
 
-       xen_sched_clock_offset = xen_clocksource_read();
-       pv_ops.time = xen_time_ops;
+       xen_init_time_common();
+
        x86_init.timers.setup_percpu_clockev = xen_time_init;
        x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
 
-       x86_platform.calibrate_tsc = xen_tsc_khz;
-       x86_platform.get_wallclock = xen_get_wallclock;
        x86_platform.set_wallclock = xen_set_wallclock;
 }
 #endif
index ff241e663c018feae6f102065dbcce475eb3ae29..8ba1ed8defd0bb1d705e28531c122b952c3168af 100644 (file)
@@ -89,6 +89,8 @@ static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
                return -EINVAL;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
+       if (bdev->bd_part_count)
+               return -EBUSY;
 
        /*
         * Reopen the device to revalidate the driver state and force a
index 2a248399068635ff51a8c66f4081f1816e3e25ff..6cbd1f1a5837b2b61552a5084e08873abab5cb34 100644 (file)
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 x509_certificate_list
+x509_revocation_list
index c94e93d8bccf038f4684c4cce93d558d9e7269ff..ab88d2a7f3c7fb38526b334562cd7b283ca5ea62 100644 (file)
@@ -83,4 +83,21 @@ config SYSTEM_BLACKLIST_HASH_LIST
          wrapper to incorporate the list into the kernel.  Each <hash> should
          be a string of hex digits.
 
+config SYSTEM_REVOCATION_LIST
+       bool "Provide system-wide ring of revocation certificates"
+       depends on SYSTEM_BLACKLIST_KEYRING
+       depends on PKCS7_MESSAGE_PARSER=y
+       help
+         If set, this allows revocation certificates to be stored in the
+         blacklist keyring and implements a hook whereby a PKCS#7 message can
+         be checked to see if it matches such a certificate.
+
+config SYSTEM_REVOCATION_KEYS
+       string "X.509 certificates to be preloaded into the system blacklist keyring"
+       depends on SYSTEM_REVOCATION_LIST
+       help
+         If set, this option should be the filename of a PEM-formatted file
+         containing X.509 certificates to be included in the default blacklist
+         keyring.
+
 endmenu
index f4c25b67aad90b5833f1e390e2cf0ae303412fbd..b6db52ebf0beb166f290899519515fccc72e4786 100644 (file)
@@ -3,8 +3,9 @@
 # Makefile for the linux kernel signature checking certificates.
 #
 
-obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o
-obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o
+obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o common.o
+obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o common.o
+obj-$(CONFIG_SYSTEM_REVOCATION_LIST) += revocation_certificates.o
 ifneq ($(CONFIG_SYSTEM_BLACKLIST_HASH_LIST),"")
 obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_hashes.o
 else
@@ -29,7 +30,7 @@ $(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREF
        $(call if_changed,extract_certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS))
 endif # CONFIG_SYSTEM_TRUSTED_KEYRING
 
-clean-files := x509_certificate_list .x509.list
+clean-files := x509_certificate_list .x509.list x509_revocation_list
 
 ifeq ($(CONFIG_MODULE_SIG),y)
 ###############################################################################
@@ -104,3 +105,17 @@ targets += signing_key.x509
 $(obj)/signing_key.x509: scripts/extract-cert $(X509_DEP) FORCE
        $(call if_changed,extract_certs,$(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY))
 endif # CONFIG_MODULE_SIG
+
+ifeq ($(CONFIG_SYSTEM_REVOCATION_LIST),y)
+
+$(eval $(call config_filename,SYSTEM_REVOCATION_KEYS))
+
+$(obj)/revocation_certificates.o: $(obj)/x509_revocation_list
+
+quiet_cmd_extract_certs  = EXTRACT_CERTS   $(patsubst "%",%,$(2))
+      cmd_extract_certs  = scripts/extract-cert $(2) $@
+
+targets += x509_revocation_list
+$(obj)/x509_revocation_list: scripts/extract-cert $(SYSTEM_REVOCATION_KEYS_SRCPREFIX)$(SYSTEM_REVOCATION_KEYS_FILENAME) FORCE
+       $(call if_changed,extract_certs,$(SYSTEM_REVOCATION_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_REVOCATION_KEYS))
+endif
index bffe4c6f4a9e204d215119278d419bb491d2d7c8..c9a435b15af40679513d596ad845e31359a7c4df 100644 (file)
 #include <linux/uidgid.h>
 #include <keys/system_keyring.h>
 #include "blacklist.h"
+#include "common.h"
 
 static struct key *blacklist_keyring;
 
+#ifdef CONFIG_SYSTEM_REVOCATION_LIST
+extern __initconst const u8 revocation_certificate_list[];
+extern __initconst const unsigned long revocation_certificate_list_size;
+#endif
+
 /*
  * The description must be a type prefix, a colon and then an even number of
  * hex digits.  The hash is kept in the description.
@@ -145,6 +151,49 @@ int is_binary_blacklisted(const u8 *hash, size_t hash_len)
 }
 EXPORT_SYMBOL_GPL(is_binary_blacklisted);
 
+#ifdef CONFIG_SYSTEM_REVOCATION_LIST
+/**
+ * add_key_to_revocation_list - Add a revocation certificate to the blacklist
+ * @data: The data blob containing the certificate
+ * @size: The size of data blob
+ */
+int add_key_to_revocation_list(const char *data, size_t size)
+{
+       key_ref_t key;
+
+       key = key_create_or_update(make_key_ref(blacklist_keyring, true),
+                                  "asymmetric",
+                                  NULL,
+                                  data,
+                                  size,
+                                  ((KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW),
+                                  KEY_ALLOC_NOT_IN_QUOTA | KEY_ALLOC_BUILT_IN);
+
+       if (IS_ERR(key)) {
+               pr_err("Problem with revocation key (%ld)\n", PTR_ERR(key));
+               return PTR_ERR(key);
+       }
+
+       return 0;
+}
+
+/**
+ * is_key_on_revocation_list - Determine if the key for a PKCS#7 message is revoked
+ * @pkcs7: The PKCS#7 message to check
+ */
+int is_key_on_revocation_list(struct pkcs7_message *pkcs7)
+{
+       int ret;
+
+       ret = pkcs7_validate_trust(pkcs7, blacklist_keyring);
+
+       if (ret == 0)
+               return -EKEYREJECTED;
+
+       return -ENOKEY;
+}
+#endif
+
 /*
  * Initialise the blacklist
  */
@@ -177,3 +226,18 @@ static int __init blacklist_init(void)
  * Must be initialised before we try and load the keys into the keyring.
  */
 device_initcall(blacklist_init);
+
+#ifdef CONFIG_SYSTEM_REVOCATION_LIST
+/*
+ * Load the compiled-in list of revocation X.509 certificates.
+ */
+static __init int load_revocation_certificate_list(void)
+{
+       if (revocation_certificate_list_size)
+               pr_notice("Loading compiled-in revocation X.509 certificates\n");
+
+       return load_certificate_list(revocation_certificate_list, revocation_certificate_list_size,
+                                    blacklist_keyring);
+}
+late_initcall(load_revocation_certificate_list);
+#endif
index 1efd6fa0dc608c2a3d598b56c798f3e772a2bdbc..51b320cf85749e7a82788ed54a301b8a63243f1e 100644 (file)
@@ -1,3 +1,5 @@
 #include <linux/kernel.h>
+#include <linux/errno.h>
+#include <crypto/pkcs7.h>
 
 extern const char __initconst *const blacklist_hashes[];
diff --git a/certs/common.c b/certs/common.c
new file mode 100644 (file)
index 0000000..16a2208
--- /dev/null
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/key.h>
+#include "common.h"
+
+int load_certificate_list(const u8 cert_list[],
+                         const unsigned long list_size,
+                         const struct key *keyring)
+{
+       key_ref_t key;
+       const u8 *p, *end;
+       size_t plen;
+
+       p = cert_list;
+       end = p + list_size;
+       while (p < end) {
+               /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
+                * than 256 bytes in size.
+                */
+               if (end - p < 4)
+                       goto dodgy_cert;
+               if (p[0] != 0x30 &&
+                   p[1] != 0x82)
+                       goto dodgy_cert;
+               plen = (p[2] << 8) | p[3];
+               plen += 4;
+               if (plen > end - p)
+                       goto dodgy_cert;
+
+               key = key_create_or_update(make_key_ref(keyring, 1),
+                                          "asymmetric",
+                                          NULL,
+                                          p,
+                                          plen,
+                                          ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+                                          KEY_USR_VIEW | KEY_USR_READ),
+                                          KEY_ALLOC_NOT_IN_QUOTA |
+                                          KEY_ALLOC_BUILT_IN |
+                                          KEY_ALLOC_BYPASS_RESTRICTION);
+               if (IS_ERR(key)) {
+                       pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
+                              PTR_ERR(key));
+               } else {
+                       pr_notice("Loaded X.509 cert '%s'\n",
+                                 key_ref_to_ptr(key)->description);
+                       key_ref_put(key);
+               }
+               p += plen;
+       }
+
+       return 0;
+
+dodgy_cert:
+       pr_err("Problem parsing in-kernel X.509 certificate list\n");
+       return 0;
+}
diff --git a/certs/common.h b/certs/common.h
new file mode 100644 (file)
index 0000000..abdb579
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _CERT_COMMON_H
+#define _CERT_COMMON_H
+
+int load_certificate_list(const u8 cert_list[], const unsigned long list_size,
+                         const struct key *keyring);
+
+#endif
diff --git a/certs/revocation_certificates.S b/certs/revocation_certificates.S
new file mode 100644 (file)
index 0000000..f21aae8
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
+#include <linux/init.h>
+
+       __INITRODATA
+
+       .align 8
+       .globl revocation_certificate_list
+revocation_certificate_list:
+__revocation_list_start:
+       .incbin "certs/x509_revocation_list"
+__revocation_list_end:
+
+       .align 8
+       .globl revocation_certificate_list_size
+revocation_certificate_list_size:
+#ifdef CONFIG_64BIT
+       .quad __revocation_list_end - __revocation_list_start
+#else
+       .long __revocation_list_end - __revocation_list_start
+#endif
index 4b693da488f14bcc66b452789f2eab92da959f9f..0c9a4795e847b5b43d68ef6786d53895d1502e64 100644 (file)
@@ -16,6 +16,7 @@
 #include <keys/asymmetric-type.h>
 #include <keys/system_keyring.h>
 #include <crypto/pkcs7.h>
+#include "common.h"
 
 static struct key *builtin_trusted_keys;
 #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
@@ -137,54 +138,10 @@ device_initcall(system_trusted_keyring_init);
  */
 static __init int load_system_certificate_list(void)
 {
-       key_ref_t key;
-       const u8 *p, *end;
-       size_t plen;
-
        pr_notice("Loading compiled-in X.509 certificates\n");
 
-       p = system_certificate_list;
-       end = p + system_certificate_list_size;
-       while (p < end) {
-               /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
-                * than 256 bytes in size.
-                */
-               if (end - p < 4)
-                       goto dodgy_cert;
-               if (p[0] != 0x30 &&
-                   p[1] != 0x82)
-                       goto dodgy_cert;
-               plen = (p[2] << 8) | p[3];
-               plen += 4;
-               if (plen > end - p)
-                       goto dodgy_cert;
-
-               key = key_create_or_update(make_key_ref(builtin_trusted_keys, 1),
-                                          "asymmetric",
-                                          NULL,
-                                          p,
-                                          plen,
-                                          ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
-                                          KEY_USR_VIEW | KEY_USR_READ),
-                                          KEY_ALLOC_NOT_IN_QUOTA |
-                                          KEY_ALLOC_BUILT_IN |
-                                          KEY_ALLOC_BYPASS_RESTRICTION);
-               if (IS_ERR(key)) {
-                       pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
-                              PTR_ERR(key));
-               } else {
-                       pr_notice("Loaded X.509 cert '%s'\n",
-                                 key_ref_to_ptr(key)->description);
-                       key_ref_put(key);
-               }
-               p += plen;
-       }
-
-       return 0;
-
-dodgy_cert:
-       pr_err("Problem parsing in-kernel X.509 certificate list\n");
-       return 0;
+       return load_certificate_list(system_certificate_list, system_certificate_list_size,
+                                    builtin_trusted_keys);
 }
 late_initcall(load_system_certificate_list);
 
@@ -242,6 +199,12 @@ int verify_pkcs7_message_sig(const void *data, size_t len,
                        pr_devel("PKCS#7 platform keyring is not available\n");
                        goto error;
                }
+
+               ret = is_key_on_revocation_list(pkcs7);
+               if (ret != -ENOKEY) {
+                       pr_devel("PKCS#7 platform key is on revocation list\n");
+                       goto error;
+               }
        }
        ret = pkcs7_validate_trust(pkcs7, trusted_keys);
        if (ret < 0) {
index 5809cc198fa7c35dcc087447e8e7c244a41a885a..ca3b02dcbbfac7ecb5daf3c4035205ffc733372f 100644 (file)
@@ -242,6 +242,16 @@ config CRYPTO_ECDH
        help
          Generic implementation of the ECDH algorithm
 
+config CRYPTO_ECDSA
+       tristate "ECDSA (NIST P192, P256 etc.) algorithm"
+       select CRYPTO_ECC
+       select CRYPTO_AKCIPHER
+       select ASN1
+       help
+         Elliptic Curve Digital Signature Algorithm (NIST P192, P256 etc.)
+         is A NIST cryptographic standard algorithm. Only signature verification
+         is implemented.
+
 config CRYPTO_ECRDSA
        tristate "EC-RDSA (GOST 34.10) algorithm"
        select CRYPTO_ECC
@@ -1213,7 +1223,6 @@ config CRYPTO_BLOWFISH_X86_64
 
 config CRYPTO_CAMELLIA
        tristate "Camellia cipher algorithms"
-       depends on CRYPTO
        select CRYPTO_ALGAPI
        help
          Camellia cipher algorithms module.
@@ -1229,7 +1238,6 @@ config CRYPTO_CAMELLIA
 config CRYPTO_CAMELLIA_X86_64
        tristate "Camellia cipher algorithm (x86_64)"
        depends on X86 && 64BIT
-       depends on CRYPTO
        select CRYPTO_SKCIPHER
        imply CRYPTO_CTR
        help
@@ -1246,7 +1254,6 @@ config CRYPTO_CAMELLIA_X86_64
 config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
        tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)"
        depends on X86 && 64BIT
-       depends on CRYPTO
        select CRYPTO_SKCIPHER
        select CRYPTO_CAMELLIA_X86_64
        select CRYPTO_SIMD
@@ -1265,7 +1272,6 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
 config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
        tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)"
        depends on X86 && 64BIT
-       depends on CRYPTO
        select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
        help
          Camellia cipher algorithm module (x86_64/AES-NI/AVX2).
@@ -1281,7 +1287,6 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
 config CRYPTO_CAMELLIA_SPARC64
        tristate "Camellia cipher algorithm (SPARC64)"
        depends on SPARC64
-       depends on CRYPTO
        select CRYPTO_ALGAPI
        select CRYPTO_SKCIPHER
        help
index cf23affb16780686a83dea9684095bad03ac9539..10526d4559b802b27224215ba1ac32b3cacc9abe 100644 (file)
@@ -50,6 +50,12 @@ sm2_generic-y += sm2.o
 
 obj-$(CONFIG_CRYPTO_SM2) += sm2_generic.o
 
+$(obj)/ecdsasignature.asn1.o: $(obj)/ecdsasignature.asn1.c $(obj)/ecdsasignature.asn1.h
+$(obj)/ecdsa.o: $(obj)/ecdsasignature.asn1.h
+ecdsa_generic-y += ecdsa.o
+ecdsa_generic-y += ecdsasignature.asn1.o
+obj-$(CONFIG_CRYPTO_ECDSA) += ecdsa_generic.o
+
 crypto_acompress-y := acompress.o
 crypto_acompress-y += scompress.o
 obj-$(CONFIG_CRYPTO_ACOMP2) += crypto_acompress.o
index 6920ebe7767953c53261960f5e203aa8422c9359..6ef9c174c973d1eb50f8e47dd016bce5722f6b59 100644 (file)
@@ -21,9 +21,28 @@ union aegis_block {
        u8 bytes[AEGIS_BLOCK_SIZE];
 };
 
+struct aegis_state;
+
+extern int aegis128_have_aes_insn;
+
 #define AEGIS_BLOCK_ALIGN (__alignof__(union aegis_block))
 #define AEGIS_ALIGNED(p) IS_ALIGNED((uintptr_t)p, AEGIS_BLOCK_ALIGN)
 
+bool crypto_aegis128_have_simd(void);
+void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg);
+void crypto_aegis128_init_simd(struct aegis_state *state,
+                              const union aegis_block *key,
+                              const u8 *iv);
+void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
+                                       const u8 *src, unsigned int size);
+void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
+                                       const u8 *src, unsigned int size);
+int crypto_aegis128_final_simd(struct aegis_state *state,
+                              union aegis_block *tag_xor,
+                              unsigned int assoclen,
+                              unsigned int cryptlen,
+                              unsigned int authsize);
+
 static __always_inline void crypto_aegis_block_xor(union aegis_block *dst,
                                                   const union aegis_block *src)
 {
index 89dc1c5596890d7e6822dbc33caec72441de8bdc..c4f1bfa1d04fa9eb00fc55cfaa89ed0690a45607 100644 (file)
@@ -58,21 +58,6 @@ static bool aegis128_do_simd(void)
        return false;
 }
 
-bool crypto_aegis128_have_simd(void);
-void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg);
-void crypto_aegis128_init_simd(struct aegis_state *state,
-                              const union aegis_block *key,
-                              const u8 *iv);
-void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
-                                       const u8 *src, unsigned int size);
-void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
-                                       const u8 *src, unsigned int size);
-int crypto_aegis128_final_simd(struct aegis_state *state,
-                              union aegis_block *tag_xor,
-                              unsigned int assoclen,
-                              unsigned int cryptlen,
-                              unsigned int authsize);
-
 static void crypto_aegis128_update(struct aegis_state *state)
 {
        union aegis_block tmp;
index 94d591a002a4737cd73369d9100e2fea5b15b11e..a7856915ec85b4172eceff31442f024d7d00809f 100644 (file)
@@ -30,7 +30,7 @@ bool crypto_aegis128_have_simd(void)
        return IS_ENABLED(CONFIG_ARM64);
 }
 
-void crypto_aegis128_init_simd(union aegis_block *state,
+void crypto_aegis128_init_simd(struct aegis_state *state,
                               const union aegis_block *key,
                               const u8 *iv)
 {
@@ -39,14 +39,14 @@ void crypto_aegis128_init_simd(union aegis_block *state,
        kernel_neon_end();
 }
 
-void crypto_aegis128_update_simd(union aegis_block *state, const void *msg)
+void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg)
 {
        kernel_neon_begin();
        crypto_aegis128_update_neon(state, msg);
        kernel_neon_end();
 }
 
-void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst,
+void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
                                        const u8 *src, unsigned int size)
 {
        kernel_neon_begin();
@@ -54,7 +54,7 @@ void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst,
        kernel_neon_end();
 }
 
-void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst,
+void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
                                        const u8 *src, unsigned int size)
 {
        kernel_neon_begin();
@@ -62,7 +62,7 @@ void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst,
        kernel_neon_end();
 }
 
-int crypto_aegis128_final_simd(union aegis_block *state,
+int crypto_aegis128_final_simd(struct aegis_state *state,
                               union aegis_block *tag_xor,
                               unsigned int assoclen,
                               unsigned int cryptlen,
index 9acb9d2c4bcf9368142cbbfc556ebfdc72aa051d..18cc82dc4a42f0804e9439ee7b36ece1bd9406b5 100644 (file)
@@ -491,8 +491,8 @@ static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
 /**
  * af_alg_alloc_tsgl - allocate the TX SGL
  *
- * @sk socket of connection to user space
- * @return: 0 upon success, < 0 upon error
+ * @sk: socket of connection to user space
+ * Return: 0 upon success, < 0 upon error
  */
 static int af_alg_alloc_tsgl(struct sock *sk)
 {
@@ -525,15 +525,15 @@ static int af_alg_alloc_tsgl(struct sock *sk)
 }
 
 /**
- * aead_count_tsgl - Count number of TX SG entries
+ * af_alg_count_tsgl - Count number of TX SG entries
  *
  * The counting starts from the beginning of the SGL to @bytes. If
- * an offset is provided, the counting of the SG entries starts at the offset.
+ * an @offset is provided, the counting of the SG entries starts at the @offset.
  *
- * @sk socket of connection to user space
- * @bytes Count the number of SG entries holding given number of bytes.
- * @offset Start the counting of SG entries from the given offset.
- * @return Number of TX SG entries found given the constraints
+ * @sk: socket of connection to user space
+ * @bytes: Count the number of SG entries holding given number of bytes.
+ * @offset: Start the counting of SG entries from the given offset.
+ * Return: Number of TX SG entries found given the constraints
  */
 unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
 {
@@ -577,19 +577,19 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
 EXPORT_SYMBOL_GPL(af_alg_count_tsgl);
 
 /**
- * aead_pull_tsgl - Release the specified buffers from TX SGL
+ * af_alg_pull_tsgl - Release the specified buffers from TX SGL
  *
- * If @dst is non-null, reassign the pages to dst. The caller must release
+ * If @dst is non-null, reassign the pages to @dst. The caller must release
  * the pages. If @dst_offset is given only reassign the pages to @dst starting
  * at the @dst_offset (byte). The caller must ensure that @dst is large
  * enough (e.g. by using af_alg_count_tsgl with the same offset).
  *
- * @sk socket of connection to user space
- * @used Number of bytes to pull from TX SGL
- * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
- *     caller must release the buffers in dst.
- * @dst_offset Reassign the TX SGL from given offset. All buffers before
- *            reaching the offset is released.
+ * @sk: socket of connection to user space
+ * @used: Number of bytes to pull from TX SGL
+ * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
+ *      caller must release the buffers in dst.
+ * @dst_offset: Reassign the TX SGL from given offset. All buffers before
+ *             reaching the offset is released.
  */
 void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
                      size_t dst_offset)
@@ -657,7 +657,7 @@ EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
 /**
  * af_alg_free_areq_sgls - Release TX and RX SGLs of the request
  *
- * @areq Request holding the TX and RX SGL
+ * @areq: Request holding the TX and RX SGL
  */
 static void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
 {
@@ -692,9 +692,9 @@ static void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
 /**
  * af_alg_wait_for_wmem - wait for availability of writable memory
  *
- * @sk socket of connection to user space
- * @flags If MSG_DONTWAIT is set, then only report if function would sleep
- * @return 0 when writable memory is available, < 0 upon error
+ * @sk: socket of connection to user space
+ * @flags: If MSG_DONTWAIT is set, then only report if function would sleep
+ * Return: 0 when writable memory is available, < 0 upon error
  */
 static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
 {
@@ -725,7 +725,7 @@ static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
 /**
  * af_alg_wmem_wakeup - wakeup caller when writable memory is available
  *
- * @sk socket of connection to user space
+ * @sk: socket of connection to user space
  */
 void af_alg_wmem_wakeup(struct sock *sk)
 {
@@ -748,10 +748,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
 /**
  * af_alg_wait_for_data - wait for availability of TX data
  *
- * @sk socket of connection to user space
- * @flags If MSG_DONTWAIT is set, then only report if function would sleep
- * @min Set to minimum request size if partial requests are allowed.
- * @return 0 when writable memory is available, < 0 upon error
+ * @sk: socket of connection to user space
+ * @flags: If MSG_DONTWAIT is set, then only report if function would sleep
+ * @min: Set to minimum request size if partial requests are allowed.
+ * Return: 0 when writable memory is available, < 0 upon error
  */
 int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min)
 {
@@ -790,7 +790,7 @@ EXPORT_SYMBOL_GPL(af_alg_wait_for_data);
 /**
  * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel
  *
- * @sk socket of connection to user space
+ * @sk: socket of connection to user space
  */
 static void af_alg_data_wakeup(struct sock *sk)
 {
@@ -820,12 +820,12 @@ static void af_alg_data_wakeup(struct sock *sk)
  *
  * In addition, the ctx is filled with the information sent via CMSG.
  *
- * @sock socket of connection to user space
- * @msg message from user space
- * @size size of message from user space
- * @ivsize the size of the IV for the cipher operation to verify that the
+ * @sock: socket of connection to user space
+ * @msg: message from user space
+ * @size: size of message from user space
+ * @ivsize: the size of the IV for the cipher operation to verify that the
  *        user-space-provided IV has the right size
- * @return the number of copied data upon success, < 0 upon error
+ * Return: the number of copied data upon success, < 0 upon error
  */
 int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
                   unsigned int ivsize)
@@ -977,6 +977,11 @@ EXPORT_SYMBOL_GPL(af_alg_sendmsg);
 
 /**
  * af_alg_sendpage - sendpage system call handler
+ * @sock: socket of connection to user space to write to
+ * @page: data to send
+ * @offset: offset into page to begin sending
+ * @size: length of data
+ * @flags: message send/receive flags
  *
  * This is a generic implementation of sendpage to fill ctx->tsgl_list.
  */
@@ -1035,6 +1040,7 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage);
 
 /**
  * af_alg_free_resources - release resources required for crypto request
+ * @areq: Request holding the TX and RX SGL
  */
 void af_alg_free_resources(struct af_alg_async_req *areq)
 {
@@ -1047,6 +1053,9 @@ EXPORT_SYMBOL_GPL(af_alg_free_resources);
 
 /**
  * af_alg_async_cb - AIO callback handler
+ * @_req: async request info
+ * @err: if non-zero, error result to be returned via ki_complete();
+ *       otherwise return the AIO output length via ki_complete().
  *
  * This handler cleans up the struct af_alg_async_req upon completion of the
  * AIO operation.
@@ -1073,6 +1082,9 @@ EXPORT_SYMBOL_GPL(af_alg_async_cb);
 
 /**
  * af_alg_poll - poll system call handler
+ * @file: file pointer
+ * @sock: socket to poll
+ * @wait: poll_table
  */
 __poll_t af_alg_poll(struct file *file, struct socket *sock,
                         poll_table *wait)
@@ -1098,9 +1110,9 @@ EXPORT_SYMBOL_GPL(af_alg_poll);
 /**
  * af_alg_alloc_areq - allocate struct af_alg_async_req
  *
- * @sk socket of connection to user space
- * @areqlen size of struct af_alg_async_req + crypto_*_reqsize
- * @return allocated data structure or ERR_PTR upon error
+ * @sk: socket of connection to user space
+ * @areqlen: size of struct af_alg_async_req + crypto_*_reqsize
+ * Return: allocated data structure or ERR_PTR upon error
  */
 struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
                                           unsigned int areqlen)
@@ -1125,13 +1137,13 @@ EXPORT_SYMBOL_GPL(af_alg_alloc_areq);
  * af_alg_get_rsgl - create the RX SGL for the output data from the crypto
  *                  operation
  *
- * @sk socket of connection to user space
- * @msg user space message
- * @flags flags used to invoke recvmsg with
- * @areq instance of the cryptographic request that will hold the RX SGL
- * @maxsize maximum number of bytes to be pulled from user space
- * @outlen number of bytes in the RX SGL
- * @return 0 on success, < 0 upon error
+ * @sk: socket of connection to user space
+ * @msg: user space message
+ * @flags: flags used to invoke recvmsg with
+ * @areq: instance of the cryptographic request that will hold the RX SGL
+ * @maxsize: maximum number of bytes to be pulled from user space
+ * @outlen: number of bytes in the RX SGL
+ * Return: 0 on success, < 0 upon error
  */
 int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
                    struct af_alg_async_req *areq, size_t maxsize,
index ed08cbd5b9d3f41e70509bd1c00d346cd5d55c86..c4eda56cff8917e08c2de3b0ada13df291c785ab 100644 (file)
@@ -562,7 +562,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
 {
        struct crypto_alg *alg;
 
-       if (unlikely(!mem))
+       if (IS_ERR_OR_NULL(mem))
                return;
 
        alg = tfm->__crt_alg;
index 788a4ba1e2e747de90b600b56a5b610ba4eb935b..4fefb219bfdc86318f33b822eef7bc8214a2a106 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/scatterlist.h>
+#include <linux/asn1.h>
 #include <keys/asymmetric-subtype.h>
 #include <crypto/public_key.h>
 #include <crypto/akcipher.h>
@@ -85,7 +86,8 @@ int software_key_determine_akcipher(const char *encoding,
                return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
        }
 
-       if (strcmp(encoding, "raw") == 0) {
+       if (strcmp(encoding, "raw") == 0 ||
+           strcmp(encoding, "x962") == 0) {
                strcpy(alg_name, pkey->pkey_algo);
                return 0;
        }
index 52c9b455fc7df56845516edc94526b585358a87b..6d003096b5bca4dcaeb13158d88f8907e1a2b51a 100644 (file)
@@ -227,6 +227,26 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
                ctx->cert->sig->hash_algo = "sha224";
                goto rsa_pkcs1;
 
+       case OID_id_ecdsa_with_sha1:
+               ctx->cert->sig->hash_algo = "sha1";
+               goto ecdsa;
+
+       case OID_id_ecdsa_with_sha224:
+               ctx->cert->sig->hash_algo = "sha224";
+               goto ecdsa;
+
+       case OID_id_ecdsa_with_sha256:
+               ctx->cert->sig->hash_algo = "sha256";
+               goto ecdsa;
+
+       case OID_id_ecdsa_with_sha384:
+               ctx->cert->sig->hash_algo = "sha384";
+               goto ecdsa;
+
+       case OID_id_ecdsa_with_sha512:
+               ctx->cert->sig->hash_algo = "sha512";
+               goto ecdsa;
+
        case OID_gost2012Signature256:
                ctx->cert->sig->hash_algo = "streebog256";
                goto ecrdsa;
@@ -255,6 +275,11 @@ sm2:
        ctx->cert->sig->encoding = "raw";
        ctx->algo_oid = ctx->last_oid;
        return 0;
+ecdsa:
+       ctx->cert->sig->pkey_algo = "ecdsa";
+       ctx->cert->sig->encoding = "x962";
+       ctx->algo_oid = ctx->last_oid;
+       return 0;
 }
 
 /*
@@ -276,7 +301,8 @@ int x509_note_signature(void *context, size_t hdrlen,
 
        if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 ||
            strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0 ||
-           strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0) {
+           strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0 ||
+           strcmp(ctx->cert->sig->pkey_algo, "ecdsa") == 0) {
                /* Discard the BIT STRING metadata */
                if (vlen < 1 || *(const u8 *)value != 0)
                        return -EBADMSG;
@@ -459,6 +485,7 @@ int x509_extract_key_data(void *context, size_t hdrlen,
                          const void *value, size_t vlen)
 {
        struct x509_parse_context *ctx = context;
+       enum OID oid;
 
        ctx->key_algo = ctx->last_oid;
        switch (ctx->last_oid) {
@@ -470,7 +497,25 @@ int x509_extract_key_data(void *context, size_t hdrlen,
                ctx->cert->pub->pkey_algo = "ecrdsa";
                break;
        case OID_id_ecPublicKey:
-               ctx->cert->pub->pkey_algo = "sm2";
+               if (parse_OID(ctx->params, ctx->params_size, &oid) != 0)
+                       return -EBADMSG;
+
+               switch (oid) {
+               case OID_sm2:
+                       ctx->cert->pub->pkey_algo = "sm2";
+                       break;
+               case OID_id_prime192v1:
+                       ctx->cert->pub->pkey_algo = "ecdsa-nist-p192";
+                       break;
+               case OID_id_prime256v1:
+                       ctx->cert->pub->pkey_algo = "ecdsa-nist-p256";
+                       break;
+               case OID_id_ansip384r1:
+                       ctx->cert->pub->pkey_algo = "ecdsa-nist-p384";
+                       break;
+               default:
+                       return -ENOPKG;
+               }
                break;
        default:
                return -ENOPKG;
index ae450eb8be144fbe41596d75d04f811ff6864521..3d45161b271a48f4a508a0dd3d9a981e6f7f15dd 100644 (file)
@@ -129,7 +129,9 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
        }
 
        ret = -EKEYREJECTED;
-       if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
+       if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0 &&
+           (strncmp(cert->pub->pkey_algo, "ecdsa-", 6) != 0 ||
+            strcmp(cert->sig->pkey_algo, "ecdsa") != 0))
                goto out;
 
        ret = public_key_verify_signature(cert->pub, cert->sig);
index 0e103fb5dd777c625d554114fe32a574674dae42..a989cb44fd16081c01966071ac8203108f259236 100644 (file)
@@ -1,26 +1,4 @@
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please  visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright 2012 Xyratex Technology Limited
  */
index c80aa25994a0c3e4b87cefba958862d5a657b2f3..afc6cefdc1d9ef0fa6e683c5047afd18c323230e 100644 (file)
@@ -24,6 +24,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <crypto/ecc_curve.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/slab.h>
@@ -42,7 +43,14 @@ typedef struct {
        u64 m_high;
 } uint128_t;
 
-static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
+/* Returns curv25519 curve param */
+const struct ecc_curve *ecc_get_curve25519(void)
+{
+       return &ecc_25519;
+}
+EXPORT_SYMBOL(ecc_get_curve25519);
+
+const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
 {
        switch (curve_id) {
        /* In FIPS mode only allow P256 and higher */
@@ -50,10 +58,13 @@ static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
                return fips_enabled ? NULL : &nist_p192;
        case ECC_CURVE_NIST_P256:
                return &nist_p256;
+       case ECC_CURVE_NIST_P384:
+               return &nist_p384;
        default:
                return NULL;
        }
 }
+EXPORT_SYMBOL(ecc_get_curve);
 
 static u64 *ecc_alloc_digits_space(unsigned int ndigits)
 {
@@ -128,7 +139,7 @@ bool vli_is_zero(const u64 *vli, unsigned int ndigits)
 }
 EXPORT_SYMBOL(vli_is_zero);
 
-/* Returns nonzero if bit bit of vli is set. */
+/* Returns nonzero if bit of vli is set. */
 static u64 vli_test_bit(const u64 *vli, unsigned int bit)
 {
        return (vli[bit / 64] & ((u64)1 << (bit % 64)));
@@ -775,18 +786,133 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product,
        }
 }
 
+#define SL32OR32(x32, y32) (((u64)x32 << 32) | y32)
+#define AND64H(x64)  (x64 & 0xffFFffFF00000000ull)
+#define AND64L(x64)  (x64 & 0x00000000ffFFffFFull)
+
+/* Computes result = product % curve_prime
+ * from "Mathematical routines for the NIST prime elliptic curves"
+ */
+static void vli_mmod_fast_384(u64 *result, const u64 *product,
+                               const u64 *curve_prime, u64 *tmp)
+{
+       int carry;
+       const unsigned int ndigits = 6;
+
+       /* t */
+       vli_set(result, product, ndigits);
+
+       /* s1 */
+       tmp[0] = 0;             // 0 || 0
+       tmp[1] = 0;             // 0 || 0
+       tmp[2] = SL32OR32(product[11], (product[10]>>32));      //a22||a21
+       tmp[3] = product[11]>>32;       // 0 ||a23
+       tmp[4] = 0;             // 0 || 0
+       tmp[5] = 0;             // 0 || 0
+       carry = vli_lshift(tmp, tmp, 1, ndigits);
+       carry += vli_add(result, result, tmp, ndigits);
+
+       /* s2 */
+       tmp[0] = product[6];    //a13||a12
+       tmp[1] = product[7];    //a15||a14
+       tmp[2] = product[8];    //a17||a16
+       tmp[3] = product[9];    //a19||a18
+       tmp[4] = product[10];   //a21||a20
+       tmp[5] = product[11];   //a23||a22
+       carry += vli_add(result, result, tmp, ndigits);
+
+       /* s3 */
+       tmp[0] = SL32OR32(product[11], (product[10]>>32));      //a22||a21
+       tmp[1] = SL32OR32(product[6], (product[11]>>32));       //a12||a23
+       tmp[2] = SL32OR32(product[7], (product[6])>>32);        //a14||a13
+       tmp[3] = SL32OR32(product[8], (product[7]>>32));        //a16||a15
+       tmp[4] = SL32OR32(product[9], (product[8]>>32));        //a18||a17
+       tmp[5] = SL32OR32(product[10], (product[9]>>32));       //a20||a19
+       carry += vli_add(result, result, tmp, ndigits);
+
+       /* s4 */
+       tmp[0] = AND64H(product[11]);   //a23|| 0
+       tmp[1] = (product[10]<<32);     //a20|| 0
+       tmp[2] = product[6];    //a13||a12
+       tmp[3] = product[7];    //a15||a14
+       tmp[4] = product[8];    //a17||a16
+       tmp[5] = product[9];    //a19||a18
+       carry += vli_add(result, result, tmp, ndigits);
+
+       /* s5 */
+       tmp[0] = 0;             //  0|| 0
+       tmp[1] = 0;             //  0|| 0
+       tmp[2] = product[10];   //a21||a20
+       tmp[3] = product[11];   //a23||a22
+       tmp[4] = 0;             //  0|| 0
+       tmp[5] = 0;             //  0|| 0
+       carry += vli_add(result, result, tmp, ndigits);
+
+       /* s6 */
+       tmp[0] = AND64L(product[10]);   // 0 ||a20
+       tmp[1] = AND64H(product[10]);   //a21|| 0
+       tmp[2] = product[11];   //a23||a22
+       tmp[3] = 0;             // 0 || 0
+       tmp[4] = 0;             // 0 || 0
+       tmp[5] = 0;             // 0 || 0
+       carry += vli_add(result, result, tmp, ndigits);
+
+       /* d1 */
+       tmp[0] = SL32OR32(product[6], (product[11]>>32));       //a12||a23
+       tmp[1] = SL32OR32(product[7], (product[6]>>32));        //a14||a13
+       tmp[2] = SL32OR32(product[8], (product[7]>>32));        //a16||a15
+       tmp[3] = SL32OR32(product[9], (product[8]>>32));        //a18||a17
+       tmp[4] = SL32OR32(product[10], (product[9]>>32));       //a20||a19
+       tmp[5] = SL32OR32(product[11], (product[10]>>32));      //a22||a21
+       carry -= vli_sub(result, result, tmp, ndigits);
+
+       /* d2 */
+       tmp[0] = (product[10]<<32);     //a20|| 0
+       tmp[1] = SL32OR32(product[11], (product[10]>>32));      //a22||a21
+       tmp[2] = (product[11]>>32);     // 0 ||a23
+       tmp[3] = 0;             // 0 || 0
+       tmp[4] = 0;             // 0 || 0
+       tmp[5] = 0;             // 0 || 0
+       carry -= vli_sub(result, result, tmp, ndigits);
+
+       /* d3 */
+       tmp[0] = 0;             // 0 || 0
+       tmp[1] = AND64H(product[11]);   //a23|| 0
+       tmp[2] = product[11]>>32;       // 0 ||a23
+       tmp[3] = 0;             // 0 || 0
+       tmp[4] = 0;             // 0 || 0
+       tmp[5] = 0;             // 0 || 0
+       carry -= vli_sub(result, result, tmp, ndigits);
+
+       if (carry < 0) {
+               do {
+                       carry += vli_add(result, result, curve_prime, ndigits);
+               } while (carry < 0);
+       } else {
+               while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
+                       carry -= vli_sub(result, result, curve_prime, ndigits);
+       }
+
+}
+
+#undef SL32OR32
+#undef AND64H
+#undef AND64L
+
 /* Computes result = product % curve_prime for different curve_primes.
  *
  * Note that curve_primes are distinguished just by heuristic check and
  * not by complete conformance check.
  */
 static bool vli_mmod_fast(u64 *result, u64 *product,
-                         const u64 *curve_prime, unsigned int ndigits)
+                         const struct ecc_curve *curve)
 {
        u64 tmp[2 * ECC_MAX_DIGITS];
+       const u64 *curve_prime = curve->p;
+       const unsigned int ndigits = curve->g.ndigits;
 
-       /* Currently, both NIST primes have -1 in lowest qword. */
-       if (curve_prime[0] != -1ull) {
+       /* All NIST curves have name prefix 'nist_' */
+       if (strncmp(curve->name, "nist_", 5) != 0) {
                /* Try to handle Pseudo-Marsenne primes. */
                if (curve_prime[ndigits - 1] == -1ull) {
                        vli_mmod_special(result, product, curve_prime,
@@ -809,6 +935,9 @@ static bool vli_mmod_fast(u64 *result, u64 *product,
        case 4:
                vli_mmod_fast_256(result, product, curve_prime, tmp);
                break;
+       case 6:
+               vli_mmod_fast_384(result, product, curve_prime, tmp);
+               break;
        default:
                pr_err_ratelimited("ecc: unsupported digits size!\n");
                return false;
@@ -832,22 +961,22 @@ EXPORT_SYMBOL(vli_mod_mult_slow);
 
 /* Computes result = (left * right) % curve_prime. */
 static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
-                             const u64 *curve_prime, unsigned int ndigits)
+                             const struct ecc_curve *curve)
 {
        u64 product[2 * ECC_MAX_DIGITS];
 
-       vli_mult(product, left, right, ndigits);
-       vli_mmod_fast(result, product, curve_prime, ndigits);
+       vli_mult(product, left, right, curve->g.ndigits);
+       vli_mmod_fast(result, product, curve);
 }
 
 /* Computes result = left^2 % curve_prime. */
 static void vli_mod_square_fast(u64 *result, const u64 *left,
-                               const u64 *curve_prime, unsigned int ndigits)
+                               const struct ecc_curve *curve)
 {
        u64 product[2 * ECC_MAX_DIGITS];
 
-       vli_square(product, left, ndigits);
-       vli_mmod_fast(result, product, curve_prime, ndigits);
+       vli_square(product, left, curve->g.ndigits);
+       vli_mmod_fast(result, product, curve);
 }
 
 #define EVEN(vli) (!(vli[0] & 1))
@@ -945,25 +1074,27 @@ static bool ecc_point_is_zero(const struct ecc_point *point)
 
 /* Double in place */
 static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
-                                     u64 *curve_prime, unsigned int ndigits)
+                                       const struct ecc_curve *curve)
 {
        /* t1 = x, t2 = y, t3 = z */
        u64 t4[ECC_MAX_DIGITS];
        u64 t5[ECC_MAX_DIGITS];
+       const u64 *curve_prime = curve->p;
+       const unsigned int ndigits = curve->g.ndigits;
 
        if (vli_is_zero(z1, ndigits))
                return;
 
        /* t4 = y1^2 */
-       vli_mod_square_fast(t4, y1, curve_prime, ndigits);
+       vli_mod_square_fast(t4, y1, curve);
        /* t5 = x1*y1^2 = A */
-       vli_mod_mult_fast(t5, x1, t4, curve_prime, ndigits);
+       vli_mod_mult_fast(t5, x1, t4, curve);
        /* t4 = y1^4 */
-       vli_mod_square_fast(t4, t4, curve_prime, ndigits);
+       vli_mod_square_fast(t4, t4, curve);
        /* t2 = y1*z1 = z3 */
-       vli_mod_mult_fast(y1, y1, z1, curve_prime, ndigits);
+       vli_mod_mult_fast(y1, y1, z1, curve);
        /* t3 = z1^2 */
-       vli_mod_square_fast(z1, z1, curve_prime, ndigits);
+       vli_mod_square_fast(z1, z1, curve);
 
        /* t1 = x1 + z1^2 */
        vli_mod_add(x1, x1, z1, curve_prime, ndigits);
@@ -972,7 +1103,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
        /* t3 = x1 - z1^2 */
        vli_mod_sub(z1, x1, z1, curve_prime, ndigits);
        /* t1 = x1^2 - z1^4 */
-       vli_mod_mult_fast(x1, x1, z1, curve_prime, ndigits);
+       vli_mod_mult_fast(x1, x1, z1, curve);
 
        /* t3 = 2*(x1^2 - z1^4) */
        vli_mod_add(z1, x1, x1, curve_prime, ndigits);
@@ -989,7 +1120,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
        /* t1 = 3/2*(x1^2 - z1^4) = B */
 
        /* t3 = B^2 */
-       vli_mod_square_fast(z1, x1, curve_prime, ndigits);
+       vli_mod_square_fast(z1, x1, curve);
        /* t3 = B^2 - A */
        vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
        /* t3 = B^2 - 2A = x3 */
@@ -997,7 +1128,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
        /* t5 = A - x3 */
        vli_mod_sub(t5, t5, z1, curve_prime, ndigits);
        /* t1 = B * (A - x3) */
-       vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
+       vli_mod_mult_fast(x1, x1, t5, curve);
        /* t4 = B * (A - x3) - y1^4 = y3 */
        vli_mod_sub(t4, x1, t4, curve_prime, ndigits);
 
@@ -1007,23 +1138,22 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
 }
 
 /* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
-static void apply_z(u64 *x1, u64 *y1, u64 *z, u64 *curve_prime,
-                   unsigned int ndigits)
+static void apply_z(u64 *x1, u64 *y1, u64 *z, const struct ecc_curve *curve)
 {
        u64 t1[ECC_MAX_DIGITS];
 
-       vli_mod_square_fast(t1, z, curve_prime, ndigits);    /* z^2 */
-       vli_mod_mult_fast(x1, x1, t1, curve_prime, ndigits); /* x1 * z^2 */
-       vli_mod_mult_fast(t1, t1, z, curve_prime, ndigits);  /* z^3 */
-       vli_mod_mult_fast(y1, y1, t1, curve_prime, ndigits); /* y1 * z^3 */
+       vli_mod_square_fast(t1, z, curve);              /* z^2 */
+       vli_mod_mult_fast(x1, x1, t1, curve);   /* x1 * z^2 */
+       vli_mod_mult_fast(t1, t1, z, curve);    /* z^3 */
+       vli_mod_mult_fast(y1, y1, t1, curve);   /* y1 * z^3 */
 }
 
 /* P = (x1, y1) => 2P, (x2, y2) => P' */
 static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
-                               u64 *p_initial_z, u64 *curve_prime,
-                               unsigned int ndigits)
+                               u64 *p_initial_z, const struct ecc_curve *curve)
 {
        u64 z[ECC_MAX_DIGITS];
+       const unsigned int ndigits = curve->g.ndigits;
 
        vli_set(x2, x1, ndigits);
        vli_set(y2, y1, ndigits);
@@ -1034,35 +1164,37 @@ static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
        if (p_initial_z)
                vli_set(z, p_initial_z, ndigits);
 
-       apply_z(x1, y1, z, curve_prime, ndigits);
+       apply_z(x1, y1, z, curve);
 
-       ecc_point_double_jacobian(x1, y1, z, curve_prime, ndigits);
+       ecc_point_double_jacobian(x1, y1, z, curve);
 
-       apply_z(x2, y2, z, curve_prime, ndigits);
+       apply_z(x2, y2, z, curve);
 }
 
 /* Input P = (x1, y1, Z), Q = (x2, y2, Z)
  * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
  * or P => P', Q => P + Q
  */
-static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
-                    unsigned int ndigits)
+static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
+                       const struct ecc_curve *curve)
 {
        /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
        u64 t5[ECC_MAX_DIGITS];
+       const u64 *curve_prime = curve->p;
+       const unsigned int ndigits = curve->g.ndigits;
 
        /* t5 = x2 - x1 */
        vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
        /* t5 = (x2 - x1)^2 = A */
-       vli_mod_square_fast(t5, t5, curve_prime, ndigits);
+       vli_mod_square_fast(t5, t5, curve);
        /* t1 = x1*A = B */
-       vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
+       vli_mod_mult_fast(x1, x1, t5, curve);
        /* t3 = x2*A = C */
-       vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
+       vli_mod_mult_fast(x2, x2, t5, curve);
        /* t4 = y2 - y1 */
        vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
        /* t5 = (y2 - y1)^2 = D */
-       vli_mod_square_fast(t5, y2, curve_prime, ndigits);
+       vli_mod_square_fast(t5, y2, curve);
 
        /* t5 = D - B */
        vli_mod_sub(t5, t5, x1, curve_prime, ndigits);
@@ -1071,11 +1203,11 @@ static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
        /* t3 = C - B */
        vli_mod_sub(x2, x2, x1, curve_prime, ndigits);
        /* t2 = y1*(C - B) */
-       vli_mod_mult_fast(y1, y1, x2, curve_prime, ndigits);
+       vli_mod_mult_fast(y1, y1, x2, curve);
        /* t3 = B - x3 */
        vli_mod_sub(x2, x1, t5, curve_prime, ndigits);
        /* t4 = (y2 - y1)*(B - x3) */
-       vli_mod_mult_fast(y2, y2, x2, curve_prime, ndigits);
+       vli_mod_mult_fast(y2, y2, x2, curve);
        /* t4 = y3 */
        vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
 
@@ -1086,22 +1218,24 @@ static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
  * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
  * or P => P - Q, Q => P + Q
  */
-static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
-                      unsigned int ndigits)
+static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
+                       const struct ecc_curve *curve)
 {
        /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
        u64 t5[ECC_MAX_DIGITS];
        u64 t6[ECC_MAX_DIGITS];
        u64 t7[ECC_MAX_DIGITS];
+       const u64 *curve_prime = curve->p;
+       const unsigned int ndigits = curve->g.ndigits;
 
        /* t5 = x2 - x1 */
        vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
        /* t5 = (x2 - x1)^2 = A */
-       vli_mod_square_fast(t5, t5, curve_prime, ndigits);
+       vli_mod_square_fast(t5, t5, curve);
        /* t1 = x1*A = B */
-       vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
+       vli_mod_mult_fast(x1, x1, t5, curve);
        /* t3 = x2*A = C */
-       vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
+       vli_mod_mult_fast(x2, x2, t5, curve);
        /* t4 = y2 + y1 */
        vli_mod_add(t5, y2, y1, curve_prime, ndigits);
        /* t4 = y2 - y1 */
@@ -1110,29 +1244,29 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
        /* t6 = C - B */
        vli_mod_sub(t6, x2, x1, curve_prime, ndigits);
        /* t2 = y1 * (C - B) */
-       vli_mod_mult_fast(y1, y1, t6, curve_prime, ndigits);
+       vli_mod_mult_fast(y1, y1, t6, curve);
        /* t6 = B + C */
        vli_mod_add(t6, x1, x2, curve_prime, ndigits);
        /* t3 = (y2 - y1)^2 */
-       vli_mod_square_fast(x2, y2, curve_prime, ndigits);
+       vli_mod_square_fast(x2, y2, curve);
        /* t3 = x3 */
        vli_mod_sub(x2, x2, t6, curve_prime, ndigits);
 
        /* t7 = B - x3 */
        vli_mod_sub(t7, x1, x2, curve_prime, ndigits);
        /* t4 = (y2 - y1)*(B - x3) */
-       vli_mod_mult_fast(y2, y2, t7, curve_prime, ndigits);
+       vli_mod_mult_fast(y2, y2, t7, curve);
        /* t4 = y3 */
        vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
 
        /* t7 = (y2 + y1)^2 = F */
-       vli_mod_square_fast(t7, t5, curve_prime, ndigits);
+       vli_mod_square_fast(t7, t5, curve);
        /* t7 = x3' */
        vli_mod_sub(t7, t7, t6, curve_prime, ndigits);
        /* t6 = x3' - B */
        vli_mod_sub(t6, t7, x1, curve_prime, ndigits);
        /* t6 = (y2 + y1)*(x3' - B) */
-       vli_mod_mult_fast(t6, t6, t5, curve_prime, ndigits);
+       vli_mod_mult_fast(t6, t6, t5, curve);
        /* t2 = y3' */
        vli_mod_sub(y1, t6, y1, curve_prime, ndigits);
 
@@ -1162,41 +1296,37 @@ static void ecc_point_mult(struct ecc_point *result,
        vli_set(rx[1], point->x, ndigits);
        vli_set(ry[1], point->y, ndigits);
 
-       xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve_prime,
-                           ndigits);
+       xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve);
 
        for (i = num_bits - 2; i > 0; i--) {
                nb = !vli_test_bit(scalar, i);
-               xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
-                          ndigits);
-               xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime,
-                        ndigits);
+               xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve);
+               xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve);
        }
 
        nb = !vli_test_bit(scalar, 0);
-       xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
-                  ndigits);
+       xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve);
 
        /* Find final 1/Z value. */
        /* X1 - X0 */
        vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits);
        /* Yb * (X1 - X0) */
-       vli_mod_mult_fast(z, z, ry[1 - nb], curve_prime, ndigits);
+       vli_mod_mult_fast(z, z, ry[1 - nb], curve);
        /* xP * Yb * (X1 - X0) */
-       vli_mod_mult_fast(z, z, point->x, curve_prime, ndigits);
+       vli_mod_mult_fast(z, z, point->x, curve);
 
        /* 1 / (xP * Yb * (X1 - X0)) */
        vli_mod_inv(z, z, curve_prime, point->ndigits);
 
        /* yP / (xP * Yb * (X1 - X0)) */
-       vli_mod_mult_fast(z, z, point->y, curve_prime, ndigits);
+       vli_mod_mult_fast(z, z, point->y, curve);
        /* Xb * yP / (xP * Yb * (X1 - X0)) */
-       vli_mod_mult_fast(z, z, rx[1 - nb], curve_prime, ndigits);
+       vli_mod_mult_fast(z, z, rx[1 - nb], curve);
        /* End 1/Z calculation */
 
-       xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, ndigits);
+       xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve);
 
-       apply_z(rx[0], ry[0], z, curve_prime, ndigits);
+       apply_z(rx[0], ry[0], z, curve);
 
        vli_set(result->x, rx[0], ndigits);
        vli_set(result->y, ry[0], ndigits);
@@ -1217,9 +1347,9 @@ static void ecc_point_add(const struct ecc_point *result,
        vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
        vli_set(px, p->x, ndigits);
        vli_set(py, p->y, ndigits);
-       xycz_add(px, py, result->x, result->y, curve->p, ndigits);
+       xycz_add(px, py, result->x, result->y, curve);
        vli_mod_inv(z, z, curve->p, ndigits);
-       apply_z(result->x, result->y, z, curve->p, ndigits);
+       apply_z(result->x, result->y, z, curve);
 }
 
 /* Computes R = u1P + u2Q mod p using Shamir's trick.
@@ -1248,8 +1378,7 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
        points[2] = q;
        points[3] = &sum;
 
-       num_bits = max(vli_num_bits(u1, ndigits),
-                      vli_num_bits(u2, ndigits));
+       num_bits = max(vli_num_bits(u1, ndigits), vli_num_bits(u2, ndigits));
        i = num_bits - 1;
        idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
        point = points[idx];
@@ -1260,7 +1389,7 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
        z[0] = 1;
 
        for (--i; i >= 0; i--) {
-               ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits);
+               ecc_point_double_jacobian(rx, ry, z, curve);
                idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
                point = points[idx];
                if (point) {
@@ -1270,27 +1399,17 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
 
                        vli_set(tx, point->x, ndigits);
                        vli_set(ty, point->y, ndigits);
-                       apply_z(tx, ty, z, curve->p, ndigits);
+                       apply_z(tx, ty, z, curve);
                        vli_mod_sub(tz, rx, tx, curve->p, ndigits);
-                       xycz_add(tx, ty, rx, ry, curve->p, ndigits);
-                       vli_mod_mult_fast(z, z, tz, curve->p, ndigits);
+                       xycz_add(tx, ty, rx, ry, curve);
+                       vli_mod_mult_fast(z, z, tz, curve);
                }
        }
        vli_mod_inv(z, z, curve->p, ndigits);
-       apply_z(rx, ry, z, curve->p, ndigits);
+       apply_z(rx, ry, z, curve);
 }
 EXPORT_SYMBOL(ecc_point_mult_shamir);
 
-static inline void ecc_swap_digits(const u64 *in, u64 *out,
-                                  unsigned int ndigits)
-{
-       const __be64 *src = (__force __be64 *)in;
-       int i;
-
-       for (i = 0; i < ndigits; i++)
-               out[i] = be64_to_cpu(src[ndigits - 1 - i]);
-}
-
 static int __ecc_is_key_valid(const struct ecc_curve *curve,
                              const u64 *private_key, unsigned int ndigits)
 {
@@ -1441,10 +1560,10 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
                return -EINVAL;
 
        /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
-       vli_mod_square_fast(yy, pk->y, curve->p, pk->ndigits); /* y^2 */
-       vli_mod_square_fast(xxx, pk->x, curve->p, pk->ndigits); /* x^2 */
-       vli_mod_mult_fast(xxx, xxx, pk->x, curve->p, pk->ndigits); /* x^3 */
-       vli_mod_mult_fast(w, curve->a, pk->x, curve->p, pk->ndigits); /* a·x */
+       vli_mod_square_fast(yy, pk->y, curve); /* y^2 */
+       vli_mod_square_fast(xxx, pk->x, curve); /* x^2 */
+       vli_mod_mult_fast(xxx, xxx, pk->x, curve); /* x^3 */
+       vli_mod_mult_fast(w, curve->a, pk->x, curve); /* a·x */
        vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */
        vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */
        if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */
index d4e546b9ad79ea72d9dc0122cc7457e7f4feac84..a006132646a4389e74bdadb1a27ac998cea97993 100644 (file)
 #ifndef _CRYPTO_ECC_H
 #define _CRYPTO_ECC_H
 
+#include <crypto/ecc_curve.h>
+
 /* One digit is u64 qword. */
 #define ECC_CURVE_NIST_P192_DIGITS  3
 #define ECC_CURVE_NIST_P256_DIGITS  4
-#define ECC_MAX_DIGITS             (512 / 64)
+#define ECC_CURVE_NIST_P384_DIGITS  6
+#define ECC_MAX_DIGITS              (512 / 64) /* due to ecrdsa */
 
 #define ECC_DIGITS_TO_BYTES_SHIFT 3
 
-/**
- * struct ecc_point - elliptic curve point in affine coordinates
- *
- * @x:         X coordinate in vli form.
- * @y:         Y coordinate in vli form.
- * @ndigits:   Length of vlis in u64 qwords.
- */
-struct ecc_point {
-       u64 *x;
-       u64 *y;
-       u8 ndigits;
-};
+#define ECC_MAX_BYTES (ECC_MAX_DIGITS << ECC_DIGITS_TO_BYTES_SHIFT)
 
 #define ECC_POINT_INIT(x, y, ndigits)  (struct ecc_point) { x, y, ndigits }
 
 /**
- * struct ecc_curve - definition of elliptic curve
- *
- * @name:      Short name of the curve.
- * @g:         Generator point of the curve.
- * @p:         Prime number, if Barrett's reduction is used for this curve
- *             pre-calculated value 'mu' is appended to the @p after ndigits.
- *             Use of Barrett's reduction is heuristically determined in
- *             vli_mmod_fast().
- * @n:         Order of the curve group.
- * @a:         Curve parameter a.
- * @b:         Curve parameter b.
+ * ecc_swap_digits() - Copy ndigits from big endian array to native array
+ * @in:       Input array
+ * @out:      Output array
+ * @ndigits:  Number of digits to copy
  */
-struct ecc_curve {
-       char *name;
-       struct ecc_point g;
-       u64 *p;
-       u64 *n;
-       u64 *a;
-       u64 *b;
-};
+static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits)
+{
+       const __be64 *src = (__force __be64 *)in;
+       int i;
+
+       for (i = 0; i < ndigits; i++)
+               out[i] = be64_to_cpu(src[ndigits - 1 - i]);
+}
 
 /**
  * ecc_is_key_valid() - Validate a given ECDH private key
index 69be6c7d228f2ed3513c7f6810942606873bf13e..9719934c9428ef68325fd36e2653c337774b50ea 100644 (file)
@@ -54,4 +54,53 @@ static struct ecc_curve nist_p256 = {
        .b = nist_p256_b
 };
 
+/* NIST P-384 */
+static u64 nist_p384_g_x[] = { 0x3A545E3872760AB7ull, 0x5502F25DBF55296Cull,
+                               0x59F741E082542A38ull, 0x6E1D3B628BA79B98ull,
+                               0x8Eb1C71EF320AD74ull, 0xAA87CA22BE8B0537ull };
+static u64 nist_p384_g_y[] = { 0x7A431D7C90EA0E5Full, 0x0A60B1CE1D7E819Dull,
+                               0xE9DA3113B5F0B8C0ull, 0xF8F41DBD289A147Cull,
+                               0x5D9E98BF9292DC29ull, 0x3617DE4A96262C6Full };
+static u64 nist_p384_p[] = { 0x00000000FFFFFFFFull, 0xFFFFFFFF00000000ull,
+                               0xFFFFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFFFFFull,
+                               0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p384_n[] = { 0xECEC196ACCC52973ull, 0x581A0DB248B0A77Aull,
+                               0xC7634D81F4372DDFull, 0xFFFFFFFFFFFFFFFFull,
+                               0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p384_a[] = { 0x00000000FFFFFFFCull, 0xFFFFFFFF00000000ull,
+                               0xFFFFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFFFFFull,
+                               0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p384_b[] = { 0x2a85c8edd3ec2aefull, 0xc656398d8a2ed19dull,
+                               0x0314088f5013875aull, 0x181d9c6efe814112ull,
+                               0x988e056be3f82d19ull, 0xb3312fa7e23ee7e4ull };
+static struct ecc_curve nist_p384 = {
+       .name = "nist_384",
+       .g = {
+               .x = nist_p384_g_x,
+               .y = nist_p384_g_y,
+               .ndigits = 6,
+       },
+       .p = nist_p384_p,
+       .n = nist_p384_n,
+       .a = nist_p384_a,
+       .b = nist_p384_b
+};
+
+/* curve25519 */
+static u64 curve25519_g_x[] = { 0x0000000000000009, 0x0000000000000000,
+                               0x0000000000000000, 0x0000000000000000 };
+static u64 curve25519_p[] = { 0xffffffffffffffed, 0xffffffffffffffff,
+                               0xffffffffffffffff, 0x7fffffffffffffff };
+static u64 curve25519_a[] = { 0x000000000001DB41, 0x0000000000000000,
+                               0x0000000000000000, 0x0000000000000000 };
+static const struct ecc_curve ecc_25519 = {
+       .name = "curve25519",
+       .g = {
+               .x = curve25519_g_x,
+               .ndigits = 4,
+       },
+       .p = curve25519_p,
+       .a = curve25519_a,
+};
+
 #endif
index 96f80c8f8e30484c2c1b656d91d12a0e82eeee9f..04a427b8c956477deb435c5b9d68ec7a6f20f174 100644 (file)
@@ -23,33 +23,16 @@ static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm)
        return kpp_tfm_ctx(tfm);
 }
 
-static unsigned int ecdh_supported_curve(unsigned int curve_id)
-{
-       switch (curve_id) {
-       case ECC_CURVE_NIST_P192: return ECC_CURVE_NIST_P192_DIGITS;
-       case ECC_CURVE_NIST_P256: return ECC_CURVE_NIST_P256_DIGITS;
-       default: return 0;
-       }
-}
-
 static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
                           unsigned int len)
 {
        struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
        struct ecdh params;
-       unsigned int ndigits;
 
        if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
-           params.key_size > sizeof(ctx->private_key))
+           params.key_size > sizeof(u64) * ctx->ndigits)
                return -EINVAL;
 
-       ndigits = ecdh_supported_curve(params.curve_id);
-       if (!ndigits)
-               return -EINVAL;
-
-       ctx->curve_id = params.curve_id;
-       ctx->ndigits = ndigits;
-
        if (!params.key || !params.key_size)
                return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
                                       ctx->private_key);
@@ -140,13 +123,24 @@ static unsigned int ecdh_max_size(struct crypto_kpp *tfm)
        return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1);
 }
 
-static struct kpp_alg ecdh = {
+static int ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
+{
+       struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
+
+       ctx->curve_id = ECC_CURVE_NIST_P192;
+       ctx->ndigits = ECC_CURVE_NIST_P192_DIGITS;
+
+       return 0;
+}
+
+static struct kpp_alg ecdh_nist_p192 = {
        .set_secret = ecdh_set_secret,
        .generate_public_key = ecdh_compute_value,
        .compute_shared_secret = ecdh_compute_value,
        .max_size = ecdh_max_size,
+       .init = ecdh_nist_p192_init_tfm,
        .base = {
-               .cra_name = "ecdh",
+               .cra_name = "ecdh-nist-p192",
                .cra_driver_name = "ecdh-generic",
                .cra_priority = 100,
                .cra_module = THIS_MODULE,
@@ -154,14 +148,48 @@ static struct kpp_alg ecdh = {
        },
 };
 
+static int ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
+{
+       struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
+
+       ctx->curve_id = ECC_CURVE_NIST_P256;
+       ctx->ndigits = ECC_CURVE_NIST_P256_DIGITS;
+
+       return 0;
+}
+
+static struct kpp_alg ecdh_nist_p256 = {
+       .set_secret = ecdh_set_secret,
+       .generate_public_key = ecdh_compute_value,
+       .compute_shared_secret = ecdh_compute_value,
+       .max_size = ecdh_max_size,
+       .init = ecdh_nist_p256_init_tfm,
+       .base = {
+               .cra_name = "ecdh-nist-p256",
+               .cra_driver_name = "ecdh-generic",
+               .cra_priority = 100,
+               .cra_module = THIS_MODULE,
+               .cra_ctxsize = sizeof(struct ecdh_ctx),
+       },
+};
+
+static bool ecdh_nist_p192_registered;
+
 static int ecdh_init(void)
 {
-       return crypto_register_kpp(&ecdh);
+       int ret;
+
+       ret = crypto_register_kpp(&ecdh_nist_p192);
+       ecdh_nist_p192_registered = ret == 0;
+
+       return crypto_register_kpp(&ecdh_nist_p256);
 }
 
 static void ecdh_exit(void)
 {
-       crypto_unregister_kpp(&ecdh);
+       if (ecdh_nist_p192_registered)
+               crypto_unregister_kpp(&ecdh_nist_p192);
+       crypto_unregister_kpp(&ecdh_nist_p256);
 }
 
 subsys_initcall(ecdh_init);
index fca63b559f655266fab1ff432dda85b21225c906..f18f9028f9121d66b6d1210c3408f6bcfdb7be14 100644 (file)
@@ -10,7 +10,7 @@
 #include <crypto/ecdh.h>
 #include <crypto/kpp.h>
 
-#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 2 * sizeof(short))
+#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + sizeof(short))
 
 static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz)
 {
@@ -46,7 +46,6 @@ int crypto_ecdh_encode_key(char *buf, unsigned int len,
                return -EINVAL;
 
        ptr = ecdh_pack_data(ptr, &secret, sizeof(secret));
-       ptr = ecdh_pack_data(ptr, &params->curve_id, sizeof(params->curve_id));
        ptr = ecdh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
        ecdh_pack_data(ptr, params->key, params->key_size);
 
@@ -70,7 +69,6 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len,
        if (unlikely(len < secret.len))
                return -EINVAL;
 
-       ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
        ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
        if (secret.len != crypto_ecdh_key_len(params))
                return -EINVAL;
diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
new file mode 100644 (file)
index 0000000..1e7b150
--- /dev/null
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 IBM Corporation
+ */
+
+#include <linux/module.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/ecdh.h>
+#include <linux/asn1_decoder.h>
+#include <linux/scatterlist.h>
+
+#include "ecc.h"
+#include "ecdsasignature.asn1.h"
+
+struct ecc_ctx {
+       unsigned int curve_id;
+       const struct ecc_curve *curve;
+
+       bool pub_key_set;
+       u64 x[ECC_MAX_DIGITS]; /* pub key x and y coordinates */
+       u64 y[ECC_MAX_DIGITS];
+       struct ecc_point pub_key;
+};
+
+struct ecdsa_signature_ctx {
+       const struct ecc_curve *curve;
+       u64 r[ECC_MAX_DIGITS];
+       u64 s[ECC_MAX_DIGITS];
+};
+
+/*
+ * Get the r and s components of a signature from the X509 certificate.
+ */
+static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
+                                 const void *value, size_t vlen, unsigned int ndigits)
+{
+       size_t keylen = ndigits * sizeof(u64);
+       ssize_t diff = vlen - keylen;
+       const char *d = value;
+       u8 rs[ECC_MAX_BYTES];
+
+       if (!value || !vlen)
+               return -EINVAL;
+
+       /* diff = 0: 'value' has exactly the right size
+        * diff > 0: 'value' has too many bytes; one leading zero is allowed that
+        *           makes the value a positive integer; error on more
+        * diff < 0: 'value' is missing leading zeros, which we add
+        */
+       if (diff > 0) {
+               /* skip over leading zeros that make 'value' a positive int */
+               if (*d == 0) {
+                       vlen -= 1;
+                       diff--;
+                       d++;
+               }
+               if (diff)
+                       return -EINVAL;
+       }
+       if (-diff >= keylen)
+               return -EINVAL;
+
+       if (diff) {
+               /* leading zeros not given in 'value' */
+               memset(rs, 0, -diff);
+       }
+
+       memcpy(&rs[-diff], d, vlen);
+
+       ecc_swap_digits((u64 *)rs, dest, ndigits);
+
+       return 0;
+}
+
+int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
+                         const void *value, size_t vlen)
+{
+       struct ecdsa_signature_ctx *sig = context;
+
+       return ecdsa_get_signature_rs(sig->r, hdrlen, tag, value, vlen,
+                                     sig->curve->g.ndigits);
+}
+
+int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
+                         const void *value, size_t vlen)
+{
+       struct ecdsa_signature_ctx *sig = context;
+
+       return ecdsa_get_signature_rs(sig->s, hdrlen, tag, value, vlen,
+                                     sig->curve->g.ndigits);
+}
+
+static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, const u64 *s)
+{
+       const struct ecc_curve *curve = ctx->curve;
+       unsigned int ndigits = curve->g.ndigits;
+       u64 s1[ECC_MAX_DIGITS];
+       u64 u1[ECC_MAX_DIGITS];
+       u64 u2[ECC_MAX_DIGITS];
+       u64 x1[ECC_MAX_DIGITS];
+       u64 y1[ECC_MAX_DIGITS];
+       struct ecc_point res = ECC_POINT_INIT(x1, y1, ndigits);
+
+       /* 0 < r < n  and 0 < s < n */
+       if (vli_is_zero(r, ndigits) || vli_cmp(r, curve->n, ndigits) >= 0 ||
+           vli_is_zero(s, ndigits) || vli_cmp(s, curve->n, ndigits) >= 0)
+               return -EBADMSG;
+
+       /* hash is given */
+       pr_devel("hash : %016llx %016llx ... %016llx\n",
+                hash[ndigits - 1], hash[ndigits - 2], hash[0]);
+
+       /* s1 = (s^-1) mod n */
+       vli_mod_inv(s1, s, curve->n, ndigits);
+       /* u1 = (hash * s1) mod n */
+       vli_mod_mult_slow(u1, hash, s1, curve->n, ndigits);
+       /* u2 = (r * s1) mod n */
+       vli_mod_mult_slow(u2, r, s1, curve->n, ndigits);
+       /* res = u1*G + u2 * pub_key */
+       ecc_point_mult_shamir(&res, u1, &curve->g, u2, &ctx->pub_key, curve);
+
+       /* res.x = res.x mod n (if res.x > order) */
+       if (unlikely(vli_cmp(res.x, curve->n, ndigits) == 1))
+               /* faster alternative for NIST p384, p256 & p192 */
+               vli_sub(res.x, res.x, curve->n, ndigits);
+
+       if (!vli_cmp(res.x, r, ndigits))
+               return 0;
+
+       return -EKEYREJECTED;
+}
+
+/*
+ * Verify an ECDSA signature.
+ */
+static int ecdsa_verify(struct akcipher_request *req)
+{
+       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+       struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+       size_t keylen = ctx->curve->g.ndigits * sizeof(u64);
+       struct ecdsa_signature_ctx sig_ctx = {
+               .curve = ctx->curve,
+       };
+       u8 rawhash[ECC_MAX_BYTES];
+       u64 hash[ECC_MAX_DIGITS];
+       unsigned char *buffer;
+       ssize_t diff;
+       int ret;
+
+       if (unlikely(!ctx->pub_key_set))
+               return -EINVAL;
+
+       buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       sg_pcopy_to_buffer(req->src,
+               sg_nents_for_len(req->src, req->src_len + req->dst_len),
+               buffer, req->src_len + req->dst_len, 0);
+
+       ret = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx,
+                              buffer, req->src_len);
+       if (ret < 0)
+               goto error;
+
+       /* if the hash is shorter than keylen, we will add leading zeros to fit to ndigits */
+       diff = keylen - req->dst_len;
+       if (diff >= 0) {
+               if (diff)
+                       memset(rawhash, 0, diff);
+               memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len);
+       } else if (diff < 0) {
+               /* given hash is longer, we take the left-most bytes */
+               memcpy(&rawhash, buffer + req->src_len, keylen);
+       }
+
+       ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits);
+
+       ret = _ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s);
+
+error:
+       kfree(buffer);
+
+       return ret;
+}
+
+static int ecdsa_ecc_ctx_init(struct ecc_ctx *ctx, unsigned int curve_id)
+{
+       ctx->curve_id = curve_id;
+       ctx->curve = ecc_get_curve(curve_id);
+       if (!ctx->curve)
+               return -EINVAL;
+
+       return 0;
+}
+
+
+static void ecdsa_ecc_ctx_deinit(struct ecc_ctx *ctx)
+{
+       ctx->pub_key_set = false;
+}
+
+static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx)
+{
+       unsigned int curve_id = ctx->curve_id;
+       int ret;
+
+       ecdsa_ecc_ctx_deinit(ctx);
+       ret = ecdsa_ecc_ctx_init(ctx, curve_id);
+       if (ret == 0)
+               ctx->pub_key = ECC_POINT_INIT(ctx->x, ctx->y,
+                                             ctx->curve->g.ndigits);
+       return ret;
+}
+
+/*
+ * Set the public key given the raw uncompressed key data from an X509
+ * certificate. The key data contain the concatenated X and Y coordinates of
+ * the public key.
+ */
+static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen)
+{
+       struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+       const unsigned char *d = key;
+       const u64 *digits = (const u64 *)&d[1];
+       unsigned int ndigits;
+       int ret;
+
+       ret = ecdsa_ecc_ctx_reset(ctx);
+       if (ret < 0)
+               return ret;
+
+       if (keylen < 1 || (((keylen - 1) >> 1) % sizeof(u64)) != 0)
+               return -EINVAL;
+       /* we only accept uncompressed format indicated by '4' */
+       if (d[0] != 4)
+               return -EINVAL;
+
+       keylen--;
+       ndigits = (keylen >> 1) / sizeof(u64);
+       if (ndigits != ctx->curve->g.ndigits)
+               return -EINVAL;
+
+       ecc_swap_digits(digits, ctx->pub_key.x, ndigits);
+       ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits);
+       ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key);
+
+       ctx->pub_key_set = ret == 0;
+
+       return ret;
+}
+
+static void ecdsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+       struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+       ecdsa_ecc_ctx_deinit(ctx);
+}
+
+static unsigned int ecdsa_max_size(struct crypto_akcipher *tfm)
+{
+       struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+       return ctx->pub_key.ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+}
+
+static int ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm)
+{
+       struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+       return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P384);
+}
+
+static struct akcipher_alg ecdsa_nist_p384 = {
+       .verify = ecdsa_verify,
+       .set_pub_key = ecdsa_set_pub_key,
+       .max_size = ecdsa_max_size,
+       .init = ecdsa_nist_p384_init_tfm,
+       .exit = ecdsa_exit_tfm,
+       .base = {
+               .cra_name = "ecdsa-nist-p384",
+               .cra_driver_name = "ecdsa-nist-p384-generic",
+               .cra_priority = 100,
+               .cra_module = THIS_MODULE,
+               .cra_ctxsize = sizeof(struct ecc_ctx),
+       },
+};
+
+static int ecdsa_nist_p256_init_tfm(struct crypto_akcipher *tfm)
+{
+       struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+       return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P256);
+}
+
+static struct akcipher_alg ecdsa_nist_p256 = {
+       .verify = ecdsa_verify,
+       .set_pub_key = ecdsa_set_pub_key,
+       .max_size = ecdsa_max_size,
+       .init = ecdsa_nist_p256_init_tfm,
+       .exit = ecdsa_exit_tfm,
+       .base = {
+               .cra_name = "ecdsa-nist-p256",
+               .cra_driver_name = "ecdsa-nist-p256-generic",
+               .cra_priority = 100,
+               .cra_module = THIS_MODULE,
+               .cra_ctxsize = sizeof(struct ecc_ctx),
+       },
+};
+
+static int ecdsa_nist_p192_init_tfm(struct crypto_akcipher *tfm)
+{
+       struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+       return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P192);
+}
+
+static struct akcipher_alg ecdsa_nist_p192 = {
+       .verify = ecdsa_verify,
+       .set_pub_key = ecdsa_set_pub_key,
+       .max_size = ecdsa_max_size,
+       .init = ecdsa_nist_p192_init_tfm,
+       .exit = ecdsa_exit_tfm,
+       .base = {
+               .cra_name = "ecdsa-nist-p192",
+               .cra_driver_name = "ecdsa-nist-p192-generic",
+               .cra_priority = 100,
+               .cra_module = THIS_MODULE,
+               .cra_ctxsize = sizeof(struct ecc_ctx),
+       },
+};
+static bool ecdsa_nist_p192_registered;
+
+static int ecdsa_init(void)
+{
+       int ret;
+
+       /* NIST p192 may not be available in FIPS mode */
+       ret = crypto_register_akcipher(&ecdsa_nist_p192);
+       ecdsa_nist_p192_registered = ret == 0;
+
+       ret = crypto_register_akcipher(&ecdsa_nist_p256);
+       if (ret)
+               goto nist_p256_error;
+
+       ret = crypto_register_akcipher(&ecdsa_nist_p384);
+       if (ret)
+               goto nist_p384_error;
+
+       return 0;
+
+nist_p384_error:
+       crypto_unregister_akcipher(&ecdsa_nist_p256);
+
+nist_p256_error:
+       if (ecdsa_nist_p192_registered)
+               crypto_unregister_akcipher(&ecdsa_nist_p192);
+       return ret;
+}
+
+static void ecdsa_exit(void)
+{
+       if (ecdsa_nist_p192_registered)
+               crypto_unregister_akcipher(&ecdsa_nist_p192);
+       crypto_unregister_akcipher(&ecdsa_nist_p256);
+       crypto_unregister_akcipher(&ecdsa_nist_p384);
+}
+
+subsys_initcall(ecdsa_init);
+module_exit(ecdsa_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stefan Berger <stefanb@linux.ibm.com>");
+MODULE_DESCRIPTION("ECDSA generic algorithm");
+MODULE_ALIAS_CRYPTO("ecdsa-generic");
diff --git a/crypto/ecdsasignature.asn1 b/crypto/ecdsasignature.asn1
new file mode 100644 (file)
index 0000000..621ab75
--- /dev/null
@@ -0,0 +1,4 @@
+ECDSASignature ::= SEQUENCE {
+       r       INTEGER ({ ecdsa_get_signature_r }),
+       s       INTEGER ({ ecdsa_get_signature_s })
+}
index c36ea0c8be984bb1c305bb234aedaa9ea68080c9..76a04d000c0d3fd55c1521862208c04def6ce7bd 100644 (file)
@@ -63,10 +63,7 @@ do {                                                         \
 } while (0)
 
 /* Rotate right one 64 bit number as a 56 bit number */
-#define ror56_64(k, n)                                         \
-do {                                                           \
-       k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n));      \
-} while (0)
+#define ror56_64(k, n) (k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)))
 
 /*
  * Sboxes for Feistel network derived from
index 6e147c43fc186c2c540c6a23c8b995c1da413c2d..a11b3208760f35d508ed31a3eff46b9c15eb8bc8 100644 (file)
@@ -597,7 +597,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
        if (!ec)
                return -1;
 
-       while (0 < len) {
+       while (len > 0) {
                unsigned int tocopy;
 
                jent_gen_entropy(ec);
@@ -678,7 +678,7 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
        }
 
        /* verify and set the oversampling rate */
-       if (0 == osr)
+       if (osr == 0)
                osr = 1; /* minimum sampling rate is 1 */
        entropy_collector->osr = osr;
 
@@ -769,7 +769,7 @@ int jent_entropy_init(void)
                 * etc. with the goal to clear it to get the worst case
                 * measurements.
                 */
-               if (CLEARCACHE > i)
+               if (i < CLEARCACHE)
                        continue;
 
                if (stuck)
@@ -826,7 +826,7 @@ int jent_entropy_init(void)
         * should not fail. The value of 3 should cover the NTP case being
         * performed during our test run.
         */
-       if (3 < time_backwards)
+       if (time_backwards > 3)
                return JENT_ENOMONOTONIC;
 
        /*
index 3517773bc7f732b06f947c238744d75ae70c7e80..054d9a216fc9f3dbabeecbdd12a28954b1e0fa5e 100644 (file)
@@ -114,9 +114,9 @@ static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
                        scatterwalk_start(walk, sg);
                        scatterwalk_advance(walk, skip);
                        break;
-               } else
-                       skip -= sg->length;
+               }
 
+               skip -= sg->length;
                sg = sg_next(sg);
        }
 }
index a888d84b524a4805dcb83ada16d92e0f05bd84ad..fea082b25fe4b6636ce184acdb907bb571f23603 100644 (file)
@@ -34,22 +34,18 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
        u8 *buf = NULL;
        int err;
 
-       crypto_stats_get(alg);
        if (!seed && slen) {
                buf = kmalloc(slen, GFP_KERNEL);
-               if (!buf) {
-                       crypto_alg_put(alg);
+               if (!buf)
                        return -ENOMEM;
-               }
 
                err = get_random_bytes_wait(buf, slen);
-               if (err) {
-                       crypto_alg_put(alg);
+               if (err)
                        goto out;
-               }
                seed = buf;
        }
 
+       crypto_stats_get(alg);
        err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
        crypto_stats_rng_seed(alg, err);
 out:
index 236c87547a174edda55c952f21d3ca6664d42349..45f98b750053e72d7bc4ca9aa069f965291a5c18 100644 (file)
@@ -272,6 +272,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
        u32 *k = ctx->expkey;
        u8  *k8 = (u8 *)k;
        u32 r0, r1, r2, r3, r4;
+       __le32 *lk;
        int i;
 
        /* Copy key, add padding */
@@ -283,22 +284,32 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
        while (i < SERPENT_MAX_KEY_SIZE)
                k8[i++] = 0;
 
+       lk = (__le32 *)k;
+       k[0] = le32_to_cpu(lk[0]);
+       k[1] = le32_to_cpu(lk[1]);
+       k[2] = le32_to_cpu(lk[2]);
+       k[3] = le32_to_cpu(lk[3]);
+       k[4] = le32_to_cpu(lk[4]);
+       k[5] = le32_to_cpu(lk[5]);
+       k[6] = le32_to_cpu(lk[6]);
+       k[7] = le32_to_cpu(lk[7]);
+
        /* Expand key using polynomial */
 
-       r0 = le32_to_cpu(k[3]);
-       r1 = le32_to_cpu(k[4]);
-       r2 = le32_to_cpu(k[5]);
-       r3 = le32_to_cpu(k[6]);
-       r4 = le32_to_cpu(k[7]);
-
-       keyiter(le32_to_cpu(k[0]), r0, r4, r2, 0, 0);
-       keyiter(le32_to_cpu(k[1]), r1, r0, r3, 1, 1);
-       keyiter(le32_to_cpu(k[2]), r2, r1, r4, 2, 2);
-       keyiter(le32_to_cpu(k[3]), r3, r2, r0, 3, 3);
-       keyiter(le32_to_cpu(k[4]), r4, r3, r1, 4, 4);
-       keyiter(le32_to_cpu(k[5]), r0, r4, r2, 5, 5);
-       keyiter(le32_to_cpu(k[6]), r1, r0, r3, 6, 6);
-       keyiter(le32_to_cpu(k[7]), r2, r1, r4, 7, 7);
+       r0 = k[3];
+       r1 = k[4];
+       r2 = k[5];
+       r3 = k[6];
+       r4 = k[7];
+
+       keyiter(k[0], r0, r4, r2, 0, 0);
+       keyiter(k[1], r1, r0, r3, 1, 1);
+       keyiter(k[2], r2, r1, r4, 2, 2);
+       keyiter(k[3], r3, r2, r0, 3, 3);
+       keyiter(k[4], r4, r3, r1, 4, 4);
+       keyiter(k[5], r0, r4, r2, 5, 5);
+       keyiter(k[6], r1, r0, r3, 6, 6);
+       keyiter(k[7], r2, r1, r4, 7, 7);
 
        keyiter(k[0], r3, r2, r0, 8, 8);
        keyiter(k[1], r4, r3, r1, 9, 9);
index 93359999c94bd48f198b5bc0bb1de1e9ac7d18e6..10c5b3b01ec47c2e45a044f3ad8c4180004f8c5a 100644 (file)
@@ -1168,11 +1168,6 @@ static inline int check_shash_op(const char *op, int err,
        return err;
 }
 
-static inline const void *sg_data(struct scatterlist *sg)
-{
-       return page_address(sg_page(sg)) + sg->offset;
-}
-
 /* Test one hash test vector in one configuration, using the shash API */
 static int test_shash_vec_cfg(const struct hash_testvec *vec,
                              const char *vec_name,
@@ -1230,7 +1225,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
                        return 0;
                if (cfg->nosimd)
                        crypto_disable_simd_for_test();
-               err = crypto_shash_digest(desc, sg_data(&tsgl->sgl[0]),
+               err = crypto_shash_digest(desc, sg_virt(&tsgl->sgl[0]),
                                          tsgl->sgl[0].length, result);
                if (cfg->nosimd)
                        crypto_reenable_simd_for_test();
@@ -1266,7 +1261,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
                    cfg->finalization_type == FINALIZATION_TYPE_FINUP) {
                        if (divs[i]->nosimd)
                                crypto_disable_simd_for_test();
-                       err = crypto_shash_finup(desc, sg_data(&tsgl->sgl[i]),
+                       err = crypto_shash_finup(desc, sg_virt(&tsgl->sgl[i]),
                                                 tsgl->sgl[i].length, result);
                        if (divs[i]->nosimd)
                                crypto_reenable_simd_for_test();
@@ -1278,7 +1273,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
                }
                if (divs[i]->nosimd)
                        crypto_disable_simd_for_test();
-               err = crypto_shash_update(desc, sg_data(&tsgl->sgl[i]),
+               err = crypto_shash_update(desc, sg_virt(&tsgl->sgl[i]),
                                          tsgl->sgl[i].length);
                if (divs[i]->nosimd)
                        crypto_reenable_simd_for_test();
@@ -4904,11 +4899,38 @@ static const struct alg_test_desc alg_test_descs[] = {
                }
        }, {
 #endif
-               .alg = "ecdh",
+#ifndef CONFIG_CRYPTO_FIPS
+               .alg = "ecdh-nist-p192",
                .test = alg_test_kpp,
                .fips_allowed = 1,
                .suite = {
-                       .kpp = __VECS(ecdh_tv_template)
+                       .kpp = __VECS(ecdh_p192_tv_template)
+               }
+       }, {
+#endif
+               .alg = "ecdh-nist-p256",
+               .test = alg_test_kpp,
+               .fips_allowed = 1,
+               .suite = {
+                       .kpp = __VECS(ecdh_p256_tv_template)
+               }
+       }, {
+               .alg = "ecdsa-nist-p192",
+               .test = alg_test_akcipher,
+               .suite = {
+                       .akcipher = __VECS(ecdsa_nist_p192_tv_template)
+               }
+       }, {
+               .alg = "ecdsa-nist-p256",
+               .test = alg_test_akcipher,
+               .suite = {
+                       .akcipher = __VECS(ecdsa_nist_p256_tv_template)
+               }
+       }, {
+               .alg = "ecdsa-nist-p384",
+               .test = alg_test_akcipher,
+               .suite = {
+                       .akcipher = __VECS(ecdsa_nist_p384_tv_template)
                }
        }, {
                .alg = "ecrdsa",
index ced56ea0c9b43b2be118bfdee7759eb10c9b6631..34e4a3db39917eab258e58336c84da806e0b518d 100644 (file)
@@ -566,6 +566,430 @@ static const struct akcipher_testvec rsa_tv_template[] = {
        }
 };
 
+/*
+ * ECDSA test vectors.
+ */
+static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
+       {
+       .key =
+       "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1"
+       "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68"
+       "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08"
+       "\x98",
+       .key_len = 49,
+       .params =
+       "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+       "\xce\x3d\x03\x01\x01",
+       .param_len = 21,
+       .m =
+       "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae"
+       "\x63\x85\xe7\x82",
+       .m_size = 20,
+       .algo = OID_id_ecdsa_with_sha1,
+       .c =
+       "\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91"
+       "\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10"
+       "\x68\x01\x9d\xba\xce\x83\x08\xef\x95\x52\x7b\xa0\x0f\xe4\x18\x86"
+       "\x80\x6f\xa5\x79\x77\xda\xd0",
+       .c_size = 55,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       }, {
+       .key =
+       "\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd"
+       "\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75"
+       "\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee"
+       "\xa3",
+       .key_len = 49,
+       .params =
+       "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+       "\xce\x3d\x03\x01\x01",
+       .param_len = 21,
+       .m =
+       "\x8d\xd6\xb8\x3e\xe5\xff\x23\xf6\x25\xa2\x43\x42\x74\x45\xa7\x40"
+       "\x3a\xff\x2f\xe1\xd3\xf6\x9f\xe8\x33\xcb\x12\x11",
+       .m_size = 28,
+       .algo = OID_id_ecdsa_with_sha224,
+       .c =
+       "\x30\x34\x02\x18\x5a\x8b\x82\x69\x7e\x8a\x0a\x09\x14\xf8\x11\x2b"
+       "\x55\xdc\xae\x37\x83\x7b\x12\xe6\xb6\x5b\xcb\xd4\x02\x18\x6a\x14"
+       "\x4f\x53\x75\xc8\x02\x48\xeb\xc3\x92\x0f\x1e\x72\xee\xc4\xa3\xe3"
+       "\x5c\x99\xdb\x92\x5b\x36",
+       .c_size = 54,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       }, {
+       .key =
+       "\x04\xe2\x51\x24\x9b\xf7\xb6\x32\x82\x39\x66\x3d\x5b\xec\x3b\xae"
+       "\x0c\xd5\xf2\x67\xd1\xc7\xe1\x02\xe4\xbf\x90\x62\xb8\x55\x75\x56"
+       "\x69\x20\x5e\xcb\x4e\xca\x33\xd6\xcb\x62\x6b\x94\xa9\xa2\xe9\x58"
+       "\x91",
+       .key_len = 49,
+       .params =
+       "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+       "\xce\x3d\x03\x01\x01",
+       .param_len = 21,
+       .m =
+       "\x35\xec\xa1\xa0\x9e\x14\xde\x33\x03\xb6\xf6\xbd\x0c\x2f\xb2\xfd"
+       "\x1f\x27\x82\xa5\xd7\x70\x3f\xef\xa0\x82\x69\x8e\x73\x31\x8e\xd7",
+       .m_size = 32,
+       .algo = OID_id_ecdsa_with_sha256,
+       .c =
+       "\x30\x35\x02\x18\x3f\x72\x3f\x1f\x42\xd2\x3f\x1d\x6b\x1a\x58\x56"
+       "\xf1\x8f\xf7\xfd\x01\x48\xfb\x5f\x72\x2a\xd4\x8f\x02\x19\x00\xb3"
+       "\x69\x43\xfd\x48\x19\x86\xcf\x32\xdd\x41\x74\x6a\x51\xc7\xd9\x7d"
+       "\x3a\x97\xd9\xcd\x1a\x6a\x49",
+       .c_size = 55,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       }, {
+       .key =
+       "\x04\x5a\x13\xfe\x68\x86\x4d\xf4\x17\xc7\xa4\xe5\x8c\x65\x57\xb7"
+       "\x03\x73\x26\x57\xfb\xe5\x58\x40\xd8\xfd\x49\x05\xab\xf1\x66\x1f"
+       "\xe2\x9d\x93\x9e\xc2\x22\x5a\x8b\x4f\xf3\x77\x22\x59\x7e\xa6\x4e"
+       "\x8b",
+       .key_len = 49,
+       .params =
+       "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+       "\xce\x3d\x03\x01\x01",
+       .param_len = 21,
+       .m =
+       "\x9d\x2e\x1a\x8f\xed\x6c\x4b\x61\xae\xac\xd5\x19\x79\xce\x67\xf9"
+       "\xa0\x34\xeb\xb0\x81\xf9\xd9\xdc\x6e\xb3\x5c\xa8\x69\xfc\x8a\x61"
+       "\x39\x81\xfb\xfd\x5c\x30\x6b\xa8\xee\xed\x89\xaf\xa3\x05\xe4\x78",
+       .m_size = 48,
+       .algo = OID_id_ecdsa_with_sha384,
+       .c =
+       "\x30\x35\x02\x19\x00\xf0\xa3\x38\xce\x2b\xf8\x9d\x1a\xcf\x7f\x34"
+       "\xb4\xb4\xe5\xc5\x00\xdd\x15\xbb\xd6\x8c\xa7\x03\x78\x02\x18\x64"
+       "\xbc\x5a\x1f\x82\x96\x61\xd7\xd1\x01\x77\x44\x5d\x53\xa4\x7c\x93"
+       "\x12\x3b\x3b\x28\xfb\x6d\xe1",
+       .c_size = 55,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       }, {
+       .key =
+       "\x04\xd5\xf2\x6e\xc3\x94\x5c\x52\xbc\xdf\x86\x6c\x14\xd1\xca\xea"
+       "\xcc\x72\x3a\x8a\xf6\x7a\x3a\x56\x36\x3b\xca\xc6\x94\x0e\x17\x1d"
+       "\x9e\xa0\x58\x28\xf9\x4b\xe6\xd1\xa5\x44\x91\x35\x0d\xe7\xf5\x11"
+       "\x57",
+       .key_len = 49,
+       .params =
+       "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+       "\xce\x3d\x03\x01\x01",
+       .param_len = 21,
+       .m =
+       "\xd5\x4b\xe9\x36\xda\xd8\x6e\xc0\x50\x03\xbe\x00\x43\xff\xf0\x23"
+       "\xac\xa2\x42\xe7\x37\x77\x79\x52\x8f\x3e\xc0\x16\xc1\xfc\x8c\x67"
+       "\x16\xbc\x8a\x5d\x3b\xd3\x13\xbb\xb6\xc0\x26\x1b\xeb\x33\xcc\x70"
+       "\x4a\xf2\x11\x37\xe8\x1b\xba\x55\xac\x69\xe1\x74\x62\x7c\x6e\xb5",
+       .m_size = 64,
+       .algo = OID_id_ecdsa_with_sha512,
+       .c =
+       "\x30\x35\x02\x19\x00\x88\x5b\x8f\x59\x43\xbf\xcf\xc6\xdd\x3f\x07"
+       "\x87\x12\xa0\xd4\xac\x2b\x11\x2d\x1c\xb6\x06\xc9\x6c\x02\x18\x73"
+       "\xb4\x22\x9a\x98\x73\x3c\x83\xa9\x14\x2a\x5e\xf5\xe5\xfb\x72\x28"
+       "\x6a\xdf\x97\xfd\x82\x76\x24",
+       .c_size = 55,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       },
+};
+
+static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
+       {
+       .key =
+       "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41"
+       "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9"
+       "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc"
+       "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8"
+       "\xaf",
+       .key_len = 65,
+       .params =
+       "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+       "\xce\x3d\x03\x01\x07",
+       .param_len = 21,
+       .m =
+       "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c"
+       "\x0b\xde\x6a\x42",
+       .m_size = 20,
+       .algo = OID_id_ecdsa_with_sha1,
+       .c =
+       "\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7"
+       "\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a"
+       "\x52\x0e\x48\x4d\xcc\x02\x21\x00\xd7\xe4\xef\x52\x66\xd3\x5b\x9d"
+       "\x8a\xfa\x54\x93\x29\xa7\x70\x86\xf1\x03\x03\xf3\x3b\xe2\x73\xf7"
+       "\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad",
+       .c_size = 72,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       }, {
+       .key =
+       "\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9"
+       "\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0"
+       "\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88"
+       "\x43\x13\xbf\xf3\x1c\x05\x1a\x14\x18\x09\x3f\xd6\x28\x3e\xc5\xa0"
+       "\xd4",
+       .key_len = 65,
+       .params =
+       "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+       "\xce\x3d\x03\x01\x07",
+       .param_len = 21,
+       .m =
+       "\x1a\x15\xbc\xa3\xe4\xed\x3a\xb8\x23\x67\xc6\xc4\x34\xf8\x6c\x41"
+       "\x04\x0b\xda\xc5\x77\xfa\x1c\x2d\xe6\x2c\x3b\xe0",
+       .m_size = 28,
+       .algo = OID_id_ecdsa_with_sha224,
+       .c =
+       "\x30\x44\x02\x20\x20\x43\xfa\xc0\x9f\x9d\x7b\xe7\xae\xce\x77\x59"
+       "\x1a\xdb\x59\xd5\x34\x62\x79\xcb\x6a\x91\x67\x2e\x7d\x25\xd8\x25"
+       "\xf5\x81\xd2\x1e\x02\x20\x5f\xf8\x74\xf8\x57\xd0\x5e\x54\x76\x20"
+       "\x4a\x77\x22\xec\xc8\x66\xbf\x50\x05\x58\x39\x0e\x26\x92\xce\xd5"
+       "\x2e\x8b\xde\x5a\x04\x0e",
+       .c_size = 70,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       }, {
+       .key =
+       "\x04\xf1\xea\xc4\x53\xf3\xb9\x0e\x9f\x7e\xad\xe3\xea\xd7\x0e\x0f"
+       "\xd6\x98\x9a\xca\x92\x4d\x0a\x80\xdb\x2d\x45\xc7\xec\x4b\x97\x00"
+       "\x2f\xe9\x42\x6c\x29\xdc\x55\x0e\x0b\x53\x12\x9b\x2b\xad\x2c\xe9"
+       "\x80\xe6\xc5\x43\xc2\x1d\x5e\xbb\x65\x21\x50\xb6\x37\xb0\x03\x8e"
+       "\xb8",
+       .key_len = 65,
+       .params =
+       "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+       "\xce\x3d\x03\x01\x07",
+       .param_len = 21,
+       .m =
+       "\x8f\x43\x43\x46\x64\x8f\x6b\x96\xdf\x89\xdd\xa9\x01\xc5\x17\x6b"
+       "\x10\xa6\xd8\x39\x61\xdd\x3c\x1a\xc8\x8b\x59\xb2\xdc\x32\x7a\xa4",
+       .m_size = 32,
+       .algo = OID_id_ecdsa_with_sha256,
+       .c =
+       "\x30\x45\x02\x20\x08\x31\xfa\x74\x0d\x1d\x21\x5d\x09\xdc\x29\x63"
+       "\xa8\x1a\xad\xfc\xac\x44\xc3\xe8\x24\x11\x2d\xa4\x91\xdc\x02\x67"
+       "\xdc\x0c\xd0\x82\x02\x21\x00\xbd\xff\xce\xee\x42\xc3\x97\xff\xf9"
+       "\xa9\x81\xac\x4a\x50\xd0\x91\x0a\x6e\x1b\xc4\xaf\xe1\x83\xc3\x4f"
+       "\x2a\x65\x35\x23\xe3\x1d\xfa",
+       .c_size = 71,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       }, {
+       .key =
+       "\x04\xc5\xc6\xea\x60\xc9\xce\xad\x02\x8d\xf5\x3e\x24\xe3\x52\x1d"
+       "\x28\x47\x3b\xc3\x6b\xa4\x99\x35\x99\x11\x88\x88\xc8\xf4\xee\x7e"
+       "\x8c\x33\x8f\x41\x03\x24\x46\x2b\x1a\x82\xf9\x9f\xe1\x97\x1b\x00"
+       "\xda\x3b\x24\x41\xf7\x66\x33\x58\x3d\x3a\x81\xad\xcf\x16\xe9\xe2"
+       "\x7c",
+       .key_len = 65,
+       .params =
+       "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+       "\xce\x3d\x03\x01\x07",
+       .param_len = 21,
+       .m =
+       "\x3e\x78\x70\xfb\xcd\x66\xba\x91\xa1\x79\xff\x1e\x1c\x6b\x78\xe6"
+       "\xc0\x81\x3a\x65\x97\x14\x84\x36\x14\x1a\x9a\xb7\xc5\xab\x84\x94"
+       "\x5e\xbb\x1b\x34\x71\xcb\x41\xe1\xf6\xfc\x92\x7b\x34\xbb\x86\xbb",
+       .m_size = 48,
+       .algo = OID_id_ecdsa_with_sha384,
+       .c =
+       "\x30\x46\x02\x21\x00\x8e\xf3\x6f\xdc\xf8\x69\xa6\x2e\xd0\x2e\x95"
+       "\x54\xd1\x95\x64\x93\x08\xb2\x6b\x24\x94\x48\x46\x5e\xf2\xe4\x6c"
+       "\xc7\x94\xb1\xd5\xfe\x02\x21\x00\xeb\xa7\x80\x26\xdc\xf9\x3a\x44"
+       "\x19\xfb\x5f\x92\xf4\xc9\x23\x37\x69\xf4\x3b\x4f\x47\xcf\x9b\x16"
+       "\xc0\x60\x11\x92\xdc\x17\x89\x12",
+       .c_size = 72,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       }, {
+       .key =
+       "\x04\xd7\x27\x46\x49\xf6\x26\x85\x12\x40\x76\x8e\xe2\xe6\x2a\x7a"
+       "\x83\xb1\x4e\x7a\xeb\x3b\x5c\x67\x4a\xb5\xa4\x92\x8c\x69\xff\x38"
+       "\xee\xd9\x4e\x13\x29\x59\xad\xde\x6b\xbb\x45\x31\xee\xfd\xd1\x1b"
+       "\x64\xd3\xb5\xfc\xaf\x9b\x4b\x88\x3b\x0e\xb7\xd6\xdf\xf1\xd5\x92"
+       "\xbf",
+       .key_len = 65,
+       .params =
+       "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+       "\xce\x3d\x03\x01\x07",
+       .param_len = 21,
+       .m =
+       "\x57\xb7\x9e\xe9\x05\x0a\x8c\x1b\xc9\x13\xe5\x4a\x24\xc7\xe2\xe9"
+       "\x43\xc3\xd1\x76\x62\xf4\x98\x1a\x9c\x13\xb0\x20\x1b\xe5\x39\xca"
+       "\x4f\xd9\x85\x34\x95\xa2\x31\xbc\xbb\xde\xdd\x76\xbb\x61\xe3\xcf"
+       "\x9d\xc0\x49\x7a\xf3\x7a\xc4\x7d\xa8\x04\x4b\x8d\xb4\x4d\x5b\xd6",
+       .m_size = 64,
+       .algo = OID_id_ecdsa_with_sha512,
+       .c =
+       "\x30\x45\x02\x21\x00\xb8\x6d\x87\x81\x43\xdf\xfb\x9f\x40\xea\x44"
+       "\x81\x00\x4e\x29\x08\xed\x8c\x73\x30\x6c\x22\xb3\x97\x76\xf6\x04"
+       "\x99\x09\x37\x4d\xfa\x02\x20\x1e\xb9\x75\x31\xf6\x04\xa5\x4d\xf8"
+       "\x00\xdd\xab\xd4\xc0\x2b\xe6\x5c\xad\xc3\x78\x1c\xc2\xc1\x19\x76"
+       "\x31\x79\x4a\xe9\x81\x6a\xee",
+       .c_size = 71,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       },
+};
+
+static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
+       {
+       .key = /* secp384r1(sha1) */
+       "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1"
+       "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f"
+       "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42"
+       "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e"
+       "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e"
+       "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3"
+       "\xf1",
+       .key_len = 97,
+       .params =
+       "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+       "\x00\x22",
+       .param_len = 18,
+       .m =
+       "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22"
+       "\x3a\x69\xc1\x93",
+       .m_size = 20,
+       .algo = OID_id_ecdsa_with_sha1,
+       .c =
+       "\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07"
+       "\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1"
+       "\x32\x3c\xce\x70\x2b\x78\xf1\x4c\x26\xe6\x5b\x86\xcf\xec\x7c\x7e"
+       "\xd0\x87\xd7\xd7\x6e\x02\x31\x00\xcd\xbb\x7e\x81\x5d\x8f\x63\xc0"
+       "\x5f\x63\xb1\xbe\x5e\x4c\x0e\xa1\xdf\x28\x8c\x1b\xfa\xf9\x95\x88"
+       "\x74\xa0\x0f\xbf\xaf\xc3\x36\x76\x4a\xa1\x59\xf1\x1c\xa4\x58\x26"
+       "\x79\x12\x2a\xb7\xc5\x15\x92\xc5",
+       .c_size = 104,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       }, {
+       .key = /* secp384r1(sha224) */
+       "\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0"
+       "\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18"
+       "\x3e\xe8\x29\x6e\xf6\xe4\xb5\x8e\xc7\x4a\xc2\x5f\x37\x13\x99\x05"
+       "\xb6\xa4\x9d\xf9\xfb\x79\x41\xe7\xd7\x96\x9f\x73\x3b\x39\x43\xdc"
+       "\xda\xf4\x06\xb9\xa5\x29\x01\x9d\x3b\xe1\xd8\x68\x77\x2a\xf4\x50"
+       "\x6b\x93\x99\x6c\x66\x4c\x42\x3f\x65\x60\x6c\x1c\x0b\x93\x9b\x9d"
+       "\xe0",
+       .key_len = 97,
+       .params =
+       "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+       "\x00\x22",
+       .param_len = 18,
+       .m =
+       "\x12\x80\xb6\xeb\x25\xe2\x3d\xf0\x21\x32\x96\x17\x3a\x38\x39\xfd"
+       "\x1f\x05\x34\x7b\xb8\xf9\x71\x66\x03\x4f\xd5\xe5",
+       .m_size = 28,
+       .algo = OID_id_ecdsa_with_sha224,
+       .c =
+       "\x30\x66\x02\x31\x00\x8a\x51\x84\xce\x13\x1e\xd2\xdc\xec\xcb\xe4"
+       "\x89\x47\xb2\xf7\xbc\x97\xf1\xc8\x72\x26\xcf\x5a\x5e\xc5\xda\xb4"
+       "\xe3\x93\x07\xe0\x99\xc9\x9c\x11\xb8\x10\x01\xc5\x41\x3f\xdd\x15"
+       "\x1b\x68\x2b\x9d\x8b\x02\x31\x00\x8b\x03\x2c\xfc\x1f\xd1\xa9\xa4"
+       "\x4b\x00\x08\x31\x6c\xf5\xd5\xf6\xdf\xd8\x68\xa2\x64\x42\x65\xf3"
+       "\x4d\xd0\xc6\x6e\xb0\xe9\xfc\x14\x9f\x19\xd0\x42\x8b\x93\xc2\x11"
+       "\x88\x2b\x82\x26\x5e\x1c\xda\xfb",
+       .c_size = 104,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       }, {
+       .key = /* secp384r1(sha256) */
+       "\x04\xee\xd6\xda\x3e\x94\x90\x00\x27\xed\xf8\x64\x55\xd6\x51\x9a"
+       "\x1f\x52\x00\x63\x78\xf1\xa9\xfd\x75\x4c\x9e\xb2\x20\x1a\x91\x5a"
+       "\xba\x7a\xa3\xe5\x6c\xb6\x25\x68\x4b\xe8\x13\xa6\x54\x87\x2c\x0e"
+       "\xd0\x83\x95\xbc\xbf\xc5\x28\x4f\x77\x1c\x46\xa6\xf0\xbc\xd4\xa4"
+       "\x8d\xc2\x8f\xb3\x32\x37\x40\xd6\xca\xf8\xae\x07\x34\x52\x39\x52"
+       "\x17\xc3\x34\x29\xd6\x40\xea\x5c\xb9\x3f\xfb\x32\x2e\x12\x33\xbc"
+       "\xab",
+       .key_len = 97,
+       .params =
+       "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+       "\x00\x22",
+       .param_len = 18,
+       .m =
+       "\xaa\xe7\xfd\x03\x26\xcb\x94\x71\xe4\xce\x0f\xc5\xff\xa6\x29\xa3"
+       "\xe1\xcc\x4c\x35\x4e\xde\xca\x80\xab\x26\x0c\x25\xe6\x68\x11\xc2",
+       .m_size = 32,
+       .algo = OID_id_ecdsa_with_sha256,
+       .c =
+       "\x30\x64\x02\x30\x08\x09\x12\x9d\x6e\x96\x64\xa6\x8e\x3f\x7e\xce"
+       "\x0a\x9b\xaa\x59\xcc\x47\x53\x87\xbc\xbd\x83\x3f\xaf\x06\x3f\x84"
+       "\x04\xe2\xf9\x67\xb6\xc6\xfc\x70\x2e\x66\x3c\x77\xc8\x8d\x2c\x79"
+       "\x3a\x8e\x32\xc4\x02\x30\x40\x34\xb8\x90\xa9\x80\xab\x47\x26\xa2"
+       "\xb0\x89\x42\x0a\xda\xd9\xdd\xce\xbc\xb2\x97\xf4\x9c\xf3\x15\x68"
+       "\xc0\x75\x3e\x23\x5e\x36\x4f\x8d\xde\x1e\x93\x8d\x95\xbb\x10\x0e"
+       "\xf4\x1f\x39\xca\x4d\x43",
+       .c_size = 102,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       }, {
+       .key = /* secp384r1(sha384) */
+       "\x04\x3a\x2f\x62\xe7\x1a\xcf\x24\xd0\x0b\x7c\xe0\xed\x46\x0a\x4f"
+       "\x74\x16\x43\xe9\x1a\x25\x7c\x55\xff\xf0\x29\x68\x66\x20\x91\xf9"
+       "\xdb\x2b\xf6\xb3\x6c\x54\x01\xca\xc7\x6a\x5c\x0d\xeb\x68\xd9\x3c"
+       "\xf1\x01\x74\x1f\xf9\x6c\xe5\x5b\x60\xe9\x7f\x5d\xb3\x12\x80\x2a"
+       "\xd8\x67\x92\xc9\x0e\x4c\x4c\x6b\xa1\xb2\xa8\x1e\xac\x1c\x97\xd9"
+       "\x21\x67\xe5\x1b\x5a\x52\x31\x68\xd6\xee\xf0\x19\xb0\x55\xed\x89"
+       "\x9e",
+       .key_len = 97,
+       .params =
+       "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+       "\x00\x22",
+       .param_len = 18,
+       .m =
+       "\x8d\xf2\xc0\xe9\xa8\xf3\x8e\x44\xc4\x8c\x1a\xa0\xb8\xd7\x17\xdf"
+       "\xf2\x37\x1b\xc6\xe3\xf5\x62\xcc\x68\xf5\xd5\x0b\xbf\x73\x2b\xb1"
+       "\xb0\x4c\x04\x00\x31\xab\xfe\xc8\xd6\x09\xc8\xf2\xea\xd3\x28\xff",
+       .m_size = 48,
+       .algo = OID_id_ecdsa_with_sha384,
+       .c =
+       "\x30\x66\x02\x31\x00\x9b\x28\x68\xc0\xa1\xea\x8c\x50\xee\x2e\x62"
+       "\x35\x46\xfa\x00\xd8\x2d\x7a\x91\x5f\x49\x2d\x22\x08\x29\xe6\xfb"
+       "\xca\x8c\xd6\xb6\xb4\x3b\x1f\x07\x8f\x15\x02\xfe\x1d\xa2\xa4\xc8"
+       "\xf2\xea\x9d\x11\x1f\x02\x31\x00\xfc\x50\xf6\x43\xbd\x50\x82\x0e"
+       "\xbf\xe3\x75\x24\x49\xac\xfb\xc8\x71\xcd\x8f\x18\x99\xf0\x0f\x13"
+       "\x44\x92\x8c\x86\x99\x65\xb3\x97\x96\x17\x04\xc9\x05\x77\xf1\x8e"
+       "\xab\x8d\x4e\xde\xe6\x6d\x9b\x66",
+       .c_size = 104,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       }, {
+       .key = /* secp384r1(sha512) */
+       "\x04\xb4\xe7\xc1\xeb\x64\x25\x22\x46\xc3\x86\x61\x80\xbe\x1e\x46"
+       "\xcb\xf6\x05\xc2\xee\x73\x83\xbc\xea\x30\x61\x4d\x40\x05\x41\xf4"
+       "\x8c\xe3\x0e\x5c\xf0\x50\xf2\x07\x19\xe8\x4f\x25\xbe\xee\x0c\x95"
+       "\x54\x36\x86\xec\xc2\x20\x75\xf3\x89\xb5\x11\xa1\xb7\xf5\xaf\xbe"
+       "\x81\xe4\xc3\x39\x06\xbd\xe4\xfe\x68\x1c\x6d\x99\x2b\x1b\x63\xfa"
+       "\xdf\x42\x5c\xc2\x5a\xc7\x0c\xf4\x15\xf7\x1b\xa3\x2e\xd7\x00\xac"
+       "\xa3",
+       .key_len = 97,
+       .params =
+       "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+       "\x00\x22",
+       .param_len = 18,
+       .m =
+       "\xe8\xb7\x52\x7d\x1a\x44\x20\x05\x53\x6b\x3a\x68\xf2\xe7\x6c\xa1"
+       "\xae\x9d\x84\xbb\xba\x52\x43\x3e\x2c\x42\x78\x49\xbf\x78\xb2\x71"
+       "\xeb\xe1\xe0\xe8\x42\x7b\x11\xad\x2b\x99\x05\x1d\x36\xe6\xac\xfc"
+       "\x55\x73\xf0\x15\x63\x39\xb8\x6a\x6a\xc5\x91\x5b\xca\x6a\xa8\x0e",
+       .m_size = 64,
+       .algo = OID_id_ecdsa_with_sha512,
+       .c =
+       "\x30\x63\x02\x2f\x1d\x20\x94\x77\xfe\x31\xfa\x4d\xc6\xef\xda\x02"
+       "\xe7\x0f\x52\x9a\x02\xde\x93\xe8\x83\xe4\x84\x4c\xfc\x6f\x80\xe3"
+       "\xaf\xb3\xd9\xdc\x2b\x43\x0e\x6a\xb3\x53\x6f\x3e\xb3\xc7\xa8\xb3"
+       "\x17\x77\xd1\x02\x30\x63\xf6\xf0\x3d\x5f\x5f\x99\x3f\xde\x3a\x3d"
+       "\x16\xaf\xb4\x52\x6a\xec\x63\xe3\x0c\xec\x50\xdc\xcc\xc4\x6a\x03"
+       "\x5f\x8d\x7a\xf9\xfb\x34\xe4\x8b\x80\xa5\xb6\xda\x2c\x4e\x45\xcf"
+       "\x3c\x93\xff\x50\x5d",
+       .c_size = 101,
+       .public_key_vec = true,
+       .siggen_sigver_test = true,
+       },
+};
+
 /*
  * EC-RDSA test vectors are generated by gost-engine.
  */
@@ -2261,19 +2685,17 @@ static const struct kpp_testvec curve25519_tv_template[] = {
 }
 };
 
-static const struct kpp_testvec ecdh_tv_template[] = {
-       {
 #ifndef CONFIG_CRYPTO_FIPS
+static const struct kpp_testvec ecdh_p192_tv_template[] = {
+       {
        .secret =
 #ifdef __LITTLE_ENDIAN
        "\x02\x00" /* type */
-       "\x20\x00" /* len */
-       "\x01\x00" /* curve_id */
+       "\x1e\x00" /* len */
        "\x18\x00" /* key_size */
 #else
        "\x00\x02" /* type */
-       "\x00\x20" /* len */
-       "\x00\x01" /* curve_id */
+       "\x00\x1e" /* len */
        "\x00\x18" /* key_size */
 #endif
        "\xb5\x05\xb1\x71\x1e\xbf\x8c\xda"
@@ -2301,18 +2723,20 @@ static const struct kpp_testvec ecdh_tv_template[] = {
        .b_public_size = 48,
        .expected_a_public_size = 48,
        .expected_ss_size = 24
-       }, {
+       }
+};
 #endif
+
+static const struct kpp_testvec ecdh_p256_tv_template[] = {
+       {
        .secret =
 #ifdef __LITTLE_ENDIAN
        "\x02\x00" /* type */
-       "\x28\x00" /* len */
-       "\x02\x00" /* curve_id */
+       "\x26\x00" /* len */
        "\x20\x00" /* key_size */
 #else
        "\x00\x02" /* type */
-       "\x00\x28" /* len */
-       "\x00\x02" /* curve_id */
+       "\x00\x26" /* len */
        "\x00\x20" /* key_size */
 #endif
        "\x24\xd1\x21\xeb\xe5\xcf\x2d\x83"
@@ -2350,25 +2774,21 @@ static const struct kpp_testvec ecdh_tv_template[] = {
        .secret =
 #ifdef __LITTLE_ENDIAN
        "\x02\x00" /* type */
-       "\x08\x00" /* len */
-       "\x02\x00" /* curve_id */
+       "\x06\x00" /* len */
        "\x00\x00", /* key_size */
 #else
        "\x00\x02" /* type */
-       "\x00\x08" /* len */
-       "\x00\x02" /* curve_id */
+       "\x00\x06" /* len */
        "\x00\x00", /* key_size */
 #endif
        .b_secret =
 #ifdef __LITTLE_ENDIAN
        "\x02\x00" /* type */
-       "\x28\x00" /* len */
-       "\x02\x00" /* curve_id */
+       "\x26\x00" /* len */
        "\x20\x00" /* key_size */
 #else
        "\x00\x02" /* type */
-       "\x00\x28" /* len */
-       "\x00\x02" /* curve_id */
+       "\x00\x26" /* len */
        "\x00\x20" /* key_size */
 #endif
        "\x24\xd1\x21\xeb\xe5\xcf\x2d\x83"
index ee240d36f947c997afd84ca585ecd07a8ad91f71..46bd50f3c3a42c119e1523cb7f1f6cb64fb8cbe6 100644 (file)
@@ -548,12 +548,10 @@ ssize_t spk_msg_set(enum msg_index_t index, char *text, size_t length)
        if ((index < MSG_FIRST_INDEX) || (index >= MSG_LAST_INDEX))
                return -EINVAL;
 
-       newstr = kmalloc(length + 1, GFP_KERNEL);
+       newstr = kmemdup_nul(text, length, GFP_KERNEL);
        if (!newstr)
                return -ENOMEM;
 
-       memcpy(newstr, text, length);
-       newstr[length] = '\0';
        if (index >= MSG_FORMATTED_START &&
            index <= MSG_FORMATTED_END &&
            !fmt_validate(speakup_default_msgs[index], newstr)) {
index 768a6b4d23680b775fd5f3343a998c2c6c47ac77..4e2d76b8b697ecb7f7f14d4dfaab4a9a3e64909f 100644 (file)
@@ -544,9 +544,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
                        return -ENODEV;
 
 #if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
-               /* If NMI wants to wake up CPU0, start CPU0. */
-               if (wakeup_cpu0())
-                       start_cpu0();
+               cond_wakeup_cpu0();
 #endif
        }
 
index c119736ca56acc79d4f7faee931f58673aafd457..63d2c4339689730d5b4b80495116271ebf653da9 100644 (file)
@@ -1506,6 +1506,12 @@ static void binder_free_transaction(struct binder_transaction *t)
 
        if (target_proc) {
                binder_inner_proc_lock(target_proc);
+               target_proc->outstanding_txns--;
+               if (target_proc->outstanding_txns < 0)
+                       pr_warn("%s: Unexpected outstanding_txns %d\n",
+                               __func__, target_proc->outstanding_txns);
+               if (!target_proc->outstanding_txns && target_proc->is_frozen)
+                       wake_up_interruptible_all(&target_proc->freeze_wait);
                if (t->buffer)
                        t->buffer->transaction = NULL;
                binder_inner_proc_unlock(target_proc);
@@ -2331,10 +2337,11 @@ static int binder_fixup_parent(struct binder_transaction *t,
  * If the @thread parameter is not NULL, the transaction is always queued
  * to the waitlist of that specific thread.
  *
- * Return:     true if the transactions was successfully queued
- *             false if the target process or thread is dead
+ * Return:     0 if the transaction was successfully queued
+ *             BR_DEAD_REPLY if the target process or thread is dead
+ *             BR_FROZEN_REPLY if the target process or thread is frozen
  */
-static bool binder_proc_transaction(struct binder_transaction *t,
+static int binder_proc_transaction(struct binder_transaction *t,
                                    struct binder_proc *proc,
                                    struct binder_thread *thread)
 {
@@ -2353,11 +2360,16 @@ static bool binder_proc_transaction(struct binder_transaction *t,
        }
 
        binder_inner_proc_lock(proc);
+       if (proc->is_frozen) {
+               proc->sync_recv |= !oneway;
+               proc->async_recv |= oneway;
+       }
 
-       if (proc->is_dead || (thread && thread->is_dead)) {
+       if ((proc->is_frozen && !oneway) || proc->is_dead ||
+                       (thread && thread->is_dead)) {
                binder_inner_proc_unlock(proc);
                binder_node_unlock(node);
-               return false;
+               return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
        }
 
        if (!thread && !pending_async)
@@ -2373,10 +2385,11 @@ static bool binder_proc_transaction(struct binder_transaction *t,
        if (!pending_async)
                binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
 
+       proc->outstanding_txns++;
        binder_inner_proc_unlock(proc);
        binder_node_unlock(node);
 
-       return true;
+       return 0;
 }
 
 /**
@@ -3007,19 +3020,25 @@ static void binder_transaction(struct binder_proc *proc,
                        goto err_bad_object_type;
                }
        }
-       tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+       if (t->buffer->oneway_spam_suspect)
+               tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
+       else
+               tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
        t->work.type = BINDER_WORK_TRANSACTION;
 
        if (reply) {
                binder_enqueue_thread_work(thread, tcomplete);
                binder_inner_proc_lock(target_proc);
-               if (target_thread->is_dead) {
+               if (target_thread->is_dead || target_proc->is_frozen) {
+                       return_error = target_thread->is_dead ?
+                               BR_DEAD_REPLY : BR_FROZEN_REPLY;
                        binder_inner_proc_unlock(target_proc);
                        goto err_dead_proc_or_thread;
                }
                BUG_ON(t->buffer->async_transaction != 0);
                binder_pop_transaction_ilocked(target_thread, in_reply_to);
                binder_enqueue_thread_work_ilocked(target_thread, &t->work);
+               target_proc->outstanding_txns++;
                binder_inner_proc_unlock(target_proc);
                wake_up_interruptible_sync(&target_thread->wait);
                binder_free_transaction(in_reply_to);
@@ -3038,7 +3057,9 @@ static void binder_transaction(struct binder_proc *proc,
                t->from_parent = thread->transaction_stack;
                thread->transaction_stack = t;
                binder_inner_proc_unlock(proc);
-               if (!binder_proc_transaction(t, target_proc, target_thread)) {
+               return_error = binder_proc_transaction(t,
+                               target_proc, target_thread);
+               if (return_error) {
                        binder_inner_proc_lock(proc);
                        binder_pop_transaction_ilocked(thread, t);
                        binder_inner_proc_unlock(proc);
@@ -3048,7 +3069,8 @@ static void binder_transaction(struct binder_proc *proc,
                BUG_ON(target_node == NULL);
                BUG_ON(t->buffer->async_transaction != 1);
                binder_enqueue_thread_work(thread, tcomplete);
-               if (!binder_proc_transaction(t, target_proc, NULL))
+               return_error = binder_proc_transaction(t, target_proc, NULL);
+               if (return_error)
                        goto err_dead_proc_or_thread;
        }
        if (target_thread)
@@ -3065,7 +3087,6 @@ static void binder_transaction(struct binder_proc *proc,
        return;
 
 err_dead_proc_or_thread:
-       return_error = BR_DEAD_REPLY;
        return_error_line = __LINE__;
        binder_dequeue_work(proc, tcomplete);
 err_translate_failed:
@@ -3696,7 +3717,7 @@ static int binder_wait_for_work(struct binder_thread *thread,
                binder_inner_proc_lock(proc);
                list_del_init(&thread->waiting_thread_node);
                if (signal_pending(current)) {
-                       ret = -ERESTARTSYS;
+                       ret = -EINTR;
                        break;
                }
        }
@@ -3875,9 +3896,14 @@ retry:
 
                        binder_stat_br(proc, thread, cmd);
                } break;
-               case BINDER_WORK_TRANSACTION_COMPLETE: {
+               case BINDER_WORK_TRANSACTION_COMPLETE:
+               case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
+                       if (proc->oneway_spam_detection_enabled &&
+                                  w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
+                               cmd = BR_ONEWAY_SPAM_SUSPECT;
+                       else
+                               cmd = BR_TRANSACTION_COMPLETE;
                        binder_inner_proc_unlock(proc);
-                       cmd = BR_TRANSACTION_COMPLETE;
                        kfree(w);
                        binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
                        if (put_user(cmd, (uint32_t __user *)ptr))
@@ -4298,6 +4324,9 @@ static void binder_free_proc(struct binder_proc *proc)
 
        BUG_ON(!list_empty(&proc->todo));
        BUG_ON(!list_empty(&proc->delivered_death));
+       if (proc->outstanding_txns)
+               pr_warn("%s: Unexpected outstanding_txns %d\n",
+                       __func__, proc->outstanding_txns);
        device = container_of(proc->context, struct binder_device, context);
        if (refcount_dec_and_test(&device->ref)) {
                kfree(proc->context->name);
@@ -4359,6 +4388,7 @@ static int binder_thread_release(struct binder_proc *proc,
                             (t->to_thread == thread) ? "in" : "out");
 
                if (t->to_thread == thread) {
+                       thread->proc->outstanding_txns--;
                        t->to_proc = NULL;
                        t->to_thread = NULL;
                        if (t->buffer) {
@@ -4609,6 +4639,76 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
        return 0;
 }
 
+static int binder_ioctl_freeze(struct binder_freeze_info *info,
+                              struct binder_proc *target_proc)
+{
+       int ret = 0;
+
+       if (!info->enable) {
+               binder_inner_proc_lock(target_proc);
+               target_proc->sync_recv = false;
+               target_proc->async_recv = false;
+               target_proc->is_frozen = false;
+               binder_inner_proc_unlock(target_proc);
+               return 0;
+       }
+
+       /*
+        * Freezing the target. Prevent new transactions by
+        * setting frozen state. If timeout specified, wait
+        * for transactions to drain.
+        */
+       binder_inner_proc_lock(target_proc);
+       target_proc->sync_recv = false;
+       target_proc->async_recv = false;
+       target_proc->is_frozen = true;
+       binder_inner_proc_unlock(target_proc);
+
+       if (info->timeout_ms > 0)
+               ret = wait_event_interruptible_timeout(
+                       target_proc->freeze_wait,
+                       (!target_proc->outstanding_txns),
+                       msecs_to_jiffies(info->timeout_ms));
+
+       if (!ret && target_proc->outstanding_txns)
+               ret = -EAGAIN;
+
+       if (ret < 0) {
+               binder_inner_proc_lock(target_proc);
+               target_proc->is_frozen = false;
+               binder_inner_proc_unlock(target_proc);
+       }
+
+       return ret;
+}
+
+static int binder_ioctl_get_freezer_info(
+                               struct binder_frozen_status_info *info)
+{
+       struct binder_proc *target_proc;
+       bool found = false;
+
+       info->sync_recv = 0;
+       info->async_recv = 0;
+
+       mutex_lock(&binder_procs_lock);
+       hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+               if (target_proc->pid == info->pid) {
+                       found = true;
+                       binder_inner_proc_lock(target_proc);
+                       info->sync_recv |= target_proc->sync_recv;
+                       info->async_recv |= target_proc->async_recv;
+                       binder_inner_proc_unlock(target_proc);
+               }
+       }
+       mutex_unlock(&binder_procs_lock);
+
+       if (!found)
+               return -EINVAL;
+
+       return 0;
+}
+
 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        int ret;
@@ -4727,6 +4827,96 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                }
                break;
        }
+       case BINDER_FREEZE: {
+               struct binder_freeze_info info;
+               struct binder_proc **target_procs = NULL, *target_proc;
+               int target_procs_count = 0, i = 0;
+
+               ret = 0;
+
+               if (copy_from_user(&info, ubuf, sizeof(info))) {
+                       ret = -EFAULT;
+                       goto err;
+               }
+
+               mutex_lock(&binder_procs_lock);
+               hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+                       if (target_proc->pid == info.pid)
+                               target_procs_count++;
+               }
+
+               if (target_procs_count == 0) {
+                       mutex_unlock(&binder_procs_lock);
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               target_procs = kcalloc(target_procs_count,
+                                      sizeof(struct binder_proc *),
+                                      GFP_KERNEL);
+
+               if (!target_procs) {
+                       mutex_unlock(&binder_procs_lock);
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+                       if (target_proc->pid != info.pid)
+                               continue;
+
+                       binder_inner_proc_lock(target_proc);
+                       target_proc->tmp_ref++;
+                       binder_inner_proc_unlock(target_proc);
+
+                       target_procs[i++] = target_proc;
+               }
+               mutex_unlock(&binder_procs_lock);
+
+               for (i = 0; i < target_procs_count; i++) {
+                       if (ret >= 0)
+                               ret = binder_ioctl_freeze(&info,
+                                                         target_procs[i]);
+
+                       binder_proc_dec_tmpref(target_procs[i]);
+               }
+
+               kfree(target_procs);
+
+               if (ret < 0)
+                       goto err;
+               break;
+       }
+       case BINDER_GET_FROZEN_INFO: {
+               struct binder_frozen_status_info info;
+
+               if (copy_from_user(&info, ubuf, sizeof(info))) {
+                       ret = -EFAULT;
+                       goto err;
+               }
+
+               ret = binder_ioctl_get_freezer_info(&info);
+               if (ret < 0)
+                       goto err;
+
+               if (copy_to_user(ubuf, &info, sizeof(info))) {
+                       ret = -EFAULT;
+                       goto err;
+               }
+               break;
+       }
+       case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
+               uint32_t enable;
+
+               if (copy_from_user(&enable, ubuf, sizeof(enable))) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+               binder_inner_proc_lock(proc);
+               proc->oneway_spam_detection_enabled = (bool)enable;
+               binder_inner_proc_unlock(proc);
+               break;
+       }
        default:
                ret = -EINVAL;
                goto err;
@@ -4736,7 +4926,7 @@ err:
        if (thread)
                thread->looper_need_return = false;
        wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
-       if (ret && ret != -ERESTARTSYS)
+       if (ret && ret != -EINTR)
                pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
 err_unlocked:
        trace_binder_ioctl_done(ret);
@@ -4823,6 +5013,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
        get_task_struct(current->group_leader);
        proc->tsk = current->group_leader;
        INIT_LIST_HEAD(&proc->todo);
+       init_waitqueue_head(&proc->freeze_wait);
        proc->default_priority = task_nice(current);
        /* binderfs stashes devices in i_private */
        if (is_binderfs_device(nodp)) {
@@ -5035,6 +5226,9 @@ static void binder_deferred_release(struct binder_proc *proc)
        proc->tmp_ref++;
 
        proc->is_dead = true;
+       proc->is_frozen = false;
+       proc->sync_recv = false;
+       proc->async_recv = false;
        threads = 0;
        active_transactions = 0;
        while ((n = rb_first(&proc->threads))) {
@@ -5385,7 +5579,9 @@ static const char * const binder_return_strings[] = {
        "BR_FINISHED",
        "BR_DEAD_BINDER",
        "BR_CLEAR_DEATH_NOTIFICATION_DONE",
-       "BR_FAILED_REPLY"
+       "BR_FAILED_REPLY",
+       "BR_FROZEN_REPLY",
+       "BR_ONEWAY_SPAM_SUSPECT",
 };
 
 static const char * const binder_command_strings[] = {
index 7caf74ad24053a49327e07eeb902feb527376d90..340515f54498ce885be926afc35364e9412ae37a 100644 (file)
@@ -338,7 +338,7 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
        return vma;
 }
 
-static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 {
        /*
         * Find the amount and size of buffers allocated by the current caller;
@@ -366,13 +366,19 @@ static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 
        /*
         * Warn if this pid has more than 50 transactions, or more than 50% of
-        * async space (which is 25% of total buffer size).
+        * async space (which is 25% of total buffer size). Oneway spam is only
+        * detected when the threshold is exceeded.
         */
        if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                             "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
                              alloc->pid, pid, num_buffers, total_alloc_size);
+               if (!alloc->oneway_spam_detected) {
+                       alloc->oneway_spam_detected = true;
+                       return true;
+               }
        }
+       return false;
 }
 
 static struct binder_buffer *binder_alloc_new_buf_locked(
@@ -525,6 +531,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
        buffer->async_transaction = is_async;
        buffer->extra_buffers_size = extra_buffers_size;
        buffer->pid = pid;
+       buffer->oneway_spam_suspect = false;
        if (is_async) {
                alloc->free_async_space -= size + sizeof(struct binder_buffer);
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
@@ -536,7 +543,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
                         * of async space left (which is less than 10% of total
                         * buffer size).
                         */
-                       debug_low_async_space_locked(alloc, pid);
+                       buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
+               } else {
+                       alloc->oneway_spam_detected = false;
                }
        }
        return buffer;
index 6e8e001381af4ba2c2f73a2ab7bbca1feff5397f..7dea57a84c79b1eb2d77df14a14f9b7d272711bf 100644 (file)
@@ -26,6 +26,8 @@ struct binder_transaction;
  * @clear_on_free:      %true if buffer must be zeroed after use
  * @allow_user_free:    %true if user is allowed to free buffer
  * @async_transaction:  %true if buffer is in use for an async txn
+ * @oneway_spam_suspect: %true if total async allocate size just exceed
+ * spamming detect threshold
  * @debug_id:           unique ID for debugging
  * @transaction:        pointer to associated struct binder_transaction
  * @target_node:        struct binder_node associated with this buffer
@@ -45,7 +47,8 @@ struct binder_buffer {
        unsigned clear_on_free:1;
        unsigned allow_user_free:1;
        unsigned async_transaction:1;
-       unsigned debug_id:28;
+       unsigned oneway_spam_suspect:1;
+       unsigned debug_id:27;
 
        struct binder_transaction *transaction;
 
@@ -87,6 +90,8 @@ struct binder_lru_page {
  * @buffer_size:        size of address space specified via mmap
  * @pid:                pid for associated binder_proc (invariant after init)
  * @pages_high:         high watermark of offset in @pages
+ * @oneway_spam_detected: %true if oneway spam detection fired, clear that
+ * flag once the async buffer has returned to a healthy state
  *
  * Bookkeeping structure for per-proc address space management for binder
  * buffers. It is normally initialized during binder_init() and binder_mmap()
@@ -107,6 +112,7 @@ struct binder_alloc {
        uint32_t buffer_free;
        int pid;
        size_t pages_high;
+       bool oneway_spam_detected;
 };
 
 #ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
index 6cd79011e35d5d4c4499969ff37ba0a09c24736f..810c0b84d3f81374438c42b25cb7b9983ee96feb 100644 (file)
@@ -155,7 +155,7 @@ enum binder_stat_types {
 };
 
 struct binder_stats {
-       atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+       atomic_t br[_IOC_NR(BR_ONEWAY_SPAM_SUSPECT) + 1];
        atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
        atomic_t obj_created[BINDER_STAT_COUNT];
        atomic_t obj_deleted[BINDER_STAT_COUNT];
@@ -174,6 +174,7 @@ struct binder_work {
        enum binder_work_type {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
+               BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
                BINDER_WORK_RETURN_ERROR,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
@@ -367,9 +368,22 @@ struct binder_ref {
  *                        (protected by binder_deferred_lock)
  * @deferred_work:        bitmap of deferred work to perform
  *                        (protected by binder_deferred_lock)
+ * @outstanding_txns:     number of transactions to be transmitted before
+ *                        processes in freeze_wait are woken up
+ *                        (protected by @inner_lock)
  * @is_dead:              process is dead and awaiting free
  *                        when outstanding transactions are cleaned up
  *                        (protected by @inner_lock)
+ * @is_frozen:            process is frozen and unable to service
+ *                        binder transactions
+ *                        (protected by @inner_lock)
+ * @sync_recv:            process received sync transactions since last frozen
+ *                        (protected by @inner_lock)
+ * @async_recv:           process received async transactions since last frozen
+ *                        (protected by @inner_lock)
+ * @freeze_wait:          waitqueue of processes waiting for all outstanding
+ *                        transactions to be processed
+ *                        (protected by @inner_lock)
  * @todo:                 list of work for this process
  *                        (protected by @inner_lock)
  * @stats:                per-process binder statistics
@@ -396,6 +410,8 @@ struct binder_ref {
  * @outer_lock:           no nesting under innor or node lock
  *                        Lock order: 1) outer, 2) node, 3) inner
  * @binderfs_entry:       process-specific binderfs log file
+ * @oneway_spam_detection_enabled: process enabled oneway spam detection
+ *                        or not
  *
  * Bookkeeping structure for binder processes
  */
@@ -410,7 +426,12 @@ struct binder_proc {
        struct task_struct *tsk;
        struct hlist_node deferred_work_node;
        int deferred_work;
+       int outstanding_txns;
        bool is_dead;
+       bool is_frozen;
+       bool sync_recv;
+       bool async_recv;
+       wait_queue_head_t freeze_wait;
 
        struct list_head todo;
        struct binder_stats stats;
@@ -426,6 +447,7 @@ struct binder_proc {
        spinlock_t inner_lock;
        spinlock_t outer_lock;
        struct dentry *binderfs_entry;
+       bool oneway_spam_detection_enabled;
 };
 
 /**
index b574cce98dc3686ce473b1f2cc132b4de8616ed2..422753d52244bd446664e2b68f427f9a3a6ff32b 100644 (file)
@@ -2054,7 +2054,7 @@ static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb)
        }
        submitted++;
        ATM_SKB(skb)->vcc = vcc;
-       tasklet_disable(&ENI_DEV(vcc->dev)->task);
+       tasklet_disable_in_atomic(&ENI_DEV(vcc->dev)->task);
        res = do_tx(skb);
        tasklet_enable(&ENI_DEV(vcc->dev)->task);
        if (res == enq_ok) return 0;
index f7bd0f4db13d832788e839d965c7f0bd7c8efb4e..9c00d203d61e3d1444011e7c7a83728813a70aea 100644 (file)
@@ -461,6 +461,10 @@ attribute_container_add_class_device(struct device *classdev)
 /**
  * attribute_container_add_class_device_adapter - simple adapter for triggers
  *
+ * @cont: the container to register.
+ * @dev:  the generic device to activate the trigger for
+ * @classdev:  the class device to add
+ *
  * This function is identical to attribute_container_add_class_device except
  * that it is designed to be called from the triggers
  */
index d8b314e7d0fdc88cfa33f37f2ccefdd329cb286e..adc199dfba3cb3ff3e03ea3c98ae4645799fa442 100644 (file)
@@ -265,8 +265,3 @@ void __init auxiliary_bus_init(void)
 {
        WARN_ON(bus_register(&auxiliary_bus_type));
 }
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Auxiliary Bus");
-MODULE_AUTHOR("David Ertman <david.m.ertman@intel.com>");
-MODULE_AUTHOR("Kiran Patil <kiran.patil@intel.com>");
index 52b3d7b75c2750e85c0b5f1b238964dd42cb5fa9..e5f9b7e656c34b385ef2aa2a309a702c587b6388 100644 (file)
@@ -185,11 +185,13 @@ extern int device_links_read_lock(void);
 extern void device_links_read_unlock(int idx);
 extern int device_links_read_lock_held(void);
 extern int device_links_check_suppliers(struct device *dev);
+extern void device_links_force_bind(struct device *dev);
 extern void device_links_driver_bound(struct device *dev);
 extern void device_links_driver_cleanup(struct device *dev);
 extern void device_links_no_driver(struct device *dev);
 extern bool device_links_busy(struct device *dev);
 extern void device_links_unbind_consumers(struct device *dev);
+extern void fw_devlink_drivers_done(void);
 
 /* device pm support */
 void device_pm_move_to_tail(struct device *dev);
index dcfbe7251dc43b86edd347c371c306d70d32e39d..272ba42392f0c0d953ea593dd26421f9f2f1e5a6 100644 (file)
@@ -65,7 +65,6 @@ struct master {
        const struct component_master_ops *ops;
        struct device *dev;
        struct component_match *match;
-       struct dentry *dentry;
 };
 
 struct component {
@@ -125,15 +124,13 @@ core_initcall(component_debug_init);
 
 static void component_master_debugfs_add(struct master *m)
 {
-       m->dentry = debugfs_create_file(dev_name(m->dev), 0444,
-                                       component_debugfs_dir,
-                                       m, &component_devices_fops);
+       debugfs_create_file(dev_name(m->dev), 0444, component_debugfs_dir, m,
+                           &component_devices_fops);
 }
 
 static void component_master_debugfs_del(struct master *m)
 {
-       debugfs_remove(m->dentry);
-       m->dentry = NULL;
+       debugfs_remove(debugfs_lookup(dev_name(m->dev), component_debugfs_dir));
 }
 
 #else
index f29839382f8166b4106e96eb9a0cc41f1e2d7b34..4a8bf8cda52bc77243fcffb8af554309aeed01bb 100644 (file)
@@ -51,6 +51,7 @@ static LIST_HEAD(deferred_sync);
 static unsigned int defer_sync_state_count = 1;
 static DEFINE_MUTEX(fwnode_link_lock);
 static bool fw_devlink_is_permissive(void);
+static bool fw_devlink_drv_reg_done;
 
 /**
  * fwnode_link_add - Create a link between two fwnode_handles.
@@ -1153,6 +1154,41 @@ static ssize_t waiting_for_supplier_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(waiting_for_supplier);
 
+/**
+ * device_links_force_bind - Prepares device to be force bound
+ * @dev: Consumer device.
+ *
+ * device_bind_driver() force binds a device to a driver without calling any
+ * driver probe functions. So the consumer really isn't going to wait for any
+ * supplier before it's bound to the driver. We still want the device link
+ * states to be sensible when this happens.
+ *
+ * In preparation for device_bind_driver(), this function goes through each
+ * supplier device links and checks if the supplier is bound. If it is, then
+ * the device link status is set to CONSUMER_PROBE. Otherwise, the device link
+ * is dropped. Links without the DL_FLAG_MANAGED flag set are ignored.
+ */
+void device_links_force_bind(struct device *dev)
+{
+       struct device_link *link, *ln;
+
+       device_links_write_lock();
+
+       list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
+               if (!(link->flags & DL_FLAG_MANAGED))
+                       continue;
+
+               if (link->status != DL_STATE_AVAILABLE) {
+                       device_link_drop_managed(link);
+                       continue;
+               }
+               WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
+       }
+       dev->links.status = DL_DEV_PROBING;
+
+       device_links_write_unlock();
+}
+
 /**
  * device_links_driver_bound - Update device links after probing its driver.
  * @dev: Device to update the links for.
@@ -1503,7 +1539,7 @@ static void device_links_purge(struct device *dev)
 #define FW_DEVLINK_FLAGS_RPM           (FW_DEVLINK_FLAGS_ON | \
                                         DL_FLAG_PM_RUNTIME)
 
-static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
+static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
 static int __init fw_devlink_setup(char *arg)
 {
        if (!arg)
@@ -1563,6 +1599,52 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
                fw_devlink_parse_fwtree(child);
 }
 
+static void fw_devlink_relax_link(struct device_link *link)
+{
+       if (!(link->flags & DL_FLAG_INFERRED))
+               return;
+
+       if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE))
+               return;
+
+       pm_runtime_drop_link(link);
+       link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
+       dev_dbg(link->consumer, "Relaxing link with %s\n",
+               dev_name(link->supplier));
+}
+
+static int fw_devlink_no_driver(struct device *dev, void *data)
+{
+       struct device_link *link = to_devlink(dev);
+
+       if (!link->supplier->can_match)
+               fw_devlink_relax_link(link);
+
+       return 0;
+}
+
+void fw_devlink_drivers_done(void)
+{
+       fw_devlink_drv_reg_done = true;
+       device_links_write_lock();
+       class_for_each_device(&devlink_class, NULL, NULL,
+                             fw_devlink_no_driver);
+       device_links_write_unlock();
+}
+
+static void fw_devlink_unblock_consumers(struct device *dev)
+{
+       struct device_link *link;
+
+       if (!fw_devlink_flags || fw_devlink_is_permissive())
+               return;
+
+       device_links_write_lock();
+       list_for_each_entry(link, &dev->links.consumers, s_node)
+               fw_devlink_relax_link(link);
+       device_links_write_unlock();
+}
+
 /**
  * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
  * @con: Device to check dependencies for.
@@ -1599,21 +1681,16 @@ static int fw_devlink_relax_cycle(struct device *con, void *sup)
 
                ret = 1;
 
-               if (!(link->flags & DL_FLAG_INFERRED))
-                       continue;
-
-               pm_runtime_drop_link(link);
-               link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
-               dev_dbg(link->consumer, "Relaxing link with %s\n",
-                       dev_name(link->supplier));
+               fw_devlink_relax_link(link);
        }
        return ret;
 }
 
 /**
  * fw_devlink_create_devlink - Create a device link from a consumer to fwnode
- * @con - Consumer device for the device link
- * @sup_handle - fwnode handle of supplier
+ * @con: consumer device for the device link
+ * @sup_handle: fwnode handle of supplier
+ * @flags: devlink flags
  *
  * This function will try to create a device link between the consumer device
  * @con and the supplier device represented by @sup_handle.
@@ -1709,7 +1786,7 @@ out:
 
 /**
  * __fw_devlink_link_to_consumers - Create device links to consumers of a device
- * @dev - Device that needs to be linked to its consumers
+ * @dev: Device that needs to be linked to its consumers
  *
  * This function looks at all the consumer fwnodes of @dev and creates device
  * links between the consumer device and @dev (supplier).
@@ -1779,8 +1856,8 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
 
 /**
  * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
- * @dev - The consumer device that needs to be linked to its suppliers
- * @fwnode - Root of the fwnode tree that is used to create device links
+ * @dev: The consumer device that needs to be linked to its suppliers
+ * @fwnode: Root of the fwnode tree that is used to create device links
  *
  * This function looks at all the supplier fwnodes of fwnode tree rooted at
  * @fwnode and creates device links between @dev (consumer) and all the
@@ -3240,6 +3317,15 @@ int device_add(struct device *dev)
        }
 
        bus_probe_device(dev);
+
+       /*
+        * If all driver registration is done and a newly added device doesn't
+        * match with any driver, don't block its consumers from probing in
+        * case the consumer device is able to operate without this supplier.
+        */
+       if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match)
+               fw_devlink_unblock_consumers(dev);
+
        if (parent)
                klist_add_tail(&dev->p->knode_parent,
                               &parent->p->klist_children);
index 8f1d6569564c4099deea31b1ebfd2371481ee36a..2b9e41377a0705a5743775ba6d8837b3a190da13 100644 (file)
@@ -409,13 +409,11 @@ __cpu_device_create(struct device *parent, void *drvdata,
                    const char *fmt, va_list args)
 {
        struct device *dev = NULL;
-       int retval = -ENODEV;
+       int retval = -ENOMEM;
 
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-       if (!dev) {
-               retval = -ENOMEM;
+       if (!dev)
                goto error;
-       }
 
        device_initialize(dev);
        dev->parent = parent;
index e2cf3b29123e8eb36639cf990867f321a804e9d7..ecd7cf848daff7ea3522cb18f568f2203c5a522b 100644 (file)
@@ -55,7 +55,6 @@ static DEFINE_MUTEX(deferred_probe_mutex);
 static LIST_HEAD(deferred_probe_pending_list);
 static LIST_HEAD(deferred_probe_active_list);
 static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
-static struct dentry *deferred_devices;
 static bool initcalls_done;
 
 /* Save the async probe drivers' name from kernel cmdline */
@@ -69,6 +68,12 @@ static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];
  */
 static bool defer_all_probes;
 
+static void __device_set_deferred_probe_reason(const struct device *dev, char *reason)
+{
+       kfree(dev->p->deferred_probe_reason);
+       dev->p->deferred_probe_reason = reason;
+}
+
 /*
  * deferred_probe_work_func() - Retry probing devices in the active list.
  */
@@ -97,8 +102,7 @@ static void deferred_probe_work_func(struct work_struct *work)
 
                get_device(dev);
 
-               kfree(dev->p->deferred_probe_reason);
-               dev->p->deferred_probe_reason = NULL;
+               __device_set_deferred_probe_reason(dev, NULL);
 
                /*
                 * Drop the mutex while probing each device; the probe path may
@@ -126,6 +130,9 @@ static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
 
 void driver_deferred_probe_add(struct device *dev)
 {
+       if (!dev->can_match)
+               return;
+
        mutex_lock(&deferred_probe_mutex);
        if (list_empty(&dev->p->deferred_probe)) {
                dev_dbg(dev, "Added to deferred list\n");
@@ -140,8 +147,7 @@ void driver_deferred_probe_del(struct device *dev)
        if (!list_empty(&dev->p->deferred_probe)) {
                dev_dbg(dev, "Removed from deferred list\n");
                list_del_init(&dev->p->deferred_probe);
-               kfree(dev->p->deferred_probe_reason);
-               dev->p->deferred_probe_reason = NULL;
+               __device_set_deferred_probe_reason(dev, NULL);
        }
        mutex_unlock(&deferred_probe_mutex);
 }
@@ -185,7 +191,7 @@ static void driver_deferred_probe_trigger(void)
         * Kick the re-probe thread.  It may already be scheduled, but it is
         * safe to kick it again.
         */
-       schedule_work(&deferred_probe_work);
+       queue_work(system_unbound_wq, &deferred_probe_work);
 }
 
 /**
@@ -220,11 +226,12 @@ void device_unblock_probing(void)
 void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf)
 {
        const char *drv = dev_driver_string(dev);
+       char *reason;
 
        mutex_lock(&deferred_probe_mutex);
 
-       kfree(dev->p->deferred_probe_reason);
-       dev->p->deferred_probe_reason = kasprintf(GFP_KERNEL, "%s: %pV", drv, vaf);
+       reason = kasprintf(GFP_KERNEL, "%s: %pV", drv, vaf);
+       __device_set_deferred_probe_reason(dev, reason);
 
        mutex_unlock(&deferred_probe_mutex);
 }
@@ -292,14 +299,18 @@ int driver_deferred_probe_check_state(struct device *dev)
 
 static void deferred_probe_timeout_work_func(struct work_struct *work)
 {
-       struct device_private *private, *p;
+       struct device_private *p;
+
+       fw_devlink_drivers_done();
 
        driver_deferred_probe_timeout = 0;
        driver_deferred_probe_trigger();
        flush_work(&deferred_probe_work);
 
-       list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
-               dev_info(private->device, "deferred probe pending\n");
+       mutex_lock(&deferred_probe_mutex);
+       list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
+               dev_info(p->device, "deferred probe pending\n");
+       mutex_unlock(&deferred_probe_mutex);
        wake_up_all(&probe_timeout_waitqueue);
 }
 static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
@@ -313,8 +324,8 @@ static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_
  */
 static int deferred_probe_initcall(void)
 {
-       deferred_devices = debugfs_create_file("devices_deferred", 0444, NULL,
-                                              NULL, &deferred_devs_fops);
+       debugfs_create_file("devices_deferred", 0444, NULL, NULL,
+                           &deferred_devs_fops);
 
        driver_deferred_probe_enable = true;
        driver_deferred_probe_trigger();
@@ -322,6 +333,9 @@ static int deferred_probe_initcall(void)
        flush_work(&deferred_probe_work);
        initcalls_done = true;
 
+       if (!IS_ENABLED(CONFIG_MODULES))
+               fw_devlink_drivers_done();
+
        /*
         * Trigger deferred probe again, this time we won't defer anything
         * that is optional
@@ -339,7 +353,7 @@ late_initcall(deferred_probe_initcall);
 
 static void __exit deferred_probe_exit(void)
 {
-       debugfs_remove_recursive(deferred_devices);
+       debugfs_remove_recursive(debugfs_lookup("devices_deferred", NULL));
 }
 __exitcall(deferred_probe_exit);
 
@@ -416,8 +430,11 @@ static int driver_sysfs_add(struct device *dev)
        if (ret)
                goto rm_dev;
 
-       if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump ||
-           !device_create_file(dev, &dev_attr_coredump))
+       if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump)
+               return 0;
+
+       ret = device_create_file(dev, &dev_attr_coredump);
+       if (!ret)
                return 0;
 
        sysfs_remove_link(&dev->kobj, "driver");
@@ -460,8 +477,10 @@ int device_bind_driver(struct device *dev)
        int ret;
 
        ret = driver_sysfs_add(dev);
-       if (!ret)
+       if (!ret) {
+               device_links_force_bind(dev);
                driver_bound(dev);
+       }
        else if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
@@ -729,6 +748,7 @@ static int driver_probe_device(struct device_driver *drv, struct device *dev)
        if (!device_is_registered(dev))
                return -ENODEV;
 
+       dev->can_match = true;
        pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
                 drv->bus->name, __func__, dev_name(dev), drv->name);
 
@@ -832,6 +852,7 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
                return 0;
        } else if (ret == -EPROBE_DEFER) {
                dev_dbg(dev, "Device match requests probe deferral\n");
+               dev->can_match = true;
                driver_deferred_probe_add(dev);
        } else if (ret < 0) {
                dev_dbg(dev, "Bus failed to match device: %d\n", ret);
@@ -1067,6 +1088,7 @@ static int __driver_attach(struct device *dev, void *data)
                return 0;
        } else if (ret == -EPROBE_DEFER) {
                dev_dbg(dev, "Device match requests probe deferral\n");
+               dev->can_match = true;
                driver_deferred_probe_add(dev);
        } else if (ret < 0) {
                dev_dbg(dev, "Bus failed to match device: %d\n", ret);
index 9243468e2c99fd7b28541fe34d860eb0b0c4da23..8eec0e0ddff7b1ec40b585daed4977bf13914425 100644 (file)
@@ -202,7 +202,7 @@ static int devcd_match_failing(struct device *dev, const void *failing)
  * NOTE: if two tables allocated with devcd_alloc_sgtable and then chained
  * using the sg_chain function then that function should be called only once
  * on the chained table
- * @table: pointer to sg_table to free
+ * @data: pointer to sg_table to free
  */
 static void devcd_free_sgtable(void *data)
 {
@@ -210,7 +210,7 @@ static void devcd_free_sgtable(void *data)
 }
 
 /**
- * devcd_read_from_table - copy data from sg_table to a given buffer
+ * devcd_read_from_sgtable - copy data from sg_table to a given buffer
  * and return the number of bytes read
  * @buffer: the buffer to copy the data to it
  * @buf_len: the length of the buffer
@@ -292,13 +292,16 @@ void dev_coredumpm(struct device *dev, struct module *owner,
        if (device_add(&devcd->devcd_dev))
                goto put_device;
 
+       /*
+        * These should normally not fail, but there is no problem
+        * continuing without the links, so just warn instead of
+        * failing.
+        */
        if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj,
-                             "failing_device"))
-               /* nothing - symlink will be missing */;
-
-       if (sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
-                             "devcoredump"))
-               /* nothing - symlink will be missing */;
+                             "failing_device") ||
+           sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
+                             "devcoredump"))
+               dev_warn(dev, "devcoredump create_link failed\n");
 
        INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
        schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
index fb9d5289a620338e42cf14e9f40046794c9f2d7c..8746f2212781b5153afa2307aee5dc6493248250 100644 (file)
@@ -58,8 +58,8 @@ static void devres_log(struct device *dev, struct devres_node *node,
                       const char *op)
 {
        if (unlikely(log_devres))
-               dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
-                       op, node, node->name, (unsigned long)node->size);
+               dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
+                       op, node, node->name, node->size);
 }
 #else /* CONFIG_DEBUG_DEVRES */
 #define set_node_dbginfo(node, n, s)   do {} while (0)
@@ -1228,6 +1228,6 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
 void devm_free_percpu(struct device *dev, void __percpu *pdata)
 {
        WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
-                              (void *)pdata));
+                              (__force void *)pdata));
 }
 EXPORT_SYMBOL_GPL(devm_free_percpu);
index 653c8c6ac7a73a2137598065b79a078fdaaf1841..8be352ab4ddbfdcfdda4885dde97c39b5477cf32 100644 (file)
@@ -371,7 +371,7 @@ int __init devtmpfs_mount(void)
        return err;
 }
 
-static DECLARE_COMPLETION(setup_done);
+static __initdata DECLARE_COMPLETION(setup_done);
 
 static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
                  struct device *dev)
@@ -405,7 +405,7 @@ static void __noreturn devtmpfs_work_loop(void)
        }
 }
 
-static int __init devtmpfs_setup(void *p)
+static noinline int __init devtmpfs_setup(void *p)
 {
        int err;
 
@@ -419,7 +419,6 @@ static int __init devtmpfs_setup(void *p)
        init_chroot(".");
 out:
        *(int *)p = err;
-       complete(&setup_done);
        return err;
 }
 
@@ -432,6 +431,7 @@ static int __ref devtmpfsd(void *p)
 {
        int err = devtmpfs_setup(p);
 
+       complete(&setup_done);
        if (err)
                return err;
        devtmpfs_work_loop();
index f449dbb2c74666a83caabb55b39e8c9de31a622c..2c36f61d30bcb0d7c6b315c6dfcd8e1ee492722e 100644 (file)
@@ -268,21 +268,20 @@ static void node_init_cache_dev(struct node *node)
        if (!dev)
                return;
 
+       device_initialize(dev);
        dev->parent = &node->dev;
        dev->release = node_cache_release;
        if (dev_set_name(dev, "memory_side_cache"))
-               goto free_dev;
+               goto put_device;
 
-       if (device_register(dev))
-               goto free_name;
+       if (device_add(dev))
+               goto put_device;
 
        pm_runtime_no_callbacks(dev);
        node->cache_dev = dev;
        return;
-free_name:
-       kfree_const(dev->kobj.name);
-free_dev:
-       kfree(dev);
+put_device:
+       put_device(dev);
 }
 
 /**
@@ -319,25 +318,24 @@ void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
                return;
 
        dev = &info->dev;
+       device_initialize(dev);
        dev->parent = node->cache_dev;
        dev->release = node_cacheinfo_release;
        dev->groups = cache_groups;
        if (dev_set_name(dev, "index%d", cache_attrs->level))
-               goto free_cache;
+               goto put_device;
 
        info->cache_attrs = *cache_attrs;
-       if (device_register(dev)) {
+       if (device_add(dev)) {
                dev_warn(&node->dev, "failed to add cache level:%d\n",
                         cache_attrs->level);
-               goto free_name;
+               goto put_device;
        }
        pm_runtime_no_callbacks(dev);
        list_add_tail(&info->node, &node->cache_attrs);
        return;
-free_name:
-       kfree_const(dev->kobj.name);
-free_cache:
-       kfree(info);
+put_device:
+       put_device(dev);
 }
 
 static void node_remove_caches(struct node *node)
index 2c1e2e0c1a59cc5f037f57aec963c8fcfa04a8e2..0b72b134a3048971345c96f0aedbe1fdbac5f671 100644 (file)
@@ -316,10 +316,11 @@ void *platform_msi_get_host_data(struct irq_domain *domain)
 }
 
 /**
- * platform_msi_create_device_domain - Create a platform-msi domain
+ * __platform_msi_create_device_domain - Create a platform-msi domain
  *
  * @dev:               The device generating the MSIs
  * @nvec:              The number of MSIs that need to be allocated
+ * @is_tree:           flag to indicate tree hierarchy
  * @write_msi_msg:     Callback to write an interrupt message for @dev
  * @ops:               The hierarchy domain operations to use
  * @host_data:         Private data associated to this domain
index 6e1f8e0b661cef101dc6d4a01ad56beea41345d5..9cd34def2237b8a27eb827863826dbe51cca6709 100644 (file)
@@ -192,7 +192,7 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
 #ifdef CONFIG_SPARC
        /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
        if (!dev || num >= dev->archdata.num_irqs)
-               return -ENXIO;
+               goto out_not_found;
        ret = dev->archdata.irqs[num];
        goto out;
 #else
@@ -223,10 +223,8 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
                struct irq_data *irqd;
 
                irqd = irq_get_irq_data(r->start);
-               if (!irqd) {
-                       ret = -ENXIO;
-                       goto out;
-               }
+               if (!irqd)
+                       goto out_not_found;
                irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
        }
 
@@ -249,8 +247,9 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
                        goto out;
        }
 
-       ret = -ENXIO;
 #endif
+out_not_found:
+       ret = -ENXIO;
 out:
        WARN(ret == 0, "0 is an invalid IRQ number\n");
        return ret;
index d638259b829a5ef24303f874cbe2405722fbeae6..5ade7539ac0244beb20e5e71e10439ed0c5118ef 100644 (file)
@@ -154,7 +154,7 @@ static struct device *wakeup_source_device_create(struct device *parent,
        dev_set_drvdata(dev, ws);
        device_set_pm_not_required(dev);
 
-       retval = kobject_set_name(&dev->kobj, "wakeup%d", ws->id);
+       retval = dev_set_name(dev, "wakeup%d", ws->id);
        if (retval)
                goto error;
 
index fa3719ef80e4d12b93655189ed4cec22075a04e0..3cc11b813f28ca8ff6cb508b83cd3d1d2c996cce 100644 (file)
 #include <linux/slab.h>
 
 struct swnode {
-       int id;
        struct kobject kobj;
        struct fwnode_handle fwnode;
        const struct software_node *node;
+       int id;
 
        /* hierarchy */
        struct ida child_ids;
@@ -720,19 +720,30 @@ software_node_find_by_name(const struct software_node *parent, const char *name)
 }
 EXPORT_SYMBOL_GPL(software_node_find_by_name);
 
-static int
-software_node_register_properties(struct software_node *node,
-                                 const struct property_entry *properties)
+static struct software_node *software_node_alloc(const struct property_entry *properties)
 {
        struct property_entry *props;
+       struct software_node *node;
 
        props = property_entries_dup(properties);
        if (IS_ERR(props))
-               return PTR_ERR(props);
+               return ERR_CAST(props);
+
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (!node) {
+               property_entries_free(props);
+               return ERR_PTR(-ENOMEM);
+       }
 
        node->properties = props;
 
-       return 0;
+       return node;
+}
+
+static void software_node_free(const struct software_node *node)
+{
+       property_entries_free(node->properties);
+       kfree(node);
 }
 
 static void software_node_release(struct kobject *kobj)
@@ -746,10 +757,9 @@ static void software_node_release(struct kobject *kobj)
                ida_simple_remove(&swnode_root_ids, swnode->id);
        }
 
-       if (swnode->allocated) {
-               property_entries_free(swnode->node->properties);
-               kfree(swnode->node);
-       }
+       if (swnode->allocated)
+               software_node_free(swnode->node);
+
        ida_destroy(&swnode->child_ids);
        kfree(swnode);
 }
@@ -767,22 +777,19 @@ swnode_register(const struct software_node *node, struct swnode *parent,
        int ret;
 
        swnode = kzalloc(sizeof(*swnode), GFP_KERNEL);
-       if (!swnode) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
+       if (!swnode)
+               return ERR_PTR(-ENOMEM);
 
        ret = ida_simple_get(parent ? &parent->child_ids : &swnode_root_ids,
                             0, 0, GFP_KERNEL);
        if (ret < 0) {
                kfree(swnode);
-               goto out_err;
+               return ERR_PTR(ret);
        }
 
        swnode->id = ret;
        swnode->node = node;
        swnode->parent = parent;
-       swnode->allocated = allocated;
        swnode->kobj.kset = swnode_kset;
        fwnode_init(&swnode->fwnode, &software_node_ops);
 
@@ -803,16 +810,17 @@ swnode_register(const struct software_node *node, struct swnode *parent,
                return ERR_PTR(ret);
        }
 
+       /*
+        * Assign the flag only in the successful case, so
+        * the above kobject_put() won't mess up with properties.
+        */
+       swnode->allocated = allocated;
+
        if (parent)
                list_add_tail(&swnode->entry, &parent->children);
 
        kobject_uevent(&swnode->kobj, KOBJ_ADD);
        return &swnode->fwnode;
-
-out_err:
-       if (allocated)
-               property_entries_free(node->properties);
-       return ERR_PTR(ret);
 }
 
 /**
@@ -880,7 +888,11 @@ EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
  * software_node_register_node_group - Register a group of software nodes
  * @node_group: NULL terminated array of software node pointers to be registered
  *
- * Register multiple software nodes at once.
+ * Register multiple software nodes at once. If any node in the array
+ * has its .parent pointer set (which can only be to another software_node),
+ * then its parent **must** have been registered before it is; either outside
+ * of this function or by ordering the array such that parent comes before
+ * child.
  */
 int software_node_register_node_group(const struct software_node **node_group)
 {
@@ -906,10 +918,14 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
  * software_node_unregister_node_group - Unregister a group of software nodes
  * @node_group: NULL terminated array of software node pointers to be unregistered
  *
- * Unregister multiple software nodes at once. The array will be unwound in
- * reverse order (i.e. last entry first) and thus if any members of the array are
- * children of another member then the children must appear later in the list such
- * that they are unregistered first.
+ * Unregister multiple software nodes at once. If parent pointers are set up
+ * in any of the software nodes then the array **must** be ordered such that
+ * parents come before their children.
+ *
+ * NOTE: If you are uncertain whether the array is ordered such that
+ * parents will be unregistered before their children, it is wiser to
+ * remove the nodes individually, in the correct order (child before
+ * parent).
  */
 void software_node_unregister_node_group(
                const struct software_node **node_group)
@@ -963,31 +979,28 @@ struct fwnode_handle *
 fwnode_create_software_node(const struct property_entry *properties,
                            const struct fwnode_handle *parent)
 {
+       struct fwnode_handle *fwnode;
        struct software_node *node;
-       struct swnode *p = NULL;
-       int ret;
+       struct swnode *p;
 
-       if (parent) {
-               if (IS_ERR(parent))
-                       return ERR_CAST(parent);
-               if (!is_software_node(parent))
-                       return ERR_PTR(-EINVAL);
-               p = to_swnode(parent);
-       }
+       if (IS_ERR(parent))
+               return ERR_CAST(parent);
 
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (!node)
-               return ERR_PTR(-ENOMEM);
+       p = to_swnode(parent);
+       if (parent && !p)
+               return ERR_PTR(-EINVAL);
 
-       ret = software_node_register_properties(node, properties);
-       if (ret) {
-               kfree(node);
-               return ERR_PTR(ret);
-       }
+       node = software_node_alloc(properties);
+       if (IS_ERR(node))
+               return ERR_CAST(node);
 
        node->parent = p ? p->node : NULL;
 
-       return swnode_register(node, p, 1);
+       fwnode = swnode_register(node, p, 1);
+       if (IS_ERR(fwnode))
+               software_node_free(node);
+
+       return fwnode;
 }
 EXPORT_SYMBOL_GPL(fwnode_create_software_node);
 
@@ -1032,6 +1045,7 @@ int device_add_software_node(struct device *dev, const struct software_node *nod
        }
 
        set_secondary_fwnode(dev, &swnode->fwnode);
+       software_node_notify(dev, KOBJ_ADD);
 
        return 0;
 }
@@ -1105,8 +1119,8 @@ int software_node_notify(struct device *dev, unsigned long action)
 
        switch (action) {
        case KOBJ_ADD:
-               ret = sysfs_create_link(&dev->kobj, &swnode->kobj,
-                                       "software_node");
+               ret = sysfs_create_link_nowarn(&dev->kobj, &swnode->kobj,
+                                              "software_node");
                if (ret)
                        break;
 
index ba225eb1b7615fb6a56996acd3c18cbba773afe8..2f3fa31a948e2e86f6e9753a304443110a29fcc6 100644 (file)
@@ -8,7 +8,7 @@ config TEST_ASYNC_DRIVER_PROBE
          The module name will be test_async_driver_probe.ko
 
          If unsure say N.
-config KUNIT_DRIVER_PE_TEST
+config DRIVER_PE_KUNIT_TEST
        bool "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
        depends on KUNIT=y
        default KUNIT_ALL_TESTS
index 2f15fae8625f191d42fd80f594d86fcb5a8fe4c5..64b2f3d744d51aa91d98d64012adb8fce6bae54b 100644 (file)
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE)  += test_async_driver_probe.o
 
-obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o
+obj-$(CONFIG_DRIVER_PE_KUNIT_TEST) += property-entry-test.o
 CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
index abe03315180ff9a2d77c25b541a6b08e07d498bb..1106fedcceed88aad2e6fdb83eeff7c846b5a577 100644 (file)
@@ -27,6 +27,9 @@ static void pe_test_uints(struct kunit *test)
        node = fwnode_create_software_node(entries, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
 
+       error = fwnode_property_count_u8(node, "prop-u8");
+       KUNIT_EXPECT_EQ(test, error, 1);
+
        error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
@@ -48,6 +51,9 @@ static void pe_test_uints(struct kunit *test)
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
 
+       error = fwnode_property_count_u16(node, "prop-u16");
+       KUNIT_EXPECT_EQ(test, error, 1);
+
        error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
@@ -65,6 +71,9 @@ static void pe_test_uints(struct kunit *test)
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
 
+       error = fwnode_property_count_u32(node, "prop-u32");
+       KUNIT_EXPECT_EQ(test, error, 1);
+
        error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
@@ -82,6 +91,9 @@ static void pe_test_uints(struct kunit *test)
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
 
+       error = fwnode_property_count_u64(node, "prop-u64");
+       KUNIT_EXPECT_EQ(test, error, 1);
+
        error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
@@ -95,15 +107,19 @@ static void pe_test_uints(struct kunit *test)
        error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
        KUNIT_EXPECT_NE(test, error, 0);
 
+       /* Count 64-bit values as 16-bit */
+       error = fwnode_property_count_u16(node, "prop-u64");
+       KUNIT_EXPECT_EQ(test, error, 4);
+
        fwnode_remove_software_node(node);
 }
 
 static void pe_test_uint_arrays(struct kunit *test)
 {
-       static const u8 a_u8[16] = { 8, 9 };
-       static const u16 a_u16[16] = { 16, 17 };
-       static const u32 a_u32[16] = { 32, 33 };
-       static const u64 a_u64[16] = { 64, 65 };
+       static const u8 a_u8[10] = { 8, 9 };
+       static const u16 a_u16[10] = { 16, 17 };
+       static const u32 a_u32[10] = { 32, 33 };
+       static const u64 a_u64[10] = { 64, 65 };
        static const struct property_entry entries[] = {
                PROPERTY_ENTRY_U8_ARRAY("prop-u8", a_u8),
                PROPERTY_ENTRY_U16_ARRAY("prop-u16", a_u16),
@@ -126,6 +142,9 @@ static void pe_test_uint_arrays(struct kunit *test)
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
 
+       error = fwnode_property_count_u8(node, "prop-u8");
+       KUNIT_EXPECT_EQ(test, error, 10);
+
        error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
@@ -148,6 +167,9 @@ static void pe_test_uint_arrays(struct kunit *test)
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
 
+       error = fwnode_property_count_u16(node, "prop-u16");
+       KUNIT_EXPECT_EQ(test, error, 10);
+
        error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
@@ -170,6 +192,9 @@ static void pe_test_uint_arrays(struct kunit *test)
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
 
+       error = fwnode_property_count_u32(node, "prop-u32");
+       KUNIT_EXPECT_EQ(test, error, 10);
+
        error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
@@ -192,6 +217,9 @@ static void pe_test_uint_arrays(struct kunit *test)
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
 
+       error = fwnode_property_count_u64(node, "prop-u64");
+       KUNIT_EXPECT_EQ(test, error, 10);
+
        error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
@@ -210,6 +238,14 @@ static void pe_test_uint_arrays(struct kunit *test)
        error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
        KUNIT_EXPECT_NE(test, error, 0);
 
+       /* Count 64-bit values as 16-bit */
+       error = fwnode_property_count_u16(node, "prop-u64");
+       KUNIT_EXPECT_EQ(test, error, 40);
+
+       /* Other way around */
+       error = fwnode_property_count_u64(node, "prop-u16");
+       KUNIT_EXPECT_EQ(test, error, 2);
+
        fwnode_remove_software_node(node);
 }
 
@@ -239,6 +275,9 @@ static void pe_test_strings(struct kunit *test)
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_STREQ(test, str, "single");
 
+       error = fwnode_property_string_array_count(node, "str");
+       KUNIT_EXPECT_EQ(test, error, 1);
+
        error = fwnode_property_read_string_array(node, "str", strs, 1);
        KUNIT_EXPECT_EQ(test, error, 1);
        KUNIT_EXPECT_STREQ(test, strs[0], "single");
@@ -258,6 +297,9 @@ static void pe_test_strings(struct kunit *test)
        KUNIT_EXPECT_EQ(test, error, 0);
        KUNIT_EXPECT_STREQ(test, str, "");
 
+       error = fwnode_property_string_array_count(node, "strs");
+       KUNIT_EXPECT_EQ(test, error, 2);
+
        error = fwnode_property_read_string_array(node, "strs", strs, 3);
        KUNIT_EXPECT_EQ(test, error, 2);
        KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
@@ -370,15 +412,8 @@ static void pe_test_reference(struct kunit *test)
        };
 
        static const struct software_node_ref_args refs[] = {
-               {
-                       .node = &nodes[0],
-                       .nargs = 0,
-               },
-               {
-                       .node = &nodes[1],
-                       .nargs = 2,
-                       .args = { 3, 4 },
-               },
+               SOFTWARE_NODE_REFERENCE(&nodes[0]),
+               SOFTWARE_NODE_REFERENCE(&nodes[1], 3, 4),
        };
 
        const struct property_entry entries[] = {
index b0c71d3a81a024375c6b789affb97728ea6b2ac2..bda5c815e44156da9a2ba660cf5150156d171721 100644 (file)
@@ -313,6 +313,7 @@ struct xen_blkif {
 
        struct work_struct      free_work;
        unsigned int            nr_ring_pages;
+       bool                    multi_ref;
        /* All rings for this device. */
        struct xen_blkif_ring   *rings;
        unsigned int            nr_rings;
index c2aaf690352c7aee1229bcdeb0169ef6db6610e2..125b22205d3836220cf7f8c2f23d77cfa310430f 100644 (file)
@@ -998,14 +998,17 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
        for (i = 0; i < nr_grefs; i++) {
                char ring_ref_name[RINGREF_NAME_LEN];
 
-               snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
+               if (blkif->multi_ref)
+                       snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
+               else {
+                       WARN_ON(i != 0);
+                       snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
+               }
+
                err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
                                   "%u", &ring_ref[i]);
 
                if (err != 1) {
-                       if (nr_grefs == 1)
-                               break;
-
                        err = -EINVAL;
                        xenbus_dev_fatal(dev, err, "reading %s/%s",
                                         dir, ring_ref_name);
@@ -1013,18 +1016,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
                }
        }
 
-       if (err != 1) {
-               WARN_ON(nr_grefs != 1);
-
-               err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
-                                  &ring_ref[0]);
-               if (err != 1) {
-                       err = -EINVAL;
-                       xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
-                       return err;
-               }
-       }
-
        err = -ENOMEM;
        for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
                req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -1129,10 +1120,15 @@ static int connect_ring(struct backend_info *be)
                 blkif->nr_rings, blkif->blk_protocol, protocol,
                 blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
 
-       ring_page_order = xenbus_read_unsigned(dev->otherend,
-                                              "ring-page-order", 0);
-
-       if (ring_page_order > xen_blkif_max_ring_order) {
+       err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
+                          &ring_page_order);
+       if (err != 1) {
+               blkif->nr_ring_pages = 1;
+               blkif->multi_ref = false;
+       } else if (ring_page_order <= xen_blkif_max_ring_order) {
+               blkif->nr_ring_pages = 1 << ring_page_order;
+               blkif->multi_ref = true;
+       } else {
                err = -EINVAL;
                xenbus_dev_fatal(dev, err,
                                 "requested ring page order %d exceed max:%d",
@@ -1141,8 +1137,6 @@ static int connect_ring(struct backend_info *be)
                return err;
        }
 
-       blkif->nr_ring_pages = 1 << ring_page_order;
-
        if (blkif->nr_rings == 1)
                return read_per_ring_refs(&blkif->rings[0], dev->otherend);
        else {
index e1c6798889f48afe9e6fc14a8ef5d93f919d365c..06c4efd97780013341ade3232aca246972f08f8b 100644 (file)
@@ -2397,7 +2397,7 @@ static void blkfront_connect(struct blkfront_info *info)
        }
 
        /*
-        * physcial-sector-size is a newer field, so old backends may not
+        * physical-sector-size is a newer field, so old backends may not
         * provide this. Assume physical sector size to be the same as
         * sector_size in that case.
         */
index 52683fd22e050f7e21e01d9a396e924c9f6d7894..5cbfbd948f676492bc501bff02a40c9b738d10e9 100644 (file)
@@ -4849,8 +4849,8 @@ static int btusb_probe(struct usb_interface *intf,
                        data->diag = NULL;
        }
 
-       if (!enable_autosuspend)
-               usb_disable_autosuspend(data->udev);
+       if (enable_autosuspend)
+               usb_enable_autosuspend(data->udev);
 
        err = hci_register_dev(hdev);
        if (err < 0)
@@ -4910,9 +4910,6 @@ static void btusb_disconnect(struct usb_interface *intf)
                gpiod_put(data->reset_gpio);
 
        hci_free_dev(hdev);
-
-       if (!enable_autosuspend)
-               usb_enable_autosuspend(data->udev);
 }
 
 #ifdef CONFIG_PM
index c2546bf229fb362d49d666596715a1dab4291403..8100cf51cd09e7e02dff5282baa0181bc64d168e 100644 (file)
@@ -389,7 +389,6 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
 void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 {
        const struct firmware *firmware = NULL;
-       struct image_info *image_info;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        const char *fw_name;
        void *buf;
@@ -417,9 +416,9 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
                }
        }
 
-       /* If device is in pass through, do reset to ready state transition */
-       if (mhi_cntrl->ee == MHI_EE_PTHRU)
-               goto fw_load_ee_pthru;
+       /* wait for ready on pass through or any other execution environment */
+       if (mhi_cntrl->ee != MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_PBL)
+               goto fw_load_ready_state;
 
        fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
                mhi_cntrl->edl_image : mhi_cntrl->fw_image;
@@ -461,9 +460,10 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
                goto error_fw_load;
        }
 
-       if (mhi_cntrl->ee == MHI_EE_EDL) {
+       /* Wait for ready since EDL image was loaded */
+       if (fw_name == mhi_cntrl->edl_image) {
                release_firmware(firmware);
-               return;
+               goto fw_load_ready_state;
        }
 
        write_lock_irq(&mhi_cntrl->pm_lock);
@@ -488,47 +488,45 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 
        release_firmware(firmware);
 
-fw_load_ee_pthru:
+fw_load_ready_state:
        /* Transitioning into MHI RESET->READY state */
        ret = mhi_ready_state_transition(mhi_cntrl);
-
-       if (!mhi_cntrl->fbc_download)
-               return;
-
        if (ret) {
                dev_err(dev, "MHI did not enter READY state\n");
                goto error_ready_state;
        }
 
-       /* Wait for the SBL event */
-       ret = wait_event_timeout(mhi_cntrl->state_event,
-                                mhi_cntrl->ee == MHI_EE_SBL ||
-                                MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
-                                msecs_to_jiffies(mhi_cntrl->timeout_ms));
+       dev_info(dev, "Wait for device to enter SBL or Mission mode\n");
+       return;
 
-       if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
-               dev_err(dev, "MHI did not enter SBL\n");
-               goto error_ready_state;
+error_ready_state:
+       if (mhi_cntrl->fbc_download) {
+               mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+               mhi_cntrl->fbc_image = NULL;
        }
 
-       /* Start full firmware image download */
-       image_info = mhi_cntrl->fbc_image;
+error_fw_load:
+       mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
+       wake_up_all(&mhi_cntrl->state_event);
+}
+
+int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
+{
+       struct image_info *image_info = mhi_cntrl->fbc_image;
+       struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       int ret;
+
+       if (!image_info)
+               return -EIO;
+
        ret = mhi_fw_load_bhie(mhi_cntrl,
                               /* Vector table is the last entry */
                               &image_info->mhi_buf[image_info->entries - 1]);
        if (ret) {
-               dev_err(dev, "MHI did not load image over BHIe, ret: %d\n",
-                       ret);
-               goto error_fw_load;
+               dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
+               mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
+               wake_up_all(&mhi_cntrl->state_event);
        }
 
-       return;
-
-error_ready_state:
-       mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
-       mhi_cntrl->fbc_image = NULL;
-
-error_fw_load:
-       mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
-       wake_up_all(&mhi_cntrl->state_event);
+       return ret;
 }
index 7d43138ce66d8408a1f09c91a2a76ec0f8693a15..858d7516410bb75e9aea5692cbc9eed4e9a66400 100644 (file)
@@ -377,7 +377,7 @@ static struct dentry *mhi_debugfs_root;
 void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
 {
        mhi_cntrl->debugfs_dentry =
-                       debugfs_create_dir(dev_name(mhi_cntrl->cntrl_dev),
+                       debugfs_create_dir(dev_name(&mhi_cntrl->mhi_dev->dev),
                                           mhi_debugfs_root);
 
        debugfs_create_file("states", 0444, mhi_cntrl->debugfs_dentry,
index be4eebb0971bc18270371f9c10dd9071f9b80cbf..c81b377fca8f7e7b0bf6e96c0b9c3f32ca22eb94 100644 (file)
 static DEFINE_IDA(mhi_controller_ida);
 
 const char * const mhi_ee_str[MHI_EE_MAX] = {
-       [MHI_EE_PBL] = "PBL",
-       [MHI_EE_SBL] = "SBL",
-       [MHI_EE_AMSS] = "AMSS",
-       [MHI_EE_RDDM] = "RDDM",
-       [MHI_EE_WFW] = "WFW",
-       [MHI_EE_PTHRU] = "PASS THRU",
-       [MHI_EE_EDL] = "EDL",
+       [MHI_EE_PBL] = "PRIMARY BOOTLOADER",
+       [MHI_EE_SBL] = "SECONDARY BOOTLOADER",
+       [MHI_EE_AMSS] = "MISSION MODE",
+       [MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
+       [MHI_EE_WFW] = "WLAN FIRMWARE",
+       [MHI_EE_PTHRU] = "PASS THROUGH",
+       [MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
+       [MHI_EE_FP] = "FLASH PROGRAMMER",
        [MHI_EE_DISABLE_TRANSITION] = "DISABLE",
        [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
 };
@@ -37,8 +38,9 @@ const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
        [DEV_ST_TRANSITION_PBL] = "PBL",
        [DEV_ST_TRANSITION_READY] = "READY",
        [DEV_ST_TRANSITION_SBL] = "SBL",
-       [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
-       [DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
+       [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
+       [DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
+       [DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
        [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
 };
 
@@ -49,24 +51,30 @@ const char * const mhi_state_str[MHI_STATE_MAX] = {
        [MHI_STATE_M1] = "M1",
        [MHI_STATE_M2] = "M2",
        [MHI_STATE_M3] = "M3",
-       [MHI_STATE_M3_FAST] = "M3_FAST",
+       [MHI_STATE_M3_FAST] = "M3 FAST",
        [MHI_STATE_BHI] = "BHI",
-       [MHI_STATE_SYS_ERR] = "SYS_ERR",
+       [MHI_STATE_SYS_ERR] = "SYS ERROR",
+};
+
+const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
+       [MHI_CH_STATE_TYPE_RESET] = "RESET",
+       [MHI_CH_STATE_TYPE_STOP] = "STOP",
+       [MHI_CH_STATE_TYPE_START] = "START",
 };
 
 static const char * const mhi_pm_state_str[] = {
        [MHI_PM_STATE_DISABLE] = "DISABLE",
-       [MHI_PM_STATE_POR] = "POR",
+       [MHI_PM_STATE_POR] = "POWER ON RESET",
        [MHI_PM_STATE_M0] = "M0",
        [MHI_PM_STATE_M2] = "M2",
        [MHI_PM_STATE_M3_ENTER] = "M?->M3",
        [MHI_PM_STATE_M3] = "M3",
        [MHI_PM_STATE_M3_EXIT] = "M3->M0",
-       [MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
-       [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
-       [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
+       [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
+       [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
+       [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
        [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
-       [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
+       [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
 };
 
 const char *to_mhi_pm_state_str(enum mhi_pm_state state)
@@ -508,8 +516,6 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
 
        /* Setup wake db */
        mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
-       mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
-       mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
        mhi_cntrl->wake_set = false;
 
        /* Setup channel db address for each channel in tre_ring */
@@ -552,6 +558,7 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
        struct mhi_ring *buf_ring;
        struct mhi_ring *tre_ring;
        struct mhi_chan_ctxt *chan_ctxt;
+       u32 tmp;
 
        buf_ring = &mhi_chan->buf_ring;
        tre_ring = &mhi_chan->tre_ring;
@@ -565,7 +572,19 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
        vfree(buf_ring->base);
 
        buf_ring->base = tre_ring->base = NULL;
+       tre_ring->ctxt_wp = NULL;
        chan_ctxt->rbase = 0;
+       chan_ctxt->rlen = 0;
+       chan_ctxt->rp = 0;
+       chan_ctxt->wp = 0;
+
+       tmp = chan_ctxt->chcfg;
+       tmp &= ~CHAN_CTX_CHSTATE_MASK;
+       tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
+       chan_ctxt->chcfg = tmp;
+
+       /* Update to all cores */
+       smp_wmb();
 }
 
 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
@@ -863,12 +882,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
        u32 soc_info;
        int ret, i;
 
-       if (!mhi_cntrl)
-               return -EINVAL;
-
-       if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
+       if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
+           !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
            !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
-           !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs)
+           !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || !mhi_cntrl->irq)
                return -EINVAL;
 
        ret = parse_config(mhi_cntrl, config);
@@ -890,8 +907,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
        INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
        init_waitqueue_head(&mhi_cntrl->state_event);
 
-       mhi_cntrl->hiprio_wq = alloc_ordered_workqueue
-                               ("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
+       mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
        if (!mhi_cntrl->hiprio_wq) {
                dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
                ret = -ENOMEM;
@@ -1083,8 +1099,6 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
                        mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
        }
 
-       mhi_cntrl->pre_init = true;
-
        mutex_unlock(&mhi_cntrl->pm_mutex);
 
        return 0;
@@ -1115,7 +1129,6 @@ void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
        }
 
        mhi_deinit_dev_ctxt(mhi_cntrl);
-       mhi_cntrl->pre_init = false;
 }
 EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
 
@@ -1296,7 +1309,8 @@ static int mhi_driver_remove(struct device *dev)
 
                mutex_lock(&mhi_chan->mutex);
 
-               if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
+               if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
+                    ch_state[dir] == MHI_CH_STATE_STOP) &&
                    !mhi_chan->offload_ch)
                        mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
 
index 6f80ec30c0cdcc342ad0faa128bda13e13b96473..5b9ea66b92dc32fb7dce14f4a4db5ea7419839af 100644 (file)
@@ -369,6 +369,18 @@ enum mhi_ch_state {
        MHI_CH_STATE_ERROR = 0x5,
 };
 
+enum mhi_ch_state_type {
+       MHI_CH_STATE_TYPE_RESET,
+       MHI_CH_STATE_TYPE_STOP,
+       MHI_CH_STATE_TYPE_START,
+       MHI_CH_STATE_TYPE_MAX,
+};
+
+extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
+#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \
+                                    "INVALID_STATE" : \
+                                    mhi_ch_state_type_str[(state)])
+
 #define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
                                    mode != MHI_DB_BRST_ENABLE)
 
@@ -379,13 +391,15 @@ extern const char * const mhi_ee_str[MHI_EE_MAX];
 #define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
                        ee == MHI_EE_EDL)
 
-#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW)
+#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \
+                                ee == MHI_EE_FP)
 
 enum dev_st_transition {
        DEV_ST_TRANSITION_PBL,
        DEV_ST_TRANSITION_READY,
        DEV_ST_TRANSITION_SBL,
        DEV_ST_TRANSITION_MISSION_MODE,
+       DEV_ST_TRANSITION_FP,
        DEV_ST_TRANSITION_SYS_ERR,
        DEV_ST_TRANSITION_DISABLE,
        DEV_ST_TRANSITION_MAX,
@@ -619,6 +633,7 @@ int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
 int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
 int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
                 enum mhi_cmd_type cmd);
+int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
 static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
 {
        return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
@@ -643,6 +658,9 @@ int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
                                    void __iomem *base, u32 offset, u32 mask,
                                    u32 shift, u32 *out);
+int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
+                                   void __iomem *base, u32 offset, u32 mask,
+                                   u32 shift, u32 val, u32 delayus);
 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
                   u32 offset, u32 val);
 void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
index 4e0131b94056ba8532f591960fee4e4c527570c2..22acde118bc352560f2d329a074b71959770ae32 100644 (file)
@@ -4,6 +4,7 @@
  *
  */
 
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
@@ -37,6 +38,28 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
        return 0;
 }
 
+int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
+                                   void __iomem *base, u32 offset,
+                                   u32 mask, u32 shift, u32 val, u32 delayus)
+{
+       int ret;
+       u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+
+       while (retry--) {
+               ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, shift,
+                                        &out);
+               if (ret)
+                       return ret;
+
+               if (out == val)
+                       return 0;
+
+               fsleep(delayus);
+       }
+
+       return -ETIMEDOUT;
+}
+
 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
                   u32 offset, u32 val)
 {
@@ -242,10 +265,17 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
        smp_wmb();
 }
 
+static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
+{
+       return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
+}
+
 int mhi_destroy_device(struct device *dev, void *data)
 {
+       struct mhi_chan *ul_chan, *dl_chan;
        struct mhi_device *mhi_dev;
        struct mhi_controller *mhi_cntrl;
+       enum mhi_ee_type ee = MHI_EE_MAX;
 
        if (dev->bus != &mhi_bus_type)
                return 0;
@@ -257,6 +287,17 @@ int mhi_destroy_device(struct device *dev, void *data)
        if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
                return 0;
 
+       ul_chan = mhi_dev->ul_chan;
+       dl_chan = mhi_dev->dl_chan;
+
+       /*
+        * If execution environment is specified, remove only those devices that
+        * started in them based on ee_mask for the channels as we move on to a
+        * different execution environment
+        */
+       if (data)
+               ee = *(enum mhi_ee_type *)data;
+
        /*
         * For the suspend and resume case, this function will get called
         * without mhi_unregister_controller(). Hence, we need to drop the
@@ -264,11 +305,19 @@ int mhi_destroy_device(struct device *dev, void *data)
         * be sure that there will be no instances of mhi_dev left after
         * this.
         */
-       if (mhi_dev->ul_chan)
-               put_device(&mhi_dev->ul_chan->mhi_dev->dev);
+       if (ul_chan) {
+               if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
+                       return 0;
 
-       if (mhi_dev->dl_chan)
-               put_device(&mhi_dev->dl_chan->mhi_dev->dev);
+               put_device(&ul_chan->mhi_dev->dev);
+       }
+
+       if (dl_chan) {
+               if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
+                       return 0;
+
+               put_device(&dl_chan->mhi_dev->dev);
+       }
 
        dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
                 mhi_dev->name);
@@ -383,7 +432,16 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
        struct mhi_event_ctxt *er_ctxt =
                &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
        struct mhi_ring *ev_ring = &mhi_event->ring;
-       void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+       dma_addr_t ptr = er_ctxt->rp;
+       void *dev_rp;
+
+       if (!is_valid_ring_ptr(ev_ring, ptr)) {
+               dev_err(&mhi_cntrl->mhi_dev->dev,
+                       "Event ring rp points outside of the event ring\n");
+               return IRQ_HANDLED;
+       }
+
+       dev_rp = mhi_to_virtual(ev_ring, ptr);
 
        /* Only proceed if event ring has pending events */
        if (ev_ring->rp == dev_rp)
@@ -407,9 +465,9 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
 {
        struct mhi_controller *mhi_cntrl = priv;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
-       enum mhi_state state = MHI_STATE_MAX;
+       enum mhi_state state;
        enum mhi_pm_state pm_state = 0;
-       enum mhi_ee_type ee = 0;
+       enum mhi_ee_type ee;
 
        write_lock_irq(&mhi_cntrl->pm_lock);
        if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -418,11 +476,11 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
        }
 
        state = mhi_get_mhi_state(mhi_cntrl);
-       ee = mhi_cntrl->ee;
-       mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
-       dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
-               TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
-               TO_MHI_STATE_STR(state));
+       ee = mhi_get_exec_env(mhi_cntrl);
+       dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
+               TO_MHI_EXEC_STR(mhi_cntrl->ee),
+               TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+               TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state));
 
        if (state == MHI_STATE_SYS_ERR) {
                dev_dbg(dev, "System error detected\n");
@@ -431,27 +489,30 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
        }
        write_unlock_irq(&mhi_cntrl->pm_lock);
 
-        /* If device supports RDDM don't bother processing SYS error */
-       if (mhi_cntrl->rddm_image) {
-               /* host may be performing a device power down already */
-               if (!mhi_is_active(mhi_cntrl))
-                       goto exit_intvec;
+       if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
+               goto exit_intvec;
 
-               if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
+       switch (ee) {
+       case MHI_EE_RDDM:
+               /* proceed if power down is not already in progress */
+               if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+                       mhi_cntrl->ee = ee;
                        wake_up_all(&mhi_cntrl->state_event);
                }
-               goto exit_intvec;
-       }
-
-       if (pm_state == MHI_PM_SYS_ERR_DETECT) {
+               break;
+       case MHI_EE_PBL:
+       case MHI_EE_EDL:
+       case MHI_EE_PTHRU:
+               mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
+               mhi_cntrl->ee = ee;
                wake_up_all(&mhi_cntrl->state_event);
-
-               /* For fatal errors, we let controller decide next step */
-               if (MHI_IN_PBL(ee))
-                       mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
-               else
-                       mhi_pm_sys_err_handler(mhi_cntrl);
+               mhi_pm_sys_err_handler(mhi_cntrl);
+               break;
+       default:
+               wake_up_all(&mhi_cntrl->state_event);
+               mhi_pm_sys_err_handler(mhi_cntrl);
+               break;
        }
 
 exit_intvec:
@@ -536,6 +597,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
                struct mhi_buf_info *buf_info;
                u16 xfer_len;
 
+               if (!is_valid_ring_ptr(tre_ring, ptr)) {
+                       dev_err(&mhi_cntrl->mhi_dev->dev,
+                               "Event element points outside of the tre ring\n");
+                       break;
+               }
                /* Get the TRB this event points to */
                ev_tre = mhi_to_virtual(tre_ring, ptr);
 
@@ -570,8 +636,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
                        /* notify client */
                        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 
-                       if (mhi_chan->dir == DMA_TO_DEVICE)
+                       if (mhi_chan->dir == DMA_TO_DEVICE) {
                                atomic_dec(&mhi_cntrl->pending_pkts);
+                               /* Release the reference got from mhi_queue() */
+                               mhi_cntrl->runtime_put(mhi_cntrl);
+                       }
 
                        /*
                         * Recycle the buffer if buffer is pre-allocated,
@@ -595,15 +664,15 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
        case MHI_EV_CC_OOB:
        case MHI_EV_CC_DB_MODE:
        {
-               unsigned long flags;
+               unsigned long pm_lock_flags;
 
                mhi_chan->db_cfg.db_mode = 1;
-               read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+               read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
                if (tre_ring->wp != tre_ring->rp &&
                    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
                        mhi_ring_chan_db(mhi_cntrl, mhi_chan);
                }
-               read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+               read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
                break;
        }
        case MHI_EV_CC_BAD_TRE:
@@ -695,6 +764,12 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
        struct mhi_chan *mhi_chan;
        u32 chan;
 
+       if (!is_valid_ring_ptr(mhi_ring, ptr)) {
+               dev_err(&mhi_cntrl->mhi_dev->dev,
+                       "Event element points outside of the cmd ring\n");
+               return;
+       }
+
        cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
 
        chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
@@ -719,6 +794,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        u32 chan;
        int count = 0;
+       dma_addr_t ptr = er_ctxt->rp;
 
        /*
         * This is a quick check to avoid unnecessary event processing
@@ -728,7 +804,13 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
        if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
                return -EIO;
 
-       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+       if (!is_valid_ring_ptr(ev_ring, ptr)) {
+               dev_err(&mhi_cntrl->mhi_dev->dev,
+                       "Event ring rp points outside of the event ring\n");
+               return -EIO;
+       }
+
+       dev_rp = mhi_to_virtual(ev_ring, ptr);
        local_rp = ev_ring->rp;
 
        while (dev_rp != local_rp) {
@@ -771,14 +853,14 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
                                break;
                        case MHI_STATE_SYS_ERR:
                        {
-                               enum mhi_pm_state new_state;
+                               enum mhi_pm_state pm_state;
 
                                dev_dbg(dev, "System error detected\n");
                                write_lock_irq(&mhi_cntrl->pm_lock);
-                               new_state = mhi_tryset_pm_state(mhi_cntrl,
+                               pm_state = mhi_tryset_pm_state(mhi_cntrl,
                                                        MHI_PM_SYS_ERR_DETECT);
                                write_unlock_irq(&mhi_cntrl->pm_lock);
-                               if (new_state == MHI_PM_SYS_ERR_DETECT)
+                               if (pm_state == MHI_PM_SYS_ERR_DETECT)
                                        mhi_pm_sys_err_handler(mhi_cntrl);
                                break;
                        }
@@ -807,6 +889,9 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
                        case MHI_EE_AMSS:
                                st = DEV_ST_TRANSITION_MISSION_MODE;
                                break;
+                       case MHI_EE_FP:
+                               st = DEV_ST_TRANSITION_FP;
+                               break;
                        case MHI_EE_RDDM:
                                mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
                                write_lock_irq(&mhi_cntrl->pm_lock);
@@ -834,6 +919,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
                         */
                        if (chan < mhi_cntrl->max_chan) {
                                mhi_chan = &mhi_cntrl->mhi_chan[chan];
+                               if (!mhi_chan->configured)
+                                       break;
                                parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
                                event_quota--;
                        }
@@ -845,7 +932,15 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 
                mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
                local_rp = ev_ring->rp;
-               dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+               ptr = er_ctxt->rp;
+               if (!is_valid_ring_ptr(ev_ring, ptr)) {
+                       dev_err(&mhi_cntrl->mhi_dev->dev,
+                               "Event ring rp points outside of the event ring\n");
+                       return -EIO;
+               }
+
+               dev_rp = mhi_to_virtual(ev_ring, ptr);
                count++;
        }
 
@@ -868,11 +963,18 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
        int count = 0;
        u32 chan;
        struct mhi_chan *mhi_chan;
+       dma_addr_t ptr = er_ctxt->rp;
 
        if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
                return -EIO;
 
-       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+       if (!is_valid_ring_ptr(ev_ring, ptr)) {
+               dev_err(&mhi_cntrl->mhi_dev->dev,
+                       "Event ring rp points outside of the event ring\n");
+               return -EIO;
+       }
+
+       dev_rp = mhi_to_virtual(ev_ring, ptr);
        local_rp = ev_ring->rp;
 
        while (dev_rp != local_rp && event_quota > 0) {
@@ -886,7 +988,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
                 * Only process the event ring elements whose channel
                 * ID is within the maximum supported range.
                 */
-               if (chan < mhi_cntrl->max_chan) {
+               if (chan < mhi_cntrl->max_chan &&
+                   mhi_cntrl->mhi_chan[chan].configured) {
                        mhi_chan = &mhi_cntrl->mhi_chan[chan];
 
                        if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
@@ -900,7 +1003,15 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
 
                mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
                local_rp = ev_ring->rp;
-               dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+               ptr = er_ctxt->rp;
+               if (!is_valid_ring_ptr(ev_ring, ptr)) {
+                       dev_err(&mhi_cntrl->mhi_dev->dev,
+                               "Event ring rp points outside of the event ring\n");
+                       return -EIO;
+               }
+
+               dev_rp = mhi_to_virtual(ev_ring, ptr);
                count++;
        }
        read_lock_bh(&mhi_cntrl->pm_lock);
@@ -996,7 +1107,7 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 
        ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
        if (unlikely(ret)) {
-               ret = -ENOMEM;
+               ret = -EAGAIN;
                goto exit_unlock;
        }
 
@@ -1004,9 +1115,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
        if (unlikely(ret))
                goto exit_unlock;
 
-       /* trigger M3 exit if necessary */
-       if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-               mhi_trigger_resume(mhi_cntrl);
+       /* Packet is queued, take a usage ref to exit M3 if necessary
+        * for host->device buffer, balanced put is done on buffer completion
+        * for device->host buffer, balanced put is after ringing the DB
+        */
+       mhi_cntrl->runtime_get(mhi_cntrl);
 
        /* Assert dev_wake (to exit/prevent M1/M2)*/
        mhi_cntrl->wake_toggle(mhi_cntrl);
@@ -1014,12 +1127,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
        if (mhi_chan->dir == DMA_TO_DEVICE)
                atomic_inc(&mhi_cntrl->pending_pkts);
 
-       if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-               ret = -EIO;
-               goto exit_unlock;
-       }
+       if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+               mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 
-       mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+       if (dir == DMA_FROM_DEVICE)
+               mhi_cntrl->runtime_put(mhi_cntrl);
 
 exit_unlock:
        read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
@@ -1162,6 +1274,11 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
                cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
                cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
                break;
+       case MHI_CMD_STOP_CHAN:
+               cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
+               cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
+               cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
+               break;
        case MHI_CMD_START_CHAN:
                cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
                cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
@@ -1183,56 +1300,125 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
        return 0;
 }
 
-static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
-                                   struct mhi_chan *mhi_chan)
+static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
+                                   struct mhi_chan *mhi_chan,
+                                   enum mhi_ch_state_type to_state)
 {
+       struct device *dev = &mhi_chan->mhi_dev->dev;
+       enum mhi_cmd_type cmd = MHI_CMD_NOP;
        int ret;
-       struct device *dev = &mhi_cntrl->mhi_dev->dev;
-
-       dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);
 
-       /* no more processing events for this channel */
-       mutex_lock(&mhi_chan->mutex);
-       write_lock_irq(&mhi_chan->lock);
-       if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
-           mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
+       dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
+               TO_CH_STATE_TYPE_STR(to_state));
+
+       switch (to_state) {
+       case MHI_CH_STATE_TYPE_RESET:
+               write_lock_irq(&mhi_chan->lock);
+               if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
+                   mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
+                   mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
+                       write_unlock_irq(&mhi_chan->lock);
+                       return -EINVAL;
+               }
+               mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
                write_unlock_irq(&mhi_chan->lock);
-               mutex_unlock(&mhi_chan->mutex);
-               return;
+
+               cmd = MHI_CMD_RESET_CHAN;
+               break;
+       case MHI_CH_STATE_TYPE_STOP:
+               if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+                       return -EINVAL;
+
+               cmd = MHI_CMD_STOP_CHAN;
+               break;
+       case MHI_CH_STATE_TYPE_START:
+               if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
+                   mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
+                       return -EINVAL;
+
+               cmd = MHI_CMD_START_CHAN;
+               break;
+       default:
+               dev_err(dev, "%d: Channel state update to %s not allowed\n",
+                       mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+               return -EINVAL;
        }
 
-       mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
-       write_unlock_irq(&mhi_chan->lock);
+       /* bring host and device out of suspended states */
+       ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
+       if (ret)
+               return ret;
+       mhi_cntrl->runtime_get(mhi_cntrl);
 
        reinit_completion(&mhi_chan->completion);
-       read_lock_bh(&mhi_cntrl->pm_lock);
-       if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
-               read_unlock_bh(&mhi_cntrl->pm_lock);
-               goto error_invalid_state;
+       ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
+       if (ret) {
+               dev_err(dev, "%d: Failed to send %s channel command\n",
+                       mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+               goto exit_channel_update;
        }
 
-       mhi_cntrl->wake_toggle(mhi_cntrl);
-       read_unlock_bh(&mhi_cntrl->pm_lock);
+       ret = wait_for_completion_timeout(&mhi_chan->completion,
+                                      msecs_to_jiffies(mhi_cntrl->timeout_ms));
+       if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
+               dev_err(dev,
+                       "%d: Failed to receive %s channel command completion\n",
+                       mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+               ret = -EIO;
+               goto exit_channel_update;
+       }
 
-       mhi_cntrl->runtime_get(mhi_cntrl);
+       ret = 0;
+
+       if (to_state != MHI_CH_STATE_TYPE_RESET) {
+               write_lock_irq(&mhi_chan->lock);
+               mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
+                                     MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
+               write_unlock_irq(&mhi_chan->lock);
+       }
+
+       dev_dbg(dev, "%d: Channel state change to %s successful\n",
+               mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+
+exit_channel_update:
        mhi_cntrl->runtime_put(mhi_cntrl);
-       ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
+       mhi_device_put(mhi_cntrl->mhi_dev);
+
+       return ret;
+}
+
+static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+                                 struct mhi_chan *mhi_chan)
+{
+       int ret;
+       struct device *dev = &mhi_chan->mhi_dev->dev;
+
+       mutex_lock(&mhi_chan->mutex);
+
+       if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+               dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
+                       TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
+               goto exit_unprepare_channel;
+       }
+
+       /* no more processing events for this channel */
+       ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+                                      MHI_CH_STATE_TYPE_RESET);
        if (ret)
-               goto error_invalid_state;
+               dev_err(dev, "%d: Failed to reset channel, still resetting\n",
+                       mhi_chan->chan);
 
-       /* even if it fails we will still reset */
-       ret = wait_for_completion_timeout(&mhi_chan->completion,
-                               msecs_to_jiffies(mhi_cntrl->timeout_ms));
-       if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
-               dev_err(dev,
-                       "Failed to receive cmd completion, still resetting\n");
+exit_unprepare_channel:
+       write_lock_irq(&mhi_chan->lock);
+       mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+       write_unlock_irq(&mhi_chan->lock);
 
-error_invalid_state:
        if (!mhi_chan->offload_ch) {
                mhi_reset_chan(mhi_cntrl, mhi_chan);
                mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
        }
-       dev_dbg(dev, "chan:%d successfully resetted\n", mhi_chan->chan);
+       dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
+
        mutex_unlock(&mhi_chan->mutex);
 }
 
@@ -1240,28 +1426,16 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
                        struct mhi_chan *mhi_chan)
 {
        int ret = 0;
-       struct device *dev = &mhi_cntrl->mhi_dev->dev;
-
-       dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);
+       struct device *dev = &mhi_chan->mhi_dev->dev;
 
        if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
-               dev_err(dev,
-                       "Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
-                       TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
-                       mhi_chan->name);
+               dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
+                       TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
                return -ENOTCONN;
        }
 
        mutex_lock(&mhi_chan->mutex);
 
-       /* If channel is not in disable state, do not allow it to start */
-       if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
-               ret = -EIO;
-               dev_dbg(dev, "channel: %d is not in disabled state\n",
-                       mhi_chan->chan);
-               goto error_init_chan;
-       }
-
        /* Check of client manages channel context for offload channels */
        if (!mhi_chan->offload_ch) {
                ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
@@ -1269,34 +1443,11 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
                        goto error_init_chan;
        }
 
-       reinit_completion(&mhi_chan->completion);
-       read_lock_bh(&mhi_cntrl->pm_lock);
-       if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
-               read_unlock_bh(&mhi_cntrl->pm_lock);
-               ret = -EIO;
-               goto error_pm_state;
-       }
-
-       mhi_cntrl->wake_toggle(mhi_cntrl);
-       read_unlock_bh(&mhi_cntrl->pm_lock);
-       mhi_cntrl->runtime_get(mhi_cntrl);
-       mhi_cntrl->runtime_put(mhi_cntrl);
-
-       ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
+       ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+                                      MHI_CH_STATE_TYPE_START);
        if (ret)
                goto error_pm_state;
 
-       ret = wait_for_completion_timeout(&mhi_chan->completion,
-                               msecs_to_jiffies(mhi_cntrl->timeout_ms));
-       if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
-               ret = -EIO;
-               goto error_pm_state;
-       }
-
-       write_lock_irq(&mhi_chan->lock);
-       mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
-       write_unlock_irq(&mhi_chan->lock);
-
        /* Pre-allocate buffer for xfer ring */
        if (mhi_chan->pre_alloc) {
                int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
@@ -1334,9 +1485,6 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
 
        mutex_unlock(&mhi_chan->mutex);
 
-       dev_dbg(dev, "Chan: %d successfully moved to start state\n",
-               mhi_chan->chan);
-
        return 0;
 
 error_pm_state:
@@ -1350,7 +1498,7 @@ error_init_chan:
 
 error_pre_alloc:
        mutex_unlock(&mhi_chan->mutex);
-       __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+       mhi_unprepare_channel(mhi_cntrl, mhi_chan);
 
        return ret;
 }
@@ -1365,6 +1513,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
        struct mhi_ring *ev_ring;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        unsigned long flags;
+       dma_addr_t ptr;
 
        dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
 
@@ -1372,7 +1521,15 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
 
        /* mark all stale events related to channel as STALE event */
        spin_lock_irqsave(&mhi_event->lock, flags);
-       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+       ptr = er_ctxt->rp;
+       if (!is_valid_ring_ptr(ev_ring, ptr)) {
+               dev_err(&mhi_cntrl->mhi_dev->dev,
+                       "Event ring rp points outside of the event ring\n");
+               dev_rp = ev_ring->rp;
+       } else {
+               dev_rp = mhi_to_virtual(ev_ring, ptr);
+       }
 
        local_rp = ev_ring->rp;
        while (dev_rp != local_rp) {
@@ -1403,8 +1560,11 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
        while (tre_ring->rp != tre_ring->wp) {
                struct mhi_buf_info *buf_info = buf_ring->rp;
 
-               if (mhi_chan->dir == DMA_TO_DEVICE)
+               if (mhi_chan->dir == DMA_TO_DEVICE) {
                        atomic_dec(&mhi_cntrl->pending_pkts);
+                       /* Release the reference got from mhi_queue() */
+                       mhi_cntrl->runtime_put(mhi_cntrl);
+               }
 
                if (!buf_info->pre_mapped)
                        mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
@@ -1467,7 +1627,7 @@ error_open_chan:
                if (!mhi_chan)
                        continue;
 
-               __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+               mhi_unprepare_channel(mhi_cntrl, mhi_chan);
        }
 
        return ret;
@@ -1485,7 +1645,7 @@ void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
                if (!mhi_chan)
                        continue;
 
-               __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+               mhi_unprepare_channel(mhi_cntrl, mhi_chan);
        }
 }
 EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
index 681960c72d2a8629560b7d7c65c48a22cd0eec80..e2e59a341fef64ee707a72dc4fbf38ca17aef08d 100644 (file)
@@ -153,35 +153,33 @@ static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
 /* Handle device ready state transition */
 int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
 {
-       void __iomem *base = mhi_cntrl->regs;
        struct mhi_event *mhi_event;
        enum mhi_pm_state cur_state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
-       u32 reset = 1, ready = 0;
+       u32 interval_us = 25000; /* poll register field every 25 milliseconds */
        int ret, i;
 
-       /* Wait for RESET to be cleared and READY bit to be set by the device */
-       wait_event_timeout(mhi_cntrl->state_event,
-                          MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
-                          mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
-                                             MHICTRL_RESET_MASK,
-                                             MHICTRL_RESET_SHIFT, &reset) ||
-                          mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
-                                             MHISTATUS_READY_MASK,
-                                             MHISTATUS_READY_SHIFT, &ready) ||
-                          (!reset && ready),
-                          msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
        /* Check if device entered error state */
        if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
                dev_err(dev, "Device link is not accessible\n");
                return -EIO;
        }
 
-       /* Timeout if device did not transition to ready state */
-       if (reset || !ready) {
-               dev_err(dev, "Device Ready timeout\n");
-               return -ETIMEDOUT;
+       /* Wait for RESET to be cleared and READY bit to be set by the device */
+       ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+                                MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
+                                interval_us);
+       if (ret) {
+               dev_err(dev, "Device failed to clear MHI Reset\n");
+               return ret;
+       }
+
+       ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
+                                MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT, 1,
+                                interval_us);
+       if (ret) {
+               dev_err(dev, "Device failed to enter MHI Ready\n");
+               return ret;
        }
 
        dev_dbg(dev, "Device in READY State\n");
@@ -377,24 +375,28 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 {
        struct mhi_event *mhi_event;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
        int i, ret;
 
        dev_dbg(dev, "Processing Mission Mode transition\n");
 
        write_lock_irq(&mhi_cntrl->pm_lock);
        if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
-               mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+               ee = mhi_get_exec_env(mhi_cntrl);
 
-       if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
+       if (!MHI_IN_MISSION_MODE(ee)) {
                mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
                write_unlock_irq(&mhi_cntrl->pm_lock);
                wake_up_all(&mhi_cntrl->state_event);
                return -EIO;
        }
+       mhi_cntrl->ee = ee;
        write_unlock_irq(&mhi_cntrl->pm_lock);
 
        wake_up_all(&mhi_cntrl->state_event);
 
+       device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
+                             mhi_destroy_device);
        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
 
        /* Force MHI to be in M0 state before continuing */
@@ -560,6 +562,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
 static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
 {
        enum mhi_pm_state cur_state, prev_state;
+       enum dev_st_transition next_state;
        struct mhi_event *mhi_event;
        struct mhi_cmd_ctxt *cmd_ctxt;
        struct mhi_cmd *mhi_cmd;
@@ -673,7 +676,23 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
                er_ctxt->wp = er_ctxt->rbase;
        }
 
-       mhi_ready_state_transition(mhi_cntrl);
+       /* Transition to next state */
+       if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
+               write_lock_irq(&mhi_cntrl->pm_lock);
+               cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
+               write_unlock_irq(&mhi_cntrl->pm_lock);
+               if (cur_state != MHI_PM_POR) {
+                       dev_err(dev, "Error moving to state %s from %s\n",
+                               to_mhi_pm_state_str(MHI_PM_POR),
+                               to_mhi_pm_state_str(cur_state));
+                       goto exit_sys_error_transition;
+               }
+               next_state = DEV_ST_TRANSITION_PBL;
+       } else {
+               next_state = DEV_ST_TRANSITION_READY;
+       }
+
+       mhi_queue_state_transition(mhi_cntrl, next_state);
 
 exit_sys_error_transition:
        dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
@@ -742,8 +761,7 @@ void mhi_pm_st_worker(struct work_struct *work)
                        if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
                                mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
                        write_unlock_irq(&mhi_cntrl->pm_lock);
-                       if (MHI_IN_PBL(mhi_cntrl->ee))
-                               mhi_fw_load_handler(mhi_cntrl);
+                       mhi_fw_load_handler(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_SBL:
                        write_lock_irq(&mhi_cntrl->pm_lock);
@@ -755,10 +773,18 @@ void mhi_pm_st_worker(struct work_struct *work)
                         * either SBL or AMSS states
                         */
                        mhi_create_devices(mhi_cntrl);
+                       if (mhi_cntrl->fbc_download)
+                               mhi_download_amss_image(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_MISSION_MODE:
                        mhi_pm_mission_mode_transition(mhi_cntrl);
                        break;
+               case DEV_ST_TRANSITION_FP:
+                       write_lock_irq(&mhi_cntrl->pm_lock);
+                       mhi_cntrl->ee = MHI_EE_FP;
+                       write_unlock_irq(&mhi_cntrl->pm_lock);
+                       mhi_create_devices(mhi_cntrl);
+                       break;
                case DEV_ST_TRANSITION_READY:
                        mhi_ready_state_transition(mhi_cntrl);
                        break;
@@ -822,7 +848,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
                return -EBUSY;
        }
 
-       dev_info(dev, "Allowing M3 transition\n");
+       dev_dbg(dev, "Allowing M3 transition\n");
        new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
        if (new_state != MHI_PM_M3_ENTER) {
                write_unlock_irq(&mhi_cntrl->pm_lock);
@@ -836,7 +862,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
        /* Set MHI to M3 and wait for completion */
        mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
        write_unlock_irq(&mhi_cntrl->pm_lock);
-       dev_info(dev, "Wait for M3 completion\n");
+       dev_dbg(dev, "Waiting for M3 completion\n");
 
        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 mhi_cntrl->dev_state == MHI_STATE_M3 ||
@@ -870,9 +896,9 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
        enum mhi_pm_state cur_state;
        int ret;
 
-       dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
-                to_mhi_pm_state_str(mhi_cntrl->pm_state),
-                TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+       dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
+               to_mhi_pm_state_str(mhi_cntrl->pm_state),
+               TO_MHI_STATE_STR(mhi_cntrl->dev_state));
 
        if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
                return 0;
@@ -880,6 +906,9 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
                return -EIO;
 
+       if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3)
+               return -EINVAL;
+
        /* Notify clients about exiting LPM */
        list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
                mutex_lock(&itr->mutex);
@@ -1033,13 +1062,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
        mutex_lock(&mhi_cntrl->pm_mutex);
        mhi_cntrl->pm_state = MHI_PM_DISABLE;
 
-       if (!mhi_cntrl->pre_init) {
-               /* Setup device context */
-               ret = mhi_init_dev_ctxt(mhi_cntrl);
-               if (ret)
-                       goto error_dev_ctxt;
-       }
-
        ret = mhi_init_irq_setup(mhi_cntrl);
        if (ret)
                goto error_setup_irq;
@@ -1092,7 +1114,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
                                                           &val) ||
                                        !val,
                                msecs_to_jiffies(mhi_cntrl->timeout_ms));
-               if (ret) {
+               if (!ret) {
                        ret = -EIO;
                        dev_info(dev, "Failed to reset MHI due to syserr state\n");
                        goto error_bhi_offset;
@@ -1121,10 +1143,7 @@ error_bhi_offset:
        mhi_deinit_free_irq(mhi_cntrl);
 
 error_setup_irq:
-       if (!mhi_cntrl->pre_init)
-               mhi_deinit_dev_ctxt(mhi_cntrl);
-
-error_dev_ctxt:
+       mhi_cntrl->pm_state = MHI_PM_DISABLE;
        mutex_unlock(&mhi_cntrl->pm_mutex);
 
        return ret;
@@ -1136,12 +1155,19 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
        enum mhi_pm_state cur_state, transition_state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
 
+       mutex_lock(&mhi_cntrl->pm_mutex);
+       write_lock_irq(&mhi_cntrl->pm_lock);
+       cur_state = mhi_cntrl->pm_state;
+       if (cur_state == MHI_PM_DISABLE) {
+               write_unlock_irq(&mhi_cntrl->pm_lock);
+               mutex_unlock(&mhi_cntrl->pm_mutex);
+               return; /* Already powered down */
+       }
+
        /* If it's not a graceful shutdown, force MHI to linkdown state */
        transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
                           MHI_PM_LD_ERR_FATAL_DETECT;
 
-       mutex_lock(&mhi_cntrl->pm_mutex);
-       write_lock_irq(&mhi_cntrl->pm_lock);
        cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
        if (cur_state != transition_state) {
                dev_err(dev, "Failed to move to state: %s from: %s\n",
@@ -1166,15 +1192,6 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
        flush_work(&mhi_cntrl->st_worker);
 
        free_irq(mhi_cntrl->irq[0], mhi_cntrl);
-
-       if (!mhi_cntrl->pre_init) {
-               /* Free all allocated resources */
-               if (mhi_cntrl->fbc_image) {
-                       mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
-                       mhi_cntrl->fbc_image = NULL;
-               }
-               mhi_deinit_dev_ctxt(mhi_cntrl);
-       }
 }
 EXPORT_SYMBOL_GPL(mhi_power_down);
 
index 20673a4b4a3c657ed97d59cbc2bac29af35ce962..7c810f02a2ef4d164f959e3a02a7aa6684bfe32b 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/mhi.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 
@@ -71,9 +72,9 @@ struct mhi_pci_dev_info {
                .doorbell_mode_switch = false,          \
        }
 
-#define MHI_EVENT_CONFIG_CTRL(ev_ring)         \
+#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
        {                                       \
-               .num_elements = 64,             \
+               .num_elements = el_count,       \
                .irq_moderation_ms = 0,         \
                .irq = (ev_ring) + 1,           \
                .priority = 1,                  \
@@ -114,9 +115,69 @@ struct mhi_pci_dev_info {
                .doorbell_mode_switch = true,           \
        }
 
-#define MHI_EVENT_CONFIG_DATA(ev_ring)         \
+#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
+       {                                               \
+               .num = ch_num,                          \
+               .name = ch_name,                        \
+               .num_elements = el_count,               \
+               .event_ring = ev_ring,                  \
+               .dir = DMA_TO_DEVICE,                   \
+               .ee_mask = BIT(MHI_EE_SBL),             \
+               .pollcfg = 0,                           \
+               .doorbell = MHI_DB_BRST_DISABLE,        \
+               .lpm_notify = false,                    \
+               .offload_channel = false,               \
+               .doorbell_mode_switch = false,          \
+       }                                               \
+
+#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
+       {                                               \
+               .num = ch_num,                          \
+               .name = ch_name,                        \
+               .num_elements = el_count,               \
+               .event_ring = ev_ring,                  \
+               .dir = DMA_FROM_DEVICE,                 \
+               .ee_mask = BIT(MHI_EE_SBL),             \
+               .pollcfg = 0,                           \
+               .doorbell = MHI_DB_BRST_DISABLE,        \
+               .lpm_notify = false,                    \
+               .offload_channel = false,               \
+               .doorbell_mode_switch = false,          \
+       }
+
+#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
+       {                                               \
+               .num = ch_num,                          \
+               .name = ch_name,                        \
+               .num_elements = el_count,               \
+               .event_ring = ev_ring,                  \
+               .dir = DMA_TO_DEVICE,                   \
+               .ee_mask = BIT(MHI_EE_FP),              \
+               .pollcfg = 0,                           \
+               .doorbell = MHI_DB_BRST_DISABLE,        \
+               .lpm_notify = false,                    \
+               .offload_channel = false,               \
+               .doorbell_mode_switch = false,          \
+       }                                               \
+
+#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
+       {                                               \
+               .num = ch_num,                          \
+               .name = ch_name,                        \
+               .num_elements = el_count,               \
+               .event_ring = ev_ring,                  \
+               .dir = DMA_FROM_DEVICE,                 \
+               .ee_mask = BIT(MHI_EE_FP),              \
+               .pollcfg = 0,                           \
+               .doorbell = MHI_DB_BRST_DISABLE,        \
+               .lpm_notify = false,                    \
+               .offload_channel = false,               \
+               .doorbell_mode_switch = false,          \
+       }
+
+#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
        {                                       \
-               .num_elements = 128,            \
+               .num_elements = el_count,       \
                .irq_moderation_ms = 5,         \
                .irq = (ev_ring) + 1,           \
                .priority = 1,                  \
@@ -127,9 +188,9 @@ struct mhi_pci_dev_info {
                .offload_channel = false,       \
        }
 
-#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, ch_num) \
+#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
        {                                       \
-               .num_elements = 2048,           \
+               .num_elements = el_count,       \
                .irq_moderation_ms = 1,         \
                .irq = (ev_ring) + 1,           \
                .priority = 1,                  \
@@ -150,21 +211,23 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
        MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
        MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
        MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
+       MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+       MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
        MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
        MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
 };
 
 static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
        /* first ring is control+data ring */
-       MHI_EVENT_CONFIG_CTRL(0),
+       MHI_EVENT_CONFIG_CTRL(0, 64),
        /* DIAG dedicated event ring */
-       MHI_EVENT_CONFIG_DATA(1),
+       MHI_EVENT_CONFIG_DATA(1, 128),
        /* Hardware channels request dedicated hardware event rings */
-       MHI_EVENT_CONFIG_HW_DATA(2, 100),
-       MHI_EVENT_CONFIG_HW_DATA(3, 101)
+       MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+       MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
 };
 
-static struct mhi_controller_config modem_qcom_v1_mhiv_config = {
+static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
        .max_channels = 128,
        .timeout_ms = 8000,
        .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
@@ -173,6 +236,15 @@ static struct mhi_controller_config modem_qcom_v1_mhiv_config = {
        .event_cfg = modem_qcom_v1_mhi_events,
 };
 
+static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
+       .name = "qcom-sdx65m",
+       .fw = "qcom/sdx65m/xbl.elf",
+       .edl = "qcom/sdx65m/edl.mbn",
+       .config = &modem_qcom_v1_mhiv_config,
+       .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+       .dma_data_width = 32
+};
+
 static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
        .name = "qcom-sdx55m",
        .fw = "qcom/sdx55m/sbl1.mbn",
@@ -182,15 +254,121 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
        .dma_data_width = 32
 };
 
+static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
+       .name = "qcom-sdx24",
+       .edl = "qcom/prog_firehose_sdx24.mbn",
+       .config = &modem_qcom_v1_mhiv_config,
+       .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+       .dma_data_width = 32
+};
+
+static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
+       MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
+       MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
+       MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+       MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
+       MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
+       MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
+       MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+       MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+       MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+       MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+       /* The EDL firmware is a flash-programmer exposing firehose protocol */
+       MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+       MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+       MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+       MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static struct mhi_event_config mhi_quectel_em1xx_events[] = {
+       MHI_EVENT_CONFIG_CTRL(0, 128),
+       MHI_EVENT_CONFIG_DATA(1, 128),
+       MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+       MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
+};
+
+static const struct mhi_controller_config modem_quectel_em1xx_config = {
+       .max_channels = 128,
+       .timeout_ms = 20000,
+       .num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
+       .ch_cfg = mhi_quectel_em1xx_channels,
+       .num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
+       .event_cfg = mhi_quectel_em1xx_events,
+};
+
+static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
+       .name = "quectel-em1xx",
+       .edl = "qcom/prog_firehose_sdx24.mbn",
+       .config = &modem_quectel_em1xx_config,
+       .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+       .dma_data_width = 32
+};
+
+static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
+       MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
+       MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
+       MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
+       MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
+       MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+       MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+       MHI_CHANNEL_CONFIG_UL(32, "AT", 32, 0),
+       MHI_CHANNEL_CONFIG_DL(33, "AT", 32, 0),
+       MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+       MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
+       MHI_EVENT_CONFIG_CTRL(0, 128),
+       MHI_EVENT_CONFIG_DATA(1, 128),
+       MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+       MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
+};
+
+static const struct mhi_controller_config modem_foxconn_sdx55_config = {
+       .max_channels = 128,
+       .timeout_ms = 20000,
+       .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
+       .ch_cfg = mhi_foxconn_sdx55_channels,
+       .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
+       .event_cfg = mhi_foxconn_sdx55_events,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
+       .name = "foxconn-sdx55",
+       .fw = "qcom/sdx55m/sbl1.mbn",
+       .edl = "qcom/sdx55m/edl.mbn",
+       .config = &modem_foxconn_sdx55_config,
+       .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+       .dma_data_width = 32
+};
+
 static const struct pci_device_id mhi_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
                .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
+       { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
+               .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
+       { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */
+               .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
+       { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */
+               .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
+       { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
+               .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
+       /* T99W175 (sdx55), Both for eSIM and Non-eSIM */
+       { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
+               .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+       /* DW5930e (sdx55), With eSIM, It's also T99W175 */
+       { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
+               .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+       /* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
+       { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
+               .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
        {  }
 };
 MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
 
 enum mhi_pci_device_status {
        MHI_PCI_DEV_STARTED,
+       MHI_PCI_DEV_SUSPENDED,
 };
 
 struct mhi_pci_device {
@@ -224,12 +402,31 @@ static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
        case MHI_CB_FATAL_ERROR:
        case MHI_CB_SYS_ERROR:
                dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
+               pm_runtime_forbid(&pdev->dev);
+               break;
+       case MHI_CB_EE_MISSION_MODE:
+               pm_runtime_allow(&pdev->dev);
                break;
        default:
                break;
        }
 }
 
+static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
+{
+       /* no-op */
+}
+
+static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
+{
+       /* no-op */
+}
+
+static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
+{
+       /* no-op */
+}
+
 static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
 {
        struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
@@ -330,13 +527,19 @@ static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
 
 static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
 {
-       /* no PM for now */
-       return 0;
+       /* The runtime_get() MHI callback means:
+        *    Do whatever is requested to leave M3.
+        */
+       return pm_runtime_get(mhi_cntrl->cntrl_dev);
 }
 
 static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
 {
-       /* no PM for now */
+       /* The runtime_put() MHI callback means:
+        *    Device can be moved in M3 state.
+        */
+       pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
+       pm_runtime_put(mhi_cntrl->cntrl_dev);
 }
 
 static void mhi_pci_recovery_work(struct work_struct *work)
@@ -350,6 +553,7 @@ static void mhi_pci_recovery_work(struct work_struct *work)
        dev_warn(&pdev->dev, "device recovery started\n");
 
        del_timer(&mhi_pdev->health_check_timer);
+       pm_runtime_forbid(&pdev->dev);
 
        /* Clean up MHI state */
        if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -357,7 +561,6 @@ static void mhi_pci_recovery_work(struct work_struct *work)
                mhi_unprepare_after_power_down(mhi_cntrl);
        }
 
-       /* Check if we can recover without full reset */
        pci_set_power_state(pdev, PCI_D0);
        pci_load_saved_state(pdev, mhi_pdev->pci_state);
        pci_restore_state(pdev);
@@ -391,6 +594,10 @@ static void health_check(struct timer_list *t)
        struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
        struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
 
+       if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+                       test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+               return;
+
        if (!mhi_pci_is_alive(mhi_cntrl)) {
                dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
                queue_work(system_long_wq, &mhi_pdev->recovery_work);
@@ -433,6 +640,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        mhi_cntrl->status_cb = mhi_pci_status_cb;
        mhi_cntrl->runtime_get = mhi_pci_runtime_get;
        mhi_cntrl->runtime_put = mhi_pci_runtime_put;
+       mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
+       mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
+       mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
 
        err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
        if (err)
@@ -444,9 +654,12 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        pci_set_drvdata(pdev, mhi_pdev);
 
-       /* Have stored pci confspace at hand for restore in sudden PCI error */
+       /* Have stored pci confspace at hand for restore in sudden PCI error.
+        * cache the state locally and discard the PCI core one.
+        */
        pci_save_state(pdev);
        mhi_pdev->pci_state = pci_store_saved_state(pdev);
+       pci_load_saved_state(pdev, NULL);
 
        pci_enable_pcie_error_reporting(pdev);
 
@@ -472,6 +685,14 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        /* start health check */
        mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
 
+       /* Only allow runtime-suspend if PME capable (for wakeup) */
+       if (pci_pme_capable(pdev, PCI_D3hot)) {
+               pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+               pm_runtime_use_autosuspend(&pdev->dev);
+               pm_runtime_mark_last_busy(&pdev->dev);
+               pm_runtime_put_noidle(&pdev->dev);
+       }
+
        return 0;
 
 err_unprepare:
@@ -495,9 +716,19 @@ static void mhi_pci_remove(struct pci_dev *pdev)
                mhi_unprepare_after_power_down(mhi_cntrl);
        }
 
+       /* balancing probe put_noidle */
+       if (pci_pme_capable(pdev, PCI_D3hot))
+               pm_runtime_get_noresume(&pdev->dev);
+
        mhi_unregister_controller(mhi_cntrl);
 }
 
+static void mhi_pci_shutdown(struct pci_dev *pdev)
+{
+       mhi_pci_remove(pdev);
+       pci_set_power_state(pdev, PCI_D3hot);
+}
+
 static void mhi_pci_reset_prepare(struct pci_dev *pdev)
 {
        struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
@@ -605,41 +836,59 @@ static const struct pci_error_handlers mhi_pci_err_handler = {
        .reset_done = mhi_pci_reset_done,
 };
 
-static int  __maybe_unused mhi_pci_suspend(struct device *dev)
+static int  __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
        struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+       int err;
+
+       if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+               return 0;
 
        del_timer(&mhi_pdev->health_check_timer);
        cancel_work_sync(&mhi_pdev->recovery_work);
 
+       if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+                       mhi_cntrl->ee != MHI_EE_AMSS)
+               goto pci_suspend; /* Nothing to do at MHI level */
+
        /* Transition to M3 state */
-       mhi_pm_suspend(mhi_cntrl);
+       err = mhi_pm_suspend(mhi_cntrl);
+       if (err) {
+               dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
+               clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
+               return -EBUSY;
+       }
 
-       pci_save_state(pdev);
+pci_suspend:
        pci_disable_device(pdev);
        pci_wake_from_d3(pdev, true);
-       pci_set_power_state(pdev, PCI_D3hot);
 
        return 0;
 }
 
-static int __maybe_unused mhi_pci_resume(struct device *dev)
+static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
        struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
        int err;
 
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-       pci_set_master(pdev);
+       if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+               return 0;
 
        err = pci_enable_device(pdev);
        if (err)
                goto err_recovery;
 
+       pci_set_master(pdev);
+       pci_wake_from_d3(pdev, false);
+
+       if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+                       mhi_cntrl->ee != MHI_EE_AMSS)
+               return 0; /* Nothing to do at MHI level */
+
        /* Exit M3, transition to M0 state */
        err = mhi_pm_resume(mhi_cntrl);
        if (err) {
@@ -650,16 +899,44 @@ static int __maybe_unused mhi_pci_resume(struct device *dev)
        /* Resume health check */
        mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
 
+       /* It can be a remote wakeup (no mhi runtime_get), update access time */
+       pm_runtime_mark_last_busy(dev);
+
        return 0;
 
 err_recovery:
-       /* The device may have loose power or crashed, try recovering it */
+       /* Do not fail to not mess up our PCI device state, the device likely
+        * lost power (d3cold) and we simply need to reset it from the recovery
+        * procedure, trigger the recovery asynchronously to prevent system
+        * suspend exit delaying.
+        */
        queue_work(system_long_wq, &mhi_pdev->recovery_work);
+       pm_runtime_mark_last_busy(dev);
 
-       return err;
+       return 0;
+}
+
+static int  __maybe_unused mhi_pci_suspend(struct device *dev)
+{
+       pm_runtime_disable(dev);
+       return mhi_pci_runtime_suspend(dev);
+}
+
+static int __maybe_unused mhi_pci_resume(struct device *dev)
+{
+       int ret;
+
+       /* Depending the platform, device may have lost power (d3cold), we need
+        * to resume it now to check its state and recover when necessary.
+        */
+       ret = mhi_pci_runtime_resume(dev);
+       pm_runtime_enable(dev);
+
+       return ret;
 }
 
 static const struct dev_pm_ops mhi_pci_pm_ops = {
+       SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
 };
 
@@ -668,6 +945,7 @@ static struct pci_driver mhi_pci_driver = {
        .id_table       = mhi_pci_id_table,
        .probe          = mhi_pci_probe,
        .remove         = mhi_pci_remove,
+       .shutdown       = mhi_pci_shutdown,
        .err_handler    = &mhi_pci_err_handler,
        .driver.pm      = &mhi_pci_pm_ops
 };
index b20fdcbd035b21cd4625ea5b401fe4cf9181c70b..fd87a59837fa2f4cb1f659301d2a71c6a2007e92 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Turris Mox module configuration bus driver
  *
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
  */
 
 #include <dt-bindings/bus/moxtet.h>
@@ -879,6 +879,6 @@ static void __exit moxtet_exit(void)
 }
 module_exit(moxtet_exit);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus");
 MODULE_LICENSE("GPL v2");
index dd9e7343a5e32561674c294d2f281b4183bb14b4..ea0424922de751f14b3f6a26b0ccba0f3ab63305 100644 (file)
@@ -618,7 +618,7 @@ mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
                 * This part of the memory is above 4 GB, so we don't
                 * care for the MBus bridge hole.
                 */
-               if (reg_start >= 0x100000000ULL)
+               if ((u64)reg_start >= 0x100000000ULL)
                        continue;
 
                /*
index a086dd34f932f1bd8946c3abdce1af1aedb52771..4f501e4842ab391d72a49edc6ebf10e3d13930fa 100644 (file)
@@ -125,7 +125,7 @@ config AGP_HP_ZX1
 
 config AGP_PARISC
        tristate "HP Quicksilver AGP support"
-       depends on AGP && PARISC && 64BIT
+       depends on AGP && PARISC && 64BIT && IOMMU_SBA
        help
          This option gives you AGP GART support for the HP Quicksilver
          AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
index 45ac7ab003ce30cb1b2f454c28948643366bb865..deb85a334c937506f68c80ecd235b42fdb567b44 100644 (file)
@@ -836,7 +836,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        Dummy = readb(apbs[IndexCard].RamIO + VERS);
        kfree(adgl);
        mutex_unlock(&ac_mutex);
-       return 0;
+       return ret;
 
 err:
        if (warncount) {
index 410b50b05e2104a95cd46e0dfc8c45e5ef486946..5b7ca0416490a27b78557b9939ebd80e3cd4706b 100644 (file)
@@ -170,7 +170,6 @@ static int ba431_trng_init(struct hwrng *rng)
 static int ba431_trng_probe(struct platform_device *pdev)
 {
        struct ba431_trng *ba431;
-       struct resource *res;
        int ret;
 
        ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL);
@@ -179,8 +178,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
 
        ba431->dev = &pdev->dev;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ba431->base = devm_ioremap_resource(&pdev->dev, res);
+       ba431->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ba431->base))
                return PTR_ERR(ba431->base);
 
@@ -193,7 +191,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, ba431);
 
-       ret = hwrng_register(&ba431->rng);
+       ret = devm_hwrng_register(&pdev->dev, &ba431->rng);
        if (ret) {
                dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret);
                return ret;
@@ -204,15 +202,6 @@ static int ba431_trng_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int ba431_trng_remove(struct platform_device *pdev)
-{
-       struct ba431_trng *ba431 = platform_get_drvdata(pdev);
-
-       hwrng_unregister(&ba431->rng);
-
-       return 0;
-}
-
 static const struct of_device_id ba431_trng_dt_ids[] = {
        { .compatible = "silex-insight,ba431-rng", .data = NULL },
        { /* sentinel */ }
@@ -225,7 +214,6 @@ static struct platform_driver ba431_trng_driver = {
                .of_match_table = ba431_trng_dt_ids,
        },
        .probe = ba431_trng_probe,
-       .remove = ba431_trng_remove,
 };
 
 module_platform_driver(ba431_trng_driver);
index 1a7c43b43c6b093837184b3b4a6183982dfa3436..e7dd457e9b22bde15bb896762ac170b95aa662f1 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/printk.h>
 #include <linux/clk.h>
+#include <linux/reset.h>
 
 #define RNG_CTRL       0x0
 #define RNG_STATUS     0x4
@@ -32,6 +33,7 @@ struct bcm2835_rng_priv {
        void __iomem *base;
        bool mask_interrupts;
        struct clk *clk;
+       struct reset_control *reset;
 };
 
 static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
@@ -88,11 +90,13 @@ static int bcm2835_rng_init(struct hwrng *rng)
        int ret = 0;
        u32 val;
 
-       if (!IS_ERR(priv->clk)) {
-               ret = clk_prepare_enable(priv->clk);
-               if (ret)
-                       return ret;
-       }
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
+
+       ret = reset_control_reset(priv->reset);
+       if (ret)
+               return ret;
 
        if (priv->mask_interrupts) {
                /* mask the interrupt */
@@ -115,8 +119,7 @@ static void bcm2835_rng_cleanup(struct hwrng *rng)
        /* disable rng hardware */
        rng_writel(priv, 0, RNG_CTRL);
 
-       if (!IS_ERR(priv->clk))
-               clk_disable_unprepare(priv->clk);
+       clk_disable_unprepare(priv->clk);
 }
 
 struct bcm2835_rng_of_data {
@@ -155,9 +158,13 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
                return PTR_ERR(priv->base);
 
        /* Clock is optional on most platforms */
-       priv->clk = devm_clk_get(dev, NULL);
-       if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
-               return -EPROBE_DEFER;
+       priv->clk = devm_clk_get_optional(dev, NULL);
+       if (IS_ERR(priv->clk))
+               return PTR_ERR(priv->clk);
+
+       priv->reset = devm_reset_control_get_optional_exclusive(dev, NULL);
+       if (IS_ERR(priv->reset))
+               return PTR_ERR(priv->reset);
 
        priv->rng.name = pdev->name;
        priv->rng.init = bcm2835_rng_init;
index 7a293f2147a00c7586a068297c4b50160dd1a02f..302ffa354c2fda6cf59432b0af2650f55f9606a6 100644 (file)
@@ -486,7 +486,6 @@ static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
 
 static int cctrng_probe(struct platform_device *pdev)
 {
-       struct resource *req_mem_cc_regs = NULL;
        struct cctrng_drvdata *drvdata;
        struct device *dev = &pdev->dev;
        int rc = 0;
@@ -510,27 +509,16 @@ static int cctrng_probe(struct platform_device *pdev)
 
        drvdata->circ.buf = (char *)drvdata->data_buf;
 
-       /* Get device resources */
-       /* First CC registers space */
-       req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       /* Map registers space */
-       drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+       drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(drvdata->cc_base)) {
                dev_err(dev, "Failed to ioremap registers");
                return PTR_ERR(drvdata->cc_base);
        }
 
-       dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
-               req_mem_cc_regs);
-       dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
-               &req_mem_cc_regs->start, drvdata->cc_base);
-
        /* Then IRQ */
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "Failed getting IRQ resource\n");
+       if (irq < 0)
                return irq;
-       }
 
        /* parse sampling rate from device tree */
        rc = cc_trng_parse_sampling_ratio(drvdata);
@@ -585,7 +573,7 @@ static int cctrng_probe(struct platform_device *pdev)
        atomic_set(&drvdata->pending_hw, 1);
 
        /* registration of the hwrng device */
-       rc = hwrng_register(&drvdata->rng);
+       rc = devm_hwrng_register(dev, &drvdata->rng);
        if (rc) {
                dev_err(dev, "Could not register hwrng device.\n");
                goto post_pm_err;
@@ -618,8 +606,6 @@ static int cctrng_remove(struct platform_device *pdev)
 
        dev_dbg(dev, "Releasing cctrng resources...\n");
 
-       hwrng_unregister(&drvdata->rng);
-
        cc_trng_pm_fini(drvdata);
 
        cc_trng_clk_fini(drvdata);
index 8c1c47dd9f4644d84af5f519c107fa29450651a3..adb3c2bd7783e8d6907c5a62da73466730b54204 100644 (file)
@@ -396,7 +396,7 @@ static ssize_t hwrng_attr_selected_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
+       return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
 }
 
 static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
index eb7db27f9f196624c6ae857ffabb8bea950ff806..d740b8814bf3fe1ea02c8bc4747920e51ec60a22 100644 (file)
  */
 
 #include <linux/hw_random.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/stop_machine.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <asm/io.h>
 
 
 #define PFX    KBUILD_MODNAME ": "
index 5cc5fc504968267b0d17251477477f41ea7a3fee..cede9f15910293752281e34df0dbc35b186bfe05 100644 (file)
@@ -30,8 +30,7 @@
 #include <linux/of_address.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
 
 #define RNG_REG_STATUS_RDY                     (1 << 0)
 
@@ -378,16 +377,13 @@ MODULE_DEVICE_TABLE(of, omap_rng_of_match);
 static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
                                          struct platform_device *pdev)
 {
-       const struct of_device_id *match;
        struct device *dev = &pdev->dev;
        int irq, err;
 
-       match = of_match_device(of_match_ptr(omap_rng_of_match), dev);
-       if (!match) {
-               dev_err(dev, "no compatible OF match\n");
-               return -EINVAL;
-       }
-       priv->pdata = match->data;
+       priv->pdata = of_device_get_match_data(dev);
+       if (!priv->pdata)
+               return -ENODEV;
+
 
        if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
            of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
index e8210c1715cfd50aefff9c561b30b3fa389a873b..99c8bd0859a1487cfe933e9e9309d82cafc07b90 100644 (file)
@@ -96,7 +96,7 @@ static int pic32_rng_probe(struct platform_device *pdev)
        priv->rng.name = pdev->name;
        priv->rng.read = pic32_rng_read;
 
-       ret = hwrng_register(&priv->rng);
+       ret = devm_hwrng_register(&pdev->dev, &priv->rng);
        if (ret)
                goto err_register;
 
@@ -113,7 +113,6 @@ static int pic32_rng_remove(struct platform_device *pdev)
 {
        struct pic32_rng *rng = platform_get_drvdata(pdev);
 
-       hwrng_unregister(&rng->rng);
        writel(0, rng->base + RNGCON);
        clk_disable_unprepare(rng->clk);
        return 0;
index 7bdab8c8a6a8f40b719574750e552a82a95b7cd4..2a9fea72b2e0d0bc7426b0a5831fff0ffc078614 100644 (file)
@@ -63,14 +63,12 @@ static int xiphera_trng_probe(struct platform_device *pdev)
        int ret;
        struct xiphera_trng *trng;
        struct device *dev = &pdev->dev;
-       struct resource *res;
 
        trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
        if (!trng)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       trng->mem = devm_ioremap_resource(dev, res);
+       trng->mem = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(trng->mem))
                return PTR_ERR(trng->mem);
 
index 862c2fd933c7beace6e01d6e573d5f510ee01cb4..0e22e3b0a04e7f621f67f919b3c00ce500f62dd6 100644 (file)
@@ -546,7 +546,7 @@ static int lp_open(struct inode *inode, struct file *file)
        }
        /* Determine if the peripheral supports ECP mode */
        lp_claim_parport_or_block(&lp_table[minor]);
-       if ( (lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) &&
+       if ((lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) &&
             !parport_negotiate(lp_table[minor].dev->port,
                                 IEEE1284_MODE_ECP)) {
                printk(KERN_INFO "lp%d: ECP mode\n", minor);
@@ -590,7 +590,7 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
                return -ENODEV;
        if ((LP_F(minor) & LP_EXIST) == 0)
                return -ENODEV;
-       switch ( cmd ) {
+       switch (cmd) {
                case LPTIME:
                        if (arg > UINT_MAX / HZ)
                                return -EINVAL;
index 5e1618a76b2a31df845d9b9c16787b20207a8a02..8588b51202e53a32d50fc9aedb16883d0df5d7ad 100644 (file)
@@ -177,14 +177,10 @@ int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData)
        return retval;
 }
 
-int tp3780I_Cleanup(THINKPAD_BD_DATA * pBDData)
+void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData)
 {
-       int retval = 0;
-
        PRINTK_2(TRACE_TP3780I,
                "tp3780i::tp3780I_Cleanup entry and exit pBDData %p\n", pBDData);
-
-       return retval;
 }
 
 int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
index 07685b68538f79a0443c5dd65154731b18f2c1c7..8bd976d42faefc291078463ec8e726c0b39b8a7c 100644 (file)
@@ -91,7 +91,7 @@ int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData);
 int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData);
 int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData);
 int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities);
-int tp3780I_Cleanup(THINKPAD_BD_DATA * pBDData);
+void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData);
 int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
                                void __user *pvBuffer, unsigned int uCount,
                                unsigned long ulDSPAddr);
index 0fe9e200e4c8401681954e7f59cd785205b2924b..605969ed0f965c224b3d562ce9dd976a56aab717 100644 (file)
@@ -500,7 +500,6 @@ struct entropy_store {
        unsigned short add_ptr;
        unsigned short input_rotate;
        int entropy_count;
-       unsigned int initialized:1;
        unsigned int last_data_init:1;
        __u8 last_data[EXTRACT_SIZE];
 };
@@ -660,7 +659,7 @@ static void process_random_ready_list(void)
  */
 static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
-       int entropy_count, orig, has_initialized = 0;
+       int entropy_count, orig;
        const int pool_size = r->poolinfo->poolfracbits;
        int nfrac = nbits << ENTROPY_SHIFT;
 
@@ -717,23 +716,14 @@ retry:
        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
                goto retry;
 
-       if (has_initialized) {
-               r->initialized = 1;
-               kill_fasync(&fasync, SIGIO, POLL_IN);
-       }
-
        trace_credit_entropy_bits(r->name, nbits,
                                  entropy_count >> ENTROPY_SHIFT, _RET_IP_);
 
        if (r == &input_pool) {
                int entropy_bits = entropy_count >> ENTROPY_SHIFT;
 
-               if (crng_init < 2) {
-                       if (entropy_bits < 128)
-                               return;
+               if (crng_init < 2 && entropy_bits >= 128)
                        crng_reseed(&primary_crng, r);
-                       entropy_bits = ENTROPY_BITS(r);
-               }
        }
 }
 
@@ -819,7 +809,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
 
 static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
 {
-       memcpy(&crng->state[0], "expand 32-byte k", 16);
+       chacha_init_consts(crng->state);
        _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
        crng_init_try_arch(crng);
        crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
@@ -827,7 +817,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
 
 static void __init crng_initialize_primary(struct crng_state *crng)
 {
-       memcpy(&crng->state[0], "expand 32-byte k", 16);
+       chacha_init_consts(crng->state);
        _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
        if (crng_init_try_arch_early(crng) && trust_cpu) {
                invalidate_batched_entropy();
@@ -1372,8 +1362,7 @@ retry:
 }
 
 /*
- * This function does the actual extraction for extract_entropy and
- * extract_entropy_user.
+ * This function does the actual extraction for extract_entropy.
  *
  * Note: we assume that .poolwords is a multiple of 16 words.
  */
index 3633ed70f48fa71ad2d4e2c264788bb2f75e3a21..1b18ce5ebab1ef4c49348d48a52f7de2fec48627 100644 (file)
@@ -41,6 +41,27 @@ struct acpi_tcpa {
        };
 };
 
+/* Check that the given log is indeed a TPM2 log. */
+static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
+{
+       struct tcg_efi_specid_event_head *efispecid;
+       struct tcg_pcr_event *event_header;
+       int n;
+
+       if (len < sizeof(*event_header))
+               return false;
+       len -= sizeof(*event_header);
+       event_header = bios_event_log;
+
+       if (len < sizeof(*efispecid))
+               return false;
+       efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
+
+       n = memcmp(efispecid->signature, TCG_SPECID_SIG,
+                  sizeof(TCG_SPECID_SIG));
+       return n == 0;
+}
+
 /* read binary bios log */
 int tpm_read_log_acpi(struct tpm_chip *chip)
 {
@@ -52,6 +73,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
        struct acpi_table_tpm2 *tbl;
        struct acpi_tpm2_phy *tpm2_phy;
        int format;
+       int ret;
 
        log = &chip->log;
 
@@ -112,6 +134,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
 
        log->bios_event_log_end = log->bios_event_log + len;
 
+       ret = -EIO;
        virt = acpi_os_map_iomem(start, len);
        if (!virt)
                goto err;
@@ -119,11 +142,19 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
        memcpy_fromio(log->bios_event_log, virt, len);
 
        acpi_os_unmap_iomem(virt, len);
+
+       if (chip->flags & TPM_CHIP_FLAG_TPM2 &&
+           !tpm_is_tpm2_log(log->bios_event_log, len)) {
+               /* try EFI log next */
+               ret = -ENODEV;
+               goto err;
+       }
+
        return format;
 
 err:
        kfree(log->bios_event_log);
        log->bios_event_log = NULL;
-       return -EIO;
+       return ret;
 
 }
index 7460f230bae4c6dbc8ed68acfbb42200a0c5a9b0..8512ec76d5260d6788686b33bd3dc79db34fa937 100644 (file)
@@ -107,6 +107,9 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
        int log_version;
        int rc = 0;
 
+       if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
+               return;
+
        rc = tpm_read_log(chip);
        if (rc < 0)
                return;
index 35229e5143cacde3771a460f2e1c820579eca025..e6cb9d525e30ca6457873ddc582baa21bd384264 100644 (file)
@@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
 {
 
        struct efi_tcg2_final_events_table *final_tbl = NULL;
+       int final_events_log_size = efi_tpm_final_log_size;
        struct linux_efi_tpm_eventlog *log_tbl;
        struct tpm_bios_log *log;
        u32 log_size;
@@ -66,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *chip)
        ret = tpm_log_version;
 
        if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
-           efi_tpm_final_log_size == 0 ||
+           final_events_log_size == 0 ||
            tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
                goto out;
 
        final_tbl = memremap(efi.tpm_final_log,
-                            sizeof(*final_tbl) + efi_tpm_final_log_size,
+                            sizeof(*final_tbl) + final_events_log_size,
                             MEMREMAP_WB);
        if (!final_tbl) {
                pr_err("Could not map UEFI TPM final log\n");
@@ -80,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *chip)
                goto out;
        }
 
-       efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
+       /*
+        * The 'final events log' size excludes the 'final events preboot log'
+        * at its beginning.
+        */
+       final_events_log_size -= log_tbl->final_events_preboot_size;
 
+       /*
+        * Allocate memory for the 'combined log' where we will append the
+        * 'final events log' to.
+        */
        tmp = krealloc(log->bios_event_log,
-                      log_size + efi_tpm_final_log_size,
+                      log_size + final_events_log_size,
                       GFP_KERNEL);
        if (!tmp) {
                kfree(log->bios_event_log);
@@ -94,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *chip)
        log->bios_event_log = tmp;
 
        /*
-        * Copy any of the final events log that didn't also end up in the
-        * main log. Events can be logged in both if events are generated
+        * Append any of the 'final events log' that didn't also end up in the
+        * 'main log'. Events can be logged in both if events are generated
         * between GetEventLog() and ExitBootServices().
         */
        memcpy((void *)log->bios_event_log + log_size,
               final_tbl->events + log_tbl->final_events_preboot_size,
-              efi_tpm_final_log_size);
+              final_events_log_size);
+       /*
+        * The size of the 'combined log' is the size of the 'main log' plus
+        * the size of the 'final events log'.
+        */
        log->bios_event_log_end = log->bios_event_log +
-               log_size + efi_tpm_final_log_size;
+               log_size + final_events_log_size;
 
 out:
        memunmap(final_tbl);
index ec9a65e7887dd5d49ef789e1e27f19374a8d07cd..f19c227d20f485f4151c5b8f7e6cf926eb5474b6 100644 (file)
@@ -483,6 +483,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
        expected = be32_to_cpup((__be32 *)(buf + 2));
        if (expected > buf_len) {
                dev_err(&chip->dev, "Buffer too small to receive i2c data\n");
+               rc = -E2BIG;
                goto out_err;
        }
 
index 1836cc56e357b7b428808e5a0d32163e68f2e491..59dfd9c421a1eabff55dc721639349216c699fed 100644 (file)
@@ -1456,18 +1456,15 @@ static int add_port(struct ports_device *portdev, u32 id)
         */
        send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
 
-       if (pdrvdata.debugfs_dir) {
-               /*
-                * Finally, create the debugfs file that we can use to
-                * inspect a port's state at any time
-                */
-               snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
-                        port->portdev->vdev->index, id);
-               port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
-                                                        pdrvdata.debugfs_dir,
-                                                        port,
-                                                        &port_debugfs_fops);
-       }
+       /*
+        * Finally, create the debugfs file that we can use to
+        * inspect a port's state at any time
+        */
+       snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
+                port->portdev->vdev->index, id);
+       port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+                                                pdrvdata.debugfs_dir,
+                                                port, &port_debugfs_fops);
        return 0;
 
 free_inbufs:
@@ -2244,8 +2241,6 @@ static int __init init(void)
        }
 
        pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
-       if (!pdrvdata.debugfs_dir)
-               pr_warn("Error creating debugfs dir for virtio-ports\n");
        INIT_LIST_HEAD(&pdrvdata.consoles);
        INIT_LIST_HEAD(&pdrvdata.portdevs);
 
index 4f7bf3929d6d9cfac23b40ff04b1315cd7307a14..4e4b6d36761265c7094cc9cfaf2c328f20673992 100644 (file)
@@ -66,7 +66,14 @@ EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
 
 static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *res)
 {
-       clk_hw_unregister_fixed_factor(&((struct clk_fixed_factor *)res)->hw);
+       struct clk_fixed_factor *fix = res;
+
+       /*
+        * We can not use clk_hw_unregister_fixed_factor, since it will kfree()
+        * the hw, resulting in double free. Just unregister the hw and let
+        * devres code kfree() it.
+        */
+       clk_hw_unregister(&fix->hw);
 }
 
 static struct clk_hw *
index 5052541a0986658248867a20a531cbac673dd1d1..a3b30f7de2ef3ee84348ea28b78a115535b652b1 100644 (file)
@@ -4357,20 +4357,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
        /* search the list of notifiers for this clk */
        list_for_each_entry(cn, &clk_notifier_list, node)
                if (cn->clk == clk)
-                       break;
+                       goto found;
 
        /* if clk wasn't in the notifier list, allocate new clk_notifier */
-       if (cn->clk != clk) {
-               cn = kzalloc(sizeof(*cn), GFP_KERNEL);
-               if (!cn)
-                       goto out;
+       cn = kzalloc(sizeof(*cn), GFP_KERNEL);
+       if (!cn)
+               goto out;
 
-               cn->clk = clk;
-               srcu_init_notifier_head(&cn->notifier_head);
+       cn->clk = clk;
+       srcu_init_notifier_head(&cn->notifier_head);
 
-               list_add(&cn->node, &clk_notifier_list);
-       }
+       list_add(&cn->node, &clk_notifier_list);
 
+found:
        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
 
        clk->core->notifier_count++;
@@ -4395,32 +4394,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register);
  */
 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 {
-       struct clk_notifier *cn = NULL;
-       int ret = -EINVAL;
+       struct clk_notifier *cn;
+       int ret = -ENOENT;
 
        if (!clk || !nb)
                return -EINVAL;
 
        clk_prepare_lock();
 
-       list_for_each_entry(cn, &clk_notifier_list, node)
-               if (cn->clk == clk)
-                       break;
-
-       if (cn->clk == clk) {
-               ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+       list_for_each_entry(cn, &clk_notifier_list, node) {
+               if (cn->clk == clk) {
+                       ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
 
-               clk->core->notifier_count--;
+                       clk->core->notifier_count--;
 
-               /* XXX the notifier code should handle this better */
-               if (!cn->notifier_head.head) {
-                       srcu_cleanup_notifier_head(&cn->notifier_head);
-                       list_del(&cn->node);
-                       kfree(cn);
+                       /* XXX the notifier code should handle this better */
+                       if (!cn->notifier_head.head) {
+                               srcu_cleanup_notifier_head(&cn->notifier_head);
+                               list_del(&cn->node);
+                               kfree(cn);
+                       }
+                       break;
                }
-
-       } else {
-               ret = -ENOENT;
        }
 
        clk_prepare_unlock();
@@ -4615,6 +4610,8 @@ int of_clk_add_hw_provider(struct device_node *np,
        if (ret < 0)
                of_clk_del_provider(np);
 
+       fwnode_dev_initialized(&np->fwnode, true);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
index dbac5651ab855cba1676709e5846f88ae280333d..9bcf2f8ed4de1cbbd9905744a201e6c9c4533da5 100644 (file)
@@ -304,7 +304,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
                .name = "cam_cc_bps_clk_src",
                .parent_data = cam_cc_parent_data_2,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -325,7 +325,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
                .name = "cam_cc_cci_0_clk_src",
                .parent_data = cam_cc_parent_data_5,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -339,7 +339,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
                .name = "cam_cc_cci_1_clk_src",
                .parent_data = cam_cc_parent_data_5,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -360,7 +360,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
                .name = "cam_cc_cphy_rx_clk_src",
                .parent_data = cam_cc_parent_data_3,
                .num_parents = 6,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -379,7 +379,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
                .name = "cam_cc_csi0phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -393,7 +393,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
                .name = "cam_cc_csi1phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -407,7 +407,7 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
                .name = "cam_cc_csi2phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -421,7 +421,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
                .name = "cam_cc_csi3phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -443,7 +443,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
                .name = "cam_cc_fast_ahb_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -466,7 +466,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
                .name = "cam_cc_icp_clk_src",
                .parent_data = cam_cc_parent_data_2,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -488,7 +488,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
                .name = "cam_cc_ife_0_clk_src",
                .parent_data = cam_cc_parent_data_4,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -510,7 +510,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
                .name = "cam_cc_ife_0_csid_clk_src",
                .parent_data = cam_cc_parent_data_3,
                .num_parents = 6,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -524,7 +524,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
                .name = "cam_cc_ife_1_clk_src",
                .parent_data = cam_cc_parent_data_4,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -538,7 +538,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
                .name = "cam_cc_ife_1_csid_clk_src",
                .parent_data = cam_cc_parent_data_3,
                .num_parents = 6,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -553,7 +553,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
                .parent_data = cam_cc_parent_data_4,
                .num_parents = 4,
                .flags = CLK_SET_RATE_PARENT,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -567,7 +567,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
                .name = "cam_cc_ife_lite_csid_clk_src",
                .parent_data = cam_cc_parent_data_3,
                .num_parents = 6,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -590,7 +590,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
                .name = "cam_cc_ipe_0_clk_src",
                .parent_data = cam_cc_parent_data_2,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -613,7 +613,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
                .name = "cam_cc_jpeg_clk_src",
                .parent_data = cam_cc_parent_data_2,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -635,7 +635,7 @@ static struct clk_rcg2 cam_cc_lrme_clk_src = {
                .name = "cam_cc_lrme_clk_src",
                .parent_data = cam_cc_parent_data_6,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -656,7 +656,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
                .name = "cam_cc_mclk0_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -670,7 +670,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
                .name = "cam_cc_mclk1_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -684,7 +684,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
                .name = "cam_cc_mclk2_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -698,7 +698,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
                .name = "cam_cc_mclk3_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -712,7 +712,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
                .name = "cam_cc_mclk4_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -732,7 +732,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
                .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
index 43ecd507bf836b77e4a9a754499dd3eb3199d28b..cf94a12459ea4715f12cbee603796799b88943af 100644 (file)
@@ -99,7 +99,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
                val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
                val &= GENMASK(socfpgaclk->width - 1, 0);
                /* Check for GPIO_DB_CLK by its offset */
-               if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
+               if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
                        div = val + 1;
                else
                        div = (1 << val);
index d0177824c518b88daa749d2f8d3d4b9ac183b8d0..1b885964fb34d2e8c4541dbb64ab83e98ad2db97 100644 (file)
@@ -51,7 +51,7 @@
 
 static unsigned arch_timers_present __initdata;
 
-static void __iomem *arch_counter_base;
+static void __iomem *arch_counter_base __ro_after_init;
 
 struct arch_timer {
        void __iomem *base;
@@ -60,15 +60,16 @@ struct arch_timer {
 
 #define to_arch_timer(e) container_of(e, struct arch_timer, evt)
 
-static u32 arch_timer_rate;
-static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];
+static u32 arch_timer_rate __ro_after_init;
+u32 arch_timer_rate1 __ro_after_init;
+static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;
 
 static struct clock_event_device __percpu *arch_timer_evt;
 
-static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
-static bool arch_timer_c3stop;
-static bool arch_timer_mem_use_virtual;
-static bool arch_counter_suspend_stop;
+static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
+static bool arch_timer_c3stop __ro_after_init;
+static bool arch_timer_mem_use_virtual __ro_after_init;
+static bool arch_counter_suspend_stop __ro_after_init;
 #ifdef CONFIG_GENERIC_GETTIMEOFDAY
 static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
 #else
@@ -76,7 +77,7 @@ static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
 #endif /* CONFIG_GENERIC_GETTIMEOFDAY */
 
 static cpumask_t evtstrm_available = CPU_MASK_NONE;
-static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
+static bool evtstrm_enable __ro_after_init = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
 
 static int __init early_evtstrm_cfg(char *buf)
 {
@@ -176,7 +177,7 @@ static notrace u64 arch_counter_get_cntvct(void)
  * to exist on arm64. arm doesn't use this before DT is probed so even
  * if we don't have the cp15 accessors we won't have a problem.
  */
-u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
+u64 (*arch_timer_read_counter)(void) __ro_after_init = arch_counter_get_cntvct;
 EXPORT_SYMBOL_GPL(arch_timer_read_counter);
 
 static u64 arch_counter_read(struct clocksource *cs)
@@ -925,7 +926,7 @@ static int validate_timer_rate(void)
  * rate was probed first, and don't verify that others match. If the first node
  * probed has a clock-frequency property, this overrides the HW register.
  */
-static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
+static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np)
 {
        /* Who has more than one independent system counter? */
        if (arch_timer_rate)
@@ -939,7 +940,7 @@ static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
                pr_warn("frequency not available\n");
 }
 
-static void arch_timer_banner(unsigned type)
+static void __init arch_timer_banner(unsigned type)
 {
        pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
                type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
index 996900d017c61e41bbe734f5124e44d9e99f46c8..2fc93e46cea38e2c0d0fe2d69e0ae109bd8a979d 100644 (file)
@@ -18,7 +18,7 @@
 
 #define RATE_32K               32768
 
-#define TIMER_MODE_CONTINOUS   0x1
+#define TIMER_MODE_CONTINUOUS  0x1
 #define TIMER_DOWNCOUNT_VAL    0xffffffff
 
 #define PRCMU_TIMER_REF                0
@@ -55,13 +55,13 @@ static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
 
        /*
         * The A9 sub system expects the timer to be configured as
-        * a continous looping timer.
+        * a continuous looping timer.
         * The PRCMU should configure it but if it for some reason
         * don't we do it here.
         */
        if (readl(clksrc_dbx500_timer_base + PRCMU_TIMER_MODE) !=
-           TIMER_MODE_CONTINOUS) {
-               writel(TIMER_MODE_CONTINOUS,
+           TIMER_MODE_CONTINUOUS) {
+               writel(TIMER_MODE_CONTINUOUS,
                       clksrc_dbx500_timer_base + PRCMU_TIMER_MODE);
                writel(TIMER_DOWNCOUNT_VAL,
                       clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
index 42e7e43b8fcd9e82c7f740c934b9a1edd69e8d64..3819ef5b709894621d4f18c7ecd7145595d4d46b 100644 (file)
@@ -38,7 +38,7 @@ static int __init timer_get_base_and_rate(struct device_node *np,
        }
 
        /*
-        * Not all implementations use a periphal clock, so don't panic
+        * Not all implementations use a peripheral clock, so don't panic
         * if it's not present
         */
        pclk = of_clk_get_by_name(np, "pclk");
@@ -52,18 +52,34 @@ static int __init timer_get_base_and_rate(struct device_node *np,
                return 0;
 
        timer_clk = of_clk_get_by_name(np, "timer");
-       if (IS_ERR(timer_clk))
-               return PTR_ERR(timer_clk);
+       if (IS_ERR(timer_clk)) {
+               ret = PTR_ERR(timer_clk);
+               goto out_pclk_disable;
+       }
 
        ret = clk_prepare_enable(timer_clk);
        if (ret)
-               return ret;
+               goto out_timer_clk_put;
 
        *rate = clk_get_rate(timer_clk);
-       if (!(*rate))
-               return -EINVAL;
+       if (!(*rate)) {
+               ret = -EINVAL;
+               goto out_timer_clk_disable;
+       }
 
        return 0;
+
+out_timer_clk_disable:
+       clk_disable_unprepare(timer_clk);
+out_timer_clk_put:
+       clk_put(timer_clk);
+out_pclk_disable:
+       if (!IS_ERR(pclk)) {
+               clk_disable_unprepare(pclk);
+               clk_put(pclk);
+       }
+       iounmap(*base);
+       return ret;
 }
 
 static int __init add_clockevent(struct device_node *event_timer)
index 269a691bd2c4502485261a59baa7a73a718692bb..977fd05ac35f62e1db380a8f289f475b52109355 100644 (file)
@@ -18,6 +18,9 @@
 #include <linux/sched_clock.h>
 #include <linux/mm.h>
 #include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/acpi.h>
 #include <clocksource/hyperv_timer.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
@@ -43,14 +46,13 @@ static u64 hv_sched_clock_offset __ro_after_init;
  */
 static bool direct_mode_enabled;
 
-static int stimer0_irq;
-static int stimer0_vector;
+static int stimer0_irq = -1;
 static int stimer0_message_sint;
+static DEFINE_PER_CPU(long, stimer0_evt);
 
 /*
- * ISR for when stimer0 is operating in Direct Mode.  Direct Mode
- * does not use VMbus or any VMbus messages, so process here and not
- * in the VMbus driver code.
+ * Common code for stimer0 interrupts coming via Direct Mode or
+ * as a VMbus message.
  */
 void hv_stimer0_isr(void)
 {
@@ -61,6 +63,16 @@ void hv_stimer0_isr(void)
 }
 EXPORT_SYMBOL_GPL(hv_stimer0_isr);
 
+/*
+ * stimer0 interrupt handler for architectures that support
+ * per-cpu interrupts, which also implies Direct Mode.
+ */
+static irqreturn_t hv_stimer0_percpu_isr(int irq, void *dev_id)
+{
+       hv_stimer0_isr();
+       return IRQ_HANDLED;
+}
+
 static int hv_ce_set_next_event(unsigned long delta,
                                struct clock_event_device *evt)
 {
@@ -68,16 +80,16 @@ static int hv_ce_set_next_event(unsigned long delta,
 
        current_tick = hv_read_reference_counter();
        current_tick += delta;
-       hv_init_timer(0, current_tick);
+       hv_set_register(HV_REGISTER_STIMER0_COUNT, current_tick);
        return 0;
 }
 
 static int hv_ce_shutdown(struct clock_event_device *evt)
 {
-       hv_init_timer(0, 0);
-       hv_init_timer_config(0, 0);
-       if (direct_mode_enabled)
-               hv_disable_stimer0_percpu_irq(stimer0_irq);
+       hv_set_register(HV_REGISTER_STIMER0_COUNT, 0);
+       hv_set_register(HV_REGISTER_STIMER0_CONFIG, 0);
+       if (direct_mode_enabled && stimer0_irq >= 0)
+               disable_percpu_irq(stimer0_irq);
 
        return 0;
 }
@@ -95,8 +107,9 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
                 * on the specified hardware vector/IRQ.
                 */
                timer_cfg.direct_mode = 1;
-               timer_cfg.apic_vector = stimer0_vector;
-               hv_enable_stimer0_percpu_irq(stimer0_irq);
+               timer_cfg.apic_vector = HYPERV_STIMER0_VECTOR;
+               if (stimer0_irq >= 0)
+                       enable_percpu_irq(stimer0_irq, IRQ_TYPE_NONE);
        } else {
                /*
                 * When it expires, the timer will generate a VMbus message,
@@ -105,7 +118,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
                timer_cfg.direct_mode = 0;
                timer_cfg.sintx = stimer0_message_sint;
        }
-       hv_init_timer_config(0, timer_cfg.as_uint64);
+       hv_set_register(HV_REGISTER_STIMER0_CONFIG, timer_cfg.as_uint64);
        return 0;
 }
 
@@ -169,10 +182,58 @@ int hv_stimer_cleanup(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(hv_stimer_cleanup);
 
+/*
+ * These placeholders are overridden by arch specific code on
+ * architectures that need special setup of the stimer0 IRQ because
+ * they don't support per-cpu IRQs (such as x86/x64).
+ */
+void __weak hv_setup_stimer0_handler(void (*handler)(void))
+{
+};
+
+void __weak hv_remove_stimer0_handler(void)
+{
+};
+
+/* Called only on architectures with per-cpu IRQs (i.e., not x86/x64) */
+static int hv_setup_stimer0_irq(void)
+{
+       int ret;
+
+       ret = acpi_register_gsi(NULL, HYPERV_STIMER0_VECTOR,
+                       ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH);
+       if (ret < 0) {
+               pr_err("Can't register Hyper-V stimer0 GSI. Error %d", ret);
+               return ret;
+       }
+       stimer0_irq = ret;
+
+       ret = request_percpu_irq(stimer0_irq, hv_stimer0_percpu_isr,
+               "Hyper-V stimer0", &stimer0_evt);
+       if (ret) {
+               pr_err("Can't request Hyper-V stimer0 IRQ %d. Error %d",
+                       stimer0_irq, ret);
+               acpi_unregister_gsi(stimer0_irq);
+               stimer0_irq = -1;
+       }
+       return ret;
+}
+
+static void hv_remove_stimer0_irq(void)
+{
+       if (stimer0_irq == -1) {
+               hv_remove_stimer0_handler();
+       } else {
+               free_percpu_irq(stimer0_irq, &stimer0_evt);
+               acpi_unregister_gsi(stimer0_irq);
+               stimer0_irq = -1;
+       }
+}
+
 /* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
-int hv_stimer_alloc(void)
+int hv_stimer_alloc(bool have_percpu_irqs)
 {
-       int ret = 0;
+       int ret;
 
        /*
         * Synthetic timers are always available except on old versions of
@@ -188,29 +249,37 @@ int hv_stimer_alloc(void)
 
        direct_mode_enabled = ms_hyperv.misc_features &
                        HV_STIMER_DIRECT_MODE_AVAILABLE;
-       if (direct_mode_enabled) {
-               ret = hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
-                               hv_stimer0_isr);
+
+       /*
+        * If Direct Mode isn't enabled, the remainder of the initialization
+        * is done later by hv_stimer_legacy_init()
+        */
+       if (!direct_mode_enabled)
+               return 0;
+
+       if (have_percpu_irqs) {
+               ret = hv_setup_stimer0_irq();
                if (ret)
-                       goto free_percpu;
+                       goto free_clock_event;
+       } else {
+               hv_setup_stimer0_handler(hv_stimer0_isr);
+       }
 
-               /*
-                * Since we are in Direct Mode, stimer initialization
-                * can be done now with a CPUHP value in the same range
-                * as other clockevent devices.
-                */
-               ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
-                               "clockevents/hyperv/stimer:starting",
-                               hv_stimer_init, hv_stimer_cleanup);
-               if (ret < 0)
-                       goto free_stimer0_irq;
+       /*
+        * Since we are in Direct Mode, stimer initialization
+        * can be done now with a CPUHP value in the same range
+        * as other clockevent devices.
+        */
+       ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
+                       "clockevents/hyperv/stimer:starting",
+                       hv_stimer_init, hv_stimer_cleanup);
+       if (ret < 0) {
+               hv_remove_stimer0_irq();
+               goto free_clock_event;
        }
        return ret;
 
-free_stimer0_irq:
-       hv_remove_stimer0_irq(stimer0_irq);
-       stimer0_irq = 0;
-free_percpu:
+free_clock_event:
        free_percpu(hv_clock_event);
        hv_clock_event = NULL;
        return ret;
@@ -254,23 +323,6 @@ void hv_stimer_legacy_cleanup(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(hv_stimer_legacy_cleanup);
 
-
-/* hv_stimer_free - Free global resources allocated by hv_stimer_alloc() */
-void hv_stimer_free(void)
-{
-       if (!hv_clock_event)
-               return;
-
-       if (direct_mode_enabled) {
-               cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);
-               hv_remove_stimer0_irq(stimer0_irq);
-               stimer0_irq = 0;
-       }
-       free_percpu(hv_clock_event);
-       hv_clock_event = NULL;
-}
-EXPORT_SYMBOL_GPL(hv_stimer_free);
-
 /*
  * Do a global cleanup of clockevents for the cases of kexec and
  * vmbus exit
@@ -287,12 +339,17 @@ void hv_stimer_global_cleanup(void)
                hv_stimer_legacy_cleanup(cpu);
        }
 
-       /*
-        * If Direct Mode is enabled, the cpuhp teardown callback
-        * (hv_stimer_cleanup) will be run on all CPUs to stop the
-        * stimers.
-        */
-       hv_stimer_free();
+       if (!hv_clock_event)
+               return;
+
+       if (direct_mode_enabled) {
+               cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);
+               hv_remove_stimer0_irq();
+               stimer0_irq = -1;
+       }
+       free_percpu(hv_clock_event);
+       hv_clock_event = NULL;
+
 }
 EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
 
@@ -302,14 +359,6 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
  * the other that uses the TSC reference page feature as defined in the
  * TLFS.  The MSR version is for compatibility with old versions of
  * Hyper-V and 32-bit x86.  The TSC reference page version is preferred.
- *
- * The Hyper-V clocksource ratings of 250 are chosen to be below the
- * TSC clocksource rating of 300.  In configurations where Hyper-V offers
- * an InvariantTSC, the TSC is not marked "unstable", so the TSC clocksource
- * is available and preferred.  With the higher rating, it will be the
- * default.  On older hardware and Hyper-V versions, the TSC is marked
- * "unstable", so no TSC clocksource is created and the selected Hyper-V
- * clocksource will be the default.
  */
 
 u64 (*hv_read_reference_counter)(void);
@@ -331,7 +380,7 @@ static u64 notrace read_hv_clock_tsc(void)
        u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
 
        if (current_tick == U64_MAX)
-               hv_get_time_ref_count(current_tick);
+               current_tick = hv_get_register(HV_REGISTER_TIME_REF_COUNT);
 
        return current_tick;
 }
@@ -352,9 +401,9 @@ static void suspend_hv_clock_tsc(struct clocksource *arg)
        u64 tsc_msr;
 
        /* Disable the TSC page */
-       hv_get_reference_tsc(tsc_msr);
+       tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
        tsc_msr &= ~BIT_ULL(0);
-       hv_set_reference_tsc(tsc_msr);
+       hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
 }
 
 
@@ -364,39 +413,44 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
        u64 tsc_msr;
 
        /* Re-enable the TSC page */
-       hv_get_reference_tsc(tsc_msr);
+       tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
        tsc_msr &= GENMASK_ULL(11, 0);
        tsc_msr |= BIT_ULL(0) | (u64)phys_addr;
-       hv_set_reference_tsc(tsc_msr);
+       hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
 }
 
+#ifdef VDSO_CLOCKMODE_HVCLOCK
 static int hv_cs_enable(struct clocksource *cs)
 {
-       hv_enable_vdso_clocksource();
+       vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
        return 0;
 }
+#endif
 
 static struct clocksource hyperv_cs_tsc = {
        .name   = "hyperv_clocksource_tsc_page",
-       .rating = 250,
+       .rating = 500,
        .read   = read_hv_clock_tsc_cs,
        .mask   = CLOCKSOURCE_MASK(64),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
        .suspend= suspend_hv_clock_tsc,
        .resume = resume_hv_clock_tsc,
+#ifdef VDSO_CLOCKMODE_HVCLOCK
        .enable = hv_cs_enable,
+       .vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK,
+#else
+       .vdso_clock_mode = VDSO_CLOCKMODE_NONE,
+#endif
 };
 
 static u64 notrace read_hv_clock_msr(void)
 {
-       u64 current_tick;
        /*
         * Read the partition counter to get the current tick count. This count
         * is set to 0 when the partition is created and is incremented in
         * 100 nanosecond units.
         */
-       hv_get_time_ref_count(current_tick);
-       return current_tick;
+       return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
 }
 
 static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
@@ -412,12 +466,36 @@ static u64 notrace read_hv_sched_clock_msr(void)
 
 static struct clocksource hyperv_cs_msr = {
        .name   = "hyperv_clocksource_msr",
-       .rating = 250,
+       .rating = 500,
        .read   = read_hv_clock_msr_cs,
        .mask   = CLOCKSOURCE_MASK(64),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+/*
+ * Reference to pv_ops must be inline so objtool
+ * detection of noinstr violations can work correctly.
+ */
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+static __always_inline void hv_setup_sched_clock(void *sched_clock)
+{
+       /*
+        * We're on an architecture with generic sched clock (not x86/x64).
+        * The Hyper-V sched clock read function returns nanoseconds, not
+        * the normal 100ns units of the Hyper-V synthetic clock.
+        */
+       sched_clock_register(sched_clock, 64, NSEC_PER_SEC);
+}
+#elif defined CONFIG_PARAVIRT
+static __always_inline void hv_setup_sched_clock(void *sched_clock)
+{
+       /* We're on x86/x64 *and* using PV ops */
+       paravirt_set_sched_clock(sched_clock);
+}
+#else /* !CONFIG_GENERIC_SCHED_CLOCK && !CONFIG_PARAVIRT */
+static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
+#endif /* CONFIG_GENERIC_SCHED_CLOCK */
+
 static bool __init hv_init_tsc_clocksource(void)
 {
        u64             tsc_msr;
@@ -429,6 +507,22 @@ static bool __init hv_init_tsc_clocksource(void)
        if (hv_root_partition)
                return false;
 
+       /*
+        * If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
+        * handles frequency and offset changes due to live migration,
+        * pause/resume, and other VM management operations.  So lower the
+        * Hyper-V Reference TSC rating, causing the generic TSC to be used.
+        * TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
+        * TSC will be preferred over the virtualized ARM64 arch counter.
+        * While the Hyper-V MSR clocksource won't be used since the
+        * Reference TSC clocksource is present, change its rating as
+        * well for consistency.
+        */
+       if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
+               hyperv_cs_tsc.rating = 250;
+               hyperv_cs_msr.rating = 250;
+       }
+
        hv_read_reference_counter = read_hv_clock_tsc;
        phys_addr = virt_to_phys(hv_get_tsc_page());
 
@@ -439,12 +533,11 @@ static bool __init hv_init_tsc_clocksource(void)
         * (which already has at least the low 12 bits set to zero since
         * it is page aligned). Also set the "enable" bit, which is bit 0.
         */
-       hv_get_reference_tsc(tsc_msr);
+       tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
        tsc_msr &= GENMASK_ULL(11, 0);
        tsc_msr = tsc_msr | 0x1 | (u64)phys_addr;
-       hv_set_reference_tsc(tsc_msr);
+       hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
 
-       hv_set_clocksource_vdso(hyperv_cs_tsc);
        clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
 
        hv_sched_clock_offset = hv_read_reference_counter();
@@ -457,7 +550,7 @@ void __init hv_init_clocksource(void)
 {
        /*
         * Try to set up the TSC page clocksource. If it succeeds, we're
-        * done. Otherwise, set up the MSR clocksoruce.  At least one of
+        * done. Otherwise, set up the MSR clocksource.  At least one of
         * these will always be available except on very old versions of
         * Hyper-V on x86.  In that case we won't have a Hyper-V
         * clocksource, but Linux will still run with a clocksource based
index 029efc2731b49f35b89f3b3c07011a2152f8d40b..06d25754e606cb7cc9f2efbb8c0e673cebd34e76 100644 (file)
@@ -88,9 +88,9 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
                return PTR_ERR(ost->regs);
 
        map = device_node_to_regmap(dev->parent->of_node);
-       if (!map) {
+       if (IS_ERR(map)) {
                dev_err(dev, "regmap not found");
-               return -EINVAL;
+               return PTR_ERR(map);
        }
 
        ost->clk = devm_clk_get(dev, "ost");
@@ -167,13 +167,14 @@ static const struct ingenic_ost_soc_info jz4725b_ost_soc_info = {
        .is64bit = false,
 };
 
-static const struct ingenic_ost_soc_info jz4770_ost_soc_info = {
+static const struct ingenic_ost_soc_info jz4760b_ost_soc_info = {
        .is64bit = true,
 };
 
 static const struct of_device_id ingenic_ost_of_match[] = {
        { .compatible = "ingenic,jz4725b-ost", .data = &jz4725b_ost_soc_info, },
-       { .compatible = "ingenic,jz4770-ost", .data = &jz4770_ost_soc_info, },
+       { .compatible = "ingenic,jz4760b-ost", .data = &jz4760b_ost_soc_info, },
+       { .compatible = "ingenic,jz4770-ost", .data = &jz4760b_ost_soc_info, },
        { }
 };
 
index 905fd6b163a8190bf2bad79a7e33c6620c0a312a..24ed0f1f089b8dca48e684a7caf888fb8c920b3d 100644 (file)
@@ -264,6 +264,7 @@ static const struct ingenic_soc_info jz4725b_soc_info = {
 static const struct of_device_id ingenic_tcu_of_match[] = {
        { .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
        { .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
+       { .compatible = "ingenic,jz4760-tcu", .data = &jz4740_soc_info, },
        { .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, },
        { .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, },
        { /* sentinel */ }
@@ -358,6 +359,7 @@ err_free_ingenic_tcu:
 
 TIMER_OF_DECLARE(jz4740_tcu_intc,  "ingenic,jz4740-tcu",  ingenic_tcu_init);
 TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init);
+TIMER_OF_DECLARE(jz4760_tcu_intc,  "ingenic,jz4760-tcu",  ingenic_tcu_init);
 TIMER_OF_DECLARE(jz4770_tcu_intc,  "ingenic,jz4770-tcu",  ingenic_tcu_init);
 TIMER_OF_DECLARE(x1000_tcu_intc,  "ingenic,x1000-tcu",  ingenic_tcu_init);
 
index c98f8851fd6804540bfb8e40d96703df7c736407..d7ed99f0001f890d85a7d8ea95575f284cf0d90d 100644 (file)
@@ -339,8 +339,9 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
                sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
                                   SH_CMT16_CMCSR_CKS512);
        } else {
-               sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
-                                  SH_CMT32_CMCSR_CMTOUT_IE |
+               u32 cmtout = ch->cmt->info->model <= SH_CMT_48BIT ?
+                             SH_CMT32_CMCSR_CMTOUT_IE : 0;
+               sh_cmt_write_cmcsr(ch, cmtout | SH_CMT32_CMCSR_CMM |
                                   SH_CMT32_CMCSR_CMR_IRQ |
                                   SH_CMT32_CMCSR_CKS_RCLK8);
        }
index 787dbebbb4324469dbdabc4e588a7b4c53672b1e..27af17c995900477fd3c0037bff9476364c5ecf0 100644 (file)
@@ -455,9 +455,9 @@ static int __init tcb_clksrc_init(struct device_node *node)
        tcaddr = tc.regs;
 
        if (bits == 32) {
-               /* use apropriate function to read 32 bit counter */
+               /* use appropriate function to read 32 bit counter */
                clksrc.read = tc_get_cycles32;
-               /* setup ony channel 0 */
+               /* setup only channel 0 */
                tcb_setup_single_chan(&tc, best_divisor_idx);
                tc_sched_clock = tc_sched_clock_read32;
                tc_delay_timer.read_current_timer = tc_delay_timer_read32;
index 12a2ed7cfaff9eeb2d32ed2be641322f1b17be72..93f336ec875a221544d5fc31a54e4bd7e499764a 100644 (file)
@@ -116,7 +116,7 @@ static int ftm_set_next_event(unsigned long delta,
         * to the MOD register latches the value into a buffer. The MOD
         * register is updated with the value of its write buffer with
         * the following scenario:
-        * a, the counter source clock is diabled.
+        * a, the counter source clock is disabled.
         */
        ftm_counter_disable(priv->clkevt_base);
 
index ab623b25a47b7682f5ec904e1a31c99feb79281f..cfa4ec7ef396814ccd3ea592c87b675835408a55 100644 (file)
@@ -237,7 +237,7 @@ static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate,
                        break;
        }
 
-       /* Use the bigest prescaler if we didn't match one. */
+       /* Use the biggest prescaler if we didn't match one. */
        if (*pres == MCHP_PIT64B_PRES_MAX)
                *pres = MCHP_PIT64B_PRES_MAX - 1;
 }
index 9780ffd8010e686697a57eded89aa2d1d02d826c..a00520cbb660a1c8ee2e14e3cee30043390356f4 100644 (file)
@@ -208,5 +208,6 @@ static int __init npcm7xx_timer_init(struct device_node *np)
        return 0;
 }
 
+TIMER_OF_DECLARE(wpcm450, "nuvoton,wpcm450-timer", npcm7xx_timer_init);
 TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init);
 
index 572da477c6d35c5edc64f16b5d8562cde5311d53..529cc6a51cdb3bf842d30ba693ae20ca9df5ec65 100644 (file)
@@ -211,10 +211,10 @@ out_fail:
 }
 
 /**
- * timer_of_cleanup - release timer_of ressources
+ * timer_of_cleanup - release timer_of resources
  * @to: timer_of structure
  *
- * Release the ressources that has been used in timer_of_init().
+ * Release the resources that has been used in timer_of_init().
  * This function should be called in init error cases
  */
 void __init timer_of_cleanup(struct timer_of *to)
index a2dd85d0c1d75471b4061c95c93d30bdeb1e068a..6f37181a8c6330f65fa34d883eb0810b07e25da7 100644 (file)
@@ -71,7 +71,7 @@ static u64 notrace
 pistachio_clocksource_read_cycles(struct clocksource *cs)
 {
        struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
-       u32 counter, overflw;
+       u32 counter, overflow;
        unsigned long flags;
 
        /*
@@ -80,7 +80,7 @@ pistachio_clocksource_read_cycles(struct clocksource *cs)
         */
 
        raw_spin_lock_irqsave(&pcs->lock, flags);
-       overflw = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
+       overflow = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
        counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
        raw_spin_unlock_irqrestore(&pcs->lock, flags);
 
index 33b3e8aa2cc50c66eaea9906e593a9ce0259268c..b6f97960d8ee020f908dbd260b6c04c188c0b4d8 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/clk.h>
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
@@ -449,13 +450,13 @@ static int dmtimer_set_next_event(unsigned long cycles,
        struct dmtimer_systimer *t = &clkevt->t;
        void __iomem *pend = t->base + t->pend;
 
-       writel_relaxed(0xffffffff - cycles, t->base + t->counter);
        while (readl_relaxed(pend) & WP_TCRR)
                cpu_relax();
+       writel_relaxed(0xffffffff - cycles, t->base + t->counter);
 
-       writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
        while (readl_relaxed(pend) & WP_TCLR)
                cpu_relax();
+       writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
 
        return 0;
 }
@@ -490,18 +491,18 @@ static int dmtimer_set_periodic(struct clock_event_device *evt)
        dmtimer_clockevent_shutdown(evt);
 
        /* Looks like we need to first set the load value separately */
-       writel_relaxed(clkevt->period, t->base + t->load);
        while (readl_relaxed(pend) & WP_TLDR)
                cpu_relax();
+       writel_relaxed(clkevt->period, t->base + t->load);
 
-       writel_relaxed(clkevt->period, t->base + t->counter);
        while (readl_relaxed(pend) & WP_TCRR)
                cpu_relax();
+       writel_relaxed(clkevt->period, t->base + t->counter);
 
-       writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
-                      t->base + t->ctrl);
        while (readl_relaxed(pend) & WP_TCLR)
                cpu_relax();
+       writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
+                      t->base + t->ctrl);
 
        return 0;
 }
@@ -530,17 +531,17 @@ static void omap_clockevent_unidle(struct clock_event_device *evt)
        writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
 }
 
-static int __init dmtimer_clockevent_init(struct device_node *np)
+static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
+                                            struct device_node *np,
+                                            unsigned int features,
+                                            const struct cpumask *cpumask,
+                                            const char *name,
+                                            int rating)
 {
-       struct dmtimer_clockevent *clkevt;
        struct clock_event_device *dev;
        struct dmtimer_systimer *t;
        int error;
 
-       clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
-       if (!clkevt)
-               return -ENOMEM;
-
        t = &clkevt->t;
        dev = &clkevt->dev;
 
@@ -548,24 +549,23 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
         * We mostly use cpuidle_coupled with ARM local timers for runtime,
         * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
         */
-       dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-       dev->rating = 300;
+       dev->features = features;
+       dev->rating = rating;
        dev->set_next_event = dmtimer_set_next_event;
        dev->set_state_shutdown = dmtimer_clockevent_shutdown;
        dev->set_state_periodic = dmtimer_set_periodic;
        dev->set_state_oneshot = dmtimer_clockevent_shutdown;
+       dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
        dev->tick_resume = dmtimer_clockevent_shutdown;
-       dev->cpumask = cpu_possible_mask;
+       dev->cpumask = cpumask;
 
        dev->irq = irq_of_parse_and_map(np, 0);
-       if (!dev->irq) {
-               error = -ENXIO;
-               goto err_out_free;
-       }
+       if (!dev->irq)
+               return -ENXIO;
 
        error = dmtimer_systimer_setup(np, &clkevt->t);
        if (error)
-               goto err_out_free;
+               return error;
 
        clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
 
@@ -577,38 +577,132 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
        writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
 
        error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
-                           IRQF_TIMER, "clockevent", clkevt);
+                           IRQF_TIMER, name, clkevt);
        if (error)
                goto err_out_unmap;
 
        writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
        writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
 
-       pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
-               of_find_property(np, "ti,timer-alwon", NULL) ?
+       pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
+               name, of_find_property(np, "ti,timer-alwon", NULL) ?
                "always-on " : "", t->rate, np->parent);
 
-       clockevents_config_and_register(dev, t->rate,
-                                       3, /* Timer internal resynch latency */
+       return 0;
+
+err_out_unmap:
+       iounmap(t->base);
+
+       return error;
+}
+
+static int __init dmtimer_clockevent_init(struct device_node *np)
+{
+       struct dmtimer_clockevent *clkevt;
+       int error;
+
+       clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
+       if (!clkevt)
+               return -ENOMEM;
+
+       error = dmtimer_clkevt_init_common(clkevt, np,
+                                          CLOCK_EVT_FEAT_PERIODIC |
+                                          CLOCK_EVT_FEAT_ONESHOT,
+                                          cpu_possible_mask, "clockevent",
+                                          300);
+       if (error)
+               goto err_out_free;
+
+       clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
+                                       3, /* Timer internal resync latency */
                                        0xffffffff);
 
        if (of_machine_is_compatible("ti,am33xx") ||
            of_machine_is_compatible("ti,am43")) {
-               dev->suspend = omap_clockevent_idle;
-               dev->resume = omap_clockevent_unidle;
+               clkevt->dev.suspend = omap_clockevent_idle;
+               clkevt->dev.resume = omap_clockevent_unidle;
        }
 
        return 0;
 
-err_out_unmap:
-       iounmap(t->base);
-
 err_out_free:
        kfree(clkevt);
 
        return error;
 }
 
+/* Dmtimer as percpu timer. See dra7 ARM architected timer wrap erratum i940 */
+static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
+
+static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
+{
+       struct dmtimer_clockevent *clkevt;
+       int error;
+
+       if (!cpu_possible(cpu))
+               return -EINVAL;
+
+       if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") ||
+           !of_property_read_bool(np->parent, "ti,no-idle"))
+               pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent);
+
+       clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
+
+       error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT,
+                                          cpumask_of(cpu), "percpu-dmtimer",
+                                          500);
+       if (error)
+               return error;
+
+       return 0;
+}
+
+/* See TRM for timer internal resynch latency */
+static int omap_dmtimer_starting_cpu(unsigned int cpu)
+{
+       struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
+       struct clock_event_device *dev = &clkevt->dev;
+       struct dmtimer_systimer *t = &clkevt->t;
+
+       clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX);
+       irq_force_affinity(dev->irq, cpumask_of(cpu));
+
+       return 0;
+}
+
+static int __init dmtimer_percpu_timer_startup(void)
+{
+       struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0);
+       struct dmtimer_systimer *t = &clkevt->t;
+
+       if (t->sysc) {
+               cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
+                                 "clockevents/omap/gptimer:starting",
+                                 omap_dmtimer_starting_cpu, NULL);
+       }
+
+       return 0;
+}
+subsys_initcall(dmtimer_percpu_timer_startup);
+
+static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
+{
+       struct device_node *arm_timer;
+
+       arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
+       if (of_device_is_available(arm_timer)) {
+               pr_warn_once("ARM architected timer wrap issue i940 detected\n");
+               return 0;
+       }
+
+       if (pa == 0x48034000)           /* dra7 dmtimer3 */
+               return dmtimer_percpu_timer_init(np, 0);
+       else if (pa == 0x48036000)      /* dra7 dmtimer4 */
+               return dmtimer_percpu_timer_init(np, 1);
+
+       return 0;
+}
+
 /* Clocksource */
 static struct dmtimer_clocksource *
 to_dmtimer_clocksource(struct clocksource *cs)
@@ -742,6 +836,9 @@ static int __init dmtimer_systimer_init(struct device_node *np)
        if (clockevent == pa)
                return dmtimer_clockevent_init(np);
 
+       if (of_machine_is_compatible("ti,dra7"))
+               return dmtimer_percpu_quirk_init(np, pa);
+
        return 0;
 }
 
index 1a86a4e7e344303fc25d6df100753e984eca8992..911c92146eca6d4db3cd87deeafd1e7df04d8324 100644 (file)
@@ -136,7 +136,7 @@ static int __init pit_clockevent_init(unsigned long rate, int irq)
        /*
         * The value for the LDVAL register trigger is calculated as:
         * LDVAL trigger = (period / clock period) - 1
-        * The pit is a 32-bit down count timer, when the conter value
+        * The pit is a 32-bit down count timer, when the counter value
         * reaches 0, it will generate an interrupt, thus the minimal
         * LDVAL trigger value is 1. And then the min_delta is
         * minimal LDVAL trigger value + 1, and the max_delta is full 32-bit.
index 856fb20456566ab8dc1fbe78b0727c6c6693fd17..b8e75210a0e3150900f2eeed78756f5c282b40b4 100644 (file)
@@ -71,10 +71,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
 config CRYPTO_DEV_SUN8I_CE_HASH
        bool "Enable support for hash on sun8i-ce"
        depends on CRYPTO_DEV_SUN8I_CE
-       select MD5
-       select SHA1
-       select SHA256
-       select SHA512
+       select CRYPTO_MD5
+       select CRYPTO_SHA1
+       select CRYPTO_SHA256
+       select CRYPTO_SHA512
        help
          Say y to enable support for hash algorithms.
 
@@ -132,8 +132,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG
 config CRYPTO_DEV_SUN8I_SS_HASH
        bool "Enable support for hash on sun8i-ss"
        depends on CRYPTO_DEV_SUN8I_SS
-       select MD5
-       select SHA1
-       select SHA256
+       select CRYPTO_MD5
+       select CRYPTO_SHA1
+       select CRYPTO_SHA256
        help
          Say y to enable support for hash algorithms.
index c2e6f5ed1d79744a7bc0b0e4179fdf81c9cd8ec7..dec79fa3ebafb83951901b07044ee1f994639c93 100644 (file)
@@ -561,7 +561,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
                                    sizeof(struct sun4i_cipher_req_ctx) +
                                    crypto_skcipher_reqsize(op->fallback_tfm));
 
-       err = pm_runtime_get_sync(op->ss->dev);
+       err = pm_runtime_resume_and_get(op->ss->dev);
        if (err < 0)
                goto error_pm;
 
index 709905ec46806d3303bcd79faa7878bc471c3dd4..44b8fc4b786dc8c724373c415d3e8ecc409e1753 100644 (file)
@@ -288,8 +288,7 @@ static int sun4i_ss_pm_suspend(struct device *dev)
 {
        struct sun4i_ss_ctx *ss = dev_get_drvdata(dev);
 
-       if (ss->reset)
-               reset_control_assert(ss->reset);
+       reset_control_assert(ss->reset);
 
        clk_disable_unprepare(ss->ssclk);
        clk_disable_unprepare(ss->busclk);
@@ -314,12 +313,10 @@ static int sun4i_ss_pm_resume(struct device *dev)
                goto err_enable;
        }
 
-       if (ss->reset) {
-               err = reset_control_deassert(ss->reset);
-               if (err) {
-                       dev_err(ss->dev, "Cannot deassert reset control\n");
-                       goto err_enable;
-               }
+       err = reset_control_deassert(ss->reset);
+       if (err) {
+               dev_err(ss->dev, "Cannot deassert reset control\n");
+               goto err_enable;
        }
 
        return err;
@@ -401,12 +398,10 @@ static int sun4i_ss_probe(struct platform_device *pdev)
        dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
 
        ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
-       if (IS_ERR(ss->reset)) {
-               if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
-                       return PTR_ERR(ss->reset);
+       if (IS_ERR(ss->reset))
+               return PTR_ERR(ss->reset);
+       if (!ss->reset)
                dev_info(&pdev->dev, "no reset control found\n");
-               ss->reset = NULL;
-       }
 
        /*
         * Check that clock have the correct rates given in the datasheet
@@ -459,7 +454,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
         * this info could be useful
         */
 
-       err = pm_runtime_get_sync(ss->dev);
+       err = pm_runtime_resume_and_get(ss->dev);
        if (err < 0)
                goto error_pm;
 
index c1b4585e9bbc7d459bfb6c47d66adbf145735f9a..d28292762b3247a6c999b6f382d28a25d6c7fabe 100644 (file)
@@ -27,7 +27,7 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm)
        algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
        op->ss = algt->ss;
 
-       err = pm_runtime_get_sync(op->ss->dev);
+       err = pm_runtime_resume_and_get(op->ss->dev);
        if (err < 0)
                return err;
 
index 443160a114bb031a0d53e78106d82eb01c0e51a5..491fcb7b81b40b6f989bb27d9414804d81bd23cc 100644 (file)
@@ -29,7 +29,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
        algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
        ss = algt->ss;
 
-       err = pm_runtime_get_sync(ss->dev);
+       err = pm_runtime_resume_and_get(ss->dev);
        if (err < 0)
                return err;
 
index 33707a2e55ff0c2dd0e2f16c1487c708038db867..54ae8d16e4931ab8aea18f6a96ccccc38c61df7c 100644 (file)
@@ -240,11 +240,14 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
 
 theend_sgs:
        if (areq->src == areq->dst) {
-               dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+               dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+                            DMA_BIDIRECTIONAL);
        } else {
                if (nr_sgs > 0)
-                       dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
-               dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+                       dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+                                    DMA_TO_DEVICE);
+               dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
+                            DMA_FROM_DEVICE);
        }
 
 theend_iv:
index 158422ff5695ce66a625ba0427d0cb386ba63684..00194d1d9ae69b153400505ef4b45474caa8ef5e 100644 (file)
@@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
        if (err)
                goto error_alg;
 
-       err = pm_runtime_get_sync(ce->dev);
+       err = pm_runtime_resume_and_get(ce->dev);
        if (err < 0)
                goto error_alg;
 
index 2f09a37306e28412c81c3f4a7576526c5312af10..88194718a806ce8452dcb1ea06661dd2c9581c17 100644 (file)
@@ -405,7 +405,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
        err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
 
        dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
-       dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+       dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+                    DMA_TO_DEVICE);
        dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
 
 
index cfde9ee4356b138260ae51f110a991febb3d4b30..cd1baee424a18a6fd56187f64a399d631e2db3d9 100644 (file)
@@ -99,6 +99,7 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
        dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
        if (dma_mapping_error(ce->dev, dma_iv)) {
                dev_err(ce->dev, "Cannot DMA MAP IV\n");
+               err = -EFAULT;
                goto err_iv;
        }
 
index ed2a69f82e1c189ee382b0692f8e4d3d686e0f7a..9ef1c85c4aaa5590fe9c521db24ad1d4121d2f8f 100644 (file)
@@ -232,10 +232,13 @@ sgd_next:
 
 theend_sgs:
        if (areq->src == areq->dst) {
-               dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+               dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+                            DMA_BIDIRECTIONAL);
        } else {
-               dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
-               dma_unmap_sg(ss->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+               dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+                            DMA_TO_DEVICE);
+               dma_unmap_sg(ss->dev, areq->dst, sg_nents(areq->dst),
+                            DMA_FROM_DEVICE);
        }
 
 theend_iv:
@@ -351,7 +354,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
        op->enginectx.op.prepare_request = NULL;
        op->enginectx.op.unprepare_request = NULL;
 
-       err = pm_runtime_get_sync(op->ss->dev);
+       err = pm_runtime_resume_and_get(op->ss->dev);
        if (err < 0) {
                dev_err(op->ss->dev, "pm error %d\n", err);
                goto error_pm;
index e0ddc684798dc50e67379943c18cbe3f0179f932..80e89066dbd1ae60f3a646a8a69481267b1d5074 100644 (file)
@@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev)
        if (err)
                goto error_alg;
 
-       err = pm_runtime_get_sync(ss->dev);
+       err = pm_runtime_resume_and_get(ss->dev);
        if (err < 0)
                goto error_alg;
 
index 11cbcbc83a7b67b7b94d8be58017ba0911f9243a..3c073eb3db038a148b304c73bcae0c1af108f398 100644 (file)
@@ -348,8 +348,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
        bf = (__le32 *)pad;
 
        result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
-       if (!result)
+       if (!result) {
+               kfree(pad);
                return -ENOMEM;
+       }
 
        for (i = 0; i < MAX_SG; i++) {
                rctx->t_dst[i].addr = 0;
@@ -432,14 +434,14 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
        err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));
 
        dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);
-       dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+       dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+                    DMA_TO_DEVICE);
        dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
 
-       kfree(pad);
-
        memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
-       kfree(result);
 theend:
+       kfree(pad);
+       kfree(result);
        crypto_finalize_hash_request(engine, breq, err);
        return 0;
 }
index 08a1473b214578543c5531e29b040680244c1c7d..3191527928e4163df5c0017029076413977c8322 100644 (file)
@@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
        dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
        if (dma_mapping_error(ss->dev, dma_iv)) {
                dev_err(ss->dev, "Cannot DMA MAP IV\n");
-               return -EFAULT;
+               err = -EFAULT;
+               goto err_free;
        }
 
        dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
@@ -167,6 +168,7 @@ err_iv:
                memcpy(ctx->seed, d + dlen, ctx->slen);
        }
        memzero_explicit(d, todo);
+err_free:
        kfree(d);
 
        return err;
index a3fa849b139ae6af29d6353e5706eadf2e3c995d..ded7322427329596b98c2bba1483106e13eac7dd 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
  * AMCC SoC PPC4xx Crypto Driver
  *
  * Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -115,7 +115,7 @@ int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
        return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
 }
 
-/**
+/*
  * AES Functions
  */
 static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
@@ -374,7 +374,7 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
        return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
 }
 
-/**
+/*
  * AES-CCM Functions
  */
 
@@ -489,7 +489,7 @@ int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
        return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
 }
 
-/**
+/*
  * AES-GCM Functions
  */
 
@@ -617,7 +617,7 @@ int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
        return crypto4xx_crypt_aes_gcm(req, true);
 }
 
-/**
+/*
  * HASH SHA1 Functions
  */
 static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
@@ -711,7 +711,7 @@ int crypto4xx_hash_digest(struct ahash_request *req)
                                  ctx->sa_len, 0, NULL);
 }
 
-/**
+/*
  * SHA1 Algorithm
  */
 int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
index 8d1b918a05335daed33140973f5b67b4a65ecb60..8278d98074e9ada52f58748a1764d89df73acb6f 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
  * AMCC SoC PPC4xx Crypto Driver
  *
  * Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -44,7 +44,7 @@
 
 #define PPC4XX_SEC_VERSION_STR                 "0.5"
 
-/**
+/*
  * PPC4xx Crypto Engine Initialization Routine
  */
 static void crypto4xx_hw_init(struct crypto4xx_device *dev)
@@ -159,7 +159,7 @@ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
        ctx->sa_len = 0;
 }
 
-/**
+/*
  * alloc memory for the gather ring
  * no need to alloc buf for the ring
  * gdr_tail, gdr_head and gdr_count are initialized by this function
@@ -268,7 +268,7 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
        return tail;
 }
 
-/**
+/*
  * alloc memory for the gather ring
  * no need to alloc buf for the ring
  * gdr_tail, gdr_head and gdr_count are initialized by this function
@@ -346,7 +346,7 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
        return &dev->gdr[idx];
 }
 
-/**
+/*
  * alloc memory for the scatter ring
  * need to alloc buf for the ring
  * sdr_tail, sdr_head and sdr_count are initialized by this function
@@ -930,7 +930,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
        return is_busy ? -EBUSY : -EINPROGRESS;
 }
 
-/**
+/*
  * Algorithm Registration Functions
  */
 static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
@@ -1097,7 +1097,7 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
        } while (head != tail);
 }
 
-/**
+/*
  * Top Half of isr.
  */
 static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
@@ -1186,7 +1186,7 @@ static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
        return 0;
 }
 
-/**
+/*
  * Supported Crypto Algorithms
  */
 static struct crypto4xx_alg_common crypto4xx_alg[] = {
@@ -1369,7 +1369,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
        } },
 };
 
-/**
+/*
  * Module Initialization Routine
  */
 static int crypto4xx_probe(struct platform_device *ofdev)
index a4e25b46cd0ab310b5b45020d872e0d8faf695ad..56c10668c0ab0ae514d4be59463ca2be6b0e8a79 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
  * AMCC SoC PPC4xx Crypto Driver
  *
  * Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -188,7 +188,7 @@ int crypto4xx_hash_final(struct ahash_request *req);
 int crypto4xx_hash_update(struct ahash_request *req);
 int crypto4xx_hash_init(struct ahash_request *req);
 
-/**
+/*
  * Note: Only use this function to copy items that is word aligned.
  */
 static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
index c4c0a1a7594156e7950f23d158b337775c788e41..1038061224da667d47a057df224e6c669c97e4c1 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
  * AMCC SoC PPC4xx Crypto Driver
  *
  * Copyright (c) 2008 Applied Micro Circuits Corporation.
 #define CRYPTO4XX_PRNG_LFSR_L                  0x00070030
 #define CRYPTO4XX_PRNG_LFSR_H                  0x00070034
 
-/**
+/*
  * Initialize CRYPTO ENGINE registers, and memory bases.
  */
 #define PPC4XX_PDR_POLL                                0x3ff
 #define PPC4XX_INT_TIMEOUT_CNT                 0
 #define PPC4XX_INT_TIMEOUT_CNT_REVB            0x3FF
 #define PPC4XX_INT_CFG                         1
-/**
+/*
  * all follow define are ad hoc
  */
 #define PPC4XX_RING_RETRY                      100
 #define PPC4XX_SDR_SIZE                                PPC4XX_NUM_SD
 #define PPC4XX_GDR_SIZE                                PPC4XX_NUM_GD
 
-/**
+/*
   * Generic Security Association (SA) with all possible fields. These will
  * never likely used except for reference purpose. These structure format
  * can be not changed as the hardware expects them to be layout as defined.
index fe756abfc19f91f398960a1d2ca74fa6f5a21e13..e98e4e7abbad8d9a21fbc05e871ae894e3f0c4f3 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
  * AMCC SoC PPC4xx Crypto Driver
  *
  * Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -14,7 +14,7 @@
 
 #define AES_IV_SIZE                            16
 
-/**
+/*
  * Contents of Dynamic Security Association (SA) with all possible fields
  */
 union dynamic_sa_contents {
@@ -122,7 +122,7 @@ union sa_command_0 {
 #define SA_AES_KEY_LEN_256                     4
 
 #define SA_REV2                                        1
-/**
+/*
  * The follow defines bits sa_command_1
  * In Basic hash mode  this bit define simple hash or hmac.
  * In IPsec mode, this bit define muting control.
@@ -172,7 +172,7 @@ struct dynamic_sa_ctl {
        union sa_command_1 sa_command_1;
 } __attribute__((packed));
 
-/**
+/*
  * State Record for Security Association (SA)
  */
 struct  sa_state_record {
@@ -184,7 +184,7 @@ struct  sa_state_record {
        };
 } __attribute__((packed));
 
-/**
+/*
  * Security Association (SA) for AES128
  *
  */
@@ -213,7 +213,7 @@ struct dynamic_sa_aes192 {
 #define SA_AES192_LEN          (sizeof(struct dynamic_sa_aes192)/4)
 #define SA_AES192_CONTENTS     0x3e000062
 
-/**
+/*
  * Security Association (SA) for AES256
  */
 struct dynamic_sa_aes256 {
@@ -228,7 +228,7 @@ struct dynamic_sa_aes256 {
 #define SA_AES256_CONTENTS     0x3e000082
 #define SA_AES_CONTENTS                0x3e000002
 
-/**
+/*
  * Security Association (SA) for AES128 CCM
  */
 struct dynamic_sa_aes128_ccm {
@@ -242,7 +242,7 @@ struct dynamic_sa_aes128_ccm {
 #define SA_AES128_CCM_CONTENTS 0x3e000042
 #define SA_AES_CCM_CONTENTS    0x3e000002
 
-/**
+/*
  * Security Association (SA) for AES128_GCM
  */
 struct dynamic_sa_aes128_gcm {
@@ -258,7 +258,7 @@ struct dynamic_sa_aes128_gcm {
 #define SA_AES128_GCM_CONTENTS 0x3e000442
 #define SA_AES_GCM_CONTENTS    0x3e000402
 
-/**
+/*
  * Security Association (SA) for HASH160: HMAC-SHA1
  */
 struct dynamic_sa_hash160 {
index 3af732f25c1c04a5af59f470d5a804c0346db776..7356716274cba030ff257588d16d285dd761250a 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
  * AMCC SoC PPC4xx Crypto Driver
  *
  * Copyright (c) 2008 Applied Micro Circuits Corporation.
index 8b5e07316352c3df6ac52ea27bebd11c0897fe72..c6865cbd334b293bec18184dc128bc2863d7546f 100644 (file)
@@ -236,10 +236,10 @@ static int meson_cipher(struct skcipher_request *areq)
        dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);
 
        if (areq->src == areq->dst) {
-               dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+               dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL);
        } else {
-               dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
-               dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+               dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+               dma_unmap_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE);
        }
 
        if (areq->iv && ivsize > 0) {
index 5bbeff433c8c09d03486e2f8956fa11789545ad8..6e7ae896717cd2a21600f4282fc27145c3e18da3 100644 (file)
@@ -217,9 +217,6 @@ static int meson_crypto_probe(struct platform_device *pdev)
        struct meson_dev *mc;
        int err, i;
 
-       if (!pdev->dev.of_node)
-               return -ENODEV;
-
        mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
        if (!mc)
                return -ENOMEM;
index 9bd8e5167be34bfc9ad67381f45853664a580c75..333fbefbbccbd94fda92eba0b6c5e666fd7e10ba 100644 (file)
@@ -26,7 +26,7 @@
 static struct atmel_ecc_driver_data driver_data;
 
 /**
- * atmel_ecdh_ctx - transformation context
+ * struct atmel_ecdh_ctx - transformation context
  * @client     : pointer to i2c client device
  * @fallback   : used for unsupported curves or when user wants to use its own
  *               private key.
@@ -34,7 +34,6 @@ static struct atmel_ecc_driver_data driver_data;
  *               of the user to not call set_secret() while
  *               generate_public_key() or compute_shared_secret() are in flight.
  * @curve_id   : elliptic curve id
- * @n_sz       : size in bytes of the n prime
  * @do_fallback: true when the device doesn't support the curve or when the user
  *               wants to use its own private key.
  */
@@ -43,7 +42,6 @@ struct atmel_ecdh_ctx {
        struct crypto_kpp *fallback;
        const u8 *public_key;
        unsigned int curve_id;
-       size_t n_sz;
        bool do_fallback;
 };
 
@@ -51,7 +49,6 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
                            int status)
 {
        struct kpp_request *req = areq;
-       struct atmel_ecdh_ctx *ctx = work_data->ctx;
        struct atmel_i2c_cmd *cmd = &work_data->cmd;
        size_t copied, n_sz;
 
@@ -59,7 +56,7 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
                goto free_work_data;
 
        /* might want less than we've got */
-       n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
+       n_sz = min_t(size_t, ATMEL_ECC_NIST_P256_N_SIZE, req->dst_len);
 
        /* copy the shared secret */
        copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
@@ -73,14 +70,6 @@ free_work_data:
        kpp_request_complete(req, status);
 }
 
-static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
-{
-       if (curve_id == ECC_CURVE_NIST_P256)
-               return ATMEL_ECC_NIST_P256_N_SIZE;
-
-       return 0;
-}
-
 /*
  * A random private key is generated and stored in the device. The device
  * returns the pair public key.
@@ -104,8 +93,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
                return -EINVAL;
        }
 
-       ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id);
-       if (!ctx->n_sz || params.key_size) {
+       if (params.key_size) {
                /* fallback to ecdh software implementation */
                ctx->do_fallback = true;
                return crypto_kpp_set_secret(ctx->fallback, buf, len);
@@ -125,7 +113,6 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
                goto free_cmd;
 
        ctx->do_fallback = false;
-       ctx->curve_id = params.curve_id;
 
        atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2);
 
@@ -263,6 +250,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
        struct crypto_kpp *fallback;
        struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
 
+       ctx->curve_id = ECC_CURVE_NIST_P256;
        ctx->client = atmel_ecc_i2c_client_alloc();
        if (IS_ERR(ctx->client)) {
                pr_err("tfm - i2c_client binding failed\n");
@@ -306,7 +294,7 @@ static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm)
        return ATMEL_ECC_PUBKEY_SIZE;
 }
 
-static struct kpp_alg atmel_ecdh = {
+static struct kpp_alg atmel_ecdh_nist_p256 = {
        .set_secret = atmel_ecdh_set_secret,
        .generate_public_key = atmel_ecdh_generate_public_key,
        .compute_shared_secret = atmel_ecdh_compute_shared_secret,
@@ -315,7 +303,7 @@ static struct kpp_alg atmel_ecdh = {
        .max_size = atmel_ecdh_max_size,
        .base = {
                .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
-               .cra_name = "ecdh",
+               .cra_name = "ecdh-nist-p256",
                .cra_driver_name = "atmel-ecdh",
                .cra_priority = ATMEL_ECC_PRIORITY,
                .cra_module = THIS_MODULE,
@@ -340,14 +328,14 @@ static int atmel_ecc_probe(struct i2c_client *client,
                      &driver_data.i2c_client_list);
        spin_unlock(&driver_data.i2c_list_lock);
 
-       ret = crypto_register_kpp(&atmel_ecdh);
+       ret = crypto_register_kpp(&atmel_ecdh_nist_p256);
        if (ret) {
                spin_lock(&driver_data.i2c_list_lock);
                list_del(&i2c_priv->i2c_client_list_node);
                spin_unlock(&driver_data.i2c_list_lock);
 
                dev_err(&client->dev, "%s alg registration failed\n",
-                       atmel_ecdh.base.cra_driver_name);
+                       atmel_ecdh_nist_p256.base.cra_driver_name);
        } else {
                dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n");
        }
@@ -365,7 +353,7 @@ static int atmel_ecc_remove(struct i2c_client *client)
                return -EBUSY;
        }
 
-       crypto_unregister_kpp(&atmel_ecdh);
+       crypto_unregister_kpp(&atmel_ecdh_nist_p256);
 
        spin_lock(&driver_data.i2c_list_lock);
        list_del(&i2c_priv->i2c_client_list_node);
index e8e8281e027d9c9d6161325b549aed5eb4d83883..6fd3e969211d7aa7d14fc14fb94b18610d093b62 100644 (file)
@@ -339,7 +339,7 @@ int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
        }
 
        if (bus_clk_rate > 1000000L) {
-               dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
+               dev_err(dev, "%u exceeds maximum supported clock frequency (1MHz)\n",
                        bus_clk_rate);
                return -EINVAL;
        }
index 352d80cb5ae95e755519203460cacf69eeb326f3..1b13f601fd959c372c6913d63bb65734cb4da8cb 100644 (file)
@@ -434,7 +434,7 @@ static int atmel_sha_init(struct ahash_request *req)
 
        ctx->flags = 0;
 
-       dev_dbg(dd->dev, "init: digest size: %d\n",
+       dev_dbg(dd->dev, "init: digest size: %u\n",
                crypto_ahash_digestsize(tfm));
 
        switch (crypto_ahash_digestsize(tfm)) {
@@ -1102,7 +1102,7 @@ static int atmel_sha_start(struct atmel_sha_dev *dd)
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        int err;
 
-       dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+       dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %u\n",
                                                ctx->op, req->nbytes);
 
        err = atmel_sha_hw_init(dd);
index 4d63cb13a54f975fc13a652927fce3583ccedcc4..6f01c51e3c37644d903ba9b2b0a94336fa92cb77 100644 (file)
@@ -1217,7 +1217,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
 
        tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
        if (IS_ERR(tdes_dd->io_base)) {
-               dev_err(dev, "can't ioremap\n");
                err = PTR_ERR(tdes_dd->io_base);
                goto err_tasklet_kill;
        }
index 851b149f71701d48658138cc23ccbc533b1f5417..053315e260c2267b4a47cf1c15d732fcbec6db30 100644 (file)
@@ -1019,6 +1019,7 @@ static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
  * a SPU response message for an AEAD request. Includes buffers to catch SPU
  * message headers and the response data.
  * @mssg:      mailbox message containing the receive sg
+ * @req:       Crypto API request
  * @rctx:      crypto request context
  * @rx_frag_num: number of scatterlist elements required to hold the
  *             SPU response message
@@ -2952,9 +2953,9 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
 
 /**
  * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
- * cipher: AEAD structure
- * key:    Key followed by 4 bytes of salt
- * keylen: Length of key plus salt, in bytes
+ * @cipher: AEAD structure
+ * @key:    Key followed by 4 bytes of salt
+ * @keylen: Length of key plus salt, in bytes
  *
  * Extracts salt from key and stores it to be prepended to IV on each request.
  * Digest is always 16 bytes
index 007abf92cc05be6541ac11c26676abbb72f7a6ac..6283e8c6d51d3ea4ffb75fd19427a44edf76a145 100644 (file)
@@ -457,7 +457,7 @@ u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
  * @cipher_mode:       Algo type
  * @data_size:         Length of plaintext (bytes)
  *
- * @Return: Length of padding, in bytes
+ * Return: Length of padding, in bytes
  */
 u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
                         unsigned int data_size)
@@ -510,10 +510,10 @@ u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode,
 }
 
 /**
- * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included
+ * spum_aead_ivlen() - Calculate the length of the AEAD IV to be included
  * in a SPU request after the AAD and before the payload.
  * @cipher_mode:  cipher mode
- * @iv_ctr_len:   initialization vector length in bytes
+ * @iv_len:   initialization vector length in bytes
  *
  * In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need
  * to include the IV as a separate field in the SPU request msg.
@@ -543,9 +543,9 @@ enum hash_type spum_hash_type(u32 src_sent)
 /**
  * spum_digest_size() - Determine the size of a hash digest to expect the SPU to
  * return.
- * alg_digest_size: Number of bytes in the final digest for the given algo
- * alg:             The hash algorithm
- * htype:           Type of hash operation (init, update, full, etc)
+ * @alg_digest_size: Number of bytes in the final digest for the given algo
+ * @alg:             The hash algorithm
+ * @htype:           Type of hash operation (init, update, full, etc)
  *
  * When doing incremental hashing for an algorithm with a truncated hash
  * (e.g., SHA224), the SPU returns the full digest so that it can be fed back as
@@ -580,7 +580,7 @@ u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg,
  * @aead_parms:   Parameters related to AEAD operation
  * @data_size:    Length of data to be encrypted or authenticated. If AEAD, does
  *               not include length of AAD.
-
+ *
  * Return: the length of the SPU header in bytes. 0 if an error occurs.
  */
 u32 spum_create_request(u8 *spu_hdr,
@@ -911,7 +911,7 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
  * setkey() time in spu_cipher_req_init().
  * @spu_hdr:         Start of the request message header (MH field)
  * @spu_req_hdr_len: Length in bytes of the SPU request header
- * @isInbound:       0 encrypt, 1 decrypt
+ * @is_inbound:      0 encrypt, 1 decrypt
  * @cipher_parms:    Parameters describing cipher operation to be performed
  * @data_size:       Length of the data in the BD field
  *
index 2db35b5ccaa245f59fac09a39daccca394a65b8e..07989bb8c220a4909cd07bb513330ae58cd0c888 100644 (file)
@@ -543,7 +543,8 @@ void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len)
 /**
  * spu2_fmd_init() - At setkey time, initialize the fixed meta data for
  * subsequent skcipher requests for this context.
- * @spu2_cipher_type:  Cipher algorithm
+ * @fmd:               Start of FMD field to be written
+ * @spu2_type:         Cipher algorithm
  * @spu2_mode:         Cipher mode
  * @cipher_key_len:    Length of cipher key, in bytes
  * @cipher_iv_len:     Length of cipher initialization vector, in bytes
@@ -598,7 +599,7 @@ static int spu2_fmd_init(struct SPU2_FMD *fmd,
  * SPU request packet.
  * @fmd:            Start of FMD field to be written
  * @is_inbound:     true if decrypting. false if encrypting.
- * @authFirst:      true if alg authenticates before encrypting
+ * @auth_first:     true if alg authenticates before encrypting
  * @protocol:       protocol selector
  * @cipher_type:    cipher algorithm
  * @cipher_mode:    cipher mode
@@ -640,6 +641,7 @@ static void spu2_fmd_ctrl0_write(struct SPU2_FMD *fmd,
  * spu2_fmd_ctrl1_write() - Write ctrl1 field in fixed metadata (FMD) field of
  * SPU request packet.
  * @fmd:            Start of FMD field to be written
+ * @is_inbound:     true if decrypting. false if encrypting.
  * @assoc_size:     Length of additional associated data, in bytes
  * @auth_key_len:   Length of authentication key, in bytes
  * @cipher_key_len: Length of cipher key, in bytes
@@ -793,7 +795,7 @@ u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
 }
 
 /**
- * spu_payload_length() -  Given a SPU2 message header, extract the payload
+ * spu2_payload_length() -  Given a SPU2 message header, extract the payload
  * length.
  * @spu_hdr:  Start of SPU message header (FMD)
  *
@@ -812,10 +814,11 @@ u32 spu2_payload_length(u8 *spu_hdr)
 }
 
 /**
- * spu_response_hdr_len() - Determine the expected length of a SPU response
+ * spu2_response_hdr_len() - Determine the expected length of a SPU response
  * header.
  * @auth_key_len:  Length of authentication key, in bytes
  * @enc_key_len:   Length of encryption key, in bytes
+ * @is_hash:       Unused
  *
  * For SPU2, includes just FMD. OMD is never requested.
  *
@@ -827,7 +830,7 @@ u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
 }
 
 /**
- * spu_hash_pad_len() - Calculate the length of hash padding required to extend
+ * spu2_hash_pad_len() - Calculate the length of hash padding required to extend
  * data to a full block size.
  * @hash_alg:        hash algorithm
  * @hash_mode:       hash mode
@@ -845,8 +848,10 @@ u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
 }
 
 /**
- * spu2_gcm_ccm_padlen() -  Determine the length of GCM/CCM padding for either
+ * spu2_gcm_ccm_pad_len() -  Determine the length of GCM/CCM padding for either
  * the AAD field or the data.
+ * @cipher_mode:  Unused
+ * @data_size:    Unused
  *
  * Return:  0. Unlike SPU-M, SPU2 hardware does any GCM/CCM padding required.
  */
@@ -857,7 +862,7 @@ u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
 }
 
 /**
- * spu_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
+ * spu2_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
  * associated data in a SPU2 output packet.
  * @cipher_mode:   cipher mode
  * @assoc_len:     length of additional associated data, in bytes
@@ -878,11 +883,11 @@ u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode,
        return resp_len;
 }
 
-/*
- * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included
+/**
+ * spu2_aead_ivlen() - Calculate the length of the AEAD IV to be included
  * in a SPU request after the AAD and before the payload.
  * @cipher_mode:  cipher mode
- * @iv_ctr_len:   initialization vector length in bytes
+ * @iv_len:   initialization vector length in bytes
  *
  * For SPU2, AEAD IV is included in OMD and does not need to be repeated
  * prior to the payload.
@@ -909,9 +914,9 @@ enum hash_type spu2_hash_type(u32 src_sent)
 /**
  * spu2_digest_size() - Determine the size of a hash digest to expect the SPU to
  * return.
- * alg_digest_size: Number of bytes in the final digest for the given algo
- * alg:             The hash algorithm
- * htype:           Type of hash operation (init, update, full, etc)
+ * @alg_digest_size: Number of bytes in the final digest for the given algo
+ * @alg:             The hash algorithm
+ * @htype:           Type of hash operation (init, update, full, etc)
  *
  */
 u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
@@ -921,7 +926,7 @@ u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
 }
 
 /**
- * spu_create_request() - Build a SPU2 request message header, includint FMD and
+ * spu2_create_request() - Build a SPU2 request message header, includint FMD and
  * OMD.
  * @spu_hdr: Start of buffer where SPU request header is to be written
  * @req_opts: SPU request message options
@@ -1105,7 +1110,7 @@ u32 spu2_create_request(u8 *spu_hdr,
 }
 
 /**
- * spu_cipher_req_init() - Build an skcipher SPU2 request message header,
+ * spu2_cipher_req_init() - Build an skcipher SPU2 request message header,
  * including FMD and OMD.
  * @spu_hdr:       Location of start of SPU request (FMD field)
  * @cipher_parms:  Parameters describing cipher request
@@ -1162,11 +1167,11 @@ u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
 }
 
 /**
- * spu_cipher_req_finish() - Finish building a SPU request message header for a
+ * spu2_cipher_req_finish() - Finish building a SPU request message header for a
  * block cipher request.
  * @spu_hdr:         Start of the request message header (MH field)
  * @spu_req_hdr_len: Length in bytes of the SPU request header
- * @isInbound:       0 encrypt, 1 decrypt
+ * @is_inbound:      0 encrypt, 1 decrypt
  * @cipher_parms:    Parameters describing cipher operation to be performed
  * @data_size:       Length of the data in the BD field
  *
@@ -1222,7 +1227,7 @@ void spu2_cipher_req_finish(u8 *spu_hdr,
 }
 
 /**
- * spu_request_pad() - Create pad bytes at the end of the data.
+ * spu2_request_pad() - Create pad bytes at the end of the data.
  * @pad_start:      Start of buffer where pad bytes are to be written
  * @gcm_padding:    Length of GCM padding, in bytes
  * @hash_pad_len:   Number of bytes of padding extend data to full block
@@ -1311,7 +1316,7 @@ u8 spu2_rx_status_len(void)
 }
 
 /**
- * spu_status_process() - Process the status from a SPU response message.
+ * spu2_status_process() - Process the status from a SPU response message.
  * @statp:  start of STATUS word
  *
  * Return:  0 - if status is good and response should be processed
index c4669a96eaecf1258acdf3b57e9211c9d71c221f..d5d9cabea55aa4d4d621731a6200977e76928ac7 100644 (file)
@@ -119,8 +119,8 @@ int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes)
  * @from_skip:   number of bytes to skip in from_sg. Non-zero when previous
  *              request included part of the buffer in entry in from_sg.
  *              Assumes from_skip < from_sg->length.
- * @from_nents   number of entries in from_sg
- * @length       number of bytes to copy. may reach this limit before exhausting
+ * @from_nents:  number of entries in from_sg
+ * @length:      number of bytes to copy. may reach this limit before exhausting
  *              from_sg.
  *
  * Copies the entries themselves, not the data in the entries. Assumes to_sg has
index a780e627838ae9a3aa41835a32d353c4d4ea00ff..8b8ed77d8715d25595a7f26ed6c9cdf7ab4d5b22 100644 (file)
@@ -71,6 +71,9 @@ struct caam_skcipher_alg {
  * @adata: authentication algorithm details
  * @cdata: encryption algorithm details
  * @authsize: authentication tag (a.k.a. ICV / MAC) size
+ * @xts_key_fallback: true if fallback tfm needs to be used due
+ *                   to unsupported xts key lengths
+ * @fallback: xts fallback tfm
  */
 struct caam_ctx {
        struct caam_flc flc[NUM_OP];
index dd5f101e43f83ce1a1f358b06a3f3f1686e4a94c..e313233ec6de79e48e25bc88c490a82a4e21991e 100644 (file)
@@ -187,7 +187,8 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
 }
 
 /**
- * Count leading zeros, need it to strip, from a given scatterlist
+ * caam_rsa_count_leading_zeros - Count leading zeros, need it to strip,
+ *                                from a given scatterlist
  *
  * @sgl   : scatterlist to count zeros from
  * @nbytes: number of zeros, in bytes, to strip
index 711b1acdd4e0c269e897d2b82b1b8c9ebaf0bf7d..06ee42e8a24584e68ecc129d5fc4fdc52c7536f9 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
 #include <linux/printk.h>
-#include <linux/version.h>
 
 #include "cptpf.h"
 
index 99b053094f5aff2290e7b23d26c4ae309a03eec0..c288c4b51783da60b2a188b44fe8ef7b4c53ae4e 100644 (file)
@@ -10,7 +10,7 @@
 #include "nitrox_isr.h"
 #include "nitrox_mbx.h"
 
-/**
+/*
  * One vector for each type of ring
  *  - NPS packet ring, AQMQ ring and ZQMQ ring
  */
@@ -216,7 +216,7 @@ static void nps_core_int_tasklet(unsigned long data)
        }
 }
 
-/**
+/*
  * nps_core_int_isr - interrupt handler for NITROX errors and
  *   mailbox communication
  */
index 53ef0679213301b81a79a47a66368952d34cbd93..df95ba26b414150c9b8163323973fd8f98e5e9c2 100644 (file)
@@ -58,14 +58,15 @@ static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
        struct device *dev = DEV(ndev);
 
 
-       dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL);
+       dma_unmap_sg(dev, sr->in.sg, sg_nents(sr->in.sg),
+                    DMA_BIDIRECTIONAL);
        dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
                         DMA_TO_DEVICE);
        kfree(sr->in.sgcomp);
        sr->in.sg = NULL;
        sr->in.sgmap_cnt = 0;
 
-       dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt,
+       dma_unmap_sg(dev, sr->out.sg, sg_nents(sr->out.sg),
                     DMA_BIDIRECTIONAL);
        dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
                         DMA_TO_DEVICE);
@@ -178,7 +179,7 @@ static int dma_map_inbufs(struct nitrox_softreq *sr,
        return 0;
 
 incomp_err:
-       dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
+       dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
        sr->in.sgmap_cnt = 0;
        return ret;
 }
@@ -203,7 +204,7 @@ static int dma_map_outbufs(struct nitrox_softreq *sr,
        return 0;
 
 outcomp_map_err:
-       dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL);
+       dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_BIDIRECTIONAL);
        sr->out.sgmap_cnt = 0;
        sr->out.sg = NULL;
        return ret;
index 58fb3ed6e64424766ac6907b62f77a934bedcc78..54f6fb054119f0b518e54a73754888a4a07136bc 100644 (file)
@@ -56,7 +56,6 @@
 #include <linux/seq_file.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/version.h>
 
 /* Device specific zlib function definitions */
 #include "zip_device.h"
index 88275b4867ea36e9a23bb1a7e6e7b10a6c0c522b..5976530c00a8a16e90c420925bc90c2e8b792c5c 100644 (file)
@@ -59,7 +59,7 @@ struct ccp_crypto_queue {
 #define CCP_CRYPTO_MAX_QLEN    100
 
 static struct ccp_crypto_queue req_queue;
-static spinlock_t req_queue_lock;
+static DEFINE_SPINLOCK(req_queue_lock);
 
 struct ccp_crypto_cmd {
        struct list_head entry;
@@ -410,7 +410,6 @@ static int ccp_crypto_init(void)
                return ret;
        }
 
-       spin_lock_init(&req_queue_lock);
        INIT_LIST_HEAD(&req_queue.cmds);
        req_queue.backlog = &req_queue.cmds;
        req_queue.cmd_count = 0;
index 0971ee60f840081717646ad2d18a55fbde36a185..6777582aa1ceee3fcb0be1ac641da9b99ac3ab6e 100644 (file)
@@ -548,7 +548,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
        return ccp->cmd_q_count == suspended;
 }
 
-int ccp_dev_suspend(struct sp_device *sp)
+void ccp_dev_suspend(struct sp_device *sp)
 {
        struct ccp_device *ccp = sp->ccp_data;
        unsigned long flags;
@@ -556,7 +556,7 @@ int ccp_dev_suspend(struct sp_device *sp)
 
        /* If there's no device there's nothing to do */
        if (!ccp)
-               return 0;
+               return;
 
        spin_lock_irqsave(&ccp->cmd_lock, flags);
 
@@ -572,11 +572,9 @@ int ccp_dev_suspend(struct sp_device *sp)
        while (!ccp_queues_suspended(ccp))
                wait_event_interruptible(ccp->suspend_queue,
                                         ccp_queues_suspended(ccp));
-
-       return 0;
 }
 
-int ccp_dev_resume(struct sp_device *sp)
+void ccp_dev_resume(struct sp_device *sp)
 {
        struct ccp_device *ccp = sp->ccp_data;
        unsigned long flags;
@@ -584,7 +582,7 @@ int ccp_dev_resume(struct sp_device *sp)
 
        /* If there's no device there's nothing to do */
        if (!ccp)
-               return 0;
+               return;
 
        spin_lock_irqsave(&ccp->cmd_lock, flags);
 
@@ -597,8 +595,6 @@ int ccp_dev_resume(struct sp_device *sp)
        }
 
        spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
-       return 0;
 }
 
 int ccp_dev_init(struct sp_device *sp)
index d6a8f4e4b14a8099649404ba0a34abadf9c99696..bb88198c874e0ed2b8ed55a03fb93a9d9bef0aa0 100644 (file)
@@ -2418,7 +2418,6 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
        dst.address += CCP_ECC_OUTPUT_SIZE;
        ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
                                CCP_ECC_MODULUS_BYTES);
-       dst.address += CCP_ECC_OUTPUT_SIZE;
 
        /* Restore the workarea address */
        dst.address = save;
index cb9b4c4e371edb43a4d2b593ba39bda330189a0f..da3872c48308e176550bd03becfe584501482294 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/ccp.h>
 #include <linux/firmware.h>
 #include <linux/gfp.h>
+#include <linux/cpufeature.h>
 
 #include <asm/smp.h>
 
@@ -972,6 +973,11 @@ int sev_dev_init(struct psp_device *psp)
        struct sev_device *sev;
        int ret = -ENOMEM;
 
+       if (!boot_cpu_has(X86_FEATURE_SEV)) {
+               dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n");
+               return 0;
+       }
+
        sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
        if (!sev)
                goto e_err;
index 6284a15e50472911071404cd2327d82b20141985..7eb3e46682860c975f74e392beb87995f78d2913 100644 (file)
@@ -213,12 +213,8 @@ void sp_destroy(struct sp_device *sp)
 
 int sp_suspend(struct sp_device *sp)
 {
-       int ret;
-
        if (sp->dev_vdata->ccp_vdata) {
-               ret = ccp_dev_suspend(sp);
-               if (ret)
-                       return ret;
+               ccp_dev_suspend(sp);
        }
 
        return 0;
@@ -226,12 +222,8 @@ int sp_suspend(struct sp_device *sp)
 
 int sp_resume(struct sp_device *sp)
 {
-       int ret;
-
        if (sp->dev_vdata->ccp_vdata) {
-               ret = ccp_dev_resume(sp);
-               if (ret)
-                       return ret;
+               ccp_dev_resume(sp);
        }
 
        return 0;
index 0218d0670eeefdf274f0a9a2d45aac0491b1d385..20377e67f65dfd959e4df11d834e913151d87f48 100644 (file)
@@ -134,8 +134,8 @@ struct sp_device *sp_get_psp_master_device(void);
 int ccp_dev_init(struct sp_device *sp);
 void ccp_dev_destroy(struct sp_device *sp);
 
-int ccp_dev_suspend(struct sp_device *sp);
-int ccp_dev_resume(struct sp_device *sp);
+void ccp_dev_suspend(struct sp_device *sp);
+void ccp_dev_resume(struct sp_device *sp);
 
 #else  /* !CONFIG_CRYPTO_DEV_SP_CCP */
 
@@ -144,15 +144,8 @@ static inline int ccp_dev_init(struct sp_device *sp)
        return 0;
 }
 static inline void ccp_dev_destroy(struct sp_device *sp) { }
-
-static inline int ccp_dev_suspend(struct sp_device *sp)
-{
-       return 0;
-}
-static inline int ccp_dev_resume(struct sp_device *sp)
-{
-       return 0;
-}
+static inline void ccp_dev_suspend(struct sp_device *sp) { }
+static inline void ccp_dev_resume(struct sp_device *sp) { }
 #endif /* CONFIG_CRYPTO_DEV_SP_CCP */
 
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
index f471dbaef1fbc35e4bd9d40c38b5fdda4c74047e..f468594ef8afaaf2bec250a972db9382fd6bc5fa 100644 (file)
@@ -356,6 +356,7 @@ static const struct pci_device_id sp_pci_table[] = {
        { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
        { PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
        { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
+       { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
        /* Last entry must be zero */
        { 0, }
 };
index 5e697a90ea7f497082e05d9ac6aac49d1cfec530..5c9d47f3be375093fc31c96f336570555fa04df0 100644 (file)
@@ -5,7 +5,7 @@
  * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
  * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
  *
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
  */
 
 #include <linux/types.h>
@@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
        if (!start_addr)
                return -ENOMEM;
 
+       memset(start_addr, 0x0, ring_size);
        rb_mgr->ring_start = start_addr;
        rb_mgr->ring_size = ring_size;
        rb_mgr->ring_pa = __psp_pa(start_addr);
@@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
                          void *buf, size_t len, struct tee_ring_cmd **resp)
 {
        struct tee_ring_cmd *cmd;
-       u32 rptr, wptr;
        int nloop = 1000, ret = 0;
+       u32 rptr;
 
        *resp = NULL;
 
        mutex_lock(&tee->rb_mgr.mutex);
 
-       wptr = tee->rb_mgr.wptr;
-
-       /* Check if ring buffer is full */
+       /* Loop until empty entry found in ring buffer */
        do {
+               /* Get pointer to ring buffer command entry */
+               cmd = (struct tee_ring_cmd *)
+                       (tee->rb_mgr.ring_start + tee->rb_mgr.wptr);
+
                rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
 
-               if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
+               /* Check if ring buffer is full or command entry is waiting
+                * for response from TEE
+                */
+               if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
+                     cmd->flag == CMD_WAITING_FOR_RESPONSE))
                        break;
 
-               dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
-                        rptr, wptr);
+               dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+                       rptr, tee->rb_mgr.wptr);
 
-               /* Wait if ring buffer is full */
+               /* Wait if ring buffer is full or TEE is processing data */
                mutex_unlock(&tee->rb_mgr.mutex);
                schedule_timeout_interruptible(msecs_to_jiffies(10));
                mutex_lock(&tee->rb_mgr.mutex);
 
        } while (--nloop);
 
-       if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
-               dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
-                       rptr, wptr);
+       if (!nloop &&
+           (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
+            cmd->flag == CMD_WAITING_FOR_RESPONSE)) {
+               dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
+                       rptr, tee->rb_mgr.wptr, cmd->flag);
                ret = -EBUSY;
                goto unlock;
        }
 
-       /* Pointer to empty data entry in ring buffer */
-       cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
+       /* Do not submit command if PSP got disabled while processing any
+        * command in another thread
+        */
+       if (psp_dead) {
+               ret = -EBUSY;
+               goto unlock;
+       }
 
        /* Write command data into ring buffer */
        cmd->cmd_id = cmd_id;
@@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
        memset(&cmd->buf[0], 0, sizeof(cmd->buf));
        memcpy(&cmd->buf[0], buf, len);
 
+       /* Indicate driver is waiting for response */
+       cmd->flag = CMD_WAITING_FOR_RESPONSE;
+
        /* Update local copy of write pointer */
        tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
        if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
@@ -309,14 +326,14 @@ static int tee_wait_cmd_completion(struct psp_tee_device *tee,
                                   struct tee_ring_cmd *resp,
                                   unsigned int timeout)
 {
-       /* ~5ms sleep per loop => nloop = timeout * 200 */
-       int nloop = timeout * 200;
+       /* ~1ms sleep per loop => nloop = timeout * 1000 */
+       int nloop = timeout * 1000;
 
        while (--nloop) {
                if (resp->cmd_state == TEE_CMD_STATE_COMPLETED)
                        return 0;
 
-               usleep_range(5000, 5100);
+               usleep_range(1000, 1100);
        }
 
        dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n",
@@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
                return ret;
 
        ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
-       if (ret)
+       if (ret) {
+               resp->flag = CMD_RESPONSE_TIMEDOUT;
                return ret;
+       }
 
        memcpy(buf, &resp->buf[0], len);
        *status = resp->status;
 
+       resp->flag = CMD_RESPONSE_COPIED;
+
        return 0;
 }
 EXPORT_SYMBOL(psp_tee_process_cmd);
index f099601121150dc7c38e4ef89e22d8a61475f4b1..49d26158b71e31635557d971ad19a148b9859043 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: MIT */
 /*
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
  *
  * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
  * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
@@ -18,7 +18,7 @@
 #include <linux/mutex.h>
 
 #define TEE_DEFAULT_TIMEOUT            10
-#define MAX_BUFFER_SIZE                        992
+#define MAX_BUFFER_SIZE                        988
 
 /**
  * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
@@ -81,6 +81,20 @@ enum tee_cmd_state {
        TEE_CMD_STATE_COMPLETED,
 };
 
+/**
+ * enum cmd_resp_state - TEE command's response status maintained by driver
+ * @CMD_RESPONSE_INVALID:      initial state when no command is written to ring
+ * @CMD_WAITING_FOR_RESPONSE:  driver waiting for response from TEE
+ * @CMD_RESPONSE_TIMEDOUT:     failed to get response from TEE
+ * @CMD_RESPONSE_COPIED:       driver has copied response from TEE
+ */
+enum cmd_resp_state {
+       CMD_RESPONSE_INVALID,
+       CMD_WAITING_FOR_RESPONSE,
+       CMD_RESPONSE_TIMEDOUT,
+       CMD_RESPONSE_COPIED,
+};
+
 /**
  * struct tee_ring_cmd - Structure of the command buffer in TEE ring
  * @cmd_id:      refers to &enum tee_cmd_id. Command id for the ring buffer
@@ -91,6 +105,7 @@ enum tee_cmd_state {
  * @pdata:       private data (currently unused)
  * @res1:        reserved region
  * @buf:         TEE command specific buffer
+ * @flag:       refers to &enum cmd_resp_state
  */
 struct tee_ring_cmd {
        u32 cmd_id;
@@ -100,6 +115,7 @@ struct tee_ring_cmd {
        u64 pdata;
        u32 res1[2];
        u8 buf[MAX_BUFFER_SIZE];
+       u32 flag;
 
        /* Total size: 1024 bytes */
 } __packed;
index d0e59e942568a0e7837b4c517559eb56a5df829f..e599ac6dc162a26c1246e24b45dddd982aeceb1a 100644 (file)
@@ -352,10 +352,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
        req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
        /* Map registers space */
        new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
-       if (IS_ERR(new_drvdata->cc_base)) {
-               dev_err(dev, "Failed to ioremap registers");
+       if (IS_ERR(new_drvdata->cc_base))
                return PTR_ERR(new_drvdata->cc_base);
-       }
 
        dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
                req_mem_cc_regs);
index f5a336634daa6333ddd5b28d5fb4411a4f37d5bb..6933546f87b1a863e9eecc94470d09168bde5b51 100644 (file)
@@ -126,11 +126,6 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
        return container_of(ctx->dev, struct uld_ctx, dev);
 }
 
-static inline int is_ofld_imm(const struct sk_buff *skb)
-{
-       return (skb->len <= SGE_MAX_WR_LEN);
-}
-
 static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
 {
        memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
@@ -769,13 +764,14 @@ static inline void create_wreq(struct chcr_context *ctx,
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        unsigned int tx_channel_id, rx_channel_id;
        unsigned int txqidx = 0, rxqidx = 0;
-       unsigned int qid, fid;
+       unsigned int qid, fid, portno;
 
        get_qidxs(req, &txqidx, &rxqidx);
        qid = u_ctx->lldi.rxq_ids[rxqidx];
        fid = u_ctx->lldi.rxq_ids[0];
+       portno = rxqidx / ctx->rxq_perchan;
        tx_channel_id = txqidx / ctx->txq_perchan;
-       rx_channel_id = rxqidx / ctx->rxq_perchan;
+       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
 
 
        chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
@@ -797,15 +793,13 @@ static inline void create_wreq(struct chcr_context *ctx,
 
 /**
  *     create_cipher_wr - form the WR for cipher operations
- *     @req: cipher req.
- *     @ctx: crypto driver context of the request.
- *     @qid: ingress qid where response of this WR should be received.
- *     @op_type:       encryption or decryption
+ *     @wrparam: Container for create_cipher_wr()'s parameters
  */
 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
        struct chcr_context *ctx = c_ctx(tfm);
+       struct uld_ctx *u_ctx = ULD_CTX(ctx);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
        struct sk_buff *skb = NULL;
        struct chcr_wr *chcr_req;
@@ -822,6 +816,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
        struct adapter *adap = padap(ctx->dev);
        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
        nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
                              reqctx->dst_ofst);
        dst_size = get_space_for_phys_dsgl(nents);
@@ -1559,7 +1554,8 @@ static inline void chcr_free_shash(struct crypto_shash *base_hash)
 
 /**
  *     create_hash_wr - Create hash work request
- *     @req - Cipher req base
+ *     @req: Cipher req base
+ *     @param: Container for create_hash_wr()'s parameters
  */
 static struct sk_buff *create_hash_wr(struct ahash_request *req,
                                      struct hash_wr_param *param)
@@ -1580,6 +1576,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
        int error = 0;
        unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
 
+       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
        transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
        req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
                                param->sg_len) <= SGE_MAX_WR_LEN;
@@ -2438,6 +2435,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 {
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct chcr_context *ctx = a_ctx(tfm);
+       struct uld_ctx *u_ctx = ULD_CTX(ctx);
        struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
        struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2457,6 +2455,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
        struct adapter *adap = padap(ctx->dev);
        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
        if (req->cryptlen == 0)
                return NULL;
 
@@ -2710,9 +2709,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
        struct dsgl_walk dsgl_walk;
        unsigned int authsize = crypto_aead_authsize(tfm);
        struct chcr_context *ctx = a_ctx(tfm);
+       struct uld_ctx *u_ctx = ULD_CTX(ctx);
        u32 temp;
        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
        dsgl_walk_init(&dsgl_walk, phys_cpl);
        dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
        temp = req->assoclen + req->cryptlen +
@@ -2752,9 +2753,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
        struct chcr_context *ctx = c_ctx(tfm);
+       struct uld_ctx *u_ctx = ULD_CTX(ctx);
        struct dsgl_walk dsgl_walk;
        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
        dsgl_walk_init(&dsgl_walk, phys_cpl);
        dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
                         reqctx->dst_ofst);
@@ -2958,6 +2961,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
 {
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct chcr_context *ctx = a_ctx(tfm);
+       struct uld_ctx *u_ctx = ULD_CTX(ctx);
        struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
@@ -2967,6 +2971,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
        unsigned int tag_offset = 0, auth_offset = 0;
        unsigned int assoclen;
 
+       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+
        if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
                assoclen = req->assoclen - 8;
        else
@@ -3127,6 +3133,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 {
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct chcr_context *ctx = a_ctx(tfm);
+       struct uld_ctx *u_ctx = ULD_CTX(ctx);
        struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
        struct sk_buff *skb = NULL;
@@ -3143,6 +3150,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        struct adapter *adap = padap(ctx->dev);
        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
        if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
                assoclen = req->assoclen - 8;
 
index f91f9d762a45ea1b980914bfdcd9d224f064370e..39c70e6255f9939fe580310ed22a904bbb68862e 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
  *
  * Copyright (C) 2011-2016 Chelsio Communications.  All rights reserved.
@@ -184,7 +184,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
        struct uld_ctx *u_ctx;
 
        /* Create the device and add it in the device list */
-       pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
+       pr_info_once("%s\n", DRV_DESC);
        if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
                return ERR_PTR(-EOPNOTSUPP);
 
@@ -309,4 +309,3 @@ module_exit(chcr_crypto_exit);
 MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Chelsio Communications");
-MODULE_VERSION(DRV_VERSION);
index b02f981e7c3234f66d0da99588578e508e7a03bc..f7c8bb95a71b41f63cfa356133b32d8200c9e5d0 100644 (file)
@@ -44,7 +44,6 @@
 #include "cxgb4_uld.h"
 
 #define DRV_MODULE_NAME "chcr"
-#define DRV_VERSION "1.0.0.0-ko"
 #define DRV_DESC "Chelsio T6 Crypto Co-processor Driver"
 
 #define MAX_PENDING_REQ_TO_HW 20
index 4ee010f399124ae5df0afd14b510f1db4cc7569a..fa5a9f207bc90afb94a26b101e8b018e3f3eb0aa 100644 (file)
@@ -21,7 +21,7 @@
 /* Static structures */
 
 static void __iomem *_iobase;
-static spinlock_t lock;
+static DEFINE_SPINLOCK(lock);
 
 /* Write a 128 bit field (either a writable key or IV) */
 static inline void
@@ -383,8 +383,6 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
                goto erequest;
        }
 
-       spin_lock_init(&lock);
-
        /* Clear any pending activity */
        iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
 
index 843192666dc38a5ec0c4c385aea7b4ca254fe682..e572f9982d4ef4bbd2e0c177170d31e60436440d 100644 (file)
@@ -68,6 +68,8 @@ config CRYPTO_DEV_HISI_HPRE
        select CRYPTO_DEV_HISI_QM
        select CRYPTO_DH
        select CRYPTO_RSA
+       select CRYPTO_CURVE25519
+       select CRYPTO_ECDH
        help
          Support for HiSilicon HPRE(High Performance RSA Engine)
          accelerator, which can accelerate RSA and DH algorithms.
index 181c109b19f7c4219be4db42739c04b4bd984382..e0b4a1982ee9ef709db645b5e755a2edaaa1605b 100644 (file)
 #define HPRE_PF_DEF_Q_NUM              64
 #define HPRE_PF_DEF_Q_BASE             0
 
+/*
+ * type used in qm sqc DW6.
+ * 0 - Algorithm which has been supported in V2, like RSA, DH and so on;
+ * 1 - ECC algorithm in V3.
+ */
+#define HPRE_V2_ALG_TYPE       0
+#define HPRE_V3_ECC_ALG_TYPE   1
+
 enum {
        HPRE_CLUSTER0,
        HPRE_CLUSTER1,
@@ -18,7 +26,6 @@ enum {
 };
 
 enum hpre_ctrl_dbgfs_file {
-       HPRE_CURRENT_QM,
        HPRE_CLEAR_ENABLE,
        HPRE_CLUSTER_CTRL,
        HPRE_DEBUG_FILE_NUM,
@@ -75,6 +82,9 @@ enum hpre_alg_type {
        HPRE_ALG_KG_CRT = 0x3,
        HPRE_ALG_DH_G2 = 0x4,
        HPRE_ALG_DH = 0x5,
+       HPRE_ALG_ECC_MUL = 0xD,
+       /* shared by x25519 and x448, but x448 is not supported now */
+       HPRE_ALG_CURVE25519_MUL = 0x10,
 };
 
 struct hpre_sqe {
@@ -92,8 +102,8 @@ struct hpre_sqe {
        __le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
 };
 
-struct hisi_qp *hpre_create_qp(void);
-int hpre_algs_register(void);
-void hpre_algs_unregister(void);
+struct hisi_qp *hpre_create_qp(u8 type);
+int hpre_algs_register(struct hisi_qm *qm);
+void hpre_algs_unregister(struct hisi_qm *qm);
 
 #endif
index a87f9904087aac2d02481e2b309acc99d4656edd..a380087c83f771ee39b6c2c930768ed2e7d2918a 100644 (file)
@@ -1,7 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2019 HiSilicon Limited. */
 #include <crypto/akcipher.h>
+#include <crypto/curve25519.h>
 #include <crypto/dh.h>
+#include <crypto/ecc_curve.h>
+#include <crypto/ecdh.h>
 #include <crypto/internal/akcipher.h>
 #include <crypto/internal/kpp.h>
 #include <crypto/internal/rsa.h>
@@ -36,6 +39,13 @@ struct hpre_ctx;
 #define HPRE_DFX_SEC_TO_US     1000000
 #define HPRE_DFX_US_TO_NS      1000
 
+/* size in bytes of the n prime */
+#define HPRE_ECC_NIST_P192_N_SIZE      24
+#define HPRE_ECC_NIST_P256_N_SIZE      32
+
+/* size in bytes */
+#define HPRE_ECC_HW256_KSZ_B   32
+
 typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
 
 struct hpre_rsa_ctx {
@@ -61,14 +71,35 @@ struct hpre_dh_ctx {
         * else if base if the counterpart public key we
         * compute the shared secret
         *      ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
+        * low address: d--->n, please refer to Hisilicon HPRE UM
         */
-       char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */
+       char *xa_p;
        dma_addr_t dma_xa_p;
 
        char *g; /* m */
        dma_addr_t dma_g;
 };
 
+struct hpre_ecdh_ctx {
+       /* low address: p->a->k->b */
+       unsigned char *p;
+       dma_addr_t dma_p;
+
+       /* low address: x->y */
+       unsigned char *g;
+       dma_addr_t dma_g;
+};
+
+struct hpre_curve25519_ctx {
+       /* low address: p->a->k */
+       unsigned char *p;
+       dma_addr_t dma_p;
+
+       /* gx coordinate */
+       unsigned char *g;
+       dma_addr_t dma_g;
+};
+
 struct hpre_ctx {
        struct hisi_qp *qp;
        struct hpre_asym_request **req_list;
@@ -80,7 +111,11 @@ struct hpre_ctx {
        union {
                struct hpre_rsa_ctx rsa;
                struct hpre_dh_ctx dh;
+               struct hpre_ecdh_ctx ecdh;
+               struct hpre_curve25519_ctx curve25519;
        };
+       /* for ecc algorithms */
+       unsigned int curve_id;
 };
 
 struct hpre_asym_request {
@@ -91,6 +126,8 @@ struct hpre_asym_request {
        union {
                struct akcipher_request *rsa;
                struct kpp_request *dh;
+               struct kpp_request *ecdh;
+               struct kpp_request *curve25519;
        } areq;
        int err;
        int req_id;
@@ -152,12 +189,12 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
        }
 }
 
-static struct hisi_qp *hpre_get_qp_and_start(void)
+static struct hisi_qp *hpre_get_qp_and_start(u8 type)
 {
        struct hisi_qp *qp;
        int ret;
 
-       qp = hpre_create_qp();
+       qp = hpre_create_qp(type);
        if (!qp) {
                pr_err("Can not create hpre qp!\n");
                return ERR_PTR(-ENODEV);
@@ -261,8 +298,6 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
        dma_addr_t tmp;
 
        tmp = le64_to_cpu(sqe->in);
-       if (unlikely(!tmp))
-               return;
 
        if (src) {
                if (req->src)
@@ -272,8 +307,6 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
        }
 
        tmp = le64_to_cpu(sqe->out);
-       if (unlikely(!tmp))
-               return;
 
        if (req->dst) {
                if (dst)
@@ -288,13 +321,16 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
 static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
                                void **kreq)
 {
+       struct device *dev = HPRE_DEV(ctx);
        struct hpre_asym_request *req;
-       int err, id, done;
+       unsigned int err, done, alg;
+       int id;
 
 #define HPRE_NO_HW_ERR         0
 #define HPRE_HW_TASK_DONE      3
 #define HREE_HW_ERR_MASK       0x7ff
 #define HREE_SQE_DONE_MASK     0x3
+#define HREE_ALG_TYPE_MASK     0x1f
        id = (int)le16_to_cpu(sqe->tag);
        req = ctx->req_list[id];
        hpre_rm_req_from_ctx(req);
@@ -307,7 +343,11 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
                HREE_SQE_DONE_MASK;
 
        if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
-               return  0;
+               return 0;
+
+       alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
+       dev_err_ratelimited(dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
+               alg, done, err);
 
        return -EINVAL;
 }
@@ -413,7 +453,6 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
        struct hpre_sqe *sqe = resp;
        struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
 
-
        if (unlikely(!req)) {
                atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
                return;
@@ -422,18 +461,29 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
        req->cb(ctx, resp);
 }
 
-static int hpre_ctx_init(struct hpre_ctx *ctx)
+static void hpre_stop_qp_and_put(struct hisi_qp *qp)
+{
+       hisi_qm_stop_qp(qp);
+       hisi_qm_free_qps(&qp, 1);
+}
+
+static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
 {
        struct hisi_qp *qp;
+       int ret;
 
-       qp = hpre_get_qp_and_start();
+       qp = hpre_get_qp_and_start(type);
        if (IS_ERR(qp))
                return PTR_ERR(qp);
 
        qp->qp_ctx = ctx;
        qp->req_cb = hpre_alg_cb;
 
-       return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+       ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+       if (ret)
+               hpre_stop_qp_and_put(qp);
+
+       return ret;
 }
 
 static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
@@ -510,7 +560,6 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
        return ret;
 }
 
-#ifdef CONFIG_CRYPTO_DH
 static int hpre_dh_compute_value(struct kpp_request *req)
 {
        struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
@@ -674,7 +723,7 @@ static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
 {
        struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
 
-       return hpre_ctx_init(ctx);
+       return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
 }
 
 static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
@@ -683,7 +732,6 @@ static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
 
        hpre_dh_clear_ctx(ctx, true);
 }
-#endif
 
 static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
 {
@@ -1100,7 +1148,7 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
                return PTR_ERR(ctx->rsa.soft_tfm);
        }
 
-       ret = hpre_ctx_init(ctx);
+       ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
        if (ret)
                crypto_free_akcipher(ctx->rsa.soft_tfm);
 
@@ -1115,6 +1163,734 @@ static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
        crypto_free_akcipher(ctx->rsa.soft_tfm);
 }
 
+/* Reverse a byte buffer in place (little-endian <-> big-endian key form). */
+static void hpre_key_to_big_end(u8 *data, int len)
+{
+       int i, j;
+       u8 tmp;
+
+       for (i = 0; i < len / 2; i++) {
+               j = len - i - 1;
+               tmp = data[j];
+               data[j] = data[i];
+               data[i] = tmp;
+       }
+}
+
+/*
+ * Wipe the secret scalar and free the DMA parameter buffer of an ECC
+ * context (ECDH or curve25519), then clear the generic ctx state.
+ * @is_clear_all: also stop the QP (full teardown, used from exit_tfm).
+ * @is_ecdh: selects which union member (ecdh vs curve25519) to release.
+ */
+static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
+                              bool is_ecdh)
+{
+       struct device *dev = HPRE_DEV(ctx);
+       unsigned int sz = ctx->key_sz;
+       unsigned int shift = sz << 1;
+
+       if (is_clear_all)
+               hisi_qm_stop_qp(ctx->qp);
+
+       if (is_ecdh && ctx->ecdh.p) {
+               /* ecdh: p->a->k->b */
+               /* the private key 'k' lives at offset 2*sz; zeroize before free */
+               memzero_explicit(ctx->ecdh.p + shift, sz);
+               dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
+               ctx->ecdh.p = NULL;
+       } else if (!is_ecdh && ctx->curve25519.p) {
+               /* curve25519: p->a->k */
+               memzero_explicit(ctx->curve25519.p + shift, sz);
+               dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
+                                 ctx->curve25519.dma_p);
+               ctx->curve25519.p = NULL;
+       }
+
+       hpre_ctx_clear(ctx, is_clear_all);
+}
+
+/*
+ * Map a curve id to the hardware key-buffer width in bytes, or 0 if the
+ * curve is not supported (P-192 and P-256 both use the 256-bit HW slot).
+ */
+static unsigned int hpre_ecdh_supported_curve(unsigned short id)
+{
+       switch (id) {
+       case ECC_CURVE_NIST_P192:
+       case ECC_CURVE_NIST_P256:
+               return HPRE_ECC_HW256_KSZ_B;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/*
+ * Copy a curve parameter stored as an array of u64 digits into @addr,
+ * then byte-swap the whole @cur_sz buffer to the big-endian form the
+ * hardware expects. The last digit may be partial (sz <= sizeof(u64)).
+ */
+static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
+{
+       unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
+       u8 i = 0;
+
+       while (i < ndigits - 1) {
+               memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
+               i++;
+       }
+
+       memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
+       hpre_key_to_big_end((u8 *)addr, cur_sz);
+}
+
+/*
+ * Write the curve domain parameters (p, a, b, gx, gy) into the DMA buffer
+ * laid out as p | a | k | b | gx | gy (each slot ctx->key_sz bytes, data
+ * right-aligned to cur_sz), and reject a private key >= curve order n.
+ * Returns 0 on success, -EINVAL for bad curve/key, -ENOMEM on alloc fail.
+ */
+static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
+                               unsigned int cur_sz)
+{
+       unsigned int shifta = ctx->key_sz << 1;
+       unsigned int shiftb = ctx->key_sz << 2;
+       void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
+       void *a = ctx->ecdh.p + shifta - cur_sz;
+       void *b = ctx->ecdh.p + shiftb - cur_sz;
+       void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
+       void *y = ctx->ecdh.g + shifta - cur_sz;
+       const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
+       char *n;
+
+       if (unlikely(!curve))
+               return -EINVAL;
+
+       n = kzalloc(ctx->key_sz, GFP_KERNEL);
+       if (!n)
+               return -ENOMEM;
+
+       fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
+       fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
+       fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
+       fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
+       fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
+       fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
+
+       /* a full-length key must be numerically below the curve order n */
+       if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
+               kfree(n);
+               return -EINVAL;
+       }
+
+       kfree(n);
+       return 0;
+}
+
+/* Return the curve order size (N) in bytes for a curve id, 0 if unknown. */
+static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
+{
+       switch (id) {
+       case ECC_CURVE_NIST_P192:
+               return HPRE_ECC_NIST_P192_N_SIZE;
+       case ECC_CURVE_NIST_P256:
+               return HPRE_ECC_NIST_P256_N_SIZE;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/*
+ * Validate the decoded ECDH parameters, (re)allocate the coherent DMA
+ * buffer holding curve params + generator point, and fill it. The buffer
+ * is 8 * key_sz: p|a|k|b followed by g (gx|gy) at offset 4 * key_sz.
+ */
+static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
+{
+       struct device *dev = HPRE_DEV(ctx);
+       unsigned int sz, shift, curve_sz;
+       int ret;
+
+       ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
+       if (!ctx->key_sz)
+               return -EINVAL;
+
+       curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+       if (!curve_sz || params->key_size > curve_sz)
+               return -EINVAL;
+
+       sz = ctx->key_sz;
+
+       /* buffer is reused across set_secret calls once allocated */
+       if (!ctx->ecdh.p) {
+               ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
+                                                GFP_KERNEL);
+               if (!ctx->ecdh.p)
+                       return -ENOMEM;
+       }
+
+       shift = sz << 2;
+       ctx->ecdh.g = ctx->ecdh.p + shift;
+       ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
+
+       ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
+       if (ret) {
+               dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
+               dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
+               ctx->ecdh.p = NULL;
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Return true if every byte of the key buffer is zero (invalid key). */
+static bool hpre_key_is_zero(char *key, unsigned short key_sz)
+{
+       int i;
+
+       for (i = 0; i < key_sz; i++)
+               if (key[i])
+                       return false;
+
+       return true;
+}
+
+/*
+ * kpp .set_secret for ECDH: decode the packed key, reject an all-zero
+ * key, reset any previous key material, install the curve parameters,
+ * and copy the private key (right-aligned) into the 'k' slot at 2*sz.
+ */
+static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+                               unsigned int len)
+{
+       struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+       struct device *dev = HPRE_DEV(ctx);
+       unsigned int sz, sz_shift;
+       struct ecdh params;
+       int ret;
+
+       if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
+               dev_err(dev, "failed to decode ecdh key!\n");
+               return -EINVAL;
+       }
+
+       if (hpre_key_is_zero(params.key, params.key_size)) {
+               dev_err(dev, "Invalid hpre key!\n");
+               return -EINVAL;
+       }
+
+       hpre_ecc_clear_ctx(ctx, false, true);
+
+       ret = hpre_ecdh_set_param(ctx, &params);
+       if (ret < 0) {
+               dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
+               return ret;
+       }
+
+       sz = ctx->key_sz;
+       sz_shift = (sz << 1) + sz - params.key_size;
+       memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
+
+       return 0;
+}
+
+/*
+ * Release per-request DMA resources after an ECDH operation: the bounce
+ * buffer allocated for src (when req->src is set) and either the bounce
+ * dst buffer or the single mapping made directly on the caller's sg.
+ */
+static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
+                                     struct hpre_asym_request *req,
+                                     struct scatterlist *dst,
+                                     struct scatterlist *src)
+{
+       struct device *dev = HPRE_DEV(ctx);
+       struct hpre_sqe *sqe = &req->req;
+       dma_addr_t dma;
+
+       dma = le64_to_cpu(sqe->in);
+
+       if (src && req->src)
+               dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
+
+       dma = le64_to_cpu(sqe->out);
+
+       /* req->dst set means a driver-owned buffer; otherwise dst was mapped */
+       if (req->dst)
+               dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
+       if (dst)
+               dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
+}
+
+/*
+ * Completion callback for ECDH requests: post-process the hardware
+ * response, compact the x|y point from key_sz-wide slots down to the
+ * curve size, free DMA resources and complete the kpp request.
+ * NOTE(review): req is dereferenced before ret is inspected — post_hf is
+ * assumed to always set *req; ret only carries the operation status.
+ */
+static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
+{
+       unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+       struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+       struct hpre_asym_request *req = NULL;
+       struct kpp_request *areq;
+       u64 overtime_thrhld;
+       char *p;
+       int ret;
+
+       ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+       areq = req->areq.ecdh;
+       areq->dst_len = ctx->key_sz << 1;
+
+       overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+       if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+               atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
+       /* strip leading padding: x then y become contiguous curve_sz halves */
+       p = sg_virt(areq->dst);
+       memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
+       memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
+
+       hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+       kpp_request_complete(areq, ret);
+
+       atomic64_inc(&dfx[HPRE_RECV_CNT].value);
+}
+
+/*
+ * Build the SQE skeleton for an ECDH request: validate dst capacity
+ * (needs 2 * key_sz for x|y), wire up the async callback, point the SQE
+ * at the curve-parameter DMA buffer and allocate a request-id tag.
+ * Returns -EINVAL on short dst (after updating dst_len), -EBUSY when
+ * the request table is full.
+ */
+static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
+                                    struct kpp_request *req)
+{
+       struct hpre_asym_request *h_req;
+       struct hpre_sqe *msg;
+       int req_id;
+       void *tmp;
+
+       if (req->dst_len < ctx->key_sz << 1) {
+               req->dst_len = ctx->key_sz << 1;
+               return -EINVAL;
+       }
+
+       tmp = kpp_request_ctx(req);
+       h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+       h_req->cb = hpre_ecdh_cb;
+       h_req->areq.ecdh = req;
+       msg = &h_req->req;
+       memset(msg, 0, sizeof(*msg));
+       msg->key = cpu_to_le64(ctx->ecdh.dma_p);
+
+       msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
+       msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+       h_req->ctx = ctx;
+
+       req_id = hpre_add_req_to_ctx(h_req);
+       if (req_id < 0)
+               return -EBUSY;
+
+       msg->tag = cpu_to_le16((u16)req_id);
+       return 0;
+}
+
+/*
+ * Stage the peer public point for the hardware: copy gx|gy out of the
+ * scatterlist into a coherent buffer, right-aligning each half into its
+ * own key_sz-wide slot (upper half of the buffer is used as scratch).
+ */
+static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
+                                  struct scatterlist *data, unsigned int len)
+{
+       struct hpre_sqe *msg = &hpre_req->req;
+       struct hpre_ctx *ctx = hpre_req->ctx;
+       struct device *dev = HPRE_DEV(ctx);
+       unsigned int tmpshift;
+       dma_addr_t dma = 0;
+       void *ptr;
+       int shift;
+
+       /* Src_data include gx and gy. */
+       shift = ctx->key_sz - (len >> 1);
+       if (unlikely(shift < 0))
+               return -EINVAL;
+
+       ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
+       if (unlikely(!ptr))
+               return -ENOMEM;
+
+       tmpshift = ctx->key_sz << 1;
+       scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
+       memcpy(ptr + shift, ptr + tmpshift, len >> 1);
+       memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
+
+       hpre_req->src = ptr;
+       msg->in = cpu_to_le64(dma);
+       return 0;
+}
+
+/*
+ * Map the caller's single-entry dst scatterlist for DMA output; the
+ * buffer must be exactly 2 * key_sz (the x|y result). No bounce buffer
+ * is used here (hpre_req->dst stays NULL).
+ */
+static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
+                                  struct scatterlist *data, unsigned int len)
+{
+       struct hpre_sqe *msg = &hpre_req->req;
+       struct hpre_ctx *ctx = hpre_req->ctx;
+       struct device *dev = HPRE_DEV(ctx);
+       dma_addr_t dma = 0;
+
+       if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
+               dev_err(dev, "data or data length is illegal!\n");
+               return -EINVAL;
+       }
+
+       hpre_req->dst = NULL;
+       dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(dev, dma))) {
+               dev_err(dev, "dma map data err!\n");
+               return -ENOMEM;
+       }
+
+       msg->out = cpu_to_le64(dma);
+       return 0;
+}
+
+/*
+ * kpp generate_public_key / compute_shared_secret entry point for ECDH.
+ * With req->src set this computes k * src; without it, k * G (the
+ * generator stored in the param buffer). Submits an ECC_MUL SQE and
+ * returns -EINPROGRESS on success; cleans up DMA state on failure.
+ */
+static int hpre_ecdh_compute_value(struct kpp_request *req)
+{
+       struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+       struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+       struct device *dev = HPRE_DEV(ctx);
+       void *tmp = kpp_request_ctx(req);
+       struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+       struct hpre_sqe *msg = &hpre_req->req;
+       int ret;
+
+       ret = hpre_ecdh_msg_request_set(ctx, req);
+       if (unlikely(ret)) {
+               dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
+               return ret;
+       }
+
+       if (req->src) {
+               ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
+               if (unlikely(ret)) {
+                       dev_err(dev, "failed to init src data, ret = %d!\n", ret);
+                       goto clear_all;
+               }
+       } else {
+               msg->in = cpu_to_le64(ctx->ecdh.dma_g);
+       }
+
+       ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
+       if (unlikely(ret)) {
+               dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
+               goto clear_all;
+       }
+
+       msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
+       ret = hpre_send(ctx, msg);
+       if (likely(!ret))
+               return -EINPROGRESS;
+
+clear_all:
+       hpre_rm_req_from_ctx(hpre_req);
+       hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+       return ret;
+}
+
+/* kpp .max_size: a public key is x|y, so twice the key buffer size. */
+static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
+{
+       struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+       /* max size is the pub_key_size, include x and y */
+       return ctx->key_sz << 1;
+}
+
+/* tfm init for "ecdh-nist-p192": select the curve and grab a V3 ECC QP. */
+static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
+{
+       struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+       ctx->curve_id = ECC_CURVE_NIST_P192;
+
+       return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+/* tfm init for "ecdh-nist-p256": select the curve and grab a V3 ECC QP. */
+static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
+{
+       struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+       ctx->curve_id = ECC_CURVE_NIST_P256;
+
+       return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+/* tfm exit for ECDH: full teardown — stop the QP and wipe key material. */
+static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+       struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+       hpre_ecc_clear_ctx(ctx, true, true);
+}
+
+/*
+ * Fill the curve25519 DMA buffer (layout p | a | k | gx) with the curve
+ * constants and the clamped, big-endian private key. The secret copy on
+ * the stack is zeroized before return.
+ */
+static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
+                                      unsigned int len)
+{
+       u8 secret[CURVE25519_KEY_SIZE] = { 0 };
+       unsigned int sz = ctx->key_sz;
+       const struct ecc_curve *curve;
+       unsigned int shift = sz << 1;
+       void *p;
+
+       /*
+        * The key from 'buf' is in little-endian, we should preprocess it as
+        * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
+        * then convert it to big endian. Only in this way, the result can be
+        * the same as the software curve-25519 that exists in crypto.
+        */
+       memcpy(secret, buf, len);
+       curve25519_clamp_secret(secret);
+       hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
+
+       p = ctx->curve25519.p + sz - len;
+
+       curve = ecc_get_curve25519();
+
+       /* fill curve parameters */
+       fill_curve_param(p, curve->p, len, curve->g.ndigits);
+       fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
+       memcpy(p + shift, secret, len);
+       fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
+       memzero_explicit(secret, CURVE25519_KEY_SIZE);
+}
+
+/*
+ * (Re)allocate the 4 * key_sz coherent buffer for curve25519 (p|a|k|gx),
+ * record the generator slot at offset 3 * key_sz, and populate it.
+ */
+static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
+                                    unsigned int len)
+{
+       struct device *dev = HPRE_DEV(ctx);
+       unsigned int sz = ctx->key_sz;
+       unsigned int shift = sz << 1;
+
+       /* p->a->k->gx */
+       if (!ctx->curve25519.p) {
+               ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
+                                                      &ctx->curve25519.dma_p,
+                                                      GFP_KERNEL);
+               if (!ctx->curve25519.p)
+                       return -ENOMEM;
+       }
+
+       ctx->curve25519.g = ctx->curve25519.p + shift + sz;
+       ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
+
+       hpre_curve25519_fill_curve(ctx, buf, len);
+
+       return 0;
+}
+
+/*
+ * kpp .set_secret for curve25519: require a 32-byte non-null key, drop
+ * any previous key material, then install the new clamped key and curve
+ * parameters into the DMA buffer.
+ */
+static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
+                                     unsigned int len)
+{
+       struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+       struct device *dev = HPRE_DEV(ctx);
+       int ret = -EINVAL;
+
+       if (len != CURVE25519_KEY_SIZE ||
+           !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
+               dev_err(dev, "key is null or key len is not 32bytes!\n");
+               return ret;
+       }
+
+       /* Free old secret if any */
+       hpre_ecc_clear_ctx(ctx, false, false);
+
+       ctx->key_sz = CURVE25519_KEY_SIZE;
+       ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
+       if (ret) {
+               dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
+               hpre_ecc_clear_ctx(ctx, false, false);
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Release per-request DMA resources for curve25519 (key_sz-sized src
+ * bounce buffer, and dst bounce buffer or direct sg mapping) — the
+ * curve25519 counterpart of hpre_ecdh_hw_data_clr_all().
+ */
+static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
+                                           struct hpre_asym_request *req,
+                                           struct scatterlist *dst,
+                                           struct scatterlist *src)
+{
+       struct device *dev = HPRE_DEV(ctx);
+       struct hpre_sqe *sqe = &req->req;
+       dma_addr_t dma;
+
+       dma = le64_to_cpu(sqe->in);
+
+       if (src && req->src)
+               dma_free_coherent(dev, ctx->key_sz, req->src, dma);
+
+       dma = le64_to_cpu(sqe->out);
+
+       if (req->dst)
+               dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
+       if (dst)
+               dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
+}
+
+/*
+ * Completion callback for curve25519: convert the hardware's big-endian
+ * result back to the little-endian wire format, free DMA resources and
+ * complete the kpp request.
+ * NOTE(review): as in hpre_ecdh_cb, req is used before ret is checked —
+ * post_hf is assumed to always populate *req.
+ */
+static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
+{
+       struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+       struct hpre_asym_request *req = NULL;
+       struct kpp_request *areq;
+       u64 overtime_thrhld;
+       int ret;
+
+       ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+       areq = req->areq.curve25519;
+       areq->dst_len = ctx->key_sz;
+
+       overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+       if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+               atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
+       hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
+
+       hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+       kpp_request_complete(areq, ret);
+
+       atomic64_inc(&dfx[HPRE_RECV_CNT].value);
+}
+
+/*
+ * Build the SQE skeleton for a curve25519 request (dst needs key_sz
+ * bytes), mirroring hpre_ecdh_msg_request_set() for the X25519 layout.
+ */
+static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
+                                          struct kpp_request *req)
+{
+       struct hpre_asym_request *h_req;
+       struct hpre_sqe *msg;
+       int req_id;
+       void *tmp;
+
+       if (unlikely(req->dst_len < ctx->key_sz)) {
+               req->dst_len = ctx->key_sz;
+               return -EINVAL;
+       }
+
+       tmp = kpp_request_ctx(req);
+       h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+       h_req->cb = hpre_curve25519_cb;
+       h_req->areq.curve25519 = req;
+       msg = &h_req->req;
+       memset(msg, 0, sizeof(*msg));
+       msg->key = cpu_to_le64(ctx->curve25519.dma_p);
+
+       msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
+       msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+       h_req->ctx = ctx;
+
+       req_id = hpre_add_req_to_ctx(h_req);
+       if (req_id < 0)
+               return -EBUSY;
+
+       msg->tag = cpu_to_le16((u16)req_id);
+       return 0;
+}
+
+/*
+ * Reduce a big-endian u-coordinate in [p, 2^255) modulo p = 2^255 - 19.
+ * Since the MSB was already masked, the value is below 2^255, so the
+ * residue fits in the last byte: zero the rest and subtract 0xed.
+ */
+static void hpre_curve25519_src_modulo_p(u8 *ptr)
+{
+       int i;
+
+       for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
+               ptr[i] = 0;
+
+       /* The modulus is ptr's last byte minus '0xed'(last byte of p) */
+       ptr[i] -= 0xed;
+}
+
+/*
+ * Stage the peer's 32-byte u-coordinate for the hardware: copy it out of
+ * the scatterlist into a coherent buffer, reject the all-zero point,
+ * mask the top bit per RFC 7748, convert to big endian and reduce mod p
+ * if needed.
+ */
+static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
+                                   struct scatterlist *data, unsigned int len)
+{
+       struct hpre_sqe *msg = &hpre_req->req;
+       struct hpre_ctx *ctx = hpre_req->ctx;
+       struct device *dev = HPRE_DEV(ctx);
+       u8 p[CURVE25519_KEY_SIZE] = { 0 };
+       const struct ecc_curve *curve;
+       dma_addr_t dma = 0;
+       u8 *ptr;
+
+       if (len != CURVE25519_KEY_SIZE) {
+               dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
+               return -EINVAL;
+       }
+
+       ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
+       if (unlikely(!ptr))
+               return -ENOMEM;
+
+       scatterwalk_map_and_copy(ptr, data, 0, len, 0);
+
+       if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
+               dev_err(dev, "gx is null!\n");
+               goto err;
+       }
+
+       /*
+        * Src_data(gx) is in little-endian order, MSB in the final byte should
+        * be masked as described in RFC7748, then transform it to big-endian
+        * form, then hisi_hpre can use the data.
+        */
+       ptr[31] &= 0x7f;
+       hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
+
+       curve = ecc_get_curve25519();
+
+       fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
+
+       /*
+        * When src_data equals (2^255 - 19) ~  (2^255 - 1), it is out of p,
+        * we get its modulus to p, and then use it.
+        */
+       if (memcmp(ptr, p, ctx->key_sz) >= 0)
+               hpre_curve25519_src_modulo_p(ptr);
+
+       hpre_req->src = ptr;
+       msg->in = cpu_to_le64(dma);
+       return 0;
+
+err:
+       dma_free_coherent(dev, ctx->key_sz, ptr, dma);
+       return -EINVAL;
+}
+
+/*
+ * Map the caller's single-entry dst scatterlist (exactly key_sz bytes)
+ * for the curve25519 result; no bounce buffer (hpre_req->dst stays NULL).
+ */
+static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
+                                   struct scatterlist *data, unsigned int len)
+{
+       struct hpre_sqe *msg = &hpre_req->req;
+       struct hpre_ctx *ctx = hpre_req->ctx;
+       struct device *dev = HPRE_DEV(ctx);
+       dma_addr_t dma = 0;
+
+       if (!data || !sg_is_last(data) || len != ctx->key_sz) {
+               dev_err(dev, "data or data length is illegal!\n");
+               return -EINVAL;
+       }
+
+       hpre_req->dst = NULL;
+       dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(dev, dma))) {
+               dev_err(dev, "dma map data err!\n");
+               return -ENOMEM;
+       }
+
+       msg->out = cpu_to_le64(dma);
+       return 0;
+}
+
+/*
+ * kpp entry point for X25519: k * src with req->src set, otherwise
+ * k * G (generator slot in the param buffer). Submits a CURVE25519_MUL
+ * SQE and returns -EINPROGRESS on success; unwinds DMA state on error.
+ */
+static int hpre_curve25519_compute_value(struct kpp_request *req)
+{
+       struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+       struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+       struct device *dev = HPRE_DEV(ctx);
+       void *tmp = kpp_request_ctx(req);
+       struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+       struct hpre_sqe *msg = &hpre_req->req;
+       int ret;
+
+       ret = hpre_curve25519_msg_request_set(ctx, req);
+       if (unlikely(ret)) {
+               dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
+               return ret;
+       }
+
+       if (req->src) {
+               ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
+               if (unlikely(ret)) {
+                       dev_err(dev, "failed to init src data, ret = %d!\n",
+                               ret);
+                       goto clear_all;
+               }
+       } else {
+               msg->in = cpu_to_le64(ctx->curve25519.dma_g);
+       }
+
+       ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
+       if (unlikely(ret)) {
+               dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
+               goto clear_all;
+       }
+
+       msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
+       ret = hpre_send(ctx, msg);
+       if (likely(!ret))
+               return -EINPROGRESS;
+
+clear_all:
+       hpre_rm_req_from_ctx(hpre_req);
+       hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+       return ret;
+}
+
+/* kpp .max_size: X25519 outputs a single key_sz-byte u-coordinate. */
+static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
+{
+       struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+       return ctx->key_sz;
+}
+
+/* tfm init for "curve25519": acquire a V3 ECC queue pair. */
+static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
+{
+       struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+       return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+/* tfm exit for curve25519: stop the QP and wipe key material. */
+static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
+{
+       struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+       hpre_ecc_clear_ctx(ctx, true, false);
+}
+
 static struct akcipher_alg rsa = {
        .sign = hpre_rsa_dec,
        .verify = hpre_rsa_enc,
@@ -1135,7 +1911,6 @@ static struct akcipher_alg rsa = {
        },
 };
 
-#ifdef CONFIG_CRYPTO_DH
 static struct kpp_alg dh = {
        .set_secret = hpre_dh_set_secret,
        .generate_public_key = hpre_dh_compute_value,
@@ -1152,9 +1927,83 @@ static struct kpp_alg dh = {
                .cra_module = THIS_MODULE,
        },
 };
-#endif
 
-int hpre_algs_register(void)
+/* kpp registration for hardware-offloaded "ecdh-nist-p192". */
+static struct kpp_alg ecdh_nist_p192 = {
+       .set_secret = hpre_ecdh_set_secret,
+       .generate_public_key = hpre_ecdh_compute_value,
+       .compute_shared_secret = hpre_ecdh_compute_value,
+       .max_size = hpre_ecdh_max_size,
+       .init = hpre_ecdh_nist_p192_init_tfm,
+       .exit = hpre_ecdh_exit_tfm,
+       .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+       .base = {
+               .cra_ctxsize = sizeof(struct hpre_ctx),
+               .cra_priority = HPRE_CRYPTO_ALG_PRI,
+               .cra_name = "ecdh-nist-p192",
+               .cra_driver_name = "hpre-ecdh",
+               .cra_module = THIS_MODULE,
+       },
+};
+
+/* kpp registration for hardware-offloaded "ecdh-nist-p256". */
+static struct kpp_alg ecdh_nist_p256 = {
+       .set_secret = hpre_ecdh_set_secret,
+       .generate_public_key = hpre_ecdh_compute_value,
+       .compute_shared_secret = hpre_ecdh_compute_value,
+       .max_size = hpre_ecdh_max_size,
+       .init = hpre_ecdh_nist_p256_init_tfm,
+       .exit = hpre_ecdh_exit_tfm,
+       .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+       .base = {
+               .cra_ctxsize = sizeof(struct hpre_ctx),
+               .cra_priority = HPRE_CRYPTO_ALG_PRI,
+               .cra_name = "ecdh-nist-p256",
+               .cra_driver_name = "hpre-ecdh",
+               .cra_module = THIS_MODULE,
+       },
+};
+
+/* kpp registration for hardware-offloaded "curve25519" (X25519). */
+static struct kpp_alg curve25519_alg = {
+       .set_secret = hpre_curve25519_set_secret,
+       .generate_public_key = hpre_curve25519_compute_value,
+       .compute_shared_secret = hpre_curve25519_compute_value,
+       .max_size = hpre_curve25519_max_size,
+       .init = hpre_curve25519_init_tfm,
+       .exit = hpre_curve25519_exit_tfm,
+       .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+       .base = {
+               .cra_ctxsize = sizeof(struct hpre_ctx),
+               .cra_priority = HPRE_CRYPTO_ALG_PRI,
+               .cra_name = "curve25519",
+               .cra_driver_name = "hpre-curve25519",
+               .cra_module = THIS_MODULE,
+       },
+};
+
+
+/* Register both ECDH kpp algs; unwind P-192 if P-256 registration fails. */
+static int hpre_register_ecdh(void)
+{
+       int ret;
+
+       ret = crypto_register_kpp(&ecdh_nist_p192);
+       if (ret)
+               return ret;
+
+       ret = crypto_register_kpp(&ecdh_nist_p256);
+       if (ret) {
+               crypto_unregister_kpp(&ecdh_nist_p192);
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Unregister both ECDH kpp algs in reverse registration order. */
+static void hpre_unregister_ecdh(void)
+{
+       crypto_unregister_kpp(&ecdh_nist_p256);
+       crypto_unregister_kpp(&ecdh_nist_p192);
+}
+
+int hpre_algs_register(struct hisi_qm *qm)
 {
        int ret;
 
@@ -1162,19 +2011,37 @@ int hpre_algs_register(void)
        ret = crypto_register_akcipher(&rsa);
        if (ret)
                return ret;
-#ifdef CONFIG_CRYPTO_DH
+
        ret = crypto_register_kpp(&dh);
        if (ret)
-               crypto_unregister_akcipher(&rsa);
-#endif
+               goto unreg_rsa;
+
+       if (qm->ver >= QM_HW_V3) {
+               ret = hpre_register_ecdh();
+               if (ret)
+                       goto unreg_dh;
+               ret = crypto_register_kpp(&curve25519_alg);
+               if (ret)
+                       goto unreg_ecdh;
+       }
+       return 0;
 
+unreg_ecdh:
+       hpre_unregister_ecdh();
+unreg_dh:
+       crypto_unregister_kpp(&dh);
+unreg_rsa:
+       crypto_unregister_akcipher(&rsa);
        return ret;
 }
 
-void hpre_algs_unregister(void)
+void hpre_algs_unregister(struct hisi_qm *qm)
 {
-       crypto_unregister_akcipher(&rsa);
-#ifdef CONFIG_CRYPTO_DH
+       if (qm->ver >= QM_HW_V3) {
+               crypto_unregister_kpp(&curve25519_alg);
+               hpre_unregister_ecdh();
+       }
+
        crypto_unregister_kpp(&dh);
-#endif
+       crypto_unregister_akcipher(&rsa);
 }
index e7a2c70eb9cf52f3cc70d6389418a781a62037e4..046bc962c8b2dd180f0cf856102a143964559a89 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/uacce.h>
 #include "hpre.h"
 
-#define HPRE_QUEUE_NUM_V2              1024
 #define HPRE_QM_ABNML_INT_MASK         0x100004
 #define HPRE_CTRL_CNT_CLR_CE_BIT       BIT(0)
 #define HPRE_COMM_CNT_CLR_CE           0x0
@@ -119,7 +118,6 @@ static struct hisi_qm_list hpre_devices = {
 };
 
 static const char * const hpre_debug_file_name[] = {
-       [HPRE_CURRENT_QM]   = "current_qm",
        [HPRE_CLEAR_ENABLE] = "rdclr_en",
        [HPRE_CLUSTER_CTRL] = "cluster_ctrl",
 };
@@ -226,41 +224,44 @@ static u32 vfs_num;
 module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
 MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
 
-struct hisi_qp *hpre_create_qp(void)
+struct hisi_qp *hpre_create_qp(u8 type)
 {
        int node = cpu_to_node(smp_processor_id());
        struct hisi_qp *qp = NULL;
        int ret;
 
-       ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp);
+       if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE)
+               return NULL;
+
+       /*
+        * type: 0 - RSA/DH. algorithm supported in V2,
+        *       1 - ECC algorithm in V3.
+        */
+       ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);
        if (!ret)
                return qp;
 
        return NULL;
 }
 
-static void hpre_pasid_enable(struct hisi_qm *qm)
+static void hpre_config_pasid(struct hisi_qm *qm)
 {
-       u32 val;
-
-       val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
-       val |= BIT(HPRE_PASID_EN_BIT);
-       writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
-       val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
-       val |= BIT(HPRE_PASID_EN_BIT);
-       writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
-}
+       u32 val1, val2;
 
-static void hpre_pasid_disable(struct hisi_qm *qm)
-{
-       u32 val;
+       if (qm->ver >= QM_HW_V3)
+               return;
 
-       val = readl_relaxed(qm->io_base +  HPRE_DATA_RUSER_CFG);
-       val &= ~BIT(HPRE_PASID_EN_BIT);
-       writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
-       val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
-       val &= ~BIT(HPRE_PASID_EN_BIT);
-       writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
+       val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
+       val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
+       if (qm->use_sva) {
+               val1 |= BIT(HPRE_PASID_EN_BIT);
+               val2 |= BIT(HPRE_PASID_EN_BIT);
+       } else {
+               val1 &= ~BIT(HPRE_PASID_EN_BIT);
+               val2 &= ~BIT(HPRE_PASID_EN_BIT);
+       }
+       writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
+       writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
 }
 
 static int hpre_cfg_by_dsm(struct hisi_qm *qm)
@@ -320,7 +321,7 @@ static int hpre_set_cluster(struct hisi_qm *qm)
 }
 
 /*
- * For Kunpeng 920, we shoul disable FLR triggered by hardware (BME/PM/SRIOV).
+ * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV).
  * Or it may stay in D3 state when we bind and unbind hpre quickly,
  * as it does FLR triggered by hardware.
  */
@@ -383,15 +384,14 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
        if (qm->ver == QM_HW_V2) {
                ret = hpre_cfg_by_dsm(qm);
                if (ret)
-                       dev_err(dev, "acpi_evaluate_dsm err.\n");
+                       return ret;
 
                disable_flr_of_bme(qm);
-
-               /* Enable data buffer pasid */
-               if (qm->use_sva)
-                       hpre_pasid_enable(qm);
        }
 
+       /* Config data buffer pasid needed by Kunpeng 920 */
+       hpre_config_pasid(qm);
+
        return ret;
 }
 
@@ -401,10 +401,6 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
        unsigned long offset;
        int i;
 
-       /* clear current_qm */
-       writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
-       writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
        /* clear clusterX/cluster_ctrl */
        for (i = 0; i < clusters_num; i++) {
                offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
@@ -456,49 +452,6 @@ static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
        return &hpre->qm;
 }
 
-static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
-{
-       struct hisi_qm *qm = hpre_file_to_qm(file);
-
-       return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
-{
-       struct hisi_qm *qm = hpre_file_to_qm(file);
-       u32 num_vfs = qm->vfs_num;
-       u32 vfq_num, tmp;
-
-       if (val > num_vfs)
-               return -EINVAL;
-
-       /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
-       if (val == 0) {
-               qm->debug.curr_qm_qp_num = qm->qp_num;
-       } else {
-               vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
-               if (val == num_vfs) {
-                       qm->debug.curr_qm_qp_num =
-                       qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num;
-               } else {
-                       qm->debug.curr_qm_qp_num = vfq_num;
-               }
-       }
-
-       writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
-       writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
-       tmp = val |
-             (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
-       writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
-       tmp = val |
-             (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
-       writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
-       return  0;
-}
-
 static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
 {
        struct hisi_qm *qm = hpre_file_to_qm(file);
@@ -519,7 +472,7 @@ static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
               ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
        writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
 
-       return  0;
+       return 0;
 }
 
 static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
@@ -541,7 +494,7 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
 
        writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
 
-       return  0;
+       return 0;
 }
 
 static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
@@ -554,9 +507,6 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
 
        spin_lock_irq(&file->lock);
        switch (file->type) {
-       case HPRE_CURRENT_QM:
-               val = hpre_current_qm_read(file);
-               break;
        case HPRE_CLEAR_ENABLE:
                val = hpre_clear_enable_read(file);
                break;
@@ -597,11 +547,6 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
 
        spin_lock_irq(&file->lock);
        switch (file->type) {
-       case HPRE_CURRENT_QM:
-               ret = hpre_current_qm_write(file, val);
-               if (ret)
-                       goto err_input;
-               break;
        case HPRE_CLEAR_ENABLE:
                ret = hpre_clear_enable_write(file, val);
                if (ret)
@@ -740,11 +685,6 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm)
 {
        int ret;
 
-       ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM,
-                                      HPRE_CURRENT_QM);
-       if (ret)
-               return ret;
-
        ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
                                       HPRE_CLEAR_ENABLE);
        if (ret)
@@ -812,9 +752,9 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
        }
 
        if (pdev->revision >= QM_HW_V3)
-               qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2\n";
+               qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2";
        else
-               qm->algs = "rsa\ndh\n";
+               qm->algs = "rsa\ndh";
        qm->mode = uacce_mode;
        qm->pdev = pdev;
        qm->ver = pdev->revision;
@@ -867,6 +807,20 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
               HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
 }
 
+static void hpre_err_info_init(struct hisi_qm *qm)
+{
+       struct hisi_qm_err_info *err_info = &qm->err_info;
+
+       err_info->ce = QM_BASE_CE;
+       err_info->fe = 0;
+       err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
+                                  HPRE_OOO_ECC_2BIT_ERR;
+       err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE;
+       err_info->msi_wr_port = HPRE_WR_MSI_PORT;
+       err_info->acpi_rst = "HRST";
+       err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
+}
+
 static const struct hisi_qm_err_ini hpre_err_ini = {
        .hw_init                = hpre_set_user_domain_and_cache,
        .hw_err_enable          = hpre_hw_error_enable,
@@ -875,16 +829,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
        .clear_dev_hw_err_status = hpre_clear_hw_err_status,
        .log_dev_hw_err         = hpre_log_hw_error,
        .open_axi_master_ooo    = hpre_open_axi_master_ooo,
-       .err_info               = {
-               .ce                     = QM_BASE_CE,
-               .nfe                    = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
-               .fe                     = 0,
-               .ecc_2bits_mask         = HPRE_CORE_ECC_2BIT_ERR |
-                                         HPRE_OOO_ECC_2BIT_ERR,
-               .dev_ce_mask            = HPRE_HAC_RAS_CE_ENABLE,
-               .msi_wr_port            = HPRE_WR_MSI_PORT,
-               .acpi_rst               = "HRST",
-       }
+       .err_info_init          = hpre_err_info_init,
 };
 
 static int hpre_pf_probe_init(struct hpre *hpre)
@@ -892,13 +837,12 @@ static int hpre_pf_probe_init(struct hpre *hpre)
        struct hisi_qm *qm = &hpre->qm;
        int ret;
 
-       qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;
-
        ret = hpre_set_user_domain_and_cache(qm);
        if (ret)
                return ret;
 
        qm->err_ini = &hpre_err_ini;
+       qm->err_ini->err_info_init(qm);
        hisi_qm_dev_err_init(qm);
 
        return 0;
@@ -1006,8 +950,6 @@ static void hpre_remove(struct pci_dev *pdev)
        hisi_qm_stop(qm, QM_NORMAL);
 
        if (qm->fun_type == QM_HW_PF) {
-               if (qm->use_sva && qm->ver == QM_HW_V2)
-                       hpre_pasid_disable(qm);
                hpre_cnt_regs_clear(qm);
                qm->debug.curr_qm_qp_num = 0;
                hisi_qm_dev_err_uninit(qm);
@@ -1016,7 +958,6 @@ static void hpre_remove(struct pci_dev *pdev)
        hisi_qm_uninit(qm);
 }
 
-
 static const struct pci_error_handlers hpre_err_handler = {
        .error_detected         = hisi_qm_dev_err_detected,
        .slot_reset             = hisi_qm_dev_slot_reset,
@@ -1075,4 +1016,5 @@ module_exit(hpre_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>");
 MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");
index 13cb4216561a08d8e671f58cc14ee113856fe3bd..ce439a0c66c9ed534ebab22ec6261c66f60ffa86 100644 (file)
@@ -38,6 +38,7 @@
 #define QM_MB_CMD_SQC_BT               0x4
 #define QM_MB_CMD_CQC_BT               0x5
 #define QM_MB_CMD_SQC_VFT_V2           0x6
+#define QM_MB_CMD_STOP_QP              0x8
 
 #define QM_MB_CMD_SEND_BASE            0x300
 #define QM_MB_EVENT_SHIFT              8
 #define QM_DB_PRIORITY_SHIFT_V1                48
 #define QM_DOORBELL_SQ_CQ_BASE_V2      0x1000
 #define QM_DOORBELL_EQ_AEQ_BASE_V2     0x2000
+#define QM_QUE_ISO_CFG_V               0x0030
+#define QM_QUE_ISO_EN                  0x100154
+#define QM_CAPBILITY                   0x100158
+#define QM_QP_NUN_MASK                 GENMASK(10, 0)
+#define QM_QP_DB_INTERVAL              0x10000
+#define QM_QP_MAX_NUM_SHIFT            11
 #define QM_DB_CMD_SHIFT_V2             12
 #define QM_DB_RAND_SHIFT_V2            16
 #define QM_DB_INDEX_SHIFT_V2           32
 #define QM_DFX_CNT_CLR_CE              0x100118
 
 #define QM_ABNORMAL_INT_SOURCE         0x100000
-#define QM_ABNORMAL_INT_SOURCE_CLR     GENMASK(12, 0)
+#define QM_ABNORMAL_INT_SOURCE_CLR     GENMASK(14, 0)
 #define QM_ABNORMAL_INT_MASK           0x100004
-#define QM_ABNORMAL_INT_MASK_VALUE     0x1fff
+#define QM_ABNORMAL_INT_MASK_VALUE     0x7fff
 #define QM_ABNORMAL_INT_STATUS         0x100008
 #define QM_ABNORMAL_INT_SET            0x10000c
 #define QM_ABNORMAL_INF00              0x100010
 #define ACC_AM_ROB_ECC_INT_STS         0x300104
 #define ACC_ROB_ECC_ERR_MULTPL         BIT(1)
 
+#define QM_DFX_MB_CNT_VF               0x104010
+#define QM_DFX_DB_CNT_VF               0x104020
+#define QM_DFX_SQE_CNT_VF_SQN          0x104030
+#define QM_DFX_CQE_CNT_VF_CQN          0x104040
+#define QM_DFX_QN_SHIFT                        16
+#define CURRENT_FUN_MASK               GENMASK(5, 0)
+#define CURRENT_Q_MASK                 GENMASK(31, 16)
+
 #define POLL_PERIOD                    10
 #define POLL_TIMEOUT                   1000
 #define WAIT_PERIOD_US_MAX             200
 #define QM_CACHE_WB_DONE               0x208
 
 #define PCI_BAR_2                      2
+#define PCI_BAR_4                      4
 #define QM_SQE_DATA_ALIGN_MASK         GENMASK(6, 0)
 #define QMC_ALIGN(sz)                  ALIGN(sz, 32)
 
@@ -334,6 +350,7 @@ struct hisi_qm_hw_ops {
        void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
        void (*hw_error_uninit)(struct hisi_qm *qm);
        enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
+       int (*stop_qp)(struct hisi_qp *qp);
 };
 
 struct qm_dfx_item {
@@ -350,6 +367,7 @@ static struct qm_dfx_item qm_dfx_files[] = {
 };
 
 static const char * const qm_debug_file_name[] = {
+       [CURRENT_QM]   = "current_qm",
        [CURRENT_Q]    = "current_q",
        [CLEAR_ENABLE] = "clear_enable",
 };
@@ -373,6 +391,8 @@ static const struct hisi_qm_hw_error qm_hw_error[] = {
        { .int_msk = BIT(10), .msg = "qm_db_timeout" },
        { .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
        { .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
+       { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
+       { .int_msk = BIT(14), .msg = "qm_flr_timeout" },
        { /* sentinel */ }
 };
 
@@ -557,21 +577,22 @@ static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
 
 static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
 {
-       u64 doorbell;
-       u64 dbase;
+       void __iomem *io_base = qm->io_base;
        u16 randata = 0;
+       u64 doorbell;
 
        if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
-               dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
+               io_base = qm->db_io_base + (u64)qn * qm->db_interval +
+                         QM_DOORBELL_SQ_CQ_BASE_V2;
        else
-               dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;
+               io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;
 
        doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
                   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
                   ((u64)index << QM_DB_INDEX_SHIFT_V2)  |
                   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
 
-       writeq(doorbell, qm->io_base + dbase);
+       writeq(doorbell, io_base);
 }
 
 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
@@ -865,6 +886,26 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
        return 0;
 }
 
+static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
+{
+       u32 remain_q_num, vfq_num;
+       u32 num_vfs = qm->vfs_num;
+
+       vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
+       if (vfq_num >= qm->max_qp_num)
+               return qm->max_qp_num;
+
+       remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
+       if (vfq_num + remain_q_num <= qm->max_qp_num)
+               return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
+
+       /*
+        * if vfq_num + remain_q_num > max_qp_num, the last VFs,
+        * each with one more queue.
+        */
+       return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
+}
+
 static struct hisi_qm *file_to_qm(struct debugfs_file *file)
 {
        struct qm_debug *debug = file->debug;
@@ -918,6 +959,41 @@ static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
        return 0;
 }
 
+static u32 current_qm_read(struct debugfs_file *file)
+{
+       struct hisi_qm *qm = file_to_qm(file);
+
+       return readl(qm->io_base + QM_DFX_MB_CNT_VF);
+}
+
+static int current_qm_write(struct debugfs_file *file, u32 val)
+{
+       struct hisi_qm *qm = file_to_qm(file);
+       u32 tmp;
+
+       if (val > qm->vfs_num)
+               return -EINVAL;
+
+       /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
+       if (!val)
+               qm->debug.curr_qm_qp_num = qm->qp_num;
+       else
+               qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
+
+       writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
+       writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
+
+       tmp = val |
+             (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
+       writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+
+       tmp = val |
+             (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
+       writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+       return 0;
+}
+
 static ssize_t qm_debug_read(struct file *filp, char __user *buf,
                             size_t count, loff_t *pos)
 {
@@ -929,6 +1005,9 @@ static ssize_t qm_debug_read(struct file *filp, char __user *buf,
 
        mutex_lock(&file->lock);
        switch (index) {
+       case CURRENT_QM:
+               val = current_qm_read(file);
+               break;
        case CURRENT_Q:
                val = current_q_read(file);
                break;
@@ -971,27 +1050,24 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
 
        mutex_lock(&file->lock);
        switch (index) {
+       case CURRENT_QM:
+               ret = current_qm_write(file, val);
+               break;
        case CURRENT_Q:
                ret = current_q_write(file, val);
-               if (ret)
-                       goto err_input;
                break;
        case CLEAR_ENABLE:
                ret = clear_enable_write(file, val);
-               if (ret)
-                       goto err_input;
                break;
        default:
                ret = -EINVAL;
-               goto err_input;
        }
        mutex_unlock(&file->lock);
 
-       return count;
+       if (ret)
+               return ret;
 
-err_input:
-       mutex_unlock(&file->lock);
-       return ret;
+       return count;
 }
 
 static const struct file_operations qm_debug_fops = {
@@ -1529,12 +1605,12 @@ static const struct file_operations qm_cmd_fops = {
        .write = qm_cmd_write,
 };
 
-static void qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
+static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
+                                  enum qm_debug_file index)
 {
-       struct dentry *qm_d = qm->debug.qm_d;
        struct debugfs_file *file = qm->debug.files + index;
 
-       debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
+       debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
                            &qm_debug_fops);
 
        file->index = index;
@@ -1628,7 +1704,7 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
                if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
                        writel(error_status, qm->io_base +
                               QM_ABNORMAL_INT_SOURCE);
-                       writel(qm->err_ini->err_info.nfe,
+                       writel(qm->err_info.nfe,
                               qm->io_base + QM_RAS_NFE_ENABLE);
                        return ACC_ERR_RECOVERED;
                }
@@ -1639,6 +1715,11 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
        return ACC_ERR_RECOVERED;
 }
 
+static int qm_stop_qp(struct hisi_qp *qp)
+{
+       return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
+}
+
 static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
        .qm_db = qm_db_v1,
        .get_irq_num = qm_get_irq_num_v1,
@@ -1654,6 +1735,16 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
        .hw_error_handle = qm_hw_error_handle_v2,
 };
 
+static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
+       .get_vft = qm_get_vft_v2,
+       .qm_db = qm_db_v2,
+       .get_irq_num = qm_get_irq_num_v2,
+       .hw_error_init = qm_hw_error_init_v2,
+       .hw_error_uninit = qm_hw_error_uninit_v2,
+       .hw_error_handle = qm_hw_error_handle_v2,
+       .stop_qp = qm_stop_qp,
+};
+
 static void *qm_get_avail_sqe(struct hisi_qp *qp)
 {
        struct hisi_qp_status *qp_status = &qp->qp_status;
@@ -1933,6 +2024,14 @@ static int qm_drain_qp(struct hisi_qp *qp)
        if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
                return 0;
 
+       /* Kunpeng930 supports drain qp by device */
+       if (qm->ops->stop_qp) {
+               ret = qm->ops->stop_qp(qp);
+               if (ret)
+                       dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
+               return ret;
+       }
+
        addr = qm_ctx_alloc(qm, size, &dma_addr);
        if (IS_ERR(addr)) {
                dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
@@ -2132,6 +2231,8 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
 {
        struct hisi_qp *qp = q->priv;
        struct hisi_qm *qm = qp->qm;
+       resource_size_t phys_base = qm->db_phys_base +
+                                   qp->qp_id * qm->db_interval;
        size_t sz = vma->vm_end - vma->vm_start;
        struct pci_dev *pdev = qm->pdev;
        struct device *dev = &pdev->dev;
@@ -2143,16 +2244,19 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
                if (qm->ver == QM_HW_V1) {
                        if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
                                return -EINVAL;
-               } else {
+               } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
                        if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
                            QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
                                return -EINVAL;
+               } else {
+                       if (sz > qm->db_interval)
+                               return -EINVAL;
                }
 
                vma->vm_flags |= VM_IO;
 
                return remap_pfn_range(vma, vma->vm_start,
-                                      qm->phys_base >> PAGE_SHIFT,
+                                      phys_base >> PAGE_SHIFT,
                                       sz, pgprot_noncached(vma->vm_page_prot));
        case UACCE_QFRT_DUS:
                if (sz != qp->qdma.size)
@@ -2267,14 +2371,20 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
        uacce->priv = qm;
        uacce->algs = qm->algs;
 
-       if (qm->ver == QM_HW_V1) {
-               mmio_page_nr = QM_DOORBELL_PAGE_NR;
+       if (qm->ver == QM_HW_V1)
                uacce->api_ver = HISI_QM_API_VER_BASE;
-       } else {
+       else if (qm->ver == QM_HW_V2)
+               uacce->api_ver = HISI_QM_API_VER2_BASE;
+       else
+               uacce->api_ver = HISI_QM_API_VER3_BASE;
+
+       if (qm->ver == QM_HW_V1)
+               mmio_page_nr = QM_DOORBELL_PAGE_NR;
+       else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
                mmio_page_nr = QM_DOORBELL_PAGE_NR +
                        QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
-               uacce->api_ver = HISI_QM_API_VER2_BASE;
-       }
+       else
+               mmio_page_nr = qm->db_interval / PAGE_SIZE;
 
        dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
                       sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
@@ -2482,8 +2592,10 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
 
        if (qm->ver == QM_HW_V1)
                qm->ops = &qm_hw_ops_v1;
-       else
+       else if (qm->ver == QM_HW_V2)
                qm->ops = &qm_hw_ops_v2;
+       else
+               qm->ops = &qm_hw_ops_v3;
 
        pci_set_drvdata(pdev, qm);
        mutex_init(&qm->mailbox_lock);
@@ -2492,13 +2604,23 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
        qm->misc_ctl = false;
 }
 
-static void hisi_qm_pci_uninit(struct hisi_qm *qm)
+static void qm_put_pci_res(struct hisi_qm *qm)
 {
        struct pci_dev *pdev = qm->pdev;
 
-       pci_free_irq_vectors(pdev);
+       if (qm->use_db_isolation)
+               iounmap(qm->db_io_base);
+
        iounmap(qm->io_base);
        pci_release_mem_regions(pdev);
+}
+
+static void hisi_qm_pci_uninit(struct hisi_qm *qm)
+{
+       struct pci_dev *pdev = qm->pdev;
+
+       pci_free_irq_vectors(pdev);
+       qm_put_pci_res(qm);
        pci_disable_device(pdev);
 }
 
@@ -2527,7 +2649,6 @@ void hisi_qm_uninit(struct hisi_qm *qm)
                hisi_qm_cache_wb(qm);
                dma_free_coherent(dev, qm->qdma.size,
                                  qm->qdma.va, qm->qdma.dma);
-               memset(&qm->qdma, 0, sizeof(qm->qdma));
        }
 
        qm_irq_unregister(qm);
@@ -2681,7 +2802,7 @@ static int __hisi_qm_start(struct hisi_qm *qm)
 {
        int ret;
 
-       WARN_ON(!qm->qdma.dma);
+       WARN_ON(!qm->qdma.va);
 
        if (qm->fun_type == QM_HW_PF) {
                ret = qm_dev_mem_reset(qm);
@@ -2930,9 +3051,11 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
        qm->debug.qm_d = qm_d;
 
        /* only show this in PF */
-       if (qm->fun_type == QM_HW_PF)
+       if (qm->fun_type == QM_HW_PF) {
+               qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
                for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
-                       qm_create_debugfs_file(qm, i);
+                       qm_create_debugfs_file(qm, qm_d, i);
+       }
 
        debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
 
@@ -2960,6 +3083,10 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
        struct qm_dfx_registers *regs;
        int i;
 
+       /* clear current_qm */
+       writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+       writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
        /* clear current_q */
        writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
        writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
@@ -2982,7 +3109,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
 
 static void qm_hw_error_init(struct hisi_qm *qm)
 {
-       const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info;
+       struct hisi_qm_err_info *err_info = &qm->err_info;
 
        if (!qm->ops->hw_error_init) {
                dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
@@ -3175,30 +3302,46 @@ EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
 
 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
 {
-       u32 remain_q_num, q_num, i, j;
+       u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j;
+       u32 max_qp_num = qm->max_qp_num;
        u32 q_base = qm->qp_num;
        int ret;
 
        if (!num_vfs)
                return -EINVAL;
 
-       remain_q_num = qm->ctrl_qp_num - qm->qp_num;
+       vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
 
-       /* If remain queues not enough, return error. */
-       if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs)
+       /* If vfs_q_num is less than num_vfs, return error. */
+       if (vfs_q_num < num_vfs)
                return -EINVAL;
 
-       q_num = remain_q_num / num_vfs;
-       for (i = 1; i <= num_vfs; i++) {
-               if (i == num_vfs)
-                       q_num += remain_q_num % num_vfs;
-               ret = hisi_qm_set_vft(qm, i, q_base, q_num);
+       q_num = vfs_q_num / num_vfs;
+       remain_q_num = vfs_q_num % num_vfs;
+
+       for (i = num_vfs; i > 0; i--) {
+               /*
+                * if q_num + remain_q_num > max_qp_num in last vf, divide the
+                * remaining queues equally.
+                */
+               if (i == num_vfs && q_num + remain_q_num <= max_qp_num) {
+                       act_q_num = q_num + remain_q_num;
+                       remain_q_num = 0;
+               } else if (remain_q_num > 0) {
+                       act_q_num = q_num + 1;
+                       remain_q_num--;
+               } else {
+                       act_q_num = q_num;
+               }
+
+               act_q_num = min_t(int, act_q_num, max_qp_num);
+               ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
                if (ret) {
-                       for (j = i; j > 0; j--)
+                       for (j = num_vfs; j > i; j--)
                                hisi_qm_set_vft(qm, j, 0, 0);
                        return ret;
                }
-               q_base += q_num;
+               q_base += act_q_num;
        }
 
        return 0;
@@ -3318,15 +3461,15 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
        /* get device hardware error status */
        err_sts = qm->err_ini->get_dev_hw_err_status(qm);
        if (err_sts) {
-               if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
+               if (err_sts & qm->err_info.ecc_2bits_mask)
                        qm->err_status.is_dev_ecc_mbit = true;
 
                if (qm->err_ini->log_dev_hw_err)
                        qm->err_ini->log_dev_hw_err(qm, err_sts);
 
                /* ce error does not need to be reset */
-               if ((err_sts | qm->err_ini->err_info.dev_ce_mask) ==
-                    qm->err_ini->err_info.dev_ce_mask) {
+               if ((err_sts | qm->err_info.dev_ce_mask) ==
+                    qm->err_info.dev_ce_mask) {
                        if (qm->err_ini->clear_dev_hw_err_status)
                                qm->err_ini->clear_dev_hw_err_status(qm,
                                                                err_sts);
@@ -3639,7 +3782,7 @@ static int qm_soft_reset(struct hisi_qm *qm)
                acpi_status s;
 
                s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
-                                         qm->err_ini->err_info.acpi_rst,
+                                         qm->err_info.acpi_rst,
                                          NULL, &value);
                if (ACPI_FAILURE(s)) {
                        pci_err(pdev, "NO controller reset method!\n");
@@ -3707,12 +3850,11 @@ static void qm_restart_prepare(struct hisi_qm *qm)
 
        /* temporarily close the OOO port used for PEH to write out MSI */
        value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
-       writel(value & ~qm->err_ini->err_info.msi_wr_port,
+       writel(value & ~qm->err_info.msi_wr_port,
               qm->io_base + ACC_AM_CFG_PORT_WR_EN);
 
        /* clear dev ecc 2bit error source if having */
-       value = qm_get_dev_err_status(qm) &
-               qm->err_ini->err_info.ecc_2bits_mask;
+       value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
        if (value && qm->err_ini->clear_dev_hw_err_status)
                qm->err_ini->clear_dev_hw_err_status(qm, value);
 
@@ -3736,7 +3878,7 @@ static void qm_restart_done(struct hisi_qm *qm)
 
        /* open the OOO port for PEH to write out MSI */
        value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
-       value |= qm->err_ini->err_info.msi_wr_port;
+       value |= qm->err_info.msi_wr_port;
        writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
 
        qm->err_status.is_qm_ecc_mbit = false;
@@ -3875,8 +4017,7 @@ static int qm_check_dev_error(struct hisi_qm *qm)
        if (ret)
                return ret;
 
-       return (qm_get_dev_err_status(qm) &
-               qm->err_ini->err_info.ecc_2bits_mask);
+       return (qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask);
 }
 
 void hisi_qm_reset_prepare(struct pci_dev *pdev)
@@ -4084,7 +4225,7 @@ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
        mutex_unlock(&qm_list->lock);
 
        if (flag) {
-               ret = qm_list->register_to_crypto();
+               ret = qm_list->register_to_crypto(qm);
                if (ret) {
                        mutex_lock(&qm_list->lock);
                        list_del(&qm->list);
@@ -4115,59 +4256,134 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
        mutex_unlock(&qm_list->lock);
 
        if (list_empty(&qm_list->list))
-               qm_list->unregister_from_crypto();
+               qm_list->unregister_from_crypto(qm);
 }
 EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
 
-static int hisi_qm_pci_init(struct hisi_qm *qm)
+static int qm_get_qp_num(struct hisi_qm *qm)
+{
+       if (qm->ver == QM_HW_V1)
+               qm->ctrl_qp_num = QM_QNUM_V1;
+       else if (qm->ver == QM_HW_V2)
+               qm->ctrl_qp_num = QM_QNUM_V2;
+       else
+               qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) &
+                                       QM_QP_NUN_MASK;
+
+       if (qm->use_db_isolation)
+               qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >>
+                                 QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK;
+       else
+               qm->max_qp_num = qm->ctrl_qp_num;
+
+       /* check if qp number is valid */
+       if (qm->qp_num > qm->max_qp_num) {
+               dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
+                       qm->qp_num, qm->max_qp_num);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int qm_get_pci_res(struct hisi_qm *qm)
 {
        struct pci_dev *pdev = qm->pdev;
        struct device *dev = &pdev->dev;
-       unsigned int num_vec;
        int ret;
 
-       ret = pci_enable_device_mem(pdev);
-       if (ret < 0) {
-               dev_err(dev, "Failed to enable device mem!\n");
-               return ret;
-       }
-
        ret = pci_request_mem_regions(pdev, qm->dev_name);
        if (ret < 0) {
                dev_err(dev, "Failed to request mem regions!\n");
-               goto err_disable_pcidev;
+               return ret;
        }
 
        qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
-       qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
-       qm->io_base = ioremap(qm->phys_base, qm->phys_size);
+       qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
        if (!qm->io_base) {
                ret = -EIO;
-               goto err_release_mem_regions;
+               goto err_request_mem_regions;
+       }
+
+       if (qm->ver > QM_HW_V2) {
+               if (qm->fun_type == QM_HW_PF)
+                       qm->use_db_isolation = readl(qm->io_base +
+                                                    QM_QUE_ISO_EN) & BIT(0);
+               else
+                       qm->use_db_isolation = readl(qm->io_base +
+                                                    QM_QUE_ISO_CFG_V) & BIT(0);
+       }
+
+       if (qm->use_db_isolation) {
+               qm->db_interval = QM_QP_DB_INTERVAL;
+               qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
+               qm->db_io_base = ioremap(qm->db_phys_base,
+                                        pci_resource_len(pdev, PCI_BAR_4));
+               if (!qm->db_io_base) {
+                       ret = -EIO;
+                       goto err_ioremap;
+               }
+       } else {
+               qm->db_phys_base = qm->phys_base;
+               qm->db_io_base = qm->io_base;
+               qm->db_interval = 0;
        }
 
+       if (qm->fun_type == QM_HW_PF) {
+               ret = qm_get_qp_num(qm);
+               if (ret)
+                       goto err_db_ioremap;
+       }
+
+       return 0;
+
+err_db_ioremap:
+       if (qm->use_db_isolation)
+               iounmap(qm->db_io_base);
+err_ioremap:
+       iounmap(qm->io_base);
+err_request_mem_regions:
+       pci_release_mem_regions(pdev);
+       return ret;
+}
+
+static int hisi_qm_pci_init(struct hisi_qm *qm)
+{
+       struct pci_dev *pdev = qm->pdev;
+       struct device *dev = &pdev->dev;
+       unsigned int num_vec;
+       int ret;
+
+       ret = pci_enable_device_mem(pdev);
+       if (ret < 0) {
+               dev_err(dev, "Failed to enable device mem!\n");
+               return ret;
+       }
+
+       ret = qm_get_pci_res(qm);
+       if (ret)
+               goto err_disable_pcidev;
+
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret < 0)
-               goto err_iounmap;
+               goto err_get_pci_res;
        pci_set_master(pdev);
 
        if (!qm->ops->get_irq_num) {
                ret = -EOPNOTSUPP;
-               goto err_iounmap;
+               goto err_get_pci_res;
        }
        num_vec = qm->ops->get_irq_num(qm);
        ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
        if (ret < 0) {
                dev_err(dev, "Failed to enable MSI vectors!\n");
-               goto err_iounmap;
+               goto err_get_pci_res;
        }
 
        return 0;
 
-err_iounmap:
-       iounmap(qm->io_base);
-err_release_mem_regions:
-       pci_release_mem_regions(pdev);
+err_get_pci_res:
+       qm_put_pci_res(qm);
 err_disable_pcidev:
        pci_disable_device(pdev);
        return ret;
@@ -4187,28 +4403,28 @@ int hisi_qm_init(struct hisi_qm *qm)
 
        hisi_qm_pre_init(qm);
 
-       ret = qm_alloc_uacce(qm);
-       if (ret < 0)
-               dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
-
        ret = hisi_qm_pci_init(qm);
        if (ret)
-               goto err_remove_uacce;
+               return ret;
 
        ret = qm_irq_register(qm);
        if (ret)
-               goto err_pci_uninit;
+               goto err_pci_init;
 
        if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
                /* v2 starts to support get vft by mailbox */
                ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
                if (ret)
-                       goto err_irq_unregister;
+                       goto err_irq_register;
        }
 
+       ret = qm_alloc_uacce(qm);
+       if (ret < 0)
+               dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
+
        ret = hisi_qm_memory_init(qm);
        if (ret)
-               goto err_irq_unregister;
+               goto err_alloc_uacce;
 
        INIT_WORK(&qm->work, qm_work_process);
        if (qm->fun_type == QM_HW_PF)
@@ -4218,13 +4434,13 @@ int hisi_qm_init(struct hisi_qm *qm)
 
        return 0;
 
-err_irq_unregister:
-       qm_irq_unregister(qm);
-err_pci_uninit:
-       hisi_qm_pci_uninit(qm);
-err_remove_uacce:
+err_alloc_uacce:
        uacce_remove(qm->uacce);
        qm->uacce = NULL;
+err_irq_register:
+       qm_irq_unregister(qm);
+err_pci_init:
+       hisi_qm_pci_uninit(qm);
        return ret;
 }
 EXPORT_SYMBOL_GPL(hisi_qm_init);
index 54967c6b9c78874ff1ed18c9131892b812c8f0b8..acefdf8b3a50e7f5718c113cbe5efa4ab3a0cb73 100644 (file)
 #define PEH_AXUSER_CFG                 0x401001
 #define PEH_AXUSER_CFG_ENABLE          0xffffffff
 
-#define QM_DFX_MB_CNT_VF               0x104010
-#define QM_DFX_DB_CNT_VF               0x104020
-#define QM_DFX_SQE_CNT_VF_SQN          0x104030
-#define QM_DFX_CQE_CNT_VF_CQN          0x104040
-#define QM_DFX_QN_SHIFT                        16
-#define CURRENT_FUN_MASK               GENMASK(5, 0)
-#define CURRENT_Q_MASK                 GENMASK(31, 16)
-
 #define QM_AXI_RRESP                   BIT(0)
 #define QM_AXI_BRESP                   BIT(1)
 #define QM_ECC_MBIT                    BIT(2)
 #define QM_DB_TIMEOUT                  BIT(10)
 #define QM_OF_FIFO_OF                  BIT(11)
 #define QM_DB_RANDOM_INVALID           BIT(12)
+#define QM_MAILBOX_TIMEOUT             BIT(13)
+#define QM_FLR_TIMEOUT                 BIT(14)
 
 #define QM_BASE_NFE    (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
                         QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
-                        QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID)
+                        QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \
+                        QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT)
 #define QM_BASE_CE                     QM_ECC_1BIT
 
 #define QM_Q_DEPTH                     1024
@@ -123,6 +118,7 @@ enum qm_fun_type {
 };
 
 enum qm_debug_file {
+       CURRENT_QM,
        CURRENT_Q,
        CLEAR_ENABLE,
        DEBUG_FILE_NUM,
@@ -193,14 +189,14 @@ struct hisi_qm_err_ini {
        void (*open_axi_master_ooo)(struct hisi_qm *qm);
        void (*close_axi_master_ooo)(struct hisi_qm *qm);
        void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
-       struct hisi_qm_err_info err_info;
+       void (*err_info_init)(struct hisi_qm *qm);
 };
 
 struct hisi_qm_list {
        struct mutex lock;
        struct list_head list;
-       int (*register_to_crypto)(void);
-       void (*unregister_from_crypto)(void);
+       int (*register_to_crypto)(struct hisi_qm *qm);
+       void (*unregister_from_crypto)(struct hisi_qm *qm);
 };
 
 struct hisi_qm {
@@ -209,12 +205,15 @@ struct hisi_qm {
        const char *dev_name;
        struct pci_dev *pdev;
        void __iomem *io_base;
+       void __iomem *db_io_base;
        u32 sqe_size;
        u32 qp_base;
        u32 qp_num;
        u32 qp_in_used;
        u32 ctrl_qp_num;
+       u32 max_qp_num;
        u32 vfs_num;
+       u32 db_interval;
        struct list_head list;
        struct hisi_qm_list *qm_list;
 
@@ -230,6 +229,7 @@ struct hisi_qm {
 
        struct hisi_qm_status status;
        const struct hisi_qm_err_ini *err_ini;
+       struct hisi_qm_err_info err_info;
        struct hisi_qm_err_status err_status;
        unsigned long misc_ctl; /* driver removing and reset sched */
 
@@ -252,8 +252,11 @@ struct hisi_qm {
        const char *algs;
        bool use_sva;
        bool is_frozen;
+
+       /* doorbell isolation enable */
+       bool use_db_isolation;
        resource_size_t phys_base;
-       resource_size_t phys_size;
+       resource_size_t db_phys_base;
        struct uacce_device *uacce;
        int mode;
 };
index 8ca945ac297ef2d2c6584d2184dc96eff81b1ed5..0a3c8f019b02518ff108492bd87ddcb9fe8cc056 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2016-2017 Hisilicon Limited. */
+/* Copyright (c) 2016-2017 HiSilicon Limited. */
 #include <linux/crypto.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmapool.h>
index 91ee2bb575df2fd8e9c4557dd2d96f5e62ff5ff1..c8de1b51c843068fba4fcc0d0aadc4a075f490a0 100644 (file)
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Driver for the Hisilicon SEC units found on Hip06 Hip07
+ * Driver for the HiSilicon SEC units found on Hip06 Hip07
  *
- * Copyright (c) 2016-2017 Hisilicon Limited.
+ * Copyright (c) 2016-2017 HiSilicon Limited.
  */
 #include <linux/acpi.h>
 #include <linux/atomic.h>
@@ -233,7 +233,7 @@ static int sec_queue_map_io(struct sec_queue *queue)
                                    IORESOURCE_MEM,
                                    2 + queue->queue_id);
        if (!res) {
-               dev_err(dev, "Failed to get queue %d memory resource\n",
+               dev_err(dev, "Failed to get queue %u memory resource\n",
                        queue->queue_id);
                return -ENOMEM;
        }
@@ -653,12 +653,12 @@ static int sec_queue_free(struct sec_queue *queue)
        struct sec_dev_info *info = queue->dev_info;
 
        if (queue->queue_id >= SEC_Q_NUM) {
-               dev_err(info->dev, "No queue %d\n", queue->queue_id);
+               dev_err(info->dev, "No queue %u\n", queue->queue_id);
                return -ENODEV;
        }
 
        if (!queue->in_use) {
-               dev_err(info->dev, "Queue %d is idle\n", queue->queue_id);
+               dev_err(info->dev, "Queue %u is idle\n", queue->queue_id);
                return -ENODEV;
        }
 
@@ -834,6 +834,7 @@ int sec_queue_stop_release(struct sec_queue *queue)
 
 /**
  * sec_queue_empty() - Is this hardware queue currently empty.
+ * @queue: The queue to test
  *
  * We need to know if we have an empty queue for some of the chaining modes
  * as if it is not empty we may need to hold the message in a software queue
@@ -1315,6 +1316,6 @@ static struct platform_driver sec_driver = {
 module_platform_driver(sec_driver);
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Hisilicon Security Accelerators");
+MODULE_DESCRIPTION("HiSilicon Security Accelerators");
 MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com");
 MODULE_AUTHOR("Jonathan Cameron <jonathan.cameron@huawei.com>");
index 4d9063a8b10b1d826743bb19910057e8ac63b783..179a8250d691c46c87ca411d56c76603463a7727 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2016-2017 Hisilicon Limited. */
+/* Copyright (c) 2016-2017 HiSilicon Limited. */
 
 #ifndef _SEC_DRV_H_
 #define _SEC_DRV_H_
index 08491912afd567be84b0d9812b72509707871b4d..dfdce2f21e658855041b896f308a12ddc3a56e30 100644 (file)
@@ -4,8 +4,6 @@
 #ifndef __HISI_SEC_V2_H
 #define __HISI_SEC_V2_H
 
-#include <linux/list.h>
-
 #include "../qm.h"
 #include "sec_crypto.h"
 
@@ -50,7 +48,7 @@ struct sec_req {
 
        int err_type;
        int req_id;
-       int flag;
+       u32 flag;
 
        /* Status of the SEC request */
        bool fake_busy;
@@ -139,6 +137,7 @@ struct sec_ctx {
        bool pbuf_supported;
        struct sec_cipher_ctx c_ctx;
        struct sec_auth_ctx a_ctx;
+       struct device *dev;
 };
 
 enum sec_endian {
@@ -148,7 +147,6 @@ enum sec_endian {
 };
 
 enum sec_debug_file_index {
-       SEC_CURRENT_QM,
        SEC_CLEAR_ENABLE,
        SEC_DEBUG_FILE_NUM,
 };
@@ -183,6 +181,6 @@ struct sec_dev {
 
 void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
 struct hisi_qp **sec_create_qps(void);
-int sec_register_to_crypto(void);
-void sec_unregister_from_crypto(void);
+int sec_register_to_crypto(struct hisi_qm *qm);
+void sec_unregister_from_crypto(struct hisi_qm *qm);
 #endif
index 2eaa516b323118c8d897fb5c446c4f5886207a51..133aede8bf078716e8bebce4acf3d760c6f5043e 100644 (file)
@@ -7,6 +7,7 @@
 #include <crypto/des.h>
 #include <crypto/hash.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/des.h>
 #include <crypto/sha1.h>
 #include <crypto/sha2.h>
 #include <crypto/skcipher.h>
@@ -43,7 +44,6 @@
 
 #define SEC_TOTAL_IV_SZ                (SEC_IV_SIZE * QM_Q_DEPTH)
 #define SEC_SGL_SGE_NR         128
-#define SEC_CTX_DEV(ctx)       (&(ctx)->sec->qm.pdev->dev)
 #define SEC_CIPHER_AUTH                0xfe
 #define SEC_AUTH_CIPHER                0x1
 #define SEC_MAX_MAC_LEN                64
@@ -96,7 +96,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
                                  0, QM_Q_DEPTH, GFP_ATOMIC);
        mutex_unlock(&qp_ctx->req_lock);
        if (unlikely(req_id < 0)) {
-               dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
+               dev_err(req->ctx->dev, "alloc req id fail!\n");
                return req_id;
        }
 
@@ -112,7 +112,7 @@ static void sec_free_req_id(struct sec_req *req)
        int req_id = req->req_id;
 
        if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
-               dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
+               dev_err(req->ctx->dev, "free request id invalid!\n");
                return;
        }
 
@@ -138,7 +138,7 @@ static int sec_aead_verify(struct sec_req *req)
                                aead_req->cryptlen + aead_req->assoclen -
                                authsize);
        if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
-               dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
+               dev_err(req->ctx->dev, "aead verify failure!\n");
                return -EBADMSG;
        }
 
@@ -177,7 +177,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
        if (unlikely(req->err_type || done != SEC_SQE_DONE ||
            (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
            (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
-               dev_err(SEC_CTX_DEV(ctx),
+               dev_err_ratelimited(ctx->dev,
                        "err_type[%d],done[%d],flag[%d]\n",
                        req->err_type, done, flag);
                err = -EIO;
@@ -326,8 +326,8 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
 static int sec_alg_resource_alloc(struct sec_ctx *ctx,
                                  struct sec_qp_ctx *qp_ctx)
 {
-       struct device *dev = SEC_CTX_DEV(ctx);
        struct sec_alg_res *res = qp_ctx->res;
+       struct device *dev = ctx->dev;
        int ret;
 
        ret = sec_alloc_civ_resource(dev, res);
@@ -360,7 +360,7 @@ alloc_fail:
 static void sec_alg_resource_free(struct sec_ctx *ctx,
                                  struct sec_qp_ctx *qp_ctx)
 {
-       struct device *dev = SEC_CTX_DEV(ctx);
+       struct device *dev = ctx->dev;
 
        sec_free_civ_resource(dev, qp_ctx->res);
 
@@ -373,7 +373,7 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
 static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
                             int qp_ctx_id, int alg_type)
 {
-       struct device *dev = SEC_CTX_DEV(ctx);
+       struct device *dev = ctx->dev;
        struct sec_qp_ctx *qp_ctx;
        struct hisi_qp *qp;
        int ret = -ENOMEM;
@@ -428,7 +428,7 @@ err_destroy_idr:
 static void sec_release_qp_ctx(struct sec_ctx *ctx,
                               struct sec_qp_ctx *qp_ctx)
 {
-       struct device *dev = SEC_CTX_DEV(ctx);
+       struct device *dev = ctx->dev;
 
        hisi_qm_stop_qp(qp_ctx->qp);
        sec_alg_resource_free(ctx, qp_ctx);
@@ -452,6 +452,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
 
        sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
        ctx->sec = sec;
+       ctx->dev = &sec->qm.pdev->dev;
        ctx->hlf_q_num = sec->ctx_q_num >> 1;
 
        ctx->pbuf_supported = ctx->sec->iommu_used;
@@ -476,11 +477,9 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
 err_sec_release_qp_ctx:
        for (i = i - 1; i >= 0; i--)
                sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
-
        kfree(ctx->qp_ctx);
 err_destroy_qps:
        sec_destroy_qps(ctx->qps, sec->ctx_q_num);
-
        return ret;
 }
 
@@ -499,7 +498,7 @@ static int sec_cipher_init(struct sec_ctx *ctx)
 {
        struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
 
-       c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+       c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
                                          &c_ctx->c_key_dma, GFP_KERNEL);
        if (!c_ctx->c_key)
                return -ENOMEM;
@@ -512,7 +511,7 @@ static void sec_cipher_uninit(struct sec_ctx *ctx)
        struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
 
        memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
-       dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+       dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
                          c_ctx->c_key, c_ctx->c_key_dma);
 }
 
@@ -520,7 +519,7 @@ static int sec_auth_init(struct sec_ctx *ctx)
 {
        struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
 
-       a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+       a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
                                          &a_ctx->a_key_dma, GFP_KERNEL);
        if (!a_ctx->a_key)
                return -ENOMEM;
@@ -533,7 +532,7 @@ static void sec_auth_uninit(struct sec_ctx *ctx)
        struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
 
        memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
-       dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+       dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
                          a_ctx->a_key, a_ctx->a_key_dma);
 }
 
@@ -546,7 +545,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
        crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
        ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
        if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
-               dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
+               pr_err("get error skcipher iv size!\n");
                return -EINVAL;
        }
 
@@ -573,10 +572,18 @@ static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
        sec_ctx_base_uninit(ctx);
 }
 
-static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
+static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key,
                                    const u32 keylen,
                                    const enum sec_cmode c_mode)
 {
+       struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+       int ret;
+
+       ret = verify_skcipher_des3_key(tfm, key);
+       if (ret)
+               return ret;
+
        switch (keylen) {
        case SEC_DES3_2KEY_SIZE:
                c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
@@ -633,12 +640,13 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 {
        struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+       struct device *dev = ctx->dev;
        int ret;
 
        if (c_mode == SEC_CMODE_XTS) {
                ret = xts_verify_key(tfm, key, keylen);
                if (ret) {
-                       dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
+                       dev_err(dev, "xts mode key err!\n");
                        return ret;
                }
        }
@@ -648,7 +656,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 
        switch (c_alg) {
        case SEC_CALG_3DES:
-               ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
+               ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode);
                break;
        case SEC_CALG_AES:
        case SEC_CALG_SM4:
@@ -659,7 +667,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
        }
 
        if (ret) {
-               dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
+               dev_err(dev, "set sec key err!\n");
                return ret;
        }
 
@@ -691,7 +699,7 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
        struct aead_request *aead_req = req->aead_req.aead_req;
        struct sec_cipher_req *c_req = &req->c_req;
        struct sec_qp_ctx *qp_ctx = req->qp_ctx;
-       struct device *dev = SEC_CTX_DEV(ctx);
+       struct device *dev = ctx->dev;
        int copy_size, pbuf_length;
        int req_id = req->req_id;
 
@@ -701,21 +709,14 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
                copy_size = c_req->c_len;
 
        pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
-                               qp_ctx->res[req_id].pbuf,
-                               copy_size);
-
+                                                       qp_ctx->res[req_id].pbuf,
+                                                       copy_size);
        if (unlikely(pbuf_length != copy_size)) {
                dev_err(dev, "copy src data to pbuf error!\n");
                return -EINVAL;
        }
 
        c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
-
-       if (!c_req->c_in_dma) {
-               dev_err(dev, "fail to set pbuffer address!\n");
-               return -ENOMEM;
-       }
-
        c_req->c_out_dma = c_req->c_in_dma;
 
        return 0;
@@ -727,7 +728,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
        struct aead_request *aead_req = req->aead_req.aead_req;
        struct sec_cipher_req *c_req = &req->c_req;
        struct sec_qp_ctx *qp_ctx = req->qp_ctx;
-       struct device *dev = SEC_CTX_DEV(ctx);
+       struct device *dev = ctx->dev;
        int copy_size, pbuf_length;
        int req_id = req->req_id;
 
@@ -739,7 +740,6 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
        pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
                                qp_ctx->res[req_id].pbuf,
                                copy_size);
-
        if (unlikely(pbuf_length != copy_size))
                dev_err(dev, "copy pbuf data to dst error!\n");
 }
@@ -751,7 +751,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
        struct sec_aead_req *a_req = &req->aead_req;
        struct sec_qp_ctx *qp_ctx = req->qp_ctx;
        struct sec_alg_res *res = &qp_ctx->res[req->req_id];
-       struct device *dev = SEC_CTX_DEV(ctx);
+       struct device *dev = ctx->dev;
        int ret;
 
        if (req->use_pbuf) {
@@ -806,7 +806,7 @@ static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
                             struct scatterlist *src, struct scatterlist *dst)
 {
        struct sec_cipher_req *c_req = &req->c_req;
-       struct device *dev = SEC_CTX_DEV(ctx);
+       struct device *dev = ctx->dev;
 
        if (req->use_pbuf) {
                sec_cipher_pbuf_unmap(ctx, req, dst);
@@ -891,6 +891,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 {
        struct sec_ctx *ctx = crypto_aead_ctx(tfm);
        struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+       struct device *dev = ctx->dev;
        struct crypto_authenc_keys keys;
        int ret;
 
@@ -904,13 +905,13 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 
        ret = sec_aead_aes_set_key(c_ctx, &keys);
        if (ret) {
-               dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
+               dev_err(dev, "set sec cipher key err!\n");
                goto bad_key;
        }
 
        ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
        if (ret) {
-               dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
+               dev_err(dev, "set sec auth key err!\n");
                goto bad_key;
        }
 
@@ -1062,7 +1063,7 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
        sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
                                cryptlen - iv_size);
        if (unlikely(sz != iv_size))
-               dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
+               dev_err(req->ctx->dev, "copy output iv error!\n");
 }
 
 static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
@@ -1160,7 +1161,7 @@ static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
 
        ret = sec_skcipher_bd_fill(ctx, req);
        if (unlikely(ret)) {
-               dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n");
+               dev_err(ctx->dev, "skcipher bd fill is error!\n");
                return ret;
        }
 
@@ -1194,7 +1195,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
                                          a_req->assoclen);
 
                if (unlikely(sz != authsize)) {
-                       dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
+                       dev_err(c->dev, "copy out mac err!\n");
                        err = -EINVAL;
                }
        }
@@ -1259,7 +1260,7 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
        ret = ctx->req_op->bd_send(ctx, req);
        if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
                (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
-               dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
+               dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
                goto err_send_req;
        }
 
@@ -1325,7 +1326,7 @@ static int sec_aead_init(struct crypto_aead *tfm)
        ctx->alg_type = SEC_AEAD;
        ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
        if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
-               dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
+               dev_err(ctx->dev, "get error aead iv size!\n");
                return -EINVAL;
        }
 
@@ -1374,7 +1375,7 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
 
        auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
        if (IS_ERR(auth_ctx->hash_tfm)) {
-               dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
+               dev_err(ctx->dev, "aead alloc shash error!\n");
                sec_aead_exit(tfm);
                return PTR_ERR(auth_ctx->hash_tfm);
        }
@@ -1405,10 +1406,40 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
        return sec_aead_ctx_init(tfm, "sha512");
 }
 
+
+/*
+ * Validate an skcipher request's input length against the cipher mode.
+ * Returns 0 when the length is acceptable, -EINVAL otherwise.
+ *
+ * NOTE(review): "ckeck" is a typo for "check"; the name is kept unchanged
+ * because the caller in sec_skcipher_param_check() references it.
+ */
+static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx,
+	struct sec_req *sreq)
+{
+	u32 len = sreq->c_req.sk_req->cryptlen;
+	struct device *dev = ctx->dev;
+
+	switch (ctx->c_ctx.c_mode) {
+	case SEC_CMODE_XTS:
+		/* XTS requires at least one full AES block of input. */
+		if (unlikely(len < AES_BLOCK_SIZE)) {
+			dev_err(dev, "skcipher XTS mode input length error!\n");
+			return -EINVAL;
+		}
+		return 0;
+	case SEC_CMODE_ECB:
+	case SEC_CMODE_CBC:
+		/* ECB/CBC require block-aligned input. */
+		if (unlikely(len & (AES_BLOCK_SIZE - 1))) {
+			dev_err(dev, "skcipher AES input length error!\n");
+			return -EINVAL;
+		}
+		return 0;
+	default:
+		/* Unsupported cipher mode for this request. */
+		return -EINVAL;
+	}
+}
+
 static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 {
        struct skcipher_request *sk_req = sreq->c_req.sk_req;
-       struct device *dev = SEC_CTX_DEV(ctx);
+       struct device *dev = ctx->dev;
        u8 c_alg = ctx->c_ctx.c_alg;
 
        if (unlikely(!sk_req->src || !sk_req->dst)) {
@@ -1429,12 +1460,9 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
                }
                return 0;
        } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
-               if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
-                       dev_err(dev, "skcipher aes input length error!\n");
-                       return -EINVAL;
-               }
-               return 0;
+               return sec_skcipher_cryptlen_ckeck(ctx, sreq);
        }
+
        dev_err(dev, "skcipher algorithm error!\n");
 
        return -EINVAL;
@@ -1531,14 +1559,15 @@ static struct skcipher_alg sec_skciphers[] = {
 
 static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 {
-       u8 c_alg = ctx->c_ctx.c_alg;
        struct aead_request *req = sreq->aead_req.aead_req;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        size_t authsize = crypto_aead_authsize(tfm);
+       struct device *dev = ctx->dev;
+       u8 c_alg = ctx->c_ctx.c_alg;
 
        if (unlikely(!req->src || !req->dst || !req->cryptlen ||
                req->assoclen > SEC_MAX_AAD_LEN)) {
-               dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
+               dev_err(dev, "aead input param error!\n");
                return -EINVAL;
        }
 
@@ -1550,7 +1579,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 
        /* Support AES only */
        if (unlikely(c_alg != SEC_CALG_AES)) {
-               dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
+               dev_err(dev, "aead crypto alg error!\n");
                return -EINVAL;
        }
        if (sreq->c_req.encrypt)
@@ -1559,7 +1588,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
                sreq->c_req.c_len = req->cryptlen - authsize;
 
        if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
-               dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
+               dev_err(dev, "aead crypto length error!\n");
                return -EINVAL;
        }
 
@@ -1634,7 +1663,7 @@ static struct aead_alg sec_aeads[] = {
                     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
 };
 
-int sec_register_to_crypto(void)
+int sec_register_to_crypto(struct hisi_qm *qm)
 {
        int ret;
 
@@ -1651,7 +1680,7 @@ int sec_register_to_crypto(void)
        return ret;
 }
 
-void sec_unregister_from_crypto(void)
+void sec_unregister_from_crypto(struct hisi_qm *qm)
 {
        crypto_unregister_skciphers(sec_skciphers,
                                    ARRAY_SIZE(sec_skciphers));
index b2786e17d8fe20f8ead13da7f20c12d0c09275c4..9c78edac56a4bd8a6bd5e501ef3677f24ec4950b 100644 (file)
@@ -64,7 +64,6 @@ enum sec_addr_type {
 };
 
 struct sec_sqe_type2 {
-
        /*
         * mac_len: 0~4 bits
         * a_key_len: 5~10 bits
@@ -120,7 +119,6 @@ struct sec_sqe_type2 {
        /* c_pad_len_field: 0~1 bits */
        __le16 c_pad_len_field;
 
-
        __le64 long_a_data_len;
        __le64 a_ivin_addr;
        __le64 a_key_addr;
@@ -211,6 +209,6 @@ struct sec_sqe {
        struct sec_sqe_type2 type2;
 };
 
-int sec_register_to_crypto(void);
-void sec_unregister_from_crypto(void);
+int sec_register_to_crypto(struct hisi_qm *qm);
+void sec_unregister_from_crypto(struct hisi_qm *qm);
 #endif
index dc68ba76f65e585bddfb8d4301fcf7f3bd54e165..6f0062d4408c35dcfd0a362f6ba63a9867379be0 100644 (file)
@@ -19,7 +19,6 @@
 
 #define SEC_VF_NUM                     63
 #define SEC_QUEUE_NUM_V1               4096
-#define SEC_QUEUE_NUM_V2               1024
 #define SEC_PF_PCI_DEVICE_ID           0xa255
 #define SEC_VF_PCI_DEVICE_ID           0xa256
 
 #define SEC_CTX_Q_NUM_MAX              32
 
 #define SEC_CTRL_CNT_CLR_CE            0x301120
-#define SEC_CTRL_CNT_CLR_CE_BIT                BIT(0)
-#define SEC_ENGINE_PF_CFG_OFF          0x300000
-#define SEC_ACC_COMMON_REG_OFF         0x1000
+#define SEC_CTRL_CNT_CLR_CE_BIT        BIT(0)
 #define SEC_CORE_INT_SOURCE            0x301010
 #define SEC_CORE_INT_MASK              0x301000
 #define SEC_CORE_INT_STATUS            0x301008
 #define SEC_CORE_SRAM_ECC_ERR_INFO     0x301C14
-#define SEC_ECC_NUM(err)                       (((err) >> 16) & 0xFF)
-#define SEC_ECC_ADDR(err)                      ((err) >> 0)
+#define SEC_ECC_NUM                    16
+#define SEC_ECC_MASH                   0xFF
 #define SEC_CORE_INT_DISABLE           0x0
-#define SEC_CORE_INT_ENABLE            0x1ff
-#define SEC_CORE_INT_CLEAR             0x1ff
+#define SEC_CORE_INT_ENABLE            0x7c1ff
+#define SEC_CORE_INT_CLEAR             0x7c1ff
 #define SEC_SAA_ENABLE                 0x17f
 
 #define SEC_RAS_CE_REG                 0x301050
 #define SEC_RAS_NFE_REG                        0x301058
 #define SEC_RAS_CE_ENB_MSK             0x88
 #define SEC_RAS_FE_ENB_MSK             0x0
-#define SEC_RAS_NFE_ENB_MSK            0x177
-#define SEC_RAS_DISABLE                        0x0
-#define SEC_MEM_START_INIT_REG         0x0100
-#define SEC_MEM_INIT_DONE_REG          0x0104
+#define SEC_RAS_NFE_ENB_MSK            0x7c177
+#define SEC_RAS_DISABLE                0x0
+#define SEC_MEM_START_INIT_REG 0x301100
+#define SEC_MEM_INIT_DONE_REG          0x301104
 
-#define SEC_CONTROL_REG                        0x0200
+#define SEC_CONTROL_REG                0x301200
 #define SEC_TRNG_EN_SHIFT              8
 #define SEC_CLK_GATE_ENABLE            BIT(3)
 #define SEC_CLK_GATE_DISABLE           (~BIT(3))
 #define SEC_AXI_SHUTDOWN_ENABLE        BIT(12)
 #define SEC_AXI_SHUTDOWN_DISABLE       0xFFFFEFFF
 
-#define SEC_INTERFACE_USER_CTRL0_REG   0x0220
-#define SEC_INTERFACE_USER_CTRL1_REG   0x0224
-#define SEC_SAA_EN_REG                                 0x0270
-#define SEC_BD_ERR_CHK_EN_REG0         0x0380
-#define SEC_BD_ERR_CHK_EN_REG1         0x0384
-#define SEC_BD_ERR_CHK_EN_REG3         0x038c
+#define SEC_INTERFACE_USER_CTRL0_REG   0x301220
+#define SEC_INTERFACE_USER_CTRL1_REG   0x301224
+#define SEC_SAA_EN_REG                 0x301270
+#define SEC_BD_ERR_CHK_EN_REG0         0x301380
+#define SEC_BD_ERR_CHK_EN_REG1         0x301384
+#define SEC_BD_ERR_CHK_EN_REG3         0x30138c
 
 #define SEC_USER0_SMMU_NORMAL          (BIT(23) | BIT(15))
 #define SEC_USER1_SMMU_NORMAL          (BIT(31) | BIT(23) | BIT(15) | BIT(7))
@@ -95,9 +92,6 @@
 #define SEC_SQE_MASK_OFFSET            64
 #define SEC_SQE_MASK_LEN               48
 
-#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
-                            SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
-
 struct sec_hw_error {
        u32 int_msk;
        const char *msg;
@@ -117,20 +111,66 @@ static struct hisi_qm_list sec_devices = {
 };
 
 static const struct sec_hw_error sec_hw_errors[] = {
-       {.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
-       {.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
-       {.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
-       {.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
-       {.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
-       {.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
-       {.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
-       {.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
-       {.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
-       { /* sentinel */ }
+       {
+               .int_msk = BIT(0),
+               .msg = "sec_axi_rresp_err_rint"
+       },
+       {
+               .int_msk = BIT(1),
+               .msg = "sec_axi_bresp_err_rint"
+       },
+       {
+               .int_msk = BIT(2),
+               .msg = "sec_ecc_2bit_err_rint"
+       },
+       {
+               .int_msk = BIT(3),
+               .msg = "sec_ecc_1bit_err_rint"
+       },
+       {
+               .int_msk = BIT(4),
+               .msg = "sec_req_trng_timeout_rint"
+       },
+       {
+               .int_msk = BIT(5),
+               .msg = "sec_fsm_hbeat_rint"
+       },
+       {
+               .int_msk = BIT(6),
+               .msg = "sec_channel_req_rng_timeout_rint"
+       },
+       {
+               .int_msk = BIT(7),
+               .msg = "sec_bd_err_rint"
+       },
+       {
+               .int_msk = BIT(8),
+               .msg = "sec_chain_buff_err_rint"
+       },
+       {
+               .int_msk = BIT(14),
+               .msg = "sec_no_secure_access"
+       },
+       {
+               .int_msk = BIT(15),
+               .msg = "sec_wrapping_key_auth_err"
+       },
+       {
+               .int_msk = BIT(16),
+               .msg = "sec_km_key_crc_fail"
+       },
+       {
+               .int_msk = BIT(17),
+               .msg = "sec_axi_poison_err"
+       },
+       {
+               .int_msk = BIT(18),
+               .msg = "sec_sva_err"
+       },
+       {}
 };
 
 static const char * const sec_dbg_file_name[] = {
-       [SEC_CURRENT_QM] = "current_qm",
        [SEC_CLEAR_ENABLE] = "clear_enable",
 };
 
@@ -277,9 +317,7 @@ static u8 sec_get_endian(struct hisi_qm *qm)
                                    "cannot access a register in VF!\n");
                return SEC_LE;
        }
-       reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
-                           SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);
-
+       reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
        /* BD little endian mode */
        if (!(reg & BIT(0)))
                return SEC_LE;
@@ -299,13 +337,13 @@ static int sec_engine_init(struct hisi_qm *qm)
        u32 reg;
 
        /* disable clock gate control */
-       reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+       reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
        reg &= SEC_CLK_GATE_DISABLE;
-       writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+       writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
 
-       writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));
+       writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);
 
-       ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
+       ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
                                         reg, reg & 0x1, SEC_DELAY_10_US,
                                         SEC_POLL_TIMEOUT_US);
        if (ret) {
@@ -313,40 +351,40 @@ static int sec_engine_init(struct hisi_qm *qm)
                return ret;
        }
 
-       reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+       reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
        reg |= (0x1 << SEC_TRNG_EN_SHIFT);
-       writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+       writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
 
-       reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+       reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
        reg |= SEC_USER0_SMMU_NORMAL;
-       writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+       writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
 
-       reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+       reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
        reg &= SEC_USER1_SMMU_MASK;
        if (qm->use_sva && qm->ver == QM_HW_V2)
                reg |= SEC_USER1_SMMU_SVA;
        else
                reg |= SEC_USER1_SMMU_NORMAL;
-       writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+       writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
 
        writel(SEC_SINGLE_PORT_MAX_TRANS,
               qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
 
-       writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG));
+       writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
 
        /* Enable sm4 extra mode, as ctr/ecb */
        writel_relaxed(SEC_BD_ERR_CHK_EN0,
-                      SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0));
+                      qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
        /* Enable sm4 xts mode multiple iv */
        writel_relaxed(SEC_BD_ERR_CHK_EN1,
-                      SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
+                      qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
        writel_relaxed(SEC_BD_ERR_CHK_EN3,
-                      SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3));
+                      qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
 
        /* config endian */
-       reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+       reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
        reg |= sec_get_endian(qm);
-       writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+       writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
 
        return 0;
 }
@@ -381,10 +419,6 @@ static void sec_debug_regs_clear(struct hisi_qm *qm)
 {
        int i;
 
-       /* clear current_qm */
-       writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
-       writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
        /* clear sec dfx regs */
        writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
        for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
@@ -406,7 +440,7 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
                return;
        }
 
-       val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+       val = readl(qm->io_base + SEC_CONTROL_REG);
 
        /* clear SEC hw error source if having */
        writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
@@ -422,14 +456,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
        /* enable SEC block master OOO when m-bit error occur */
        val = val | SEC_AXI_SHUTDOWN_ENABLE;
 
-       writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
+       writel(val, qm->io_base + SEC_CONTROL_REG);
 }
 
 static void sec_hw_error_disable(struct hisi_qm *qm)
 {
        u32 val;
 
-       val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+       val = readl(qm->io_base + SEC_CONTROL_REG);
 
        /* disable RAS int */
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
@@ -442,51 +476,7 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
        /* disable SEC block master OOO when m-bit error occur */
        val = val & SEC_AXI_SHUTDOWN_DISABLE;
 
-       writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
-}
-
-static u32 sec_current_qm_read(struct sec_debug_file *file)
-{
-       struct hisi_qm *qm = file->qm;
-
-       return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
-{
-       struct hisi_qm *qm = file->qm;
-       u32 vfq_num;
-       u32 tmp;
-
-       if (val > qm->vfs_num)
-               return -EINVAL;
-
-       /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
-       if (!val) {
-               qm->debug.curr_qm_qp_num = qm->qp_num;
-       } else {
-               vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
-
-               if (val == qm->vfs_num)
-                       qm->debug.curr_qm_qp_num =
-                               qm->ctrl_qp_num - qm->qp_num -
-                               (qm->vfs_num - 1) * vfq_num;
-               else
-                       qm->debug.curr_qm_qp_num = vfq_num;
-       }
-
-       writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
-       writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
-       tmp = val |
-             (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
-       writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
-       tmp = val |
-             (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
-       writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
-       return 0;
+       writel(val, qm->io_base + SEC_CONTROL_REG);
 }
 
 static u32 sec_clear_enable_read(struct sec_debug_file *file)
@@ -523,9 +513,6 @@ static ssize_t sec_debug_read(struct file *filp, char __user *buf,
        spin_lock_irq(&file->lock);
 
        switch (file->index) {
-       case SEC_CURRENT_QM:
-               val = sec_current_qm_read(file);
-               break;
        case SEC_CLEAR_ENABLE:
                val = sec_clear_enable_read(file);
                break;
@@ -566,11 +553,6 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
        spin_lock_irq(&file->lock);
 
        switch (file->index) {
-       case SEC_CURRENT_QM:
-               ret = sec_current_qm_write(file, val);
-               if (ret)
-                       goto err_input;
-               break;
        case SEC_CLEAR_ENABLE:
                ret = sec_clear_enable_write(file, val);
                if (ret)
@@ -655,7 +637,7 @@ static int sec_debug_init(struct hisi_qm *qm)
        int i;
 
        if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
-               for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
+               for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
                        spin_lock_init(&sec->debug.files[i].lock);
                        sec->debug.files[i].index = i;
                        sec->debug.files[i].qm = qm;
@@ -712,7 +694,8 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
                                err_val = readl(qm->io_base +
                                                SEC_CORE_SRAM_ECC_ERR_INFO);
                                dev_err(dev, "multi ecc sram num=0x%x\n",
-                                               SEC_ECC_NUM(err_val));
+                                               ((err_val) >> SEC_ECC_NUM) &
+                                               SEC_ECC_MASH);
                        }
                }
                errs++;
@@ -733,9 +716,23 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
 {
        u32 val;
 
-       val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
-       writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
-       writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
+       val = readl(qm->io_base + SEC_CONTROL_REG);
+       writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
+       writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
+}
+
+static void sec_err_info_init(struct hisi_qm *qm)
+{
+       struct hisi_qm_err_info *err_info = &qm->err_info;
+
+       err_info->ce = QM_BASE_CE;
+       err_info->fe = 0;
+       err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
+       err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
+       err_info->msi_wr_port = BIT(0);
+       err_info->acpi_rst = "SRST";
+       err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
+                       QM_ACC_WB_NOT_READY_TIMEOUT;
 }
 
 static const struct hisi_qm_err_ini sec_err_ini = {
@@ -746,16 +743,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
        .clear_dev_hw_err_status = sec_clear_hw_err_status,
        .log_dev_hw_err         = sec_log_hw_error,
        .open_axi_master_ooo    = sec_open_axi_master_ooo,
-       .err_info               = {
-               .ce             = QM_BASE_CE,
-               .nfe            = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
-                                 QM_ACC_WB_NOT_READY_TIMEOUT,
-               .fe             = 0,
-               .ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
-               .dev_ce_mask    = SEC_RAS_CE_ENB_MSK,
-               .msi_wr_port    = BIT(0),
-               .acpi_rst       = "SRST",
-       }
+       .err_info_init          = sec_err_info_init,
 };
 
 static int sec_pf_probe_init(struct sec_dev *sec)
@@ -763,12 +751,8 @@ static int sec_pf_probe_init(struct sec_dev *sec)
        struct hisi_qm *qm = &sec->qm;
        int ret;
 
-       if (qm->ver == QM_HW_V1)
-               qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
-       else
-               qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
-
        qm->err_ini = &sec_err_ini;
+       qm->err_ini->err_info_init(qm);
 
        ret = sec_set_user_domain_and_cache(qm);
        if (ret)
@@ -786,7 +770,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 
        qm->pdev = pdev;
        qm->ver = pdev->revision;
-       qm->algs = "cipher\ndigest\naead\n";
+       qm->algs = "cipher\ndigest\naead";
        qm->mode = uacce_mode;
        qm->sqe_size = SEC_SQE_SIZE;
        qm->dev_name = sec_name;
@@ -909,10 +893,15 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (ret)
                pci_warn(pdev, "Failed to init debugfs!\n");
 
-       ret = hisi_qm_alg_register(qm, &sec_devices);
-       if (ret < 0) {
-               pr_err("Failed to register driver to crypto.\n");
-               goto err_qm_stop;
+       if (qm->qp_num >= ctx_q_num) {
+               ret = hisi_qm_alg_register(qm, &sec_devices);
+               if (ret < 0) {
+                       pr_err("Failed to register driver to crypto.\n");
+                       goto err_qm_stop;
+               }
+       } else {
+               pci_warn(qm->pdev,
+                       "Failed to use kernel mode, qp not enough!\n");
        }
 
        if (qm->uacce) {
@@ -948,7 +937,9 @@ static void sec_remove(struct pci_dev *pdev)
        struct hisi_qm *qm = pci_get_drvdata(pdev);
 
        hisi_qm_wait_task_finish(qm, &sec_devices);
-       hisi_qm_alg_unregister(qm, &sec_devices);
+       if (qm->qp_num >= ctx_q_num)
+               hisi_qm_alg_unregister(qm, &sec_devices);
+
        if (qm->fun_type == QM_HW_PF && qm->vfs_num)
                hisi_qm_sriov_disable(pdev, true);
 
index 3bff6394acafdbdad07c8a92d26d3a618bc19faf..057273769f264eebe026942718ac26de93bedf83 100644 (file)
@@ -56,7 +56,7 @@ struct hisi_acc_sgl_pool {
 struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
                                                   u32 count, u32 sge_nr)
 {
-       u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl = 0;
+       u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl;
        struct hisi_acc_sgl_pool *pool;
        struct mem_block *block;
        u32 i, j;
@@ -66,6 +66,11 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
 
        sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
                   sizeof(struct hisi_acc_hw_sgl);
+
+       /*
+        * the pool may allocate a block of memory of size PAGE_SIZE * 2^(MAX_ORDER - 1),
+        * block size may exceed 2^31 on ia64, so the max of block size is 2^31
+        */
        block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ?
                           PAGE_SHIFT + MAX_ORDER - 1 : 31);
        sgl_num_per_block = block_size / sgl_size;
@@ -85,8 +90,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
                block[i].sgl = dma_alloc_coherent(dev, block_size,
                                                  &block[i].sgl_dma,
                                                  GFP_KERNEL);
-               if (!block[i].sgl)
+               if (!block[i].sgl) {
+                       dev_err(dev, "Fail to allocate hw SG buffer!\n");
                        goto err_free_mem;
+               }
 
                block[i].size = block_size;
        }
@@ -95,8 +102,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
                block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size,
                                                  &block[i].sgl_dma,
                                                  GFP_KERNEL);
-               if (!block[i].sgl)
+               if (!block[i].sgl) {
+                       dev_err(dev, "Fail to allocate remained hw SG buffer!\n");
                        goto err_free_mem;
+               }
 
                block[i].size = remain_sgl * sgl_size;
        }
@@ -167,6 +176,7 @@ static void sg_map_to_hw_sg(struct scatterlist *sgl,
 {
        hw_sge->buf = sg_dma_address(sgl);
        hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
+       hw_sge->page_ctrl = sg_virt(sgl);
 }
 
 static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
@@ -182,6 +192,18 @@ static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
        hw_sgl->entry_sum_in_chain = cpu_to_le16(sum);
 }
 
+static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
+{
+       struct acc_hw_sge *hw_sge = hw_sgl->sge_entries;
+       int i;
+
+       for (i = 0; i < le16_to_cpu(hw_sgl->entry_sum_in_sgl); i++) {
+               hw_sge[i].page_ctrl = NULL;
+               hw_sge[i].buf = 0;
+               hw_sge[i].len = 0;
+       }
+}
+
 /**
  * hisi_acc_sg_buf_map_to_hw_sgl - Map a scatterlist to a hw sgl.
  * @dev: The device which hw sgl belongs to.
@@ -211,16 +233,19 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
        sg_n = sg_nents(sgl);
 
        sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
-       if (!sg_n_mapped)
+       if (!sg_n_mapped) {
+               dev_err(dev, "DMA mapping for SG error!\n");
                return ERR_PTR(-EINVAL);
+       }
 
        if (sg_n_mapped > pool->sge_nr) {
-               dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+               dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n");
                return ERR_PTR(-EINVAL);
        }
 
        curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
        if (IS_ERR(curr_hw_sgl)) {
+               dev_err(dev, "Get SGL error!\n");
                dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
                return ERR_PTR(-ENOMEM);
 
@@ -256,7 +281,7 @@ void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
                return;
 
        dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
-
+       clear_hw_sgl_sge(hw_sgl);
        hw_sgl->entry_sum_in_chain = 0;
        hw_sgl->entry_sum_in_sgl = 0;
        hw_sgl->entry_length_in_sgl = 0;
index 29712685498a78b35f7f1a5599ccbc17505d6eef..829f2caf0f67fd675b43b980fa0db4ab22b79d68 100644 (file)
@@ -18,6 +18,8 @@
 #define HISI_TRNG_REG          0x00F0
 #define HISI_TRNG_BYTES                4
 #define HISI_TRNG_QUALITY      512
+#define HISI_TRNG_VERSION      0x01B8
+#define HISI_TRNG_VER_V1       GENMASK(31, 0)
 #define SLEEP_US               10
 #define TIMEOUT_US             10000
 #define SW_DRBG_NUM_SHIFT      2
@@ -50,6 +52,7 @@ struct hisi_trng {
        struct hisi_trng_list *trng_list;
        struct list_head list;
        struct hwrng rng;
+       u32 ver;
        bool is_used;
        struct mutex mutex;
 };
@@ -260,6 +263,7 @@ static int hisi_trng_probe(struct platform_device *pdev)
                return PTR_ERR(trng->base);
 
        trng->is_used = false;
+       trng->ver = readl(trng->base + HISI_TRNG_VERSION);
        if (!trng_devices.is_init) {
                INIT_LIST_HEAD(&trng_devices.list);
                mutex_init(&trng_devices.lock);
@@ -267,7 +271,8 @@ static int hisi_trng_probe(struct platform_device *pdev)
        }
 
        hisi_trng_add_to_list(trng);
-       if (atomic_inc_return(&trng_active_devs) == 1) {
+       if (trng->ver != HISI_TRNG_VER_V1 &&
+           atomic_inc_return(&trng_active_devs) == 1) {
                ret = crypto_register_rng(&hisi_trng_alg);
                if (ret) {
                        dev_err(&pdev->dev,
@@ -289,7 +294,8 @@ static int hisi_trng_probe(struct platform_device *pdev)
        return ret;
 
 err_crypto_unregister:
-       if (atomic_dec_return(&trng_active_devs) == 0)
+       if (trng->ver != HISI_TRNG_VER_V1 &&
+           atomic_dec_return(&trng_active_devs) == 0)
                crypto_unregister_rng(&hisi_trng_alg);
 
 err_remove_from_list:
@@ -305,7 +311,8 @@ static int hisi_trng_remove(struct platform_device *pdev)
        while (hisi_trng_del_from_list(trng))
                ;
 
-       if (atomic_dec_return(&trng_active_devs) == 0)
+       if (trng->ver != HISI_TRNG_VER_V1 &&
+           atomic_dec_return(&trng_active_devs) == 0)
                crypto_unregister_rng(&hisi_trng_alg);
 
        return 0;
index 92397f993e237b941d7cfb99febf494d36662fbb..517fdbdff3ea476c81c327b2295638b1b646b1d8 100644 (file)
@@ -33,35 +33,55 @@ struct hisi_zip_sqe {
        u32 consumed;
        u32 produced;
        u32 comp_data_length;
+       /*
+        * status: 0~7 bits
+        * rsvd: 8~31 bits
+        */
        u32 dw3;
        u32 input_data_length;
-       u32 lba_l;
-       u32 lba_h;
+       u32 dw5;
+       u32 dw6;
+       /*
+        * in_sge_data_offset: 0~23 bits
+        * rsvd: 24~27 bits
+        * sqe_type: 29~31 bits
+        */
        u32 dw7;
+       /*
+        * out_sge_data_offset: 0~23 bits
+        * rsvd: 24~31 bits
+        */
        u32 dw8;
+       /*
+        * request_type: 0~7 bits
+        * buffer_type: 8~11 bits
+        * rsvd: 13~31 bits
+        */
        u32 dw9;
        u32 dw10;
-       u32 priv_info;
+       u32 dw11;
        u32 dw12;
-       u32 tag;
+       /* tag: in sqe type 0 */
+       u32 dw13;
        u32 dest_avail_out;
-       u32 rsvd0;
-       u32 comp_head_addr_l;
-       u32 comp_head_addr_h;
+       u32 dw15;
+       u32 dw16;
+       u32 dw17;
        u32 source_addr_l;
        u32 source_addr_h;
        u32 dest_addr_l;
        u32 dest_addr_h;
-       u32 stream_ctx_addr_l;
-       u32 stream_ctx_addr_h;
-       u32 cipher_key1_addr_l;
-       u32 cipher_key1_addr_h;
-       u32 cipher_key2_addr_l;
-       u32 cipher_key2_addr_h;
+       u32 dw22;
+       u32 dw23;
+       u32 dw24;
+       u32 dw25;
+       /* tag: in sqe type 3 */
+       u32 dw26;
+       u32 dw27;
        u32 rsvd1[4];
 };
 
 int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node);
-int hisi_zip_register_to_crypto(void);
-void hisi_zip_unregister_from_crypto(void);
+int hisi_zip_register_to_crypto(struct hisi_qm *qm);
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
 #endif
index 08b4660b014c6f24b1689865956e55e32059ba62..9520a4113c81e5fba635b54b48cc2cc0b87362cb 100644 (file)
@@ -10,6 +10,7 @@
 #define HZIP_BD_STATUS_M                       GENMASK(7, 0)
 /* hisi_zip_sqe dw7 */
 #define HZIP_IN_SGE_DATA_OFFSET_M              GENMASK(23, 0)
+#define HZIP_SQE_TYPE_M                                GENMASK(31, 28)
 /* hisi_zip_sqe dw8 */
 #define HZIP_OUT_SGE_DATA_OFFSET_M             GENMASK(23, 0)
 /* hisi_zip_sqe dw9 */
@@ -91,8 +92,22 @@ struct hisi_zip_qp_ctx {
        struct hisi_zip_ctx *ctx;
 };
 
+struct hisi_zip_sqe_ops {
+       u8 sqe_type;
+       void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
+       void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
+       void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
+       void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
+       void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
+       void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
+       u32 (*get_tag)(struct hisi_zip_sqe *sqe);
+       u32 (*get_status)(struct hisi_zip_sqe *sqe);
+       u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
+};
+
 struct hisi_zip_ctx {
        struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
+       const struct hisi_zip_sqe_ops *ops;
 };
 
 static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
@@ -119,35 +134,367 @@ static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
 module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
 MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
 
-static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
+static u16 get_extra_field_size(const u8 *start)
+{
+       return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
+}
+
+static u32 get_name_field_size(const u8 *start)
+{
+       return strlen(start) + 1;
+}
+
+static u32 get_comment_field_size(const u8 *start)
+{
+       return strlen(start) + 1;
+}
+
+static u32 __get_gzip_head_size(const u8 *src)
+{
+       u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT);
+       u32 size = GZIP_HEAD_FEXTRA_SHIFT;
+
+       if (head_flg & GZIP_HEAD_FEXTRA_BIT)
+               size += get_extra_field_size(src + size);
+       if (head_flg & GZIP_HEAD_FNAME_BIT)
+               size += get_name_field_size(src + size);
+       if (head_flg & GZIP_HEAD_FCOMMENT_BIT)
+               size += get_comment_field_size(src + size);
+       if (head_flg & GZIP_HEAD_FHCRC_BIT)
+               size += GZIP_HEAD_FHCRC_SIZE;
+
+       return size;
+}
+
+static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
+{
+       char buf[HZIP_GZIP_HEAD_BUF];
+
+       sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf));
+
+       return __get_gzip_head_size(buf);
+}
+
+static int add_comp_head(struct scatterlist *dst, u8 req_type)
+{
+       int head_size = TO_HEAD_SIZE(req_type);
+       const u8 *head = TO_HEAD(req_type);
+       int ret;
+
+       ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
+       if (ret != head_size) {
+               pr_err("the head size of buffer is wrong (%d)!\n", ret);
+               return -ENOMEM;
+       }
+
+       return head_size;
+}
+
+static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
+{
+       if (!acomp_req->src || !acomp_req->slen)
+               return -EINVAL;
+
+       if (req_type == HZIP_ALG_TYPE_GZIP &&
+           acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)
+               return -EINVAL;
+
+       switch (req_type) {
+       case HZIP_ALG_TYPE_ZLIB:
+               return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
+       case HZIP_ALG_TYPE_GZIP:
+               return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
+       default:
+               pr_err("request type does not support!\n");
+               return -EINVAL;
+       }
+}
+
+static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
+                                               struct hisi_zip_qp_ctx *qp_ctx,
+                                               size_t head_size, bool is_comp)
+{
+       struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+       struct hisi_zip_req *q = req_q->q;
+       struct hisi_zip_req *req_cache;
+       int req_id;
+
+       write_lock(&req_q->req_lock);
+
+       req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
+       if (req_id >= req_q->size) {
+               write_unlock(&req_q->req_lock);
+               dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
+               return ERR_PTR(-EAGAIN);
+       }
+       set_bit(req_id, req_q->req_bitmap);
+
+       req_cache = q + req_id;
+       req_cache->req_id = req_id;
+       req_cache->req = req;
+
+       if (is_comp) {
+               req_cache->sskip = 0;
+               req_cache->dskip = head_size;
+       } else {
+               req_cache->sskip = head_size;
+               req_cache->dskip = 0;
+       }
+
+       write_unlock(&req_q->req_lock);
+
+       return req_cache;
+}
+
+static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
+                               struct hisi_zip_req *req)
+{
+       struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+
+       write_lock(&req_q->req_lock);
+       clear_bit(req->req_id, req_q->req_bitmap);
+       memset(req, 0, sizeof(struct hisi_zip_req));
+       write_unlock(&req_q->req_lock);
+}
+
+static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+       sqe->source_addr_l = lower_32_bits(req->dma_src);
+       sqe->source_addr_h = upper_32_bits(req->dma_src);
+       sqe->dest_addr_l = lower_32_bits(req->dma_dst);
+       sqe->dest_addr_h = upper_32_bits(req->dma_dst);
+}
+
+static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+       struct acomp_req *a_req = req->req;
+
+       sqe->input_data_length = a_req->slen - req->sskip;
+       sqe->dest_avail_out = a_req->dlen - req->dskip;
+       sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, req->sskip);
+       sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, req->dskip);
+}
+
+static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
 {
        u32 val;
 
-       val = (sqe->dw9) & ~HZIP_BUF_TYPE_M;
+       val = sqe->dw9 & ~HZIP_BUF_TYPE_M;
        val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
        sqe->dw9 = val;
 }
 
-static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
+static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
+{
+       u32 val;
+
+       val = sqe->dw9 & ~HZIP_REQ_TYPE_M;
+       val |= FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
+       sqe->dw9 = val;
+}
+
+static void hisi_zip_fill_tag_v1(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+       sqe->dw13 = req->req_id;
+}
+
+static void hisi_zip_fill_tag_v2(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+       sqe->dw26 = req->req_id;
+}
+
+static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
 {
-       sqe->tag = tag;
+       u32 val;
+
+       val = sqe->dw7 & ~HZIP_SQE_TYPE_M;
+       val |= FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
+       sqe->dw7 = val;
 }
 
-static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
-                             dma_addr_t s_addr, dma_addr_t d_addr, u32 slen,
-                             u32 dlen, u32 sskip, u32 dskip)
+static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
+                             u8 req_type, struct hisi_zip_req *req)
 {
+       const struct hisi_zip_sqe_ops *ops = ctx->ops;
+
        memset(sqe, 0, sizeof(struct hisi_zip_sqe));
 
-       sqe->input_data_length = slen - sskip;
-       sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip);
-       sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip);
-       sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
-       sqe->dest_avail_out = dlen - dskip;
-       sqe->source_addr_l = lower_32_bits(s_addr);
-       sqe->source_addr_h = upper_32_bits(s_addr);
-       sqe->dest_addr_l = lower_32_bits(d_addr);
-       sqe->dest_addr_h = upper_32_bits(d_addr);
+       ops->fill_addr(sqe, req);
+       ops->fill_buf_size(sqe, req);
+       ops->fill_buf_type(sqe, HZIP_SGL);
+       ops->fill_req_type(sqe, req_type);
+       ops->fill_tag(sqe, req);
+       ops->fill_sqe_type(sqe, ops->sqe_type);
+}
+
+static int hisi_zip_do_work(struct hisi_zip_req *req,
+                           struct hisi_zip_qp_ctx *qp_ctx)
+{
+       struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
+       struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+       struct acomp_req *a_req = req->req;
+       struct hisi_qp *qp = qp_ctx->qp;
+       struct device *dev = &qp->qm->pdev->dev;
+       struct hisi_zip_sqe zip_sqe;
+       int ret;
+
+       if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
+               return -EINVAL;
+
+       req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
+                                                   req->req_id << 1, &req->dma_src);
+       if (IS_ERR(req->hw_src)) {
+               dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
+                       PTR_ERR(req->hw_src));
+               return PTR_ERR(req->hw_src);
+       }
+
+       req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
+                                                   (req->req_id << 1) + 1,
+                                                   &req->dma_dst);
+       if (IS_ERR(req->hw_dst)) {
+               ret = PTR_ERR(req->hw_dst);
+               dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
+                       ret);
+               goto err_unmap_input;
+       }
+
+       hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req);
+
+       /* send command to start a task */
+       atomic64_inc(&dfx->send_cnt);
+       ret = hisi_qp_send(qp, &zip_sqe);
+       if (ret < 0) {
+               atomic64_inc(&dfx->send_busy_cnt);
+               ret = -EAGAIN;
+               dev_dbg_ratelimited(dev, "failed to send request!\n");
+               goto err_unmap_output;
+       }
+
+       return -EINPROGRESS;
+
+err_unmap_output:
+       hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
+err_unmap_input:
+       hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
+       return ret;
+}
+
+static u32 hisi_zip_get_tag_v1(struct hisi_zip_sqe *sqe)
+{
+       return sqe->dw13;
+}
+
+static u32 hisi_zip_get_tag_v2(struct hisi_zip_sqe *sqe)
+{
+       return sqe->dw26;
+}
+
+static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
+{
+       return sqe->dw3 & HZIP_BD_STATUS_M;
+}
+
+static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
+{
+       return sqe->produced;
+}
+
+static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
+{
+       struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
+       const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
+       struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+       struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+       struct device *dev = &qp->qm->pdev->dev;
+       struct hisi_zip_sqe *sqe = data;
+       u32 tag = ops->get_tag(sqe);
+       struct hisi_zip_req *req = req_q->q + tag;
+       struct acomp_req *acomp_req = req->req;
+       u32 status, dlen, head_size;
+       int err = 0;
+
+       atomic64_inc(&dfx->recv_cnt);
+       status = ops->get_status(sqe);
+       if (status != 0 && status != HZIP_NC_ERR) {
+               dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
+                       (qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
+                       sqe->produced);
+               atomic64_inc(&dfx->err_bd_cnt);
+               err = -EIO;
+       }
+
+       dlen = ops->get_dstlen(sqe);
+
+       hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
+       hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
+
+       head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
+       acomp_req->dlen = dlen + head_size;
+
+       if (acomp_req->base.complete)
+               acomp_request_complete(acomp_req, err);
+
+       hisi_zip_remove_req(qp_ctx, req);
+}
+
+static int hisi_zip_acompress(struct acomp_req *acomp_req)
+{
+       struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
+       struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
+       struct device *dev = &qp_ctx->qp->qm->pdev->dev;
+       struct hisi_zip_req *req;
+       int head_size;
+       int ret;
+
+       /* let's output compression head now */
+       head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
+       if (head_size < 0) {
+               dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
+                                   head_size);
+               return head_size;
+       }
+
+       req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
+
+       ret = hisi_zip_do_work(req, qp_ctx);
+       if (ret != -EINPROGRESS) {
+               dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
+               hisi_zip_remove_req(qp_ctx, req);
+       }
+
+       return ret;
+}
+
+static int hisi_zip_adecompress(struct acomp_req *acomp_req)
+{
+       struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
+       struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
+       struct device *dev = &qp_ctx->qp->qm->pdev->dev;
+       struct hisi_zip_req *req;
+       int head_size, ret;
+
+       head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
+       if (head_size < 0) {
+               dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
+                                   head_size);
+               return head_size;
+       }
+
+       req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
+
+       ret = hisi_zip_do_work(req, qp_ctx);
+       if (ret != -EINPROGRESS) {
+               dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
+                                    ret);
+               hisi_zip_remove_req(qp_ctx, req);
+       }
+
+       return ret;
 }
 
 static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
@@ -177,9 +524,36 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
        hisi_qm_release_qp(ctx->qp);
 }
 
+static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = {
+       .sqe_type               = 0,
+       .fill_addr              = hisi_zip_fill_addr,
+       .fill_buf_size          = hisi_zip_fill_buf_size,
+       .fill_buf_type          = hisi_zip_fill_buf_type,
+       .fill_req_type          = hisi_zip_fill_req_type,
+       .fill_tag               = hisi_zip_fill_tag_v1,
+       .fill_sqe_type          = hisi_zip_fill_sqe_type,
+       .get_tag                = hisi_zip_get_tag_v1,
+       .get_status             = hisi_zip_get_status,
+       .get_dstlen             = hisi_zip_get_dstlen,
+};
+
+static const struct hisi_zip_sqe_ops hisi_zip_ops_v2 = {
+       .sqe_type               = 0x3,
+       .fill_addr              = hisi_zip_fill_addr,
+       .fill_buf_size          = hisi_zip_fill_buf_size,
+       .fill_buf_type          = hisi_zip_fill_buf_type,
+       .fill_req_type          = hisi_zip_fill_req_type,
+       .fill_tag               = hisi_zip_fill_tag_v2,
+       .fill_sqe_type          = hisi_zip_fill_sqe_type,
+       .get_tag                = hisi_zip_get_tag_v2,
+       .get_status             = hisi_zip_get_status,
+       .get_dstlen             = hisi_zip_get_dstlen,
+};
+
 static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
 {
        struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
+       struct hisi_zip_qp_ctx *qp_ctx;
        struct hisi_zip *hisi_zip;
        int ret, i, j;
 
@@ -193,8 +567,9 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
 
        for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
                /* alg_type = 0 for compress, 1 for decompress in hw sqe */
-               ret = hisi_zip_start_qp(qps[i], &hisi_zip_ctx->qp_ctx[i], i,
-                                       req_type);
+               qp_ctx = &hisi_zip_ctx->qp_ctx[i];
+               qp_ctx->ctx = hisi_zip_ctx;
+               ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type);
                if (ret) {
                        for (j = i - 1; j >= 0; j--)
                                hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
@@ -203,50 +578,23 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
                        return ret;
                }
 
-               hisi_zip_ctx->qp_ctx[i].zip_dev = hisi_zip;
-       }
-
-       return 0;
-}
-
-static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
-{
-       int i;
-
-       for (i = 1; i >= 0; i--)
-               hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
-}
-
-static u16 get_extra_field_size(const u8 *start)
-{
-       return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
-}
-
-static u32 get_name_field_size(const u8 *start)
-{
-       return strlen(start) + 1;
-}
+               qp_ctx->zip_dev = hisi_zip;
+       }
 
-static u32 get_comment_field_size(const u8 *start)
-{
-       return strlen(start) + 1;
+       if (hisi_zip->qm.ver < QM_HW_V3)
+               hisi_zip_ctx->ops = &hisi_zip_ops_v1;
+       else
+               hisi_zip_ctx->ops = &hisi_zip_ops_v2;
+
+       return 0;
 }
 
-static u32 __get_gzip_head_size(const u8 *src)
+static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
 {
-       u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT);
-       u32 size = GZIP_HEAD_FEXTRA_SHIFT;
-
-       if (head_flg & GZIP_HEAD_FEXTRA_BIT)
-               size += get_extra_field_size(src + size);
-       if (head_flg & GZIP_HEAD_FNAME_BIT)
-               size += get_name_field_size(src + size);
-       if (head_flg & GZIP_HEAD_FCOMMENT_BIT)
-               size += get_comment_field_size(src + size);
-       if (head_flg & GZIP_HEAD_FHCRC_BIT)
-               size += GZIP_HEAD_FHCRC_SIZE;
+       int i;
 
-       return size;
+       for (i = 1; i >= 0; i--)
+               hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
 }
 
 static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
@@ -336,52 +684,6 @@ static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
                                       ctx->qp_ctx[i].sgl_pool);
 }
 
-static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
-                               struct hisi_zip_req *req)
-{
-       struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
-
-       write_lock(&req_q->req_lock);
-       clear_bit(req->req_id, req_q->req_bitmap);
-       memset(req, 0, sizeof(struct hisi_zip_req));
-       write_unlock(&req_q->req_lock);
-}
-
-static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
-{
-       struct hisi_zip_sqe *sqe = data;
-       struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
-       struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
-       struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
-       struct hisi_zip_req *req = req_q->q + sqe->tag;
-       struct acomp_req *acomp_req = req->req;
-       struct device *dev = &qp->qm->pdev->dev;
-       u32 status, dlen, head_size;
-       int err = 0;
-
-       atomic64_inc(&dfx->recv_cnt);
-       status = sqe->dw3 & HZIP_BD_STATUS_M;
-       if (status != 0 && status != HZIP_NC_ERR) {
-               dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
-                       (qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
-                       sqe->produced);
-               atomic64_inc(&dfx->err_bd_cnt);
-               err = -EIO;
-       }
-       dlen = sqe->produced;
-
-       hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
-       hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
-
-       head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
-       acomp_req->dlen = dlen + head_size;
-
-       if (acomp_req->base.complete)
-               acomp_request_complete(acomp_req, err);
-
-       hisi_zip_remove_req(qp_ctx, req);
-}
-
 static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx,
                                  void (*fn)(struct hisi_qp *, void *))
 {
@@ -439,204 +741,6 @@ static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
        hisi_zip_ctx_exit(ctx);
 }
 
-static int add_comp_head(struct scatterlist *dst, u8 req_type)
-{
-       int head_size = TO_HEAD_SIZE(req_type);
-       const u8 *head = TO_HEAD(req_type);
-       int ret;
-
-       ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
-       if (ret != head_size) {
-               pr_err("the head size of buffer is wrong (%d)!\n", ret);
-               return -ENOMEM;
-       }
-
-       return head_size;
-}
-
-static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
-{
-       char buf[HZIP_GZIP_HEAD_BUF];
-
-       sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf));
-
-       return __get_gzip_head_size(buf);
-}
-
-static int  get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
-{
-       if (!acomp_req->src || !acomp_req->slen)
-               return -EINVAL;
-
-       if ((req_type == HZIP_ALG_TYPE_GZIP) &&
-           (acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
-               return -EINVAL;
-
-       switch (req_type) {
-       case HZIP_ALG_TYPE_ZLIB:
-               return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
-       case HZIP_ALG_TYPE_GZIP:
-               return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
-       default:
-               pr_err("request type does not support!\n");
-               return -EINVAL;
-       }
-}
-
-static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
-                                               struct hisi_zip_qp_ctx *qp_ctx,
-                                               size_t head_size, bool is_comp)
-{
-       struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
-       struct hisi_zip_req *q = req_q->q;
-       struct hisi_zip_req *req_cache;
-       int req_id;
-
-       write_lock(&req_q->req_lock);
-
-       req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
-       if (req_id >= req_q->size) {
-               write_unlock(&req_q->req_lock);
-               dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
-               return ERR_PTR(-EAGAIN);
-       }
-       set_bit(req_id, req_q->req_bitmap);
-
-       req_cache = q + req_id;
-       req_cache->req_id = req_id;
-       req_cache->req = req;
-
-       if (is_comp) {
-               req_cache->sskip = 0;
-               req_cache->dskip = head_size;
-       } else {
-               req_cache->sskip = head_size;
-               req_cache->dskip = 0;
-       }
-
-       write_unlock(&req_q->req_lock);
-
-       return req_cache;
-}
-
-static int hisi_zip_do_work(struct hisi_zip_req *req,
-                           struct hisi_zip_qp_ctx *qp_ctx)
-{
-       struct acomp_req *a_req = req->req;
-       struct hisi_qp *qp = qp_ctx->qp;
-       struct device *dev = &qp->qm->pdev->dev;
-       struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
-       struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
-       struct hisi_zip_sqe zip_sqe;
-       dma_addr_t input, output;
-       int ret;
-
-       if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
-               return -EINVAL;
-
-       req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
-                                                   req->req_id << 1, &input);
-       if (IS_ERR(req->hw_src)) {
-               dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
-                       PTR_ERR(req->hw_src));
-               return PTR_ERR(req->hw_src);
-       }
-       req->dma_src = input;
-
-       req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
-                                                   (req->req_id << 1) + 1,
-                                                   &output);
-       if (IS_ERR(req->hw_dst)) {
-               ret = PTR_ERR(req->hw_dst);
-               dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
-                       ret);
-               goto err_unmap_input;
-       }
-       req->dma_dst = output;
-
-       hisi_zip_fill_sqe(&zip_sqe, qp->req_type, input, output, a_req->slen,
-                         a_req->dlen, req->sskip, req->dskip);
-       hisi_zip_config_buf_type(&zip_sqe, HZIP_SGL);
-       hisi_zip_config_tag(&zip_sqe, req->req_id);
-
-       /* send command to start a task */
-       atomic64_inc(&dfx->send_cnt);
-       ret = hisi_qp_send(qp, &zip_sqe);
-       if (ret < 0) {
-               atomic64_inc(&dfx->send_busy_cnt);
-               ret = -EAGAIN;
-               dev_dbg_ratelimited(dev, "failed to send request!\n");
-               goto err_unmap_output;
-       }
-
-       return -EINPROGRESS;
-
-err_unmap_output:
-       hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
-err_unmap_input:
-       hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
-       return ret;
-}
-
-static int hisi_zip_acompress(struct acomp_req *acomp_req)
-{
-       struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
-       struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
-       struct device *dev = &qp_ctx->qp->qm->pdev->dev;
-       struct hisi_zip_req *req;
-       int head_size;
-       int ret;
-
-       /* let's output compression head now */
-       head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
-       if (head_size < 0) {
-               dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
-                                   head_size);
-               return head_size;
-       }
-
-       req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
-
-       ret = hisi_zip_do_work(req, qp_ctx);
-       if (ret != -EINPROGRESS) {
-               dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
-               hisi_zip_remove_req(qp_ctx, req);
-       }
-
-       return ret;
-}
-
-static int hisi_zip_adecompress(struct acomp_req *acomp_req)
-{
-       struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
-       struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
-       struct device *dev = &qp_ctx->qp->qm->pdev->dev;
-       struct hisi_zip_req *req;
-       int head_size, ret;
-
-       head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
-       if (head_size < 0) {
-               dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
-                                   head_size);
-               return head_size;
-       }
-
-       req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
-
-       ret = hisi_zip_do_work(req, qp_ctx);
-       if (ret != -EINPROGRESS) {
-               dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
-                                    ret);
-               hisi_zip_remove_req(qp_ctx, req);
-       }
-
-       return ret;
-}
-
 static struct acomp_alg hisi_zip_acomp_zlib = {
        .init                   = hisi_zip_acomp_init,
        .exit                   = hisi_zip_acomp_exit,
@@ -665,7 +769,7 @@ static struct acomp_alg hisi_zip_acomp_gzip = {
        }
 };
 
-int hisi_zip_register_to_crypto(void)
+int hisi_zip_register_to_crypto(struct hisi_qm *qm)
 {
        int ret;
 
@@ -684,7 +788,7 @@ int hisi_zip_register_to_crypto(void)
        return ret;
 }
 
-void hisi_zip_unregister_from_crypto(void)
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
 {
        crypto_unregister_acomp(&hisi_zip_acomp_gzip);
        crypto_unregister_acomp(&hisi_zip_acomp_zlib);
index 02c445722445d6d4ae2e9c9dbbec2bd98630a3b3..2178b40e9f825bcb5114f6f16efc66abca2e9c2d 100644 (file)
@@ -18,7 +18,6 @@
 #define PCI_DEVICE_ID_ZIP_VF           0xa251
 
 #define HZIP_QUEUE_NUM_V1              4096
-#define HZIP_QUEUE_NUM_V2              1024
 
 #define HZIP_CLOCK_GATE_CTRL           0x301004
 #define COMP0_ENABLE                   BIT(0)
 #define HZIP_CORE_INT_RAS_CE_ENABLE    0x1
 #define HZIP_CORE_INT_RAS_NFE_ENB      0x301164
 #define HZIP_CORE_INT_RAS_FE_ENB        0x301168
-#define HZIP_CORE_INT_RAS_NFE_ENABLE   0x7FE
+#define HZIP_CORE_INT_RAS_NFE_ENABLE   0x1FFE
 #define HZIP_SRAM_ECC_ERR_NUM_SHIFT    16
 #define HZIP_SRAM_ECC_ERR_ADDR_SHIFT   24
-#define HZIP_CORE_INT_MASK_ALL         GENMASK(10, 0)
+#define HZIP_CORE_INT_MASK_ALL         GENMASK(12, 0)
 #define HZIP_COMP_CORE_NUM             2
 #define HZIP_DECOMP_CORE_NUM           6
 #define HZIP_CORE_NUM                  (HZIP_COMP_CORE_NUM + \
@@ -134,17 +133,17 @@ static const struct hisi_zip_hw_error zip_hw_error[] = {
        { .int_msk = BIT(8), .msg = "zip_com_inf_err" },
        { .int_msk = BIT(9), .msg = "zip_enc_inf_err" },
        { .int_msk = BIT(10), .msg = "zip_pre_out_err" },
+       { .int_msk = BIT(11), .msg = "zip_axi_poison_err" },
+       { .int_msk = BIT(12), .msg = "zip_sva_err" },
        { /* sentinel */ }
 };
 
 enum ctrl_debug_file_index {
-       HZIP_CURRENT_QM,
        HZIP_CLEAR_ENABLE,
        HZIP_DEBUG_FILE_NUM,
 };
 
 static const char * const ctrl_debug_file_name[] = {
-       [HZIP_CURRENT_QM]   = "current_qm",
        [HZIP_CLEAR_ENABLE] = "clear_enable",
 };
 
@@ -363,48 +362,6 @@ static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
        return &hisi_zip->qm;
 }
 
-static u32 current_qm_read(struct ctrl_debug_file *file)
-{
-       struct hisi_qm *qm = file_to_qm(file);
-
-       return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int current_qm_write(struct ctrl_debug_file *file, u32 val)
-{
-       struct hisi_qm *qm = file_to_qm(file);
-       u32 vfq_num;
-       u32 tmp;
-
-       if (val > qm->vfs_num)
-               return -EINVAL;
-
-       /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
-       if (val == 0) {
-               qm->debug.curr_qm_qp_num = qm->qp_num;
-       } else {
-               vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
-               if (val == qm->vfs_num)
-                       qm->debug.curr_qm_qp_num = qm->ctrl_qp_num -
-                               qm->qp_num - (qm->vfs_num - 1) * vfq_num;
-               else
-                       qm->debug.curr_qm_qp_num = vfq_num;
-       }
-
-       writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
-       writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
-       tmp = val |
-             (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
-       writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
-       tmp = val |
-             (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
-       writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
-       return  0;
-}
-
 static u32 clear_enable_read(struct ctrl_debug_file *file)
 {
        struct hisi_qm *qm = file_to_qm(file);
@@ -438,9 +395,6 @@ static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
 
        spin_lock_irq(&file->lock);
        switch (file->index) {
-       case HZIP_CURRENT_QM:
-               val = current_qm_read(file);
-               break;
        case HZIP_CLEAR_ENABLE:
                val = clear_enable_read(file);
                break;
@@ -478,11 +432,6 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
 
        spin_lock_irq(&file->lock);
        switch (file->index) {
-       case HZIP_CURRENT_QM:
-               ret = current_qm_write(file, val);
-               if (ret)
-                       goto err_input;
-               break;
        case HZIP_CLEAR_ENABLE:
                ret = clear_enable_write(file, val);
                if (ret)
@@ -580,7 +529,7 @@ static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
        struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
        int i;
 
-       for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
+       for (i = HZIP_CLEAR_ENABLE; i < HZIP_DEBUG_FILE_NUM; i++) {
                spin_lock_init(&zip->ctrl->files[i].lock);
                zip->ctrl->files[i].ctrl = zip->ctrl;
                zip->ctrl->files[i].index = i;
@@ -627,10 +576,6 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
 {
        int i, j;
 
-       /* clear current_qm */
-       writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
-       writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
        /* enable register read_clear bit */
        writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
        for (i = 0; i < ARRAY_SIZE(core_offsets); i++)
@@ -714,6 +659,22 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
               qm->io_base + HZIP_CORE_INT_SET);
 }
 
+static void hisi_zip_err_info_init(struct hisi_qm *qm)
+{
+       struct hisi_qm_err_info *err_info = &qm->err_info;
+
+       err_info->ce = QM_BASE_CE;
+       err_info->fe = 0;
+       err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
+       err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE;
+       err_info->msi_wr_port = HZIP_WR_PORT;
+       err_info->acpi_rst = "ZRST";
+       err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT;
+
+       if (qm->ver >= QM_HW_V3)
+               err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT;
+}
+
 static const struct hisi_qm_err_ini hisi_zip_err_ini = {
        .hw_init                = hisi_zip_set_user_domain_and_cache,
        .hw_err_enable          = hisi_zip_hw_error_enable,
@@ -723,16 +684,7 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
        .log_dev_hw_err         = hisi_zip_log_hw_error,
        .open_axi_master_ooo    = hisi_zip_open_axi_master_ooo,
        .close_axi_master_ooo   = hisi_zip_close_axi_master_ooo,
-       .err_info               = {
-               .ce                     = QM_BASE_CE,
-               .nfe                    = QM_BASE_NFE |
-                                         QM_ACC_WB_NOT_READY_TIMEOUT,
-               .fe                     = 0,
-               .ecc_2bits_mask         = HZIP_CORE_INT_STATUS_M_ECC,
-               .dev_ce_mask            = HZIP_CORE_INT_RAS_CE_ENABLE,
-               .msi_wr_port            = HZIP_WR_PORT,
-               .acpi_rst               = "ZRST",
-       }
+       .err_info_init          = hisi_zip_err_info_init,
 };
 
 static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -746,13 +698,8 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
 
        hisi_zip->ctrl = ctrl;
        ctrl->hisi_zip = hisi_zip;
-
-       if (qm->ver == QM_HW_V1)
-               qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
-       else
-               qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;
-
        qm->err_ini = &hisi_zip_err_ini;
+       qm->err_ini->err_info_init(qm);
 
        hisi_zip_set_user_domain_and_cache(qm);
        hisi_qm_dev_err_init(qm);
index e813115d5432684e9c844f3eb99cbf1d77c6de95..aa4c7b2af3e2e758acfe7f8b6c09c26ec8388038 100644 (file)
@@ -963,8 +963,6 @@ static int img_hash_probe(struct platform_device *pdev)
        hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hdev->io_base)) {
                err = PTR_ERR(hdev->io_base);
-               dev_err(dev, "can't ioremap, returned %d\n", err);
-
                goto res_err;
        }
 
@@ -972,7 +970,6 @@ static int img_hash_probe(struct platform_device *pdev)
        hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
        if (IS_ERR(hdev->cpu_addr)) {
-               dev_err(dev, "can't ioremap write port\n");
                err = PTR_ERR(hdev->cpu_addr);
                goto res_err;
        }
index 6364583b88b216fdf5c0e2e8fc3f632061413f07..9ff885d50edfcdf6878ea9872dc2e650f3d989e3 100644 (file)
@@ -688,7 +688,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
                /* Leave the DSE threads reset state */
                writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
 
-               /* Configure the procesing engine thresholds */
+               /* Configure the processing engine thresholds */
                writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
                       EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
                       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
index 8b0f17fc09fb5c16da0cc7bb85c4e95f63ab5015..0616e369522e97b097850b4bf0e989ccfa154d6b 100644 (file)
@@ -265,7 +265,7 @@ static int setup_crypt_desc(void)
        return 0;
 }
 
-static spinlock_t desc_lock;
+static DEFINE_SPINLOCK(desc_lock);
 static struct crypt_ctl *get_crypt_desc(void)
 {
        int i;
@@ -293,7 +293,7 @@ static struct crypt_ctl *get_crypt_desc(void)
        }
 }
 
-static spinlock_t emerg_lock;
+static DEFINE_SPINLOCK(emerg_lock);
 static struct crypt_ctl *get_crypt_desc_emerg(void)
 {
        int i;
@@ -1379,9 +1379,6 @@ static int __init ixp_module_init(void)
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);
 
-       spin_lock_init(&desc_lock);
-       spin_lock_init(&emerg_lock);
-
        err = init_ixp_crypto(&pdev->dev);
        if (err) {
                platform_device_unregister(pdev);
index b6b25d994af38515e8bc3125e250d124f8253ba1..e2a39fdaf623e45cd9ddf918bc0876ebbd0e4b0e 100644 (file)
@@ -1623,10 +1623,8 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
        }
 
        aes_dev->base_reg = devm_ioremap_resource(&pdev->dev, aes_mem);
-       if (IS_ERR(aes_dev->base_reg)) {
-               dev_err(dev, "Failed to get base address\n");
+       if (IS_ERR(aes_dev->base_reg))
                return PTR_ERR(aes_dev->base_reg);
-       }
 
        /* Get and request IRQ */
        aes_dev->irq = platform_get_irq(pdev, 0);
@@ -1649,8 +1647,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
 
        /* Initialize crypto engine */
        aes_dev->engine = crypto_engine_alloc_init(dev, true);
-       if (!aes_dev->engine)
+       if (!aes_dev->engine) {
+               rc = -ENOMEM;
                goto list_del;
+       }
 
        rc = crypto_engine_start(aes_dev->engine);
        if (rc) {
index c4b97b4160e9b60f2104a0f4059b9cacd1324df2..0379dbf32a4c46f8b6189cd14cab8358841c504d 100644 (file)
@@ -1192,10 +1192,8 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
        }
 
        hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem);
-       if (IS_ERR(hcu_dev->io_base)) {
-               dev_err(dev, "Could not io-remap mem resource.\n");
+       if (IS_ERR(hcu_dev->io_base))
                return PTR_ERR(hcu_dev->io_base);
-       }
 
        init_completion(&hcu_dev->irq_done);
 
@@ -1220,8 +1218,10 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
 
        /* Initialize crypto engine */
        hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
-       if (!hcu_dev->engine)
+       if (!hcu_dev->engine) {
+               rc = -ENOMEM;
                goto list_del;
+       }
 
        rc = crypto_engine_start(hcu_dev->engine);
        if (rc) {
index 81eecacf603adc25bde90770a2dcc3b80d042086..deb9bd460ee621a542e4ef5606e4f80bf0213e2c 100644 (file)
@@ -93,7 +93,7 @@
 #define OCS_HCU_WAIT_BUSY_TIMEOUT_US           1000000
 
 /**
- * struct ocs_hcu_dma_list - An entry in an OCS DMA linked list.
+ * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
  * @src_addr:  Source address of the data.
  * @src_len:   Length of data to be fetched.
  * @nxt_desc:  Next descriptor to fetch.
@@ -107,7 +107,7 @@ struct ocs_hcu_dma_entry {
 };
 
 /**
- * struct ocs_dma_list - OCS-specific DMA linked list.
+ * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
  * @head:      The head of the list (points to the array backing the list).
  * @tail:      The current tail of the list; NULL if the list is empty.
  * @dma_addr:  The DMA address of @head (i.e., the DMA address of the backing
@@ -597,7 +597,7 @@ int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
 }
 
 /**
- * ocs_hcu_digest() - Perform a hashing iteration.
+ * ocs_hcu_hash_update() - Perform a hashing iteration.
  * @hcu_dev:   The OCS HCU device to use.
  * @ctx:       The OCS HCU hashing context.
  * @dma_list:  The OCS DMA list mapping the input data to process.
@@ -632,7 +632,7 @@ int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
 }
 
 /**
- * ocs_hcu_hash_final() - Update and finalize hash computation.
+ * ocs_hcu_hash_finup() - Update and finalize hash computation.
  * @hcu_dev:   The OCS HCU device to use.
  * @ctx:       The OCS HCU hashing context.
  * @dma_list:  The OCS DMA list mapping the input data to process.
index 3518fac2983468b3b36b9460fae15f0808f32f7c..ecedd91a8d859c0a9addb32f536054e44a80fb8a 100644 (file)
@@ -121,14 +121,14 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
 
 int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
                                  struct pci_dev *pdev);
-int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
-                            struct pci_dev *pdev, u64 reg, u64 *val);
+int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+                            u64 reg, u64 *val, int blkaddr);
 int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
-                             u64 reg, u64 val);
+                             u64 reg, u64 val, int blkaddr);
 int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
-                        u64 reg, u64 *val);
+                        u64 reg, u64 *val, int blkaddr);
 int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
-                         u64 reg, u64 val);
+                         u64 reg, u64 val, int blkaddr);
 struct otx2_cptlfs_info;
 int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
 int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
index 51cb6404ded7a33e62e90db928c67adf87c86d8c..9074876d38e5d4e6a09f9507e7e1a440d2add571 100644 (file)
@@ -43,7 +43,7 @@ int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
 }
 
 int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
-                            u64 reg, u64 *val)
+                            u64 reg, u64 *val, int blkaddr)
 {
        struct cpt_rd_wr_reg_msg *reg_msg;
 
@@ -62,12 +62,13 @@ int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
        reg_msg->is_write = 0;
        reg_msg->reg_offset = reg;
        reg_msg->ret_val = val;
+       reg_msg->blkaddr = blkaddr;
 
        return 0;
 }
 
 int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
-                             u64 reg, u64 val)
+                             u64 reg, u64 val, int blkaddr)
 {
        struct cpt_rd_wr_reg_msg *reg_msg;
 
@@ -86,16 +87,17 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
        reg_msg->is_write = 1;
        reg_msg->reg_offset = reg;
        reg_msg->val = val;
+       reg_msg->blkaddr = blkaddr;
 
        return 0;
 }
 
 int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
-                        u64 reg, u64 *val)
+                        u64 reg, u64 *val, int blkaddr)
 {
        int ret;
 
-       ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val);
+       ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val, blkaddr);
        if (ret)
                return ret;
 
@@ -103,11 +105,11 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
 }
 
 int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
-                         u64 reg, u64 val)
+                         u64 reg, u64 val, int blkaddr)
 {
        int ret;
 
-       ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val);
+       ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val, blkaddr);
        if (ret)
                return ret;
 
index 823a4571fd674577a864a472af3c439ba0e907ed..34aba1532761231f6e01c2051c5abb44ca9762a9 100644 (file)
@@ -56,7 +56,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
 
        ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
                                   CPT_AF_LFX_CTL(lf->slot),
-                                  &lf_ctrl.u);
+                                  &lf_ctrl.u, lfs->blkaddr);
        if (ret)
                return ret;
 
@@ -64,7 +64,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
 
        ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
                                    CPT_AF_LFX_CTL(lf->slot),
-                                   lf_ctrl.u);
+                                   lf_ctrl.u, lfs->blkaddr);
        return ret;
 }
 
@@ -77,7 +77,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
 
        ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
                                   CPT_AF_LFX_CTL(lf->slot),
-                                  &lf_ctrl.u);
+                                  &lf_ctrl.u, lfs->blkaddr);
        if (ret)
                return ret;
 
@@ -85,7 +85,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
 
        ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
                                    CPT_AF_LFX_CTL(lf->slot),
-                                   lf_ctrl.u);
+                                   lf_ctrl.u, lfs->blkaddr);
        return ret;
 }
 
index 314e973541004fe43b41ce7d7b039df32decb0b6..ab1678fc564d63710cd3dba8790331ca2d6ad493 100644 (file)
@@ -95,6 +95,7 @@ struct otx2_cptlfs_info {
        u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
        u8 kvf_limits;          /* Kernel crypto limits */
        atomic_t state;         /* LF's state. started/reset */
+       int blkaddr;            /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
 };
 
 static inline void otx2_cpt_free_instruction_queues(
index 8c899ad531a5c868a3fe031be625746268b4ead8..e19af1356f1236d2462d7332b69433d9bb161804 100644 (file)
@@ -51,6 +51,7 @@ struct otx2_cptpf_dev {
        u8 max_vfs;             /* Maximum number of VFs supported by CPT */
        u8 enabled_vfs;         /* Number of enabled VFs */
        u8 kvf_limits;          /* Kernel crypto limits */
+       bool has_cpt1;
 };
 
 irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
index 5277e04badd9abd527995c55553f229375bec766..58f47e3ab62e73c2829101557bb55659771aa1a4 100644 (file)
@@ -451,19 +451,19 @@ static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
        return 0;
 }
 
-static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
+static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
 {
        int timeout = 10, ret;
        u64 reg = 0;
 
        ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
-                                   CPT_AF_BLK_RST, 0x1);
+                                   CPT_AF_BLK_RST, 0x1, blkaddr);
        if (ret)
                return ret;
 
        do {
                ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
-                                          CPT_AF_BLK_RST, &reg);
+                                          CPT_AF_BLK_RST, &reg, blkaddr);
                if (ret)
                        return ret;
 
@@ -478,11 +478,35 @@ static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
        return ret;
 }
 
+static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
+{
+       int ret = 0;
+
+       if (cptpf->has_cpt1) {
+               ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
+               if (ret)
+                       return ret;
+       }
+       return cptx_device_reset(cptpf, BLKADDR_CPT0);
+}
+
+static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
+{
+       u64 cfg;
+
+       cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+                             RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
+       if (cfg & BIT_ULL(11))
+               cptpf->has_cpt1 = true;
+}
+
 static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
 {
        union otx2_cptx_af_constants1 af_cnsts1 = {0};
        int ret = 0;
 
+       /* check if 'implemented' bit is set for block BLKADDR_CPT1 */
+       cptpf_check_block_implemented(cptpf);
        /* Reset the CPT PF device */
        ret = cptpf_device_reset(cptpf);
        if (ret)
@@ -490,7 +514,8 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
 
        /* Get number of SE, IE and AE engines */
        ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
-                                  CPT_AF_CONSTANTS1, &af_cnsts1.u);
+                                  CPT_AF_CONSTANTS1, &af_cnsts1.u,
+                                  BLKADDR_CPT0);
        if (ret)
                return ret;
 
index 1dc3ba298139ff2c5337cf63df564f4cf8796c69..a531f4c8b4414ae3d5c898115a99a0a0da6a119b 100644 (file)
@@ -153,16 +153,16 @@ static int get_ucode_type(struct device *dev,
 }
 
 static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
-                             dma_addr_t dma_addr)
+                             dma_addr_t dma_addr, int blkaddr)
 {
        return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                                     CPT_AF_EXEX_UCODE_BASE(eng),
-                                    (u64)dma_addr);
+                                    (u64)dma_addr, blkaddr);
 }
 
-static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
+static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
+                              struct otx2_cptpf_dev *cptpf, int blkaddr)
 {
-       struct otx2_cptpf_dev *cptpf = obj;
        struct otx2_cpt_engs_rsvd *engs;
        dma_addr_t dma_addr;
        int i, bit, ret;
@@ -170,7 +170,7 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
        /* Set PF number for microcode fetches */
        ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                                    CPT_AF_PF_FUNC,
-                                   cptpf->pf_id << RVU_PFVF_PF_SHIFT);
+                                   cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
        if (ret)
                return ret;
 
@@ -187,7 +187,8 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
                 */
                for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
                        if (!eng_grp->g->eng_ref_cnt[bit]) {
-                               ret = __write_ucode_base(cptpf, bit, dma_addr);
+                               ret = __write_ucode_base(cptpf, bit, dma_addr,
+                                                        blkaddr);
                                if (ret)
                                        return ret;
                        }
@@ -195,23 +196,32 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
        return 0;
 }
 
-static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
-                                       void *obj)
+static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
 {
        struct otx2_cptpf_dev *cptpf = obj;
-       struct otx2_cpt_bitmap bmap;
+       int ret;
+
+       if (cptpf->has_cpt1) {
+               ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
+               if (ret)
+                       return ret;
+       }
+       return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
+}
+
+static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+                                        struct otx2_cptpf_dev *cptpf,
+                                        struct otx2_cpt_bitmap bmap,
+                                        int blkaddr)
+{
        int i, timeout = 10;
        int busy, ret;
        u64 reg = 0;
 
-       bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
-       if (!bmap.size)
-               return -EINVAL;
-
        /* Detach the cores from group */
        for_each_set_bit(i, bmap.bits, bmap.size) {
                ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
-                                          CPT_AF_EXEX_CTL2(i), &reg);
+                                          CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
                if (ret)
                        return ret;
 
@@ -221,7 +231,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
 
                        ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
                                                    cptpf->pdev,
-                                                   CPT_AF_EXEX_CTL2(i), reg);
+                                                   CPT_AF_EXEX_CTL2(i), reg,
+                                                   blkaddr);
                        if (ret)
                                return ret;
                }
@@ -237,7 +248,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
                for_each_set_bit(i, bmap.bits, bmap.size) {
                        ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
                                                   cptpf->pdev,
-                                                  CPT_AF_EXEX_STS(i), &reg);
+                                                  CPT_AF_EXEX_STS(i), &reg,
+                                                  blkaddr);
                        if (ret)
                                return ret;
 
@@ -253,7 +265,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
                if (!eng_grp->g->eng_ref_cnt[i]) {
                        ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
                                                    cptpf->pdev,
-                                                   CPT_AF_EXEX_CTL(i), 0x0);
+                                                   CPT_AF_EXEX_CTL(i), 0x0,
+                                                   blkaddr);
                        if (ret)
                                return ret;
                }
@@ -262,22 +275,39 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
        return 0;
 }
 
-static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
-                                      void *obj)
+static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+                                       void *obj)
 {
        struct otx2_cptpf_dev *cptpf = obj;
        struct otx2_cpt_bitmap bmap;
-       u64 reg = 0;
-       int i, ret;
+       int ret;
 
        bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
        if (!bmap.size)
                return -EINVAL;
 
+       if (cptpf->has_cpt1) {
+               ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
+                                                   BLKADDR_CPT1);
+               if (ret)
+                       return ret;
+       }
+       return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
+                                            BLKADDR_CPT0);
+}
+
+static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+                                       struct otx2_cptpf_dev *cptpf,
+                                       struct otx2_cpt_bitmap bmap,
+                                       int blkaddr)
+{
+       u64 reg = 0;
+       int i, ret;
+
        /* Attach the cores to the group */
        for_each_set_bit(i, bmap.bits, bmap.size) {
                ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
-                                          CPT_AF_EXEX_CTL2(i), &reg);
+                                          CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
                if (ret)
                        return ret;
 
@@ -287,7 +317,8 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
 
                        ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
                                                    cptpf->pdev,
-                                                   CPT_AF_EXEX_CTL2(i), reg);
+                                                   CPT_AF_EXEX_CTL2(i), reg,
+                                                   blkaddr);
                        if (ret)
                                return ret;
                }
@@ -295,15 +326,33 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
 
        /* Enable the cores */
        for_each_set_bit(i, bmap.bits, bmap.size) {
-               ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox,
-                                               cptpf->pdev,
-                                               CPT_AF_EXEX_CTL(i), 0x1);
+               ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+                                               CPT_AF_EXEX_CTL(i), 0x1,
+                                               blkaddr);
                if (ret)
                        return ret;
        }
-       ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+       return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+}
 
-       return ret;
+static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+                                      void *obj)
+{
+       struct otx2_cptpf_dev *cptpf = obj;
+       struct otx2_cpt_bitmap bmap;
+       int ret;
+
+       bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
+       if (!bmap.size)
+               return -EINVAL;
+
+       if (cptpf->has_cpt1) {
+               ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
+                                                  BLKADDR_CPT1);
+               if (ret)
+                       return ret;
+       }
+       return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
 }
 
 static int load_fw(struct device *dev, struct fw_info_t *fw_info,
@@ -1140,20 +1189,18 @@ release_fw:
        return ret;
 }
 
-int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
+static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
+                                 int blkaddr)
 {
-       int i, ret, busy, total_cores;
-       int timeout = 10;
-       u64 reg = 0;
-
-       total_cores = cptpf->eng_grps.avail.max_se_cnt +
-                     cptpf->eng_grps.avail.max_ie_cnt +
-                     cptpf->eng_grps.avail.max_ae_cnt;
+       int timeout = 10, ret;
+       int i, busy;
+       u64 reg;
 
        /* Disengage the cores from groups */
        for (i = 0; i < total_cores; i++) {
                ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
-                                               CPT_AF_EXEX_CTL2(i), 0x0);
+                                               CPT_AF_EXEX_CTL2(i), 0x0,
+                                               blkaddr);
                if (ret)
                        return ret;
 
@@ -1173,7 +1220,8 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
                for (i = 0; i < total_cores; i++) {
                        ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
                                                   cptpf->pdev,
-                                                  CPT_AF_EXEX_STS(i), &reg);
+                                                  CPT_AF_EXEX_STS(i), &reg,
+                                                  blkaddr);
                        if (ret)
                                return ret;
 
@@ -1187,13 +1235,30 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
        /* Disable the cores */
        for (i = 0; i < total_cores; i++) {
                ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
-                                               CPT_AF_EXEX_CTL(i), 0x0);
+                                               CPT_AF_EXEX_CTL(i), 0x0,
+                                               blkaddr);
                if (ret)
                        return ret;
        }
        return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
 }
 
+int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
+{
+       int total_cores, ret;
+
+       total_cores = cptpf->eng_grps.avail.max_se_cnt +
+                     cptpf->eng_grps.avail.max_ie_cnt +
+                     cptpf->eng_grps.avail.max_ae_cnt;
+
+       if (cptpf->has_cpt1) {
+               ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
+               if (ret)
+                       return ret;
+       }
+       return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
+}
+
 void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
                               struct otx2_cpt_eng_grps *eng_grps)
 {
@@ -1354,6 +1419,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
        lfs->pdev = pdev;
        lfs->reg_base = cptpf->reg_base;
        lfs->mbox = &cptpf->afpf_mbox;
+       lfs->blkaddr = BLKADDR_CPT0;
        ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
                              OTX2_CPT_QUEUE_HI_PRIO, 1);
        if (ret)
index 92e921eceed755f874907c15f5a8ad3473d95683..d6314ea9ae896ed4255f122c18defe0ae64737ef 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES CBC routines supporting the Power 7+ Nest Accelerators driver
  *
  * Copyright (C) 2011-2012 International Business Machines Inc.
index 4c9362eebefd252a45416aa1cbe01fef43da621b..e7384d1075739b08960fdc27efeee4735a7509df 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES CCM routines supporting the Power 7+ Nest Accelerators driver
  *
  * Copyright (C) 2012 International Business Machines Inc.
index 6d5ce1a66f1eeb12a949652c14dd38cfe9c904c1..13f518802343d40f3345f293dfbd31b79ddd43b6 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES CTR routines supporting the Power 7+ Nest Accelerators driver
  *
  * Copyright (C) 2011-2012 International Business Machines Inc.
index 77e338dc33f1de1988b3df7343fe738b8ac5ca94..7a729dc2bc17a3342f0a53cf1e0d3b96bb173ace 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES ECB routines supporting the Power 7+ Nest Accelerators driver
  *
  * Copyright (C) 2011-2012 International Business Machines Inc.
index 19c6ed5baea41378837c828628e61bf2b20f8420..fc9baca13920cf0771fb27ef456134fa880837e7 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES GCM routines supporting the Power 7+ Nest Accelerators driver
  *
  * Copyright (C) 2012 International Business Machines Inc.
index 48dc1c98ca52587ed055b1c3660b1d00cf848d64..eb5c8f6893601237e1dc0ae30c8328bf03e4bdd3 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
  *
  * Copyright (C) 2011-2012 International Business Machines Inc.
index 13c65deda8e979f75ce4231240bf5549996000f1..446f611726df5a6afabe751828dbd30ebbc4d8fb 100644 (file)
@@ -932,8 +932,10 @@ static int __init nx_powernv_probe_vas(struct device_node *pn)
                        ret = find_nx_device_tree(dn, chip_id, vasid,
                                NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip);
 
-               if (ret)
+               if (ret) {
+                       of_node_put(dn);
                        return ret;
+               }
        }
 
        if (!ct_842 || !ct_gzip) {
index 90d9a37a57f64565a8d5538e74c0eb04f512be6d..b0ad665e4bda81936c8941198d8f50e2677c49a0 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
  *
  * Copyright (C) 2011-2012 International Business Machines Inc.
index eb8627a0f3176ee8e6f5890ef1b75e0c1611a907..c29103a1a0b6c93858c28eecf7d7535dde9f80d1 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * SHA-512 routines supporting the Power 7+ Nest Accelerators driver
  *
  * Copyright (C) 2011-2012 International Business Machines Inc.
index 1d0e8a1ba160507337f71ebb8d541ec7c09d76e5..010e87d9da36b75c8ddbd825bc7cf28197000f43 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * Routines supporting the Power 7+ Nest Accelerators driver
  *
  * Copyright (C) 2011-2012 International Business Machines Inc.
@@ -200,7 +200,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
  * @sg: sg list head
  * @end: sg lisg end
  * @delta:  is the amount we need to crop in order to bound the list.
- *
+ * @nbytes: length of data in the scatterlists or data length - whichever
+ *          is greater.
  */
 static long int trim_sg_list(struct nx_sg *sg,
                             struct nx_sg *end,
index 1975bcbee997481ee095dc3adaa4841708bf61cd..ee7cd88bb10a79506a0cea7eb61fd83bd98130ec 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * debugfs routines supporting the Power 7+ Nest Accelerators driver
  *
  * Copyright (C) 2011-2012 International Business Machines Inc.
index a45bdcf3026df9bdfda3746f4436575cc0df4593..0dd4c6b157de903972cd13179d79e10f87a2a419 100644 (file)
@@ -103,9 +103,8 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
                dd->err = 0;
        }
 
-       err = pm_runtime_get_sync(dd->dev);
+       err = pm_runtime_resume_and_get(dd->dev);
        if (err < 0) {
-               pm_runtime_put_noidle(dd->dev);
                dev_err(dd->dev, "failed to get sync: %d\n", err);
                return err;
        }
@@ -1134,7 +1133,7 @@ static int omap_aes_probe(struct platform_device *pdev)
        pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
 
        pm_runtime_enable(dev);
-       err = pm_runtime_get_sync(dev);
+       err = pm_runtime_resume_and_get(dev);
        if (err < 0) {
                dev_err(dev, "%s: failed to get_sync(%d)\n",
                        __func__, err);
@@ -1303,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev)
 
 static int omap_aes_resume(struct device *dev)
 {
-       pm_runtime_get_sync(dev);
+       pm_runtime_resume_and_get(dev);
        return 0;
 }
 #endif
index 6a9be01fdf33c2bb176d41e2e723ec338589ea50..3524ddd4893009394f79e0a217204b2ba4498855 100644 (file)
@@ -224,6 +224,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
        hw_data->uof_get_name = uof_get_name;
        hw_data->uof_get_ae_mask = uof_get_ae_mask;
        hw_data->set_msix_rttable = set_msix_default_rttable;
+       hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
 
        adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
 }
index f5990d042c9a15dce7bcafdccfef2d6ac85bcf42..1dd64af22bea92fa3c7b89f23ab78a116e74f14c 100644 (file)
@@ -212,6 +212,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
        hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_flr;
        hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
 }
 
index 1d1532e8fb6d98406ac6475ad581afde04340a89..067ca5e17d387cb87c7053aab8ec16a54bd49bb7 100644 (file)
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto out_err_free_reg;
 
-       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
        ret = adf_dev_init(accel_dev);
        if (ret)
                goto out_err_dev_shutdown;
 
+       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
        ret = adf_dev_start(accel_dev);
        if (ret)
                goto out_err_dev_stop;
index cadcf12884c8b4f517f1fd6fcd3f0f08bcb46e51..30337390513c6fdc309c15e66c2e13127b883231 100644 (file)
@@ -214,6 +214,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
        hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_flr;
        hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
 }
 
index 04742a6d91cae9df25d4c6b75d01b46ce19b60c0..51ea88c0b17d7751515bb63e316ce85853e3f290 100644 (file)
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto out_err_free_reg;
 
-       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
        ret = adf_dev_init(accel_dev);
        if (ret)
                goto out_err_dev_shutdown;
 
+       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
        ret = adf_dev_start(accel_dev);
        if (ret)
                goto out_err_dev_stop;
index 5527344546e5e6b3011e9819153b14e6499637ae..ac435b44f1d20d12cf0c3a00824302ec83452d6e 100644 (file)
@@ -173,6 +173,7 @@ struct adf_hw_device_data {
        void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
                                      bool enable);
        void (*enable_ints)(struct adf_accel_dev *accel_dev);
+       void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
        int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
        void (*reset_device)(struct adf_accel_dev *accel_dev);
        void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
index 1aa17303838db325624f9f95657479b03a8a903e..9e560c7d416307aaef29f65c64b89c9f58fc50ca 100644 (file)
@@ -179,3 +179,28 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
        return capabilities;
 }
 EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
+
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+       u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+       unsigned long accel_mask = hw_data->accel_mask;
+       void __iomem *pmisc_addr;
+       struct adf_bar *pmisc;
+       int pmisc_id;
+       u32 i = 0;
+
+       pmisc_id = hw_data->get_misc_bar_id(hw_data);
+       pmisc = &GET_BARS(accel_dev)[pmisc_id];
+       pmisc_addr = pmisc->virt_addr;
+
+       /* Configures WDT timers */
+       for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+               /* Enable WDT for sym and dc */
+               ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
+               /* Enable WDT for pke */
+               ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
+       }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
index 3816e6500352a6b6fc8a51fda630e549493688ff..756b0ddfac5e16661ff1f7724ca06afa877a764d 100644 (file)
@@ -113,11 +113,24 @@ do { \
 /* Power gating */
 #define ADF_POWERGATE_PKE              BIT(24)
 
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products but this
+ * value should be a few milli-seconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE      0x200000
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE  0x2000000
+#define ADF_SSMWDT_OFFSET              0x54
+#define ADF_SSMWDTPKE_OFFSET           0x58
+#define ADF_SSMWDT(i)          (ADF_SSMWDT_OFFSET + ((i) * 0x4000))
+#define ADF_SSMWDTPKE(i)       (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))
+
 void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
                           int num_a_regs, int num_b_regs);
 void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
 void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
 void adf_gen2_get_arb_info(struct arb_info *arb_info);
 u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
 
 #endif
index b72ff58e0bc791cf71bc464a0b848c88301b0030..000528327b296b08f4509a868352d48d9a881070 100644 (file)
@@ -99,3 +99,43 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
        csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
 }
 EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
+
+static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
+                                              u32 *lower)
+{
+       *lower = lower_32_bits(value);
+       *upper = upper_32_bits(value);
+}
+
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+       u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+       u32 ssm_wdt_pke_high = 0;
+       u32 ssm_wdt_pke_low = 0;
+       u32 ssm_wdt_high = 0;
+       u32 ssm_wdt_low = 0;
+       void __iomem *pmisc_addr;
+       struct adf_bar *pmisc;
+       int pmisc_id;
+
+       pmisc_id = hw_data->get_misc_bar_id(hw_data);
+       pmisc = &GET_BARS(accel_dev)[pmisc_id];
+       pmisc_addr = pmisc->virt_addr;
+
+       /* Convert 64bit WDT timer value into 32bit values for
+        * mmio write to 32bit CSRs.
+        */
+       adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
+       adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
+                                   &ssm_wdt_pke_low);
+
+       /* Enable WDT for sym and dc */
+       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
+       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
+       /* Enable WDT for pke */
+       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
+       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
index 8ab62b2ac311fec8bc14a21c164b5efab81be7c6..b8fca1ff7aab0fd1c97817cec2e340fcc1be2178 100644 (file)
@@ -94,6 +94,18 @@ do { \
                   ADF_RING_BUNDLE_SIZE * (bank) + \
                   ADF_RING_CSR_RING_SRV_ARB_EN, (value))
 
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products but this
+ * value should be a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE      0x200000
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE  0x8000000
+#define ADF_SSMWDTL_OFFSET             0x54
+#define ADF_SSMWDTH_OFFSET             0x5C
+#define ADF_SSMWDTPKEL_OFFSET          0x58
+#define ADF_SSMWDTPKEH_OFFSET          0x60
 
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
 #endif
index 42029153408ee738b0f0b057d083cbc2c8bf84b9..744c40351428da6ceeda9f01bcdcbd0798cff732 100644 (file)
@@ -162,6 +162,10 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
                return -EFAULT;
        }
 
+       /* Set ssm watch dog timer */
+       if (hw_data->set_ssm_wdtimer)
+               hw_data->set_ssm_wdtimer(accel_dev);
+
        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
                if (service->event_hld(accel_dev, ADF_EVENT_START)) {
index c458534635306801ff983102d4788345121a2774..e3ad5587be49e0fc12897fa5b4d74ab5a89f1237 100644 (file)
@@ -291,19 +291,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
 
        ret = adf_isr_alloc_msix_entry_table(accel_dev);
        if (ret)
-               return ret;
-       if (adf_enable_msix(accel_dev))
                goto err_out;
 
-       if (adf_setup_bh(accel_dev))
-               goto err_out;
+       ret = adf_enable_msix(accel_dev);
+       if (ret)
+               goto err_free_msix_table;
 
-       if (adf_request_irqs(accel_dev))
-               goto err_out;
+       ret = adf_setup_bh(accel_dev);
+       if (ret)
+               goto err_disable_msix;
+
+       ret = adf_request_irqs(accel_dev);
+       if (ret)
+               goto err_cleanup_bh;
 
        return 0;
+
+err_cleanup_bh:
+       adf_cleanup_bh(accel_dev);
+
+err_disable_msix:
+       adf_disable_msix(&accel_dev->accel_pci_dev);
+
+err_free_msix_table:
+       adf_isr_free_msix_entry_table(accel_dev);
+
 err_out:
-       adf_isr_resource_free(accel_dev);
-       return -EFAULT;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
index 8b090b7ae8c6b6b8cdef2eab47253f444396845d..a1b77bd7a89443cc22b4801f49b3ea8868639ce2 100644 (file)
@@ -169,7 +169,7 @@ out:
  * @msg:       Message to send
  * @vf_nr:     VF number to which the message will be sent
  *
- * Function sends a messge from the PF to a VF
+ * Function sends a message from the PF to a VF
  *
  * Return: 0 on success, error code otherwise.
  */
index 888c1e0472952591ea8105f6d1377db787730486..8ba28409fb74b7ad9da74f6a9ec659a31b4124c2 100644 (file)
@@ -172,6 +172,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
                dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
                dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
                                  ring->base_addr, ring->dma_addr);
+               ring->base_addr = NULL;
                return -EFAULT;
        }
 
index 2c98fb63f7b7218bac08bc56658f7bb18bb08240..e85bd62d134a4224dfb8e5f717e8a0ef74f41393 100644 (file)
@@ -8,7 +8,7 @@
  * adf_vf2pf_init() - send init msg to PF
  * @accel_dev:  Pointer to acceleration VF device.
  *
- * Function sends an init messge from the VF to a PF
+ * Function sends an init message from the VF to a PF
  *
  * Return: 0 on success, error code otherwise.
  */
@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(adf_vf2pf_init);
  * adf_vf2pf_shutdown() - send shutdown msg to PF
  * @accel_dev:  Pointer to acceleration VF device.
  *
- * Function sends a shutdown messge from the VF to a PF
+ * Function sends a shutdown message from the VF to a PF
  *
  * Return: void
  */
index 38d316a42ba6f7f46b9d2410258891c20a25a6ca..888388acb6bd3e7e3b8019f2d1d6a7d6d038071a 100644 (file)
@@ -261,17 +261,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
                goto err_out;
 
        if (adf_setup_pf2vf_bh(accel_dev))
-               goto err_out;
+               goto err_disable_msi;
 
        if (adf_setup_bh(accel_dev))
-               goto err_out;
+               goto err_cleanup_pf2vf_bh;
 
        if (adf_request_msi_irq(accel_dev))
-               goto err_out;
+               goto err_cleanup_bh;
 
        return 0;
+
+err_cleanup_bh:
+       adf_cleanup_bh(accel_dev);
+
+err_cleanup_pf2vf_bh:
+       adf_cleanup_pf2vf_bh(accel_dev);
+
+err_disable_msi:
+       adf_disable_msi(accel_dev);
+
 err_out:
-       adf_vf_isr_resource_free(accel_dev);
        return -EFAULT;
 }
 EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
index ff78c73c47e382c87fd84543c449967618e5ee5b..f998ed58457c24fb260a208ca5a65a9b65bf38d7 100644 (file)
@@ -718,8 +718,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
        int n = sg_nents(sgl);
        struct qat_alg_buf_list *bufl;
        struct qat_alg_buf_list *buflout = NULL;
-       dma_addr_t blp;
-       dma_addr_t bloutp = 0;
+       dma_addr_t blp = DMA_MAPPING_ERROR;
+       dma_addr_t bloutp = DMA_MAPPING_ERROR;
        struct scatterlist *sg;
        size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
 
@@ -731,9 +731,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
        if (unlikely(!bufl))
                return -ENOMEM;
 
-       blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, blp)))
-               goto err_in;
+       for_each_sg(sgl, sg, n, i)
+               bufl->bufers[i].addr = DMA_MAPPING_ERROR;
 
        for_each_sg(sgl, sg, n, i) {
                int y = sg_nctr;
@@ -750,6 +749,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                sg_nctr++;
        }
        bufl->num_bufs = sg_nctr;
+       blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, blp)))
+               goto err_in;
        qat_req->buf.bl = bufl;
        qat_req->buf.blp = blp;
        qat_req->buf.sz = sz;
@@ -764,10 +766,11 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                                       dev_to_node(&GET_DEV(inst->accel_dev)));
                if (unlikely(!buflout))
                        goto err_in;
-               bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(dev, bloutp)))
-                       goto err_out;
+
                bufers = buflout->bufers;
+               for_each_sg(sglout, sg, n, i)
+                       bufers[i].addr = DMA_MAPPING_ERROR;
+
                for_each_sg(sglout, sg, n, i) {
                        int y = sg_nctr;
 
@@ -784,6 +787,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                }
                buflout->num_bufs = sg_nctr;
                buflout->num_mapped_bufs = sg_nctr;
+               bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, bloutp)))
+                       goto err_out;
                qat_req->buf.blout = buflout;
                qat_req->buf.bloutp = bloutp;
                qat_req->buf.sz_out = sz_out;
@@ -795,17 +801,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
        return 0;
 
 err_out:
+       if (!dma_mapping_error(dev, bloutp))
+               dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+
        n = sg_nents(sglout);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, buflout->bufers[i].addr))
                        dma_unmap_single(dev, buflout->bufers[i].addr,
                                         buflout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
-       if (!dma_mapping_error(dev, bloutp))
-               dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
        kfree(buflout);
 
 err_in:
+       if (!dma_mapping_error(dev, blp))
+               dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
        n = sg_nents(sgl);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, bufl->bufers[i].addr))
@@ -813,8 +823,6 @@ err_in:
                                         bufl->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
 
-       if (!dma_mapping_error(dev, blp))
-               dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bufl);
 
        dev_err(dev, "Failed to map buf for dma\n");
index c972554a755e7c47191afc90e5d4a1c5794f85bb..29999da716cc949ed0333d539b5c86b7c57edeb6 100644 (file)
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto out_err_free_reg;
 
-       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
        ret = adf_dev_init(accel_dev);
        if (ret)
                goto out_err_dev_shutdown;
 
+       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
        ret = adf_dev_start(accel_dev);
        if (ret)
                goto out_err_dev_stop;
index cffa9fc628ffd38b13e6cd9f486067f0df778dc2..850f257d00f3aca0397adc1f703aea690c754d60 100644 (file)
@@ -40,7 +40,6 @@ struct qce_cipher_reqctx {
        struct scatterlist result_sg;
        struct sg_table dst_tbl;
        struct scatterlist *dst_sg;
-       struct sg_table src_tbl;
        struct scatterlist *src_sg;
        unsigned int cryptlen;
        struct skcipher_request fallback_req;   // keep at the end
index a73db2a5637f8b2aea8596c313774ccd364d8bb1..dceb9579d87a2d643d4dd3daa51c7ddf01dc29b3 100644 (file)
@@ -140,8 +140,7 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
        return cfg;
 }
 
-static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
-                               u32 totallen, u32 offset)
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
 {
        struct ahash_request *req = ahash_request_cast(async_req);
        struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
@@ -295,19 +294,18 @@ static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
 {
        u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
        unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
-       unsigned int xtsdusize;
 
        qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
                               enckeylen / 2);
        qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
 
-       /* xts du size 512B */
-       xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
-       qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+       /* Set data unit size to cryptlen. Anything else causes
+        * crypto engine to return back incorrect results.
+        */
+       qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
 }
 
-static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
-                                    u32 totallen, u32 offset)
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
 {
        struct skcipher_request *req = skcipher_request_cast(async_req);
        struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
@@ -367,7 +365,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
 
        qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
        qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
-       qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
+       qce_write(qce, REG_ENCR_SEG_START, 0);
 
        if (IS_CTR(flags)) {
                qce_write(qce, REG_CNTR_MASK, ~0);
@@ -376,7 +374,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
                qce_write(qce, REG_CNTR_MASK2, ~0);
        }
 
-       qce_write(qce, REG_SEG_SIZE, totallen);
+       qce_write(qce, REG_SEG_SIZE, rctx->cryptlen);
 
        /* get little endianness */
        config = qce_config_reg(qce, 1);
@@ -388,17 +386,16 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
 }
 #endif
 
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
-             u32 offset)
+int qce_start(struct crypto_async_request *async_req, u32 type)
 {
        switch (type) {
 #ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
        case CRYPTO_ALG_TYPE_SKCIPHER:
-               return qce_setup_regs_skcipher(async_req, totallen, offset);
+               return qce_setup_regs_skcipher(async_req);
 #endif
 #ifdef CONFIG_CRYPTO_DEV_QCE_SHA
        case CRYPTO_ALG_TYPE_AHASH:
-               return qce_setup_regs_ahash(async_req, totallen, offset);
+               return qce_setup_regs_ahash(async_req);
 #endif
        default:
                return -EINVAL;
index 85ba16418a049ec373058a2e25511c5764075f09..3bc244bcca2d9c2a21a55f6a6760b1ed8f8ff935 100644 (file)
@@ -94,7 +94,6 @@ struct qce_alg_template {
 void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
 int qce_check_status(struct qce_device *qce, u32 *status);
 void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
-             u32 offset);
+int qce_start(struct crypto_async_request *async_req, u32 type);
 
 #endif /* _COMMON_H_ */
index 61c418c123458f18244dc19ad1caea56a2821041..8e6fcf2c21cc0a8b1e532d549927a2468276c864 100644 (file)
 #include "core.h"
 #include "sha.h"
 
-/* crypto hw padding constant for first operation */
-#define SHA_PADDING            64
-#define SHA_PADDING_MASK       (SHA_PADDING - 1)
+struct qce_sha_saved_state {
+       u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE];
+       u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE];
+       __be32 byte_count[2];
+       unsigned int pending_buflen;
+       unsigned int flags;
+       u64 count;
+       bool first_blk;
+};
 
 static LIST_HEAD(ahash_algs);
 
@@ -107,7 +113,7 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
 
        qce_dma_issue_pending(&qce->dma);
 
-       ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+       ret = qce_start(async_req, tmpl->crypto_alg_type);
        if (ret)
                goto error_terminate;
 
@@ -139,97 +145,37 @@ static int qce_ahash_init(struct ahash_request *req)
 
 static int qce_ahash_export(struct ahash_request *req, void *out)
 {
-       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
-       unsigned long flags = rctx->flags;
-       unsigned int digestsize = crypto_ahash_digestsize(ahash);
-       unsigned int blocksize =
-                       crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-
-       if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
-               struct sha1_state *out_state = out;
-
-               out_state->count = rctx->count;
-               qce_cpu_to_be32p_array((__be32 *)out_state->state,
-                                      rctx->digest, digestsize);
-               memcpy(out_state->buffer, rctx->buf, blocksize);
-       } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
-               struct sha256_state *out_state = out;
-
-               out_state->count = rctx->count;
-               qce_cpu_to_be32p_array((__be32 *)out_state->state,
-                                      rctx->digest, digestsize);
-               memcpy(out_state->buf, rctx->buf, blocksize);
-       } else {
-               return -EINVAL;
-       }
+       struct qce_sha_saved_state *export_state = out;
 
-       return 0;
-}
-
-static int qce_import_common(struct ahash_request *req, u64 in_count,
-                            const u32 *state, const u8 *buffer, bool hmac)
-{
-       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
-       unsigned int digestsize = crypto_ahash_digestsize(ahash);
-       unsigned int blocksize;
-       u64 count = in_count;
-
-       blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-       rctx->count = in_count;
-       memcpy(rctx->buf, buffer, blocksize);
-
-       if (in_count <= blocksize) {
-               rctx->first_blk = 1;
-       } else {
-               rctx->first_blk = 0;
-               /*
-                * For HMAC, there is a hardware padding done when first block
-                * is set. Therefore the byte_count must be incremened by 64
-                * after the first block operation.
-                */
-               if (hmac)
-                       count += SHA_PADDING;
-       }
-
-       rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
-       rctx->byte_count[1] = (__force __be32)(count >> 32);
-       qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
-                              digestsize);
-       rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+       memcpy(export_state->pending_buf, rctx->buf, rctx->buflen);
+       memcpy(export_state->partial_digest, rctx->digest, sizeof(rctx->digest));
+       export_state->byte_count[0] = rctx->byte_count[0];
+       export_state->byte_count[1] = rctx->byte_count[1];
+       export_state->pending_buflen = rctx->buflen;
+       export_state->count = rctx->count;
+       export_state->first_blk = rctx->first_blk;
+       export_state->flags = rctx->flags;
 
        return 0;
 }
 
 static int qce_ahash_import(struct ahash_request *req, const void *in)
 {
-       struct qce_sha_reqctx *rctx;
-       unsigned long flags;
-       bool hmac;
-       int ret;
-
-       ret = qce_ahash_init(req);
-       if (ret)
-               return ret;
-
-       rctx = ahash_request_ctx(req);
-       flags = rctx->flags;
-       hmac = IS_SHA_HMAC(flags);
-
-       if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
-               const struct sha1_state *state = in;
-
-               ret = qce_import_common(req, state->count, state->state,
-                                       state->buffer, hmac);
-       } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
-               const struct sha256_state *state = in;
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       const struct qce_sha_saved_state *import_state = in;
 
-               ret = qce_import_common(req, state->count, state->state,
-                                       state->buf, hmac);
-       }
+       memset(rctx, 0, sizeof(*rctx));
+       rctx->count = import_state->count;
+       rctx->buflen = import_state->pending_buflen;
+       rctx->first_blk = import_state->first_blk;
+       rctx->flags = import_state->flags;
+       rctx->byte_count[0] = import_state->byte_count[0];
+       rctx->byte_count[1] = import_state->byte_count[1];
+       memcpy(rctx->buf, import_state->pending_buf, rctx->buflen);
+       memcpy(rctx->digest, import_state->partial_digest, sizeof(rctx->digest));
 
-       return ret;
+       return 0;
 }
 
 static int qce_ahash_update(struct ahash_request *req)
@@ -270,6 +216,25 @@ static int qce_ahash_update(struct ahash_request *req)
 
        /* calculate how many bytes will be hashed later */
        hash_later = total % blocksize;
+
+       /*
+        * At this point, there is more than one block size of data.  If
+        * the available data to transfer is exactly a multiple of block
+        * size, save the last block to be transferred in qce_ahash_final
+        * (with the last block bit set) if this is indeed the end of data
+        * stream. If not this saved block will be transferred as part of
+        * next update. If this block is not held back and if this is
+        * indeed the end of data stream, the digest obtained will be wrong
+        * since qce_ahash_final will see that rctx->buflen is 0 and return
+        * doing nothing which in turn means that a digest will not be
+        * copied to the destination result buffer.  qce_ahash_final cannot
+        * be made to alter this behavior and allowed to proceed if
+        * rctx->buflen is 0 because the crypto engine BAM does not allow
+        * for zero length transfers.
+        */
+       if (!hash_later)
+               hash_later = blocksize;
+
        if (hash_later) {
                unsigned int src_offset = req->nbytes - hash_later;
                scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
@@ -450,7 +415,7 @@ static const struct qce_ahash_def ahash_def[] = {
                .drv_name       = "sha1-qce",
                .digestsize     = SHA1_DIGEST_SIZE,
                .blocksize      = SHA1_BLOCK_SIZE,
-               .statesize      = sizeof(struct sha1_state),
+               .statesize      = sizeof(struct qce_sha_saved_state),
                .std_iv         = std_iv_sha1,
        },
        {
@@ -459,7 +424,7 @@ static const struct qce_ahash_def ahash_def[] = {
                .drv_name       = "sha256-qce",
                .digestsize     = SHA256_DIGEST_SIZE,
                .blocksize      = SHA256_BLOCK_SIZE,
-               .statesize      = sizeof(struct sha256_state),
+               .statesize      = sizeof(struct qce_sha_saved_state),
                .std_iv         = std_iv_sha256,
        },
        {
@@ -468,7 +433,7 @@ static const struct qce_ahash_def ahash_def[] = {
                .drv_name       = "hmac-sha1-qce",
                .digestsize     = SHA1_DIGEST_SIZE,
                .blocksize      = SHA1_BLOCK_SIZE,
-               .statesize      = sizeof(struct sha1_state),
+               .statesize      = sizeof(struct qce_sha_saved_state),
                .std_iv         = std_iv_sha1,
        },
        {
@@ -477,7 +442,7 @@ static const struct qce_ahash_def ahash_def[] = {
                .drv_name       = "hmac-sha256-qce",
                .digestsize     = SHA256_DIGEST_SIZE,
                .blocksize      = SHA256_BLOCK_SIZE,
-               .statesize      = sizeof(struct sha256_state),
+               .statesize      = sizeof(struct qce_sha_saved_state),
                .std_iv         = std_iv_sha256,
        },
 };
index a2d3da0ad95f3eecc66730b92313fecdd439e6c0..c0a0d8c4fce196e26155ccda0c0836b2c950c550 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/interrupt.h>
 #include <linux/moduleparam.h>
 #include <linux/types.h>
+#include <linux/errno.h>
 #include <crypto/aes.h>
 #include <crypto/internal/des.h>
 #include <crypto/internal/skcipher.h>
@@ -143,7 +144,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
 
        qce_dma_issue_pending(&qce->dma);
 
-       ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+       ret = qce_start(async_req, tmpl->crypto_alg_type);
        if (ret)
                goto error_terminate;
 
@@ -167,16 +168,33 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
        struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
+       unsigned int __keylen;
        int ret;
 
        if (!key || !keylen)
                return -EINVAL;
 
-       switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
+       /*
+        * AES XTS key1 = key2 not supported by crypto engine.
+        * Revisit to request a fallback cipher in this case.
+        */
+       if (IS_XTS(flags)) {
+               __keylen = keylen >> 1;
+               if (!memcmp(key, key + __keylen, __keylen))
+                       return -ENOKEY;
+       } else {
+               __keylen = keylen;
+       }
+
+       switch (__keylen) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_256:
                memcpy(ctx->enc_key, key, keylen);
                break;
+       case AES_KEYSIZE_192:
+               break;
+       default:
+               return -EINVAL;
        }
 
        ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
@@ -204,12 +222,27 @@ static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
                           unsigned int keylen)
 {
        struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+       u32 _key[6];
        int err;
 
        err = verify_skcipher_des3_key(ablk, key);
        if (err)
                return err;
 
+       /*
+        * The crypto engine does not support any two keys
+        * being the same for triple des algorithms. The
+        * verify_skcipher_des3_key does not check for all the
+        * below conditions. Return -ENOKEY in case any two keys
+        * are the same. Revisit to see if a fallback cipher
+        * is needed to handle this condition.
+        */
+       memcpy(_key, key, DES3_EDE_KEY_SIZE);
+       if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
+           !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
+           !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
+               return -ENOKEY;
+
        ctx->enc_keylen = keylen;
        memcpy(ctx->enc_key, key, keylen);
        return 0;
@@ -221,6 +254,7 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
        struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
        struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+       unsigned int blocksize = crypto_skcipher_blocksize(tfm);
        int keylen;
        int ret;
 
@@ -228,14 +262,31 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
        rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
        keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
 
-       /* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
-        * is not a multiple of it; pass such requests to the fallback
+       /* CE does not handle 0 length messages */
+       if (!req->cryptlen)
+               return 0;
+
+       /*
+        * ECB and CBC algorithms require message lengths to be
+        * multiples of block size.
+        */
+       if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
+               if (!IS_ALIGNED(req->cryptlen, blocksize))
+                       return -EINVAL;
+
+       /*
+        * Conditions for requesting a fallback cipher
+        * AES-192 (not supported by crypto engine (CE))
+        * AES-XTS request with len <= 512 byte (not recommended to use CE)
+        * AES-XTS request with len > QCE_SECTOR_SIZE and
+        * is not a multiple of it.(Revisit this condition to check if it is
+        * needed in all versions of CE)
         */
        if (IS_AES(rctx->flags) &&
-           (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
-             req->cryptlen <= aes_sw_max_len) ||
-            (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
-             req->cryptlen % QCE_SECTOR_SIZE))) {
+           ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
+           (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
+           (req->cryptlen > QCE_SECTOR_SIZE &&
+           req->cryptlen % QCE_SECTOR_SIZE))))) {
                skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
                skcipher_request_set_callback(&rctx->fallback_req,
                                              req->base.flags,
@@ -307,7 +358,7 @@ static const struct qce_skcipher_def skcipher_def[] = {
                .name           = "ecb(aes)",
                .drv_name       = "ecb-aes-qce",
                .blocksize      = AES_BLOCK_SIZE,
-               .ivsize         = AES_BLOCK_SIZE,
+               .ivsize         = 0,
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
        },
index 81befe7febaa42ca0025fa1f630424992f3c9fbd..ed03058497bc2863e1c6333fb42f025efd85291b 100644 (file)
@@ -48,7 +48,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
 {
        struct ahash_request *req = ahash_request_cast(dev->async_req);
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
-       int reg_status = 0;
+       int reg_status;
 
        reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
                     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
index 682c8a450a57b8514c8be439e81fcd85bc30979e..55aa3a71169b057d470374d41152ec2a26508d8d 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
 
@@ -401,7 +402,7 @@ static const struct samsung_aes_variant exynos_aes_data = {
 static const struct samsung_aes_variant exynos5433_slim_aes_data = {
        .aes_offset     = 0x400,
        .hash_offset    = 0x800,
-       .clk_names      = { "pclk", "aclk", },
+       .clk_names      = { "aclk", "pclk", },
 };
 
 static const struct of_device_id s5p_sss_dt_match[] = {
@@ -424,13 +425,9 @@ MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
 static inline const struct samsung_aes_variant *find_s5p_sss_version
                                   (const struct platform_device *pdev)
 {
-       if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
-               const struct of_device_id *match;
+       if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node))
+               return of_device_get_match_data(&pdev->dev);
 
-               match = of_match_node(s5p_sss_dt_match,
-                                       pdev->dev.of_node);
-               return (const struct samsung_aes_variant *)match->data;
-       }
        return (const struct samsung_aes_variant *)
                        platform_get_device_id(pdev)->driver_data;
 }
@@ -2159,7 +2156,7 @@ static struct skcipher_alg algs[] = {
 static int s5p_aes_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       int i, j, err = -ENODEV;
+       int i, j, err;
        const struct samsung_aes_variant *variant;
        struct s5p_aes_dev *pdata;
        struct resource *res;
@@ -2189,14 +2186,14 @@ static int s5p_aes_probe(struct platform_device *pdev)
        }
 
        pdata->res = res;
-       pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+       pdata->ioaddr = devm_ioremap_resource(dev, res);
        if (IS_ERR(pdata->ioaddr)) {
                if (!pdata->use_hash)
                        return PTR_ERR(pdata->ioaddr);
                /* try AES without HASH */
                res->end -= 0x300;
                pdata->use_hash = false;
-               pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+               pdata->ioaddr = devm_ioremap_resource(dev, res);
                if (IS_ERR(pdata->ioaddr))
                        return PTR_ERR(pdata->ioaddr);
        }
index f300b0a5958a595f4a40f1637fb4534343d667e9..1c6929fb3a13143bc1d172198f676bc239e1df52 100644 (file)
 /* Max Authentication tag size */
 #define SA_MAX_AUTH_TAG_SZ 64
 
-#define PRIV_ID        0x1
-#define PRIV   0x1
+enum sa_algo_id {
+       SA_ALG_CBC_AES = 0,
+       SA_ALG_EBC_AES,
+       SA_ALG_CBC_DES3,
+       SA_ALG_ECB_DES3,
+       SA_ALG_SHA1,
+       SA_ALG_SHA256,
+       SA_ALG_SHA512,
+       SA_ALG_AUTHENC_SHA1_AES,
+       SA_ALG_AUTHENC_SHA256_AES,
+};
+
+struct sa_match_data {
+       u8 priv;
+       u8 priv_id;
+       u32 supported_algos;
+       bool skip_engine_control;
+};
 
 static struct device *sa_k3_dev;
 
@@ -696,8 +712,9 @@ static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
 }
 
 static
-int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
-              u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
+int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data,
+              const u8 *enc_key, u16 enc_key_sz,
+              const u8 *auth_key, u16 auth_key_sz,
               struct algo_data *ad, u8 enc, u32 *swinfo)
 {
        int enc_sc_offset = 0;
@@ -732,8 +749,8 @@ int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
        sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
        memcpy(&sc_buf[2], &sc_id, 2);
        sc_buf[4] = 0x0;
-       sc_buf[5] = PRIV_ID;
-       sc_buf[6] = PRIV;
+       sc_buf[5] = match_data->priv_id;
+       sc_buf[6] = match_data->priv;
        sc_buf[7] = 0x0;
 
        /* Prepare context for encryption engine */
@@ -892,8 +909,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                return ret;
 
        /* Setup Encryption Security Context & Command label template */
-       if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
-                      &ctx->enc.epib[1]))
+       if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0,
+                      ad, 1, &ctx->enc.epib[1]))
                goto badkey;
 
        cmdl_len = sa_format_cmdl_gen(&cfg,
@@ -905,8 +922,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
        ctx->enc.cmdl_size = cmdl_len;
 
        /* Setup Decryption Security Context & Command label template */
-       if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
-                      &ctx->dec.epib[1]))
+       if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0,
+                      ad, 0, &ctx->dec.epib[1]))
                goto badkey;
 
        cfg.enc_eng_id = ad->enc_eng.eng_id;
@@ -1106,7 +1123,7 @@ static int sa_run(struct sa_req *req)
        else
                dma_rx = pdata->dma_rx1;
 
-       ddev = dma_rx->device->dev;
+       ddev = dmaengine_get_dma_device(pdata->dma_tx);
        rxd->ddev = ddev;
 
        memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
@@ -1146,8 +1163,10 @@ static int sa_run(struct sa_req *req)
                mapped_sg->sgt.sgl = src;
                mapped_sg->sgt.orig_nents = src_nents;
                ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
-               if (ret)
+               if (ret) {
+                       kfree(rxd);
                        return ret;
+               }
 
                mapped_sg->dir = dir_src;
                mapped_sg->mapped = true;
@@ -1155,8 +1174,10 @@ static int sa_run(struct sa_req *req)
                mapped_sg->sgt.sgl = req->src;
                mapped_sg->sgt.orig_nents = sg_nents;
                ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
-               if (ret)
+               if (ret) {
+                       kfree(rxd);
                        return ret;
+               }
 
                mapped_sg->dir = dir_src;
                mapped_sg->mapped = true;
@@ -1446,9 +1467,10 @@ static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct  algo_data *ad)
        cfg.akey = NULL;
        cfg.akey_len = 0;
 
+       ctx->dev_data = dev_get_drvdata(sa_k3_dev);
        /* Setup Encryption Security Context & Command label template */
-       if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
-                      &ctx->enc.epib[1]))
+       if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0,
+                      ad, 0, &ctx->enc.epib[1]))
                goto badkey;
 
        cmdl_len = sa_format_cmdl_gen(&cfg,
@@ -1716,6 +1738,7 @@ static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
        int ret;
 
        memzero_explicit(ctx, sizeof(*ctx));
+       ctx->dev_data = data;
 
        ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->shash)) {
@@ -1817,8 +1840,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
        cfg.akey_len = keys.authkeylen;
 
        /* Setup Encryption Security Context & Command label template */
-       if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
-                      keys.authkey, keys.authkeylen,
+       if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey,
+                      keys.enckeylen, keys.authkey, keys.authkeylen,
                       ad, 1, &ctx->enc.epib[1]))
                return -EINVAL;
 
@@ -1831,8 +1854,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
        ctx->enc.cmdl_size = cmdl_len;
 
        /* Setup Decryption Security Context & Command label template */
-       if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
-                      keys.authkey, keys.authkeylen,
+       if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey,
+                      keys.enckeylen, keys.authkey, keys.authkeylen,
                       ad, 0, &ctx->dec.epib[1]))
                return -EINVAL;
 
@@ -1950,7 +1973,7 @@ static int sa_aead_decrypt(struct aead_request *req)
 }
 
 static struct sa_alg_tmpl sa_algs[] = {
-       {
+       [SA_ALG_CBC_AES] = {
                .type = CRYPTO_ALG_TYPE_SKCIPHER,
                .alg.skcipher = {
                        .base.cra_name          = "cbc(aes)",
@@ -1973,7 +1996,7 @@ static struct sa_alg_tmpl sa_algs[] = {
                        .decrypt                = sa_decrypt,
                }
        },
-       {
+       [SA_ALG_EBC_AES] = {
                .type = CRYPTO_ALG_TYPE_SKCIPHER,
                .alg.skcipher = {
                        .base.cra_name          = "ecb(aes)",
@@ -1995,7 +2018,7 @@ static struct sa_alg_tmpl sa_algs[] = {
                        .decrypt                = sa_decrypt,
                }
        },
-       {
+       [SA_ALG_CBC_DES3] = {
                .type = CRYPTO_ALG_TYPE_SKCIPHER,
                .alg.skcipher = {
                        .base.cra_name          = "cbc(des3_ede)",
@@ -2018,7 +2041,7 @@ static struct sa_alg_tmpl sa_algs[] = {
                        .decrypt                = sa_decrypt,
                }
        },
-       {
+       [SA_ALG_ECB_DES3] = {
                .type = CRYPTO_ALG_TYPE_SKCIPHER,
                .alg.skcipher = {
                        .base.cra_name          = "ecb(des3_ede)",
@@ -2040,7 +2063,7 @@ static struct sa_alg_tmpl sa_algs[] = {
                        .decrypt                = sa_decrypt,
                }
        },
-       {
+       [SA_ALG_SHA1] = {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .alg.ahash = {
                        .halg.base = {
@@ -2069,7 +2092,7 @@ static struct sa_alg_tmpl sa_algs[] = {
                        .import                 = sa_sha_import,
                },
        },
-       {
+       [SA_ALG_SHA256] = {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .alg.ahash = {
                        .halg.base = {
@@ -2098,7 +2121,7 @@ static struct sa_alg_tmpl sa_algs[] = {
                        .import                 = sa_sha_import,
                },
        },
-       {
+       [SA_ALG_SHA512] = {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .alg.ahash = {
                        .halg.base = {
@@ -2127,7 +2150,7 @@ static struct sa_alg_tmpl sa_algs[] = {
                        .import                 = sa_sha_import,
                },
        },
-       {
+       [SA_ALG_AUTHENC_SHA1_AES] = {
                .type   = CRYPTO_ALG_TYPE_AEAD,
                .alg.aead = {
                        .base = {
@@ -2154,7 +2177,7 @@ static struct sa_alg_tmpl sa_algs[] = {
                        .decrypt = sa_aead_decrypt,
                },
        },
-       {
+       [SA_ALG_AUTHENC_SHA256_AES] = {
                .type   = CRYPTO_ALG_TYPE_AEAD,
                .alg.aead = {
                        .base = {
@@ -2185,13 +2208,19 @@ static struct sa_alg_tmpl sa_algs[] = {
 };
 
 /* Register the algorithms in crypto framework */
-static void sa_register_algos(const struct device *dev)
+static void sa_register_algos(struct sa_crypto_data *dev_data)
 {
+       const struct sa_match_data *match_data = dev_data->match_data;
+       struct device *dev = dev_data->dev;
        char *alg_name;
        u32 type;
        int i, err;
 
        for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
+               /* Skip unsupported algos */
+               if (!(match_data->supported_algos & BIT(i)))
+                       continue;
+
                type = sa_algs[i].type;
                if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
                        alg_name = sa_algs[i].alg.skcipher.base.cra_name;
@@ -2329,14 +2358,39 @@ static int sa_link_child(struct device *dev, void *data)
        return 0;
 }
 
+static struct sa_match_data am654_match_data = {
+       .priv = 1,
+       .priv_id = 1,
+       .supported_algos = GENMASK(SA_ALG_AUTHENC_SHA256_AES, 0),
+};
+
+static struct sa_match_data am64_match_data = {
+       .priv = 0,
+       .priv_id = 0,
+       .supported_algos = BIT(SA_ALG_CBC_AES) |
+                          BIT(SA_ALG_EBC_AES) |
+                          BIT(SA_ALG_SHA256) |
+                          BIT(SA_ALG_SHA512) |
+                          BIT(SA_ALG_AUTHENC_SHA256_AES),
+       .skip_engine_control = true,
+};
+
+static const struct of_device_id of_match[] = {
+       { .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
+       { .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
+       { .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
 static int sa_ul_probe(struct platform_device *pdev)
 {
+       const struct of_device_id *match;
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node;
        struct resource *res;
        static void __iomem *saul_base;
        struct sa_crypto_data *dev_data;
-       u32 val;
        int ret;
 
        dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
@@ -2350,7 +2404,7 @@ static int sa_ul_probe(struct platform_device *pdev)
        dev_set_drvdata(sa_k3_dev, dev_data);
 
        pm_runtime_enable(dev);
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
                        ret);
@@ -2362,18 +2416,28 @@ static int sa_ul_probe(struct platform_device *pdev)
        if (ret)
                goto disable_pm_runtime;
 
+       match = of_match_node(of_match, dev->of_node);
+       if (!match) {
+               dev_err(dev, "No compatible match found\n");
+               return -ENODEV;
+       }
+       dev_data->match_data = match->data;
+
        spin_lock_init(&dev_data->scid_lock);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        saul_base = devm_ioremap_resource(dev, res);
 
        dev_data->base = saul_base;
-       val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
-           SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
-           SA_EEC_TRNG_EN;
 
-       writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+       if (!dev_data->match_data->skip_engine_control) {
+               u32 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
+                         SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
+                         SA_EEC_TRNG_EN;
 
-       sa_register_algos(dev);
+               writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+       }
+
+       sa_register_algos(dev_data);
 
        ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
        if (ret)
@@ -2419,13 +2483,6 @@ static int sa_ul_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct of_device_id of_match[] = {
-       {.compatible = "ti,j721e-sa2ul",},
-       {.compatible = "ti,am654-sa2ul",},
-       {},
-};
-MODULE_DEVICE_TABLE(of, of_match);
-
 static struct platform_driver sa_ul_driver = {
        .probe = sa_ul_probe,
        .remove = sa_ul_remove,
index f597ddecde34f2c626a8440a6cb55fdb0f05a155..ed66d1f111db508e3c35e15352e79139cf4a4d4b 100644 (file)
@@ -171,9 +171,12 @@ struct sa_tfm_ctx;
 #define SA_UNSAFE_DATA_SZ_MIN  240
 #define SA_UNSAFE_DATA_SZ_MAX  256
 
+struct sa_match_data;
+
 /**
  * struct sa_crypto_data - Crypto driver instance data
  * @base: Base address of the register space
+ * @soc_data: Pointer to SoC specific data
  * @pdev: Platform device pointer
  * @sc_pool: security context pool
  * @dev: Device pointer
@@ -189,6 +192,7 @@ struct sa_tfm_ctx;
  */
 struct sa_crypto_data {
        void __iomem *base;
+       const struct sa_match_data *match_data;
        struct platform_device  *pdev;
        struct dma_pool         *sc_pool;
        struct device *dev;
index 2a4793176c713a98370bca7742614872b7aa238f..7389a0536ff020f782a8902c65a8875ef88ccb62 100644 (file)
@@ -542,7 +542,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
        int ret;
        u32 cfg, hw_mode;
 
-       pm_runtime_get_sync(cryp->dev);
+       pm_runtime_resume_and_get(cryp->dev);
 
        /* Disable interrupt */
        stm32_cryp_write(cryp, CRYP_IMSCR, 0);
@@ -2043,7 +2043,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
        if (!cryp)
                return -ENODEV;
 
-       ret = pm_runtime_get_sync(cryp->dev);
+       ret = pm_runtime_resume_and_get(cryp->dev);
        if (ret < 0)
                return ret;
 
index 7ac0573ef66300ec95d643f56973e39d5647d220..389de9e3302d5658c92e1eb5c7cc3cc4f100a2ac 100644 (file)
@@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
 static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
                              struct stm32_hash_request_ctx *rctx)
 {
-       pm_runtime_get_sync(hdev->dev);
+       pm_runtime_resume_and_get(hdev->dev);
 
        if (!(HASH_FLAGS_INIT & hdev->flags)) {
                stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
@@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
        u32 *preg;
        unsigned int i;
 
-       pm_runtime_get_sync(hdev->dev);
+       pm_runtime_resume_and_get(hdev->dev);
 
        while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
                cpu_relax();
@@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
 
        preg = rctx->hw_context;
 
-       pm_runtime_get_sync(hdev->dev);
+       pm_runtime_resume_and_get(hdev->dev);
 
        stm32_hash_write(hdev, HASH_IMR, *preg++);
        stm32_hash_write(hdev, HASH_STR, *preg++);
@@ -1566,7 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
        if (!hdev)
                return -ENODEV;
 
-       ret = pm_runtime_get_sync(hdev->dev);
+       ret = pm_runtime_resume_and_get(hdev->dev);
        if (ret < 0)
                return ret;
 
index 9866c2a5e9a70133ce110fdc8187e8a25f279715..759d0d9786fd19019728a212d76f1d70040f8ae8 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * Copyright (C) ST-Ericsson SA 2010
  * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
  * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -15,7 +15,7 @@
 #include "cryp_p.h"
 #include "cryp.h"
 
-/**
+/*
  * cryp_wait_until_done - wait until the device logic is not busy
  */
 void cryp_wait_until_done(struct cryp_device_data *device_data)
@@ -285,6 +285,7 @@ int cryp_configure_init_vector(struct cryp_device_data *device_data,
  *                             other device context parameter
  * @device_data: Pointer to the device data struct for base address.
  * @ctx: Crypto device context
+ * @cryp_mode: Mode: Polling, Interrupt or DMA
  */
 void cryp_save_device_context(struct cryp_device_data *device_data,
                              struct cryp_device_context *ctx,
index 8da7f87b339b4785edece0bcfde305a0f96ffb01..db5713d7c9405d4f973c97436f4d5e677c6e2ea1 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
  * Copyright (C) ST-Ericsson SA 2010
  * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
  * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
index c3adeb2e58232e80da7e8fa4e392025eee365081..30cdd5253929c4fc737498554186d36356834f95 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * Copyright (C) ST-Ericsson SA 2010
  * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
  * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
@@ -62,7 +62,7 @@ struct cryp_driver_data {
 /**
  * struct cryp_ctx - Crypto context
  * @config: Crypto mode.
- * @key[CRYP_MAX_KEY_SIZE]: Key.
+ * @key: Key array.
  * @keylen: Length of key.
  * @iv: Pointer to initialization vector.
  * @indata: Pointer to indata.
@@ -73,6 +73,7 @@ struct cryp_driver_data {
  * @updated: Updated flag.
  * @dev_ctx: Device dependent context.
  * @device: Pointer to the device.
+ * @session_id: Atomic session ID.
  */
 struct cryp_ctx {
        struct cryp_config config;
@@ -608,12 +609,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx)
        chan = ctx->device->dma.chan_mem2cryp;
        dmaengine_terminate_all(chan);
        dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
-                    ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
+                    ctx->device->dma.nents_src, DMA_TO_DEVICE);
 
        chan = ctx->device->dma.chan_cryp2mem;
        dmaengine_terminate_all(chan);
        dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
-                    ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
+                    ctx->device->dma.nents_dst, DMA_FROM_DEVICE);
 }
 
 static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
@@ -1290,7 +1291,6 @@ static int ux500_cryp_probe(struct platform_device *pdev)
        device_data->phybase = res->start;
        device_data->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(device_data->base)) {
-               dev_err(dev, "[%s]: ioremap failed!", __func__);
                ret = PTR_ERR(device_data->base);
                goto out;
        }
index 7ebde69e8c76b839435de7662cb909eb16be787a..6d2f07bec98a7e74beba3c0297de40fa11538094 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * Copyright (C) ST-Ericsson SA 2010
  * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
  * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
index 1984f30100ff7d99df3c2a58c01f2ec03f660376..da90029ea141240bb5c57d98eb31c28043036c13 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
  * Copyright (C) ST-Ericsson SA 2010
  * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
  * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -19,7 +19,7 @@ enum cryp_irq_src_id {
        CRYP_IRQ_SRC_ALL = 0x3
 };
 
-/**
+/*
  * M0 Funtions
  */
 void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
index 879ed68a12d7d52142bfb8631926b9c6d3057596..4981a3f461e5ed6f8bc0e1c378749ce5a857a956 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
  * Copyright (C) ST-Ericsson SA 2010
  * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
  * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -13,7 +13,7 @@
 
 #include "cryp_irq.h"
 
-/**
+/*
  *
  * CRYP Registers - Offset mapping
  *     +-----------------+
index 0df84eaa853141734ef7ea26120caf19afbc3d2e..60b47fe4de35d9b29c3ed2a0e902254f6de1bd38 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
  * Copyright (C) ST-Ericsson SA 2010
  * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
  * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -17,7 +17,7 @@
 #include "cryp.h"
 #include "cryp_irqp.h"
 
-/**
+/*
  * Generic Macros
  */
 #define CRYP_SET_BITS(reg_name, mask) \
@@ -34,7 +34,7 @@
        writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \
                (((u32)val << shift) & (mask))), reg)
 
-/**
+/*
  * CRYP specific Macros
  */
 #define CRYP_PERIPHERAL_ID0            0xE3
@@ -48,7 +48,7 @@
 #define CRYP_PCELL_ID2                 0x05
 #define CRYP_PCELL_ID3                 0xB1
 
-/**
+/*
  * CRYP register default values
  */
 #define MAX_DEVICE_SUPPORT             2
@@ -62,7 +62,7 @@
 #define CRYP_KEY_DEFAULT               0x0
 #define CRYP_INIT_VECT_DEFAULT         0x0
 
-/**
+/*
  * CRYP Control register specific mask
  */
 #define CRYP_CR_SECURE_MASK            BIT(0)
@@ -81,7 +81,6 @@
                                         CRYP_CR_PRLG_MASK |\
                                         CRYP_CR_ALGODIR_MASK |\
                                         CRYP_CR_ALGOMODE_MASK |\
-                                        CRYP_CR_DATATYPE_MASK |\
                                         CRYP_CR_KEYSIZE_MASK |\
                                         CRYP_CR_KEYRDEN_MASK |\
                                         CRYP_CR_DATATYPE_MASK)
@@ -91,7 +90,7 @@
 #define CRYP_SR_IFEM_MASK              BIT(0)
 #define CRYP_SR_BUSY_MASK              BIT(4)
 
-/**
+/*
  * Bit position used while setting bits in register
  */
 #define CRYP_CR_PRLG_POS               1
 
 #define CRYP_SR_BUSY_POS               4
 
-/**
+/*
  * CRYP PCRs------PC_NAND control register
  * BIT_MASK
  */
index da284b0ea1b26e72b0af24d8375c8d3be7ec1d23..ecb7412e84e3eb96c3e682368455b212ed2e7e5f 100644 (file)
@@ -190,7 +190,7 @@ static void hash_dma_done(struct hash_ctx *ctx)
        chan = ctx->device->dma.chan_mem2hash;
        dmaengine_terminate_all(chan);
        dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
-                    ctx->device->dma.sg_len, DMA_TO_DEVICE);
+                    ctx->device->dma.nents, DMA_TO_DEVICE);
 }
 
 static int hash_dma_write(struct hash_ctx *ctx,
@@ -356,7 +356,7 @@ out:
 
 /**
  * hash_get_device_data - Checks for an available hash device and return it.
- * @hash_ctx:          Structure for the hash context.
+ * @ctx:               Structure for the hash context.
  * @device_data:       Structure for the hash device.
  *
  * This function check for an available hash device and return it to
@@ -542,7 +542,7 @@ static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
 }
 
 /**
- * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
+ * ux500_hash_init - Common hash init function for SHA1/SHA2 (SHA256).
  * @req: The hash request for the job.
  *
  * Initialize structures.
@@ -585,6 +585,7 @@ static int ux500_hash_init(struct ahash_request *req)
  * @device_data:       Structure for the hash device.
  * @message:           Block (512 bits) of message to be written to
  *                     the HASH hardware.
+ * @length:            Message length
  *
  */
 static void hash_processblock(struct hash_device_data *device_data,
@@ -1295,7 +1296,7 @@ void hash_get_digest(struct hash_device_data *device_data,
 }
 
 /**
- * hash_update - The hash update function for SHA1/SHA2 (SHA256).
+ * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
  * @req: The hash request for the job.
  */
 static int ahash_update(struct ahash_request *req)
@@ -1315,7 +1316,7 @@ static int ahash_update(struct ahash_request *req)
 }
 
 /**
- * hash_final - The hash final function for SHA1/SHA2 (SHA256).
+ * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
  * @req:       The hash request for the job.
  */
 static int ahash_final(struct ahash_request *req)
@@ -1615,9 +1616,6 @@ static struct hash_algo_template hash_algs[] = {
        }
 };
 
-/**
- * hash_algs_register_all -
- */
 static int ahash_algs_register_all(struct hash_device_data *device_data)
 {
        int ret;
@@ -1640,9 +1638,6 @@ unreg:
        return ret;
 }
 
-/**
- * hash_algs_unregister_all -
- */
 static void ahash_algs_unregister_all(struct hash_device_data *device_data)
 {
        int i;
@@ -1681,7 +1676,6 @@ static int ux500_hash_probe(struct platform_device *pdev)
        device_data->phybase = res->start;
        device_data->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(device_data->base)) {
-               dev_err(dev, "%s: ioremap() failed!\n", __func__);
                ret = PTR_ERR(device_data->base);
                goto out;
        }
index d05c02baebcf0ce541a85f74cc02c688b33bfe35..ec06189fbf9963b840dbc61ed9c6c2d9d428330e 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES routines supporting VMX instructions on the Power 8
  *
  * Copyright (C) 2015 International Business Machines Inc.
index d88084447f1cb59937273d78be8618ebc9526282..ed0debc7acb5fb91a29e7a04a2e9510f72076dfb 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES CBC routines supporting VMX instructions on the Power 8
  *
  * Copyright (C) 2015 International Business Machines Inc.
index 79ba062ee1c1f92b609a4f6725ffa0aa2945c75a..9a3da8cd62f3595efd747e5b4743b1b8dfb4a747 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES CTR routines supporting VMX instructions on the Power 8
  *
  * Copyright (C) 2015 International Business Machines Inc.
index 9fee1b1532a4c14d0ebb7c468c47a15e3fa53173..dabbccb415502ccebb7b335ece652cc627ddcc60 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES XTS routines supporting VMX In-core instructions on Power 8
  *
  * Copyright (C) 2015 International Business Machines Inc.
index 14807ac2e3b9f0dea9399a172cebda9adaa62879..5bc5710a6de0b5e9b1c32dbf521fedc11bdd3f9f 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * GHASH routines supporting VMX instructions on the Power 8
  *
  * Copyright (C) 2015, 2019 International Business Machines Inc.
index a40d08e75fc0bae31c5e379f03170d9f9dae134d..7eb713cc87c8cadc6b7c7bc5c251f8919f8c8790 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * Routines supporting VMX instructions on the Power 8
  *
  * Copyright (C) 2015 International Business Machines Inc.
index 244cb7d89678b24a9cd21de12e2142530fd553ec..2acc6173da36cee0f5630ebcad4ddcea07096b17 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/security.h>
 #include <linux/debugfs.h>
 #include <linux/module.h>
+#include <linux/sizes.h>
 #include <linux/mutex.h>
 #include <linux/cdev.h>
 #include <linux/idr.h>
@@ -96,21 +97,18 @@ struct mbox_cmd {
  * @dev: driver core device object
  * @cdev: char dev core object for ioctl operations
  * @cxlm: pointer to the parent device driver data
- * @ops_active: active user of @cxlm in ops handlers
- * @ops_dead: completion when all @cxlm ops users have exited
  * @id: id number of this memdev instance.
  */
 struct cxl_memdev {
        struct device dev;
        struct cdev cdev;
        struct cxl_mem *cxlm;
-       struct percpu_ref ops_active;
-       struct completion ops_dead;
        int id;
 };
 
 static int cxl_mem_major;
 static DEFINE_IDA(cxl_memdev_ida);
+static DECLARE_RWSEM(cxl_memdev_rwsem);
 static struct dentry *cxl_debugfs;
 static bool cxl_raw_allow_all;
 
@@ -169,7 +167,7 @@ struct cxl_mem_command {
  * table will be validated against the user's input. For example, if size_in is
  * 0, and the user passed in 1, it is an error.
  */
-static struct cxl_mem_command mem_commands[] = {
+static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
        CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
 #ifdef CONFIG_CXL_MEM_RAW_COMMANDS
        CXL_CMD(RAW, ~0, ~0, 0),
@@ -776,26 +774,43 @@ static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
 static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
 {
-       struct cxl_memdev *cxlmd;
-       struct inode *inode;
-       int rc = -ENOTTY;
+       struct cxl_memdev *cxlmd = file->private_data;
+       int rc = -ENXIO;
 
-       inode = file_inode(file);
-       cxlmd = container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+       down_read(&cxl_memdev_rwsem);
+       if (cxlmd->cxlm)
+               rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+       up_read(&cxl_memdev_rwsem);
 
-       if (!percpu_ref_tryget_live(&cxlmd->ops_active))
-               return -ENXIO;
+       return rc;
+}
 
-       rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+static int cxl_memdev_open(struct inode *inode, struct file *file)
+{
+       struct cxl_memdev *cxlmd =
+               container_of(inode->i_cdev, typeof(*cxlmd), cdev);
 
-       percpu_ref_put(&cxlmd->ops_active);
+       get_device(&cxlmd->dev);
+       file->private_data = cxlmd;
 
-       return rc;
+       return 0;
+}
+
+static int cxl_memdev_release_file(struct inode *inode, struct file *file)
+{
+       struct cxl_memdev *cxlmd =
+               container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+
+       put_device(&cxlmd->dev);
+
+       return 0;
 }
 
 static const struct file_operations cxl_memdev_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = cxl_memdev_ioctl,
+       .open = cxl_memdev_open,
+       .release = cxl_memdev_release_file,
        .compat_ioctl = compat_ptr_ioctl,
        .llseek = noop_llseek,
 };
@@ -984,7 +999,7 @@ static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev, u32 reg_lo,
                return NULL;
        }
 
-       offset = ((u64)reg_hi << 32) | FIELD_GET(CXL_REGLOC_ADDR_MASK, reg_lo);
+       offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
        bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
 
        /* Basic sanity check that BAR is big enough */
@@ -1049,7 +1064,6 @@ static void cxl_memdev_release(struct device *dev)
 {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 
-       percpu_ref_exit(&cxlmd->ops_active);
        ida_free(&cxl_memdev_ida, cxlmd->id);
        kfree(cxlmd);
 }
@@ -1066,7 +1080,7 @@ static ssize_t firmware_version_show(struct device *dev,
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_mem *cxlm = cxlmd->cxlm;
 
-       return sprintf(buf, "%.16s\n", cxlm->firmware_version);
+       return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
 }
 static DEVICE_ATTR_RO(firmware_version);
 
@@ -1076,7 +1090,7 @@ static ssize_t payload_max_show(struct device *dev,
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_mem *cxlm = cxlmd->cxlm;
 
-       return sprintf(buf, "%zu\n", cxlm->payload_size);
+       return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
 }
 static DEVICE_ATTR_RO(payload_max);
 
@@ -1087,7 +1101,7 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
        struct cxl_mem *cxlm = cxlmd->cxlm;
        unsigned long long len = range_len(&cxlm->ram_range);
 
-       return sprintf(buf, "%#llx\n", len);
+       return sysfs_emit(buf, "%#llx\n", len);
 }
 
 static struct device_attribute dev_attr_ram_size =
@@ -1100,7 +1114,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
        struct cxl_mem *cxlm = cxlmd->cxlm;
        unsigned long long len = range_len(&cxlm->pmem_range);
 
-       return sprintf(buf, "%#llx\n", len);
+       return sysfs_emit(buf, "%#llx\n", len);
 }
 
 static struct device_attribute dev_attr_pmem_size =
@@ -1150,27 +1164,24 @@ static const struct device_type cxl_memdev_type = {
        .groups = cxl_memdev_attribute_groups,
 };
 
-static void cxlmdev_unregister(void *_cxlmd)
+static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
 {
-       struct cxl_memdev *cxlmd = _cxlmd;
-       struct device *dev = &cxlmd->dev;
-
-       percpu_ref_kill(&cxlmd->ops_active);
-       cdev_device_del(&cxlmd->cdev, dev);
-       wait_for_completion(&cxlmd->ops_dead);
+       down_write(&cxl_memdev_rwsem);
        cxlmd->cxlm = NULL;
-       put_device(dev);
+       up_write(&cxl_memdev_rwsem);
 }
 
-static void cxlmdev_ops_active_release(struct percpu_ref *ref)
+static void cxl_memdev_unregister(void *_cxlmd)
 {
-       struct cxl_memdev *cxlmd =
-               container_of(ref, typeof(*cxlmd), ops_active);
+       struct cxl_memdev *cxlmd = _cxlmd;
+       struct device *dev = &cxlmd->dev;
 
-       complete(&cxlmd->ops_dead);
+       cdev_device_del(&cxlmd->cdev, dev);
+       cxl_memdev_shutdown(cxlmd);
+       put_device(dev);
 }
 
-static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
 {
        struct pci_dev *pdev = cxlm->pdev;
        struct cxl_memdev *cxlmd;
@@ -1180,22 +1191,11 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
 
        cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
        if (!cxlmd)
-               return -ENOMEM;
-       init_completion(&cxlmd->ops_dead);
-
-       /*
-        * @cxlm is deallocated when the driver unbinds so operations
-        * that are using it need to hold a live reference.
-        */
-       cxlmd->cxlm = cxlm;
-       rc = percpu_ref_init(&cxlmd->ops_active, cxlmdev_ops_active_release, 0,
-                            GFP_KERNEL);
-       if (rc)
-               goto err_ref;
+               return ERR_PTR(-ENOMEM);
 
        rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
        if (rc < 0)
-               goto err_id;
+               goto err;
        cxlmd->id = rc;
 
        dev = &cxlmd->dev;
@@ -1204,30 +1204,54 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
        dev->bus = &cxl_bus_type;
        dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
        dev->type = &cxl_memdev_type;
-       dev_set_name(dev, "mem%d", cxlmd->id);
+       device_set_pm_not_required(dev);
 
        cdev = &cxlmd->cdev;
        cdev_init(cdev, &cxl_memdev_fops);
+       return cxlmd;
+
+err:
+       kfree(cxlmd);
+       return ERR_PTR(rc);
+}
+
+static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+{
+       struct cxl_memdev *cxlmd;
+       struct device *dev;
+       struct cdev *cdev;
+       int rc;
+
+       cxlmd = cxl_memdev_alloc(cxlm);
+       if (IS_ERR(cxlmd))
+               return PTR_ERR(cxlmd);
+
+       dev = &cxlmd->dev;
+       rc = dev_set_name(dev, "mem%d", cxlmd->id);
+       if (rc)
+               goto err;
+
+       /*
+        * Activate ioctl operations, no cxl_memdev_rwsem manipulation
+        * needed as this is ordered with cdev_add() publishing the device.
+        */
+       cxlmd->cxlm = cxlm;
 
+       cdev = &cxlmd->cdev;
        rc = cdev_device_add(cdev, dev);
        if (rc)
-               goto err_add;
+               goto err;
 
-       return devm_add_action_or_reset(dev->parent, cxlmdev_unregister, cxlmd);
+       return devm_add_action_or_reset(dev->parent, cxl_memdev_unregister,
+                                       cxlmd);
 
-err_add:
-       ida_free(&cxl_memdev_ida, cxlmd->id);
-err_id:
+err:
        /*
-        * Theoretically userspace could have already entered the fops,
-        * so flush ops_active.
+        * The cdev was briefly live, shutdown any ioctl operations that
+        * saw that state.
         */
-       percpu_ref_kill(&cxlmd->ops_active);
-       wait_for_completion(&cxlmd->ops_dead);
-       percpu_ref_exit(&cxlmd->ops_active);
-err_ref:
-       kfree(cxlmd);
-
+       cxl_memdev_shutdown(cxlmd);
+       put_device(dev);
        return rc;
 }
 
@@ -1396,6 +1420,7 @@ out:
  */
 static int cxl_mem_identify(struct cxl_mem *cxlm)
 {
+       /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
        struct cxl_mbox_identify {
                char fw_revision[0x10];
                __le64 total_capacity;
@@ -1424,10 +1449,11 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
         * For now, only the capacity is exported in sysfs
         */
        cxlm->ram_range.start = 0;
-       cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) - 1;
+       cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;
 
        cxlm->pmem_range.start = 0;
-       cxlm->pmem_range.end = le64_to_cpu(id.persistent_capacity) - 1;
+       cxlm->pmem_range.end =
+               le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;
 
        memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
 
index 452e85ae87a836611056720c5dcebb9938bdf612..5aee26e1bbd6dba393d8d33fe56c52cf9014fcfb 100644 (file)
@@ -90,13 +90,11 @@ static ssize_t do_id_store(struct device_driver *drv, const char *buf,
                                list_add(&dax_id->list, &dax_drv->ids);
                        } else
                                rc = -ENOMEM;
-               } else
-                       /* nothing to remove */;
+               }
        } else if (action == ID_REMOVE) {
                list_del(&dax_id->list);
                kfree(dax_id);
-       } else
-               /* dax_id already added */;
+       }
        mutex_unlock(&dax_bus_lock);
 
        if (rc < 0)
index fe6a460c4373503f0f4df4124d20e1a5a094db2b..af3ee288bc11793c6b155865336f7aff288be585 100644 (file)
@@ -1086,6 +1086,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
        kfree(chan->dev);
  err_free_local:
        free_percpu(chan->local);
+       chan->local = NULL;
        return rc;
 }
 
index e5162690de8f1c375af087a2527ad8f921facf91..db25f9b7778c9eed2f5292436be7f0e9e11ebe29 100644 (file)
@@ -10,6 +10,7 @@ config DW_DMAC_CORE
 
 config DW_DMAC
        tristate "Synopsys DesignWare AHB DMA platform driver"
+       depends on HAS_IOMEM
        select DW_DMAC_CORE
        help
          Support the Synopsys DesignWare AHB DMA controller. This
@@ -18,6 +19,7 @@ config DW_DMAC
 config DW_DMAC_PCI
        tristate "Synopsys DesignWare AHB DMA PCI driver"
        depends on PCI
+       depends on HAS_IOMEM
        select DW_DMAC_CORE
        help
          Support the Synopsys DesignWare AHB DMA controller on the
index 84a6ea60ecf0bddd6107312fc69bff2a19e2c28a..31c819544a229b02e1cceef9ef05f57fb0241261 100644 (file)
@@ -282,6 +282,22 @@ void idxd_wq_drain(struct idxd_wq *wq)
        idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
 }
 
+void idxd_wq_reset(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       u32 operand;
+
+       if (wq->state != IDXD_WQ_ENABLED) {
+               dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+               return;
+       }
+
+       operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+       idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
+       wq->state = IDXD_WQ_DISABLED;
+}
+
 int idxd_wq_map_portal(struct idxd_wq *wq)
 {
        struct idxd_device *idxd = wq->idxd;
@@ -363,8 +379,6 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
 void idxd_wq_disable_cleanup(struct idxd_wq *wq)
 {
        struct idxd_device *idxd = wq->idxd;
-       struct device *dev = &idxd->pdev->dev;
-       int i, wq_offset;
 
        lockdep_assert_held(&idxd->dev_lock);
        memset(wq->wqcfg, 0, idxd->wqcfg_size);
@@ -376,14 +390,6 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
        wq->ats_dis = 0;
        clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
        memset(wq->name, 0, WQ_NAME_SIZE);
-
-       for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
-               wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
-               iowrite32(0, idxd->reg_base + wq_offset);
-               dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
-                       wq->id, i, wq_offset,
-                       ioread32(idxd->reg_base + wq_offset));
-       }
 }
 
 /* Device control bits */
@@ -574,6 +580,36 @@ void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
 }
 
 /* Device configuration bits */
+void idxd_msix_perm_setup(struct idxd_device *idxd)
+{
+       union msix_perm mperm;
+       int i, msixcnt;
+
+       msixcnt = pci_msix_vec_count(idxd->pdev);
+       if (msixcnt < 0)
+               return;
+
+       mperm.bits = 0;
+       mperm.pasid = idxd->pasid;
+       mperm.pasid_en = device_pasid_enabled(idxd);
+       for (i = 1; i < msixcnt; i++)
+               iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+}
+
+void idxd_msix_perm_clear(struct idxd_device *idxd)
+{
+       union msix_perm mperm;
+       int i, msixcnt;
+
+       msixcnt = pci_msix_vec_count(idxd->pdev);
+       if (msixcnt < 0)
+               return;
+
+       mperm.bits = 0;
+       for (i = 1; i < msixcnt; i++)
+               iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+}
+
 static void idxd_group_config_write(struct idxd_group *group)
 {
        struct idxd_device *idxd = group->idxd;
@@ -642,7 +678,14 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
        if (!wq->group)
                return 0;
 
-       memset(wq->wqcfg, 0, idxd->wqcfg_size);
+       /*
+        * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
+        * wq reset. This will copy back the sticky values that are present on some devices.
+        */
+       for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+               wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
+               wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
+       }
 
        /* byte 0-3 */
        wq->wqcfg->wq_size = wq->size;
index 81a0e65fd316d7af96b316af82e4d8f029dfdf57..76014c14f4732854cf95086c69af7fca497b8b8c 100644 (file)
@@ -316,6 +316,8 @@ void idxd_unregister_driver(void);
 struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
 
 /* device interrupt control */
+void idxd_msix_perm_setup(struct idxd_device *idxd);
+void idxd_msix_perm_clear(struct idxd_device *idxd);
 irqreturn_t idxd_irq_handler(int vec, void *data);
 irqreturn_t idxd_misc_thread(int vec, void *data);
 irqreturn_t idxd_wq_thread(int irq, void *data);
@@ -341,6 +343,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq);
 int idxd_wq_enable(struct idxd_wq *wq);
 int idxd_wq_disable(struct idxd_wq *wq);
 void idxd_wq_drain(struct idxd_wq *wq);
+void idxd_wq_reset(struct idxd_wq *wq);
 int idxd_wq_map_portal(struct idxd_wq *wq);
 void idxd_wq_unmap_portal(struct idxd_wq *wq);
 void idxd_wq_disable_cleanup(struct idxd_wq *wq);
index 085a0c3b62c68190d86ceb75f355ec7a6e3c910a..6584b0ec07d54c626102af8c92e3d327fc314f3f 100644 (file)
@@ -65,7 +65,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
        struct idxd_irq_entry *irq_entry;
        int i, msixcnt;
        int rc = 0;
-       union msix_perm mperm;
 
        msixcnt = pci_msix_vec_count(pdev);
        if (msixcnt < 0) {
@@ -144,14 +143,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
        }
 
        idxd_unmask_error_interrupts(idxd);
-
-       /* Setup MSIX permission table */
-       mperm.bits = 0;
-       mperm.pasid = idxd->pasid;
-       mperm.pasid_en = device_pasid_enabled(idxd);
-       for (i = 1; i < msixcnt; i++)
-               iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-
+       idxd_msix_perm_setup(idxd);
        return 0;
 
  err_no_irq:
@@ -510,6 +502,7 @@ static void idxd_shutdown(struct pci_dev *pdev)
                idxd_flush_work_list(irq_entry);
        }
 
+       idxd_msix_perm_clear(idxd);
        destroy_workqueue(idxd->wq);
 }
 
index a60ca11a5784a9c5dd0d55e4145868c13845d3a8..f1463fc581125adf61e82e9b5042333779b585d8 100644 (file)
@@ -124,7 +124,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
                for (i = 0; i < 4; i++)
                        idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
                                        IDXD_SWERR_OFFSET + i * sizeof(u64));
-               iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
+
+               iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
+                         idxd->reg_base + IDXD_SWERR_OFFSET);
 
                if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
                        int id = idxd->sw_err.wq_idx;
index 4dbb03c545e48abaff38677818be3b579cdbff5e..18bf4d14898907589b37d979b80a12571c8baff6 100644 (file)
@@ -275,7 +275,6 @@ static void disable_wq(struct idxd_wq *wq)
 {
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
-       int rc;
 
        mutex_lock(&wq->wq_lock);
        dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
@@ -296,17 +295,13 @@ static void disable_wq(struct idxd_wq *wq)
        idxd_wq_unmap_portal(wq);
 
        idxd_wq_drain(wq);
-       rc = idxd_wq_disable(wq);
+       idxd_wq_reset(wq);
 
        idxd_wq_free_resources(wq);
        wq->client_count = 0;
        mutex_unlock(&wq->wq_lock);
 
-       if (rc < 0)
-               dev_warn(dev, "Failed to disable %s: %d\n",
-                        dev_name(&wq->conf_dev), rc);
-       else
-               dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+       dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
 }
 
 static int idxd_config_bus_remove(struct device *dev)
@@ -989,7 +984,7 @@ static ssize_t wq_size_store(struct device *dev,
        if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                return -EPERM;
 
-       if (wq->state != IDXD_WQ_DISABLED)
+       if (idxd->state == IDXD_DEV_ENABLED)
                return -EPERM;
 
        if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
@@ -1449,8 +1444,14 @@ static ssize_t op_cap_show(struct device *dev,
 {
        struct idxd_device *idxd =
                container_of(dev, struct idxd_device, conf_dev);
+       int i, rc = 0;
+
+       for (i = 0; i < 4; i++)
+               rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
 
-       return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
+       rc--;
+       rc += sysfs_emit_at(buf, rc, "\n");
+       return rc;
 }
 static DEVICE_ATTR_RO(op_cap);
 
index f387c5bbc170cb38360db21fe2bbdd356538c9cd..1669345441619edf81e546146f3de40f6470e6ba 100644 (file)
@@ -507,10 +507,8 @@ static int plx_dma_create(struct pci_dev *pdev)
 
        rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
                         KBUILD_MODNAME, plxdev);
-       if (rc) {
-               kfree(plxdev);
-               return rc;
-       }
+       if (rc)
+               goto free_plx;
 
        spin_lock_init(&plxdev->ring_lock);
        tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);
@@ -540,14 +538,20 @@ static int plx_dma_create(struct pci_dev *pdev)
        rc = dma_async_device_register(dma);
        if (rc) {
                pci_err(pdev, "Failed to register dma device: %d\n", rc);
-               free_irq(pci_irq_vector(pdev, 0),  plxdev);
-               kfree(plxdev);
-               return rc;
+               goto put_device;
        }
 
        pci_set_drvdata(pdev, plxdev);
 
        return 0;
+
+put_device:
+       put_device(&pdev->dev);
+       free_irq(pci_irq_vector(pdev, 0),  plxdev);
+free_plx:
+       kfree(plxdev);
+
+       return rc;
 }
 
 static int plx_dma_probe(struct pci_dev *pdev,
index 71827d9b0aa1924b78dec0569aedd7429cc7e96a..b7260749e8eee6be0d27f55d859f5861d4442771 100644 (file)
@@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
                goto end;
        }
        if (!tdc->busy) {
-               err = pm_runtime_get_sync(tdc->tdma->dev);
+               err = pm_runtime_resume_and_get(tdc->tdma->dev);
                if (err < 0) {
                        dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
                        goto end;
@@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
        int err;
 
-       err = pm_runtime_get_sync(tdc->tdma->dev);
+       err = pm_runtime_resume_and_get(tdc->tdma->dev);
        if (err < 0) {
                dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
                return;
index 55df63dead8d398c4c7cd4a661ec65968ca7e992..70b29bd079c9f30d4a17e294f5ea44acc8eb81f5 100644 (file)
@@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
        struct xilinx_dpdma_tx_desc *desc;
        struct virt_dma_desc *vdesc;
        u32 reg, channels;
+       bool first_frame;
 
        lockdep_assert_held(&chan->lock);
 
@@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
                chan->running = true;
        }
 
-       if (chan->video_group)
-               channels = xilinx_dpdma_chan_video_group_ready(chan);
-       else
-               channels = BIT(chan->id);
-
-       if (!channels)
-               return;
-
        vdesc = vchan_next_desc(&chan->vchan);
        if (!vdesc)
                return;
@@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
                            FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
                                       upper_32_bits(sw_desc->dma_addr)));
 
-       if (chan->first_frame)
+       first_frame = chan->first_frame;
+       chan->first_frame = false;
+
+       if (chan->video_group) {
+               channels = xilinx_dpdma_chan_video_group_ready(chan);
+               /*
+                * Trigger the transfer only when all channels in the group are
+                * ready.
+                */
+               if (!channels)
+                       return;
+       } else {
+               channels = BIT(chan->id);
+       }
+
+       if (first_frame)
                reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
        else
                reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
 
-       chan->first_frame = false;
-
        dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
 }
 
@@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
  */
 static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
 {
-       struct xilinx_dpdma_tx_desc *active = chan->desc.active;
+       struct xilinx_dpdma_tx_desc *active;
        unsigned long flags;
 
        spin_lock_irqsave(&chan->lock, flags);
 
        xilinx_dpdma_debugfs_desc_done_irq(chan);
 
+       active = chan->desc.active;
        if (active)
                vchan_cyclic_callback(&active->vdesc);
        else
index c211222f5d0ccf6166a394a6b7ff02a16bcb5327..4105df74f2b06f74dee219d86d9f390af58a3144 100644 (file)
@@ -9,6 +9,7 @@
  * (originally switch class is supported)
  */
 
+#include <linux/devm-helpers.h>
 #include <linux/extcon-provider.h>
 #include <linux/gpio/consumer.h>
 #include <linux/init.h>
@@ -112,7 +113,9 @@ static int gpio_extcon_probe(struct platform_device *pdev)
        if (ret < 0)
                return ret;
 
-       INIT_DELAYED_WORK(&data->work, gpio_extcon_work);
+       ret = devm_delayed_work_autocancel(dev, &data->work, gpio_extcon_work);
+       if (ret)
+               return ret;
 
        /*
         * Request the interrupt of gpio to detect whether external connector
@@ -131,15 +134,6 @@ static int gpio_extcon_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int gpio_extcon_remove(struct platform_device *pdev)
-{
-       struct gpio_extcon_data *data = platform_get_drvdata(pdev);
-
-       cancel_delayed_work_sync(&data->work);
-
-       return 0;
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int gpio_extcon_resume(struct device *dev)
 {
@@ -158,7 +152,6 @@ static SIMPLE_DEV_PM_OPS(gpio_extcon_pm_ops, NULL, gpio_extcon_resume);
 
 static struct platform_driver gpio_extcon_driver = {
        .probe          = gpio_extcon_probe,
-       .remove         = gpio_extcon_remove,
        .driver         = {
                .name   = "extcon-gpio",
                .pm     = &gpio_extcon_pm_ops,
index 80c9abcc3f97874d4323930c7090b9ae9f60e2e6..fb527c23639e80f689b0f7d2120042d6b62906bb 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/devm-helpers.h>
 #include <linux/extcon-provider.h>
 #include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
@@ -101,7 +102,9 @@ static int int3496_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        data->dev = dev;
-       INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);
+       ret = devm_delayed_work_autocancel(dev, &data->work, int3496_do_usb_id);
+       if (ret)
+               return ret;
 
        data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
        if (IS_ERR(data->gpio_usb_id)) {
@@ -155,16 +158,6 @@ static int int3496_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int int3496_remove(struct platform_device *pdev)
-{
-       struct int3496_data *data = platform_get_drvdata(pdev);
-
-       devm_free_irq(&pdev->dev, data->usb_id_irq, data);
-       cancel_delayed_work_sync(&data->work);
-
-       return 0;
-}
-
 static const struct acpi_device_id int3496_acpi_match[] = {
        { "INT3496" },
        { }
@@ -177,7 +170,6 @@ static struct platform_driver int3496_driver = {
                .acpi_match_table = int3496_acpi_match,
        },
        .probe = int3496_probe,
-       .remove = int3496_remove,
 };
 
 module_platform_driver(int3496_driver);
index 337b0eea4e629ea00dd55e96a75299e663a1a67c..e1408075ef7d689f9c7a5f53e1e53de8a6c07722 100644 (file)
@@ -44,6 +44,8 @@ static struct max8997_muic_irq muic_irqs[] = {
        { MAX8997_MUICIRQ_ChgDetRun,    "muic-CHGDETRUN" },
        { MAX8997_MUICIRQ_ChgTyp,       "muic-CHGTYP" },
        { MAX8997_MUICIRQ_OVP,          "muic-OVP" },
+       { MAX8997_PMICIRQ_CHGINS,       "pmic-CHGINS" },
+       { MAX8997_PMICIRQ_CHGRM,        "pmic-CHGRM" },
 };
 
 /* Define supported cable type */
@@ -538,6 +540,8 @@ static void max8997_muic_irq_work(struct work_struct *work)
        case MAX8997_MUICIRQ_DCDTmr:
        case MAX8997_MUICIRQ_ChgDetRun:
        case MAX8997_MUICIRQ_ChgTyp:
+       case MAX8997_PMICIRQ_CHGINS:
+       case MAX8997_PMICIRQ_CHGRM:
                /* Handle charger cable */
                ret = max8997_muic_chg_handler(info);
                break;
index a2852bcc5f0d5e215a6230262c01053fd395202e..d2c1a8b89c085449eb16ca6a168fc8ec4dabbc1a 100644 (file)
@@ -9,6 +9,7 @@
  * Author: Hema HK <hemahk@ti.com>
  */
 
+#include <linux/devm-helpers.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
@@ -237,7 +238,11 @@ static int palmas_usb_probe(struct platform_device *pdev)
                        palmas_usb->sw_debounce_jiffies = msecs_to_jiffies(debounce);
        }
 
-       INIT_DELAYED_WORK(&palmas_usb->wq_detectid, palmas_gpio_id_detect);
+       status = devm_delayed_work_autocancel(&pdev->dev,
+                                             &palmas_usb->wq_detectid,
+                                             palmas_gpio_id_detect);
+       if (status)
+               return status;
 
        palmas->usb = palmas_usb;
        palmas_usb->palmas = palmas;
@@ -359,15 +364,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int palmas_usb_remove(struct platform_device *pdev)
-{
-       struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
-
-       cancel_delayed_work_sync(&palmas_usb->wq_detectid);
-
-       return 0;
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int palmas_usb_suspend(struct device *dev)
 {
@@ -422,7 +418,6 @@ static const struct of_device_id of_palmas_match_tbl[] = {
 
 static struct platform_driver palmas_usb_driver = {
        .probe = palmas_usb_probe,
-       .remove = palmas_usb_remove,
        .driver = {
                .name = "palmas-usb",
                .of_match_table = of_palmas_match_tbl,
index 6b836ae6217673737962be62b5c678fab9b77fc0..eb02cb962b5e18585ef4be6266351314527a8493 100644 (file)
@@ -1,12 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /**
  * extcon-qcom-spmi-misc.c - Qualcomm USB extcon driver to support USB ID
- *                             detection based on extcon-usb-gpio.c.
+ *                     and VBUS detection based on extcon-usb-gpio.c.
  *
  * Copyright (C) 2016 Linaro, Ltd.
  * Stephen Boyd <stephen.boyd@linaro.org>
  */
 
+#include <linux/devm-helpers.h>
 #include <linux/extcon-provider.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 
 struct qcom_usb_extcon_info {
        struct extcon_dev *edev;
-       int irq;
+       int id_irq;
+       int vbus_irq;
        struct delayed_work wq_detcable;
        unsigned long debounce_jiffies;
 };
 
 static const unsigned int qcom_usb_extcon_cable[] = {
+       EXTCON_USB,
        EXTCON_USB_HOST,
        EXTCON_NONE,
 };
 
 static void qcom_usb_extcon_detect_cable(struct work_struct *work)
 {
-       bool id;
+       bool state = false;
        int ret;
+       union extcon_property_value val;
        struct qcom_usb_extcon_info *info = container_of(to_delayed_work(work),
                                                    struct qcom_usb_extcon_info,
                                                    wq_detcable);
 
-       /* check ID and update cable state */
-       ret = irq_get_irqchip_state(info->irq, IRQCHIP_STATE_LINE_LEVEL, &id);
-       if (ret)
-               return;
+       if (info->id_irq > 0) {
+               /* check ID and update cable state */
+               ret = irq_get_irqchip_state(info->id_irq,
+                               IRQCHIP_STATE_LINE_LEVEL, &state);
+               if (ret)
+                       return;
+
+               if (!state) {
+                       val.intval = true;
+                       extcon_set_property(info->edev, EXTCON_USB_HOST,
+                                               EXTCON_PROP_USB_SS, val);
+               }
+               extcon_set_state_sync(info->edev, EXTCON_USB_HOST, !state);
+       }
 
-       extcon_set_state_sync(info->edev, EXTCON_USB_HOST, !id);
+       if (info->vbus_irq > 0) {
+               /* check VBUS and update cable state */
+               ret = irq_get_irqchip_state(info->vbus_irq,
+                               IRQCHIP_STATE_LINE_LEVEL, &state);
+               if (ret)
+                       return;
+
+               if (state) {
+                       val.intval = true;
+                       extcon_set_property(info->edev, EXTCON_USB,
+                                               EXTCON_PROP_USB_SS, val);
+               }
+               extcon_set_state_sync(info->edev, EXTCON_USB, state);
+       }
 }
 
 static irqreturn_t qcom_usb_irq_handler(int irq, void *dev_id)
@@ -79,21 +106,52 @@ static int qcom_usb_extcon_probe(struct platform_device *pdev)
                return ret;
        }
 
+       ret = extcon_set_property_capability(info->edev,
+                       EXTCON_USB, EXTCON_PROP_USB_SS);
+       ret |= extcon_set_property_capability(info->edev,
+                       EXTCON_USB_HOST, EXTCON_PROP_USB_SS);
+       if (ret) {
+               dev_err(dev, "failed to register extcon props rc=%d\n",
+                                               ret);
+               return ret;
+       }
+
        info->debounce_jiffies = msecs_to_jiffies(USB_ID_DEBOUNCE_MS);
-       INIT_DELAYED_WORK(&info->wq_detcable, qcom_usb_extcon_detect_cable);
 
-       info->irq = platform_get_irq_byname(pdev, "usb_id");
-       if (info->irq < 0)
-               return info->irq;
+       ret = devm_delayed_work_autocancel(dev, &info->wq_detcable,
+                                          qcom_usb_extcon_detect_cable);
+       if (ret)
+               return ret;
 
-       ret = devm_request_threaded_irq(dev, info->irq, NULL,
+       info->id_irq = platform_get_irq_byname(pdev, "usb_id");
+       if (info->id_irq > 0) {
+               ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
                                        qcom_usb_irq_handler,
                                        IRQF_TRIGGER_RISING |
                                        IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                        pdev->name, info);
-       if (ret < 0) {
-               dev_err(dev, "failed to request handler for ID IRQ\n");
-               return ret;
+               if (ret < 0) {
+                       dev_err(dev, "failed to request handler for ID IRQ\n");
+                       return ret;
+               }
+       }
+
+       info->vbus_irq = platform_get_irq_byname(pdev, "usb_vbus");
+       if (info->vbus_irq > 0) {
+               ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
+                                       qcom_usb_irq_handler,
+                                       IRQF_TRIGGER_RISING |
+                                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                       pdev->name, info);
+               if (ret < 0) {
+                       dev_err(dev, "failed to request handler for VBUS IRQ\n");
+                       return ret;
+               }
+       }
+
+       if (info->id_irq < 0 && info->vbus_irq < 0) {
+               dev_err(dev, "ID and VBUS IRQ not found\n");
+               return -EINVAL;
        }
 
        platform_set_drvdata(pdev, info);
@@ -105,23 +163,18 @@ static int qcom_usb_extcon_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int qcom_usb_extcon_remove(struct platform_device *pdev)
-{
-       struct qcom_usb_extcon_info *info = platform_get_drvdata(pdev);
-
-       cancel_delayed_work_sync(&info->wq_detcable);
-
-       return 0;
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int qcom_usb_extcon_suspend(struct device *dev)
 {
        struct qcom_usb_extcon_info *info = dev_get_drvdata(dev);
        int ret = 0;
 
-       if (device_may_wakeup(dev))
-               ret = enable_irq_wake(info->irq);
+       if (device_may_wakeup(dev)) {
+               if (info->id_irq > 0)
+                       ret = enable_irq_wake(info->id_irq);
+               if (info->vbus_irq > 0)
+                       ret = enable_irq_wake(info->vbus_irq);
+       }
 
        return ret;
 }
@@ -131,8 +184,12 @@ static int qcom_usb_extcon_resume(struct device *dev)
        struct qcom_usb_extcon_info *info = dev_get_drvdata(dev);
        int ret = 0;
 
-       if (device_may_wakeup(dev))
-               ret = disable_irq_wake(info->irq);
+       if (device_may_wakeup(dev)) {
+               if (info->id_irq > 0)
+                       ret = disable_irq_wake(info->id_irq);
+               if (info->vbus_irq > 0)
+                       ret = disable_irq_wake(info->vbus_irq);
+       }
 
        return ret;
 }
@@ -149,7 +206,6 @@ MODULE_DEVICE_TABLE(of, qcom_usb_extcon_dt_match);
 
 static struct platform_driver qcom_usb_extcon_driver = {
        .probe          = qcom_usb_extcon_probe,
-       .remove         = qcom_usb_extcon_remove,
        .driver         = {
                .name   = "extcon-pm8941-misc",
                .pm     = &qcom_usb_extcon_pm_ops,
index 106d4da647bd9097d93ce05f6381402274be6ebb..db41d1c58efd56a1e9e14b88f2a7d8471d2dab8b 100644 (file)
@@ -144,6 +144,7 @@ enum sm5502_muic_acc_type {
        SM5502_MUIC_ADC_AUDIO_TYPE1_FULL_REMOTE = 0x3e, /* |      001|11110| */
        SM5502_MUIC_ADC_AUDIO_TYPE1_SEND_END = 0x5e,    /* |      010|11110| */
                                                        /* |Dev Type1|--ADC| */
+       SM5502_MUIC_ADC_GROUND_USB_OTG = 0x80,          /* |      100|00000| */
        SM5502_MUIC_ADC_OPEN_USB = 0x5f,                /* |      010|11111| */
        SM5502_MUIC_ADC_OPEN_TA = 0xdf,                 /* |      110|11111| */
        SM5502_MUIC_ADC_OPEN_USB_OTG = 0xff,            /* |      111|11111| */
@@ -291,11 +292,27 @@ static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
         * connected with to MUIC device.
         */
        cable_type = adc & SM5502_REG_ADC_MASK;
-       if (cable_type == SM5502_MUIC_ADC_GROUND)
-               return SM5502_MUIC_ADC_GROUND;
 
        switch (cable_type) {
        case SM5502_MUIC_ADC_GROUND:
+               ret = regmap_read(info->regmap, SM5502_REG_DEV_TYPE1,
+                                 &dev_type1);
+               if (ret) {
+                       dev_err(info->dev, "failed to read DEV_TYPE1 reg\n");
+                       return ret;
+               }
+
+               switch (dev_type1) {
+               case SM5502_REG_DEV_TYPE1_USB_OTG_MASK:
+                       cable_type = SM5502_MUIC_ADC_GROUND_USB_OTG;
+                       break;
+               default:
+                       dev_dbg(info->dev,
+                               "cannot identify the cable type: adc(0x%x), dev_type1(0x%x)\n",
+                               adc, dev_type1);
+                       return -EINVAL;
+               }
+               break;
        case SM5502_MUIC_ADC_SEND_END_BUTTON:
        case SM5502_MUIC_ADC_REMOTE_S1_BUTTON:
        case SM5502_MUIC_ADC_REMOTE_S2_BUTTON:
@@ -396,6 +413,7 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
                con_sw  = DM_DP_SWITCH_OPEN;
                vbus_sw = VBUSIN_SWITCH_VBUSOUT;
                break;
+       case SM5502_MUIC_ADC_GROUND_USB_OTG:
        case SM5502_MUIC_ADC_OPEN_USB_OTG:
                id      = EXTCON_USB_HOST;
                con_sw  = DM_DP_SWITCH_USB;
index 9811c40956e54d7c6afb1962027049e86447fcae..17c9d825188bb4c7f622b39b4e466d40391fa5d3 100644 (file)
@@ -2545,7 +2545,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
        struct driver_data *driver_data = packet->driver_data;
        int ret = -ENOENT;
 
-       tasklet_disable(&ctx->tasklet);
+       tasklet_disable_in_atomic(&ctx->tasklet);
 
        if (packet->ack != 0)
                goto out;
@@ -3465,7 +3465,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        int ret = 0;
 
-       tasklet_disable(&ctx->context.tasklet);
+       tasklet_disable_in_atomic(&ctx->context.tasklet);
 
        if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
                context_tasklet((unsigned long)&ctx->context);
index 3f14dffb9669693ede1298a45398ad81545e93f5..5dd19dbd67a3bbc7901b5b53a0767a3545dfc175 100644 (file)
@@ -237,6 +237,7 @@ config INTEL_STRATIX10_RSU
 config QCOM_SCM
        bool
        depends on ARM || ARM64
+       depends on HAVE_ARM_SMCCC
        select RESET_CONTROLLER
 
 config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
index c23466e05e60dff3fd1a6cab9f7dd84c68bc4391..d0537573501e9353446bd45dcf6bf7da026551d1 100644 (file)
@@ -13,7 +13,8 @@ cflags-$(CONFIG_X86)          += -m$(BITS) -D__KERNEL__ \
                                   -Wno-pointer-sign \
                                   $(call cc-disable-warning, address-of-packed-member) \
                                   $(call cc-disable-warning, gnu) \
-                                  -fno-asynchronous-unwind-tables
+                                  -fno-asynchronous-unwind-tables \
+                                  $(CLANG_FLAGS)
 
 # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
 # disable the stackleak plugin
index 3d77f26c1e8c938796f4b15a3b68459ad100c6b0..bb6e77ee3898c16979542b47024e7526b9e6871d 100644 (file)
@@ -136,12 +136,16 @@ MODULE_PARM_DESC(spincount,
        "The number of loop iterations to use when using the spin handshake.");
 
 /*
- * Platforms might not support S0ix logging in their GSMI handlers. In order to
- * avoid any side-effects of generating an SMI for S0ix logging, use the S0ix
- * related GSMI commands only for those platforms that explicitly enable this
- * option.
+ * Some older platforms with Apollo Lake chipsets do not support S0ix logging
+ * in their GSMI handlers, and behaved poorly when resuming via power button
+ * press if the logging was attempted. Updated firmware with proper behavior
+ * has long since shipped, removing the need for this opt-in parameter. It
+ * now exists as an opt-out parameter for folks defiantly running old
+ * firmware, or unforeseen circumstances. After the change from opt-in to
+ * opt-out has baked sufficiently, this parameter should probably be removed
+ * entirely.
  */
-static bool s0ix_logging_enable;
+static bool s0ix_logging_enable = true;
 module_param(s0ix_logging_enable, bool, 0600);
 
 static struct gsmi_buf *gsmi_buf_alloc(void)
index 50bb2a6d6ccf79a7986da30f56c1604804230d92..62f0d1a5dd3242404a7748e644de572ae05fdc0d 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Turris Mox rWTM firmware driver
  *
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/armada-37xx-rwtm-mailbox.h>
@@ -547,4 +547,4 @@ module_platform_driver(turris_mox_rwtm_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Turris Mox rWTM firmware driver");
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
index 5ff9438b7b4611803aedf34ebc3e004a0be0de60..d591dd9b7c60c9ccd8f3c57d367814d1ec9f3250 100644 (file)
@@ -118,10 +118,17 @@ config XILINX_PR_DECOUPLER
        depends on FPGA_BRIDGE
        depends on HAS_IOMEM
        help
-         Say Y to enable drivers for Xilinx LogiCORE PR Decoupler.
+         Say Y to enable drivers for Xilinx LogiCORE PR Decoupler
+         or Xilinx Dynamic Function eXchnage AIX Shutdown Manager.
          The PR Decoupler exists in the FPGA fabric to isolate one
          region of the FPGA from the busses while that region is
          being reprogrammed during partial reconfig.
+         The Dynamic Function eXchange AXI shutdown manager prevents
+         AXI traffic from passing through the bridge. The controller
+         safely handles AXI4MM and AXI4-Lite interfaces on a
+         Reconfigurable Partition when it is undergoing dynamic
+         reconfiguration, preventing the system deadlock that can
+         occur if AXI transactions are interrupted by DFX.
 
 config FPGA_REGION
        tristate "FPGA Region"
index c4691187cca9118f278ad3894dc95ff638567740..ab7be62173681ae0a0c97676d319eb5032ed30f2 100644 (file)
@@ -52,7 +52,7 @@ static int afu_port_err_clear(struct device *dev, u64 err)
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
        struct platform_device *pdev = to_platform_device(dev);
        void __iomem *base_err, *base_hdr;
-       int ret = -EBUSY;
+       int enable_ret = 0, ret = -EBUSY;
        u64 v;
 
        base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
@@ -96,18 +96,20 @@ static int afu_port_err_clear(struct device *dev, u64 err)
                v = readq(base_err + PORT_FIRST_ERROR);
                writeq(v, base_err + PORT_FIRST_ERROR);
        } else {
+               dev_warn(dev, "%s: received 0x%llx, expected 0x%llx\n",
+                        __func__, v, err);
                ret = -EINVAL;
        }
 
        /* Clear mask */
        __afu_port_err_mask(dev, false);
 
-       /* Enable the Port by clear the reset */
-       __afu_port_enable(pdev);
+       /* Enable the Port by clearing the reset */
+       enable_ret = __afu_port_enable(pdev);
 
 done:
        mutex_unlock(&pdata->lock);
-       return ret;
+       return enable_ret ? enable_ret : ret;
 }
 
 static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
index 753cda4b2568380ec9b9fed5f0ae8a0140b5c3db..7f621e96d3b8d8fb0dce599a07a4394b3a702895 100644 (file)
@@ -21,6 +21,9 @@
 
 #include "dfl-afu.h"
 
+#define RST_POLL_INVL 10 /* us */
+#define RST_POLL_TIMEOUT 1000 /* us */
+
 /**
  * __afu_port_enable - enable a port by clear reset
  * @pdev: port platform device.
@@ -32,7 +35,7 @@
  *
  * The caller needs to hold lock for protection.
  */
-void __afu_port_enable(struct platform_device *pdev)
+int __afu_port_enable(struct platform_device *pdev)
 {
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
        void __iomem *base;
@@ -41,7 +44,7 @@ void __afu_port_enable(struct platform_device *pdev)
        WARN_ON(!pdata->disable_count);
 
        if (--pdata->disable_count != 0)
-               return;
+               return 0;
 
        base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
 
@@ -49,10 +52,20 @@ void __afu_port_enable(struct platform_device *pdev)
        v = readq(base + PORT_HDR_CTRL);
        v &= ~PORT_CTRL_SFTRST;
        writeq(v, base + PORT_HDR_CTRL);
-}
 
-#define RST_POLL_INVL 10 /* us */
-#define RST_POLL_TIMEOUT 1000 /* us */
+       /*
+        * HW clears the ack bit to indicate that the port is fully out
+        * of reset.
+        */
+       if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
+                              !(v & PORT_CTRL_SFTRST_ACK),
+                              RST_POLL_INVL, RST_POLL_TIMEOUT)) {
+               dev_err(&pdev->dev, "timeout, failure to enable device\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
 
 /**
  * __afu_port_disable - disable a port by hold reset
@@ -86,7 +99,7 @@ int __afu_port_disable(struct platform_device *pdev)
        if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
                               v & PORT_CTRL_SFTRST_ACK,
                               RST_POLL_INVL, RST_POLL_TIMEOUT)) {
-               dev_err(&pdev->dev, "timeout, fail to reset device\n");
+               dev_err(&pdev->dev, "timeout, failure to disable device\n");
                return -ETIMEDOUT;
        }
 
@@ -110,10 +123,10 @@ static int __port_reset(struct platform_device *pdev)
        int ret;
 
        ret = __afu_port_disable(pdev);
-       if (!ret)
-               __afu_port_enable(pdev);
+       if (ret)
+               return ret;
 
-       return ret;
+       return __afu_port_enable(pdev);
 }
 
 static int port_reset(struct platform_device *pdev)
@@ -872,11 +885,11 @@ static int afu_dev_destroy(struct platform_device *pdev)
 static int port_enable_set(struct platform_device *pdev, bool enable)
 {
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
-       int ret = 0;
+       int ret;
 
        mutex_lock(&pdata->lock);
        if (enable)
-               __afu_port_enable(pdev);
+               ret = __afu_port_enable(pdev);
        else
                ret = __afu_port_disable(pdev);
        mutex_unlock(&pdata->lock);
index 576e9496008634a400fe73a0d57bfe93e8a9fbbf..e5020e2b1f3df634115b55533234a455be426b55 100644 (file)
@@ -80,7 +80,7 @@ struct dfl_afu {
 };
 
 /* hold pdata->lock when call __afu_port_enable/disable */
-void __afu_port_enable(struct platform_device *pdev);
+int __afu_port_enable(struct platform_device *pdev);
 int __afu_port_disable(struct platform_device *pdev);
 
 void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
index 04e47e266f26d0641e36d3d1b44e7fd1e1f8a61b..b44523ea8c91a407df4fadd8094fa7d61cd2759c 100644 (file)
@@ -69,14 +69,16 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
 }
 
 /* PCI Device ID */
-#define PCIE_DEVICE_ID_PF_INT_5_X      0xBCBD
-#define PCIE_DEVICE_ID_PF_INT_6_X      0xBCC0
-#define PCIE_DEVICE_ID_PF_DSC_1_X      0x09C4
-#define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
+#define PCIE_DEVICE_ID_PF_INT_5_X              0xBCBD
+#define PCIE_DEVICE_ID_PF_INT_6_X              0xBCC0
+#define PCIE_DEVICE_ID_PF_DSC_1_X              0x09C4
+#define PCIE_DEVICE_ID_INTEL_PAC_N3000         0x0B30
+#define PCIE_DEVICE_ID_INTEL_PAC_D5005         0x0B2B
 /* VF Device */
-#define PCIE_DEVICE_ID_VF_INT_5_X      0xBCBF
-#define PCIE_DEVICE_ID_VF_INT_6_X      0xBCC1
-#define PCIE_DEVICE_ID_VF_DSC_1_X      0x09C5
+#define PCIE_DEVICE_ID_VF_INT_5_X              0xBCBF
+#define PCIE_DEVICE_ID_VF_INT_6_X              0xBCC1
+#define PCIE_DEVICE_ID_VF_DSC_1_X              0x09C5
+#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF      0x0B2C
 
 static struct pci_device_id cci_pcie_id_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
@@ -86,6 +88,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
        {0,}
 };
 MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
index 7d69af23056773136fa4611d0dd7d178ee2e4567..ea2bde6e5bc4e101b8a5ad116aea8336dba7d654 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2017, National Instruments Corp.
- * Copyright (c) 2017, Xilix Inc
+ * Copyright (c) 2017, Xilinx Inc
  *
  * FPGA Bridge Driver for the Xilinx LogiCORE Partial Reconfiguration
  * Decoupler IP Core.
 #define CTRL_CMD_COUPLE                0
 #define CTRL_OFFSET            0
 
+struct xlnx_config_data {
+       const char *name;
+};
+
 struct xlnx_pr_decoupler_data {
+       const struct xlnx_config_data *ipconfig;
        void __iomem *io_base;
        struct clk *clk;
 };
@@ -76,15 +81,28 @@ static const struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
        .enable_show = xlnx_pr_decoupler_enable_show,
 };
 
+static const struct xlnx_config_data decoupler_config = {
+       .name = "Xilinx PR Decoupler",
+};
+
+static const struct xlnx_config_data shutdown_config = {
+       .name = "Xilinx DFX AXI Shutdown Manager",
+};
+
 static const struct of_device_id xlnx_pr_decoupler_of_match[] = {
-       { .compatible = "xlnx,pr-decoupler-1.00", },
-       { .compatible = "xlnx,pr-decoupler", },
+       { .compatible = "xlnx,pr-decoupler-1.00", .data = &decoupler_config },
+       { .compatible = "xlnx,pr-decoupler", .data = &decoupler_config },
+       { .compatible = "xlnx,dfx-axi-shutdown-manager-1.00",
+                                       .data = &shutdown_config },
+       { .compatible = "xlnx,dfx-axi-shutdown-manager",
+                                       .data = &shutdown_config },
        {},
 };
 MODULE_DEVICE_TABLE(of, xlnx_pr_decoupler_of_match);
 
 static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
 {
+       struct device_node *np = pdev->dev.of_node;
        struct xlnx_pr_decoupler_data *priv;
        struct fpga_bridge *br;
        int err;
@@ -94,17 +112,23 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
        if (!priv)
                return -ENOMEM;
 
+       if (np) {
+               const struct of_device_id *match;
+
+               match = of_match_node(xlnx_pr_decoupler_of_match, np);
+               if (match && match->data)
+                       priv->ipconfig = match->data;
+       }
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->io_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->io_base))
                return PTR_ERR(priv->io_base);
 
        priv->clk = devm_clk_get(&pdev->dev, "aclk");
-       if (IS_ERR(priv->clk)) {
-               if (PTR_ERR(priv->clk) != -EPROBE_DEFER)
-                       dev_err(&pdev->dev, "input clock not found\n");
-               return PTR_ERR(priv->clk);
-       }
+       if (IS_ERR(priv->clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
+                                    "input clock not found\n");
 
        err = clk_prepare_enable(priv->clk);
        if (err) {
@@ -114,7 +138,7 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
 
        clk_disable(priv->clk);
 
-       br = devm_fpga_bridge_create(&pdev->dev, "Xilinx PR Decoupler",
+       br = devm_fpga_bridge_create(&pdev->dev, priv->ipconfig->name,
                                     &xlnx_pr_decoupler_br_ops, priv);
        if (!br) {
                err = -ENOMEM;
@@ -125,7 +149,8 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
 
        err = fpga_bridge_register(br);
        if (err) {
-               dev_err(&pdev->dev, "unable to register Xilinx PR Decoupler");
+               dev_err(&pdev->dev, "unable to register %s",
+                       priv->ipconfig->name);
                goto err_clk;
        }
 
index 27defa98092ddce55d56d3ce3d980cc8104d7481..fee4d0abf6bfe6d349110420c60ecc854d81bf47 100644 (file)
@@ -233,25 +233,19 @@ static int xilinx_spi_probe(struct spi_device *spi)
 
        /* PROGRAM_B is active low */
        conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW);
-       if (IS_ERR(conf->prog_b)) {
-               dev_err(&spi->dev, "Failed to get PROGRAM_B gpio: %ld\n",
-                       PTR_ERR(conf->prog_b));
-               return PTR_ERR(conf->prog_b);
-       }
+       if (IS_ERR(conf->prog_b))
+               return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b),
+                                    "Failed to get PROGRAM_B gpio\n");
 
        conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
-       if (IS_ERR(conf->init_b)) {
-               dev_err(&spi->dev, "Failed to get INIT_B gpio: %ld\n",
-                       PTR_ERR(conf->init_b));
-               return PTR_ERR(conf->init_b);
-       }
+       if (IS_ERR(conf->init_b))
+               return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b),
+                                    "Failed to get INIT_B gpio\n");
 
        conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
-       if (IS_ERR(conf->done)) {
-               dev_err(&spi->dev, "Failed to get DONE gpio: %ld\n",
-                       PTR_ERR(conf->done));
-               return PTR_ERR(conf->done);
-       }
+       if (IS_ERR(conf->done))
+               return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
+                                    "Failed to get DONE gpio\n");
 
        mgr = devm_fpga_mgr_create(&spi->dev,
                                   "Xilinx Slave Serial FPGA Manager",
index 8299909318f412451e0cfe6762ad3921d4fbdcff..61f9efd6c64fb4babef5551fb2541659bf5a542b 100644 (file)
@@ -2,7 +2,7 @@
 /*
  *  Turris Mox Moxtet GPIO expander
  *
- *  Copyright (C) 2018 Marek Behun <marek.behun@nic.cz>
+ *  Copyright (C) 2018 Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/bitops.h>
@@ -174,6 +174,6 @@ static struct moxtet_driver moxtet_gpio_driver = {
 };
 module_moxtet_driver(moxtet_gpio_driver);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("Turris Mox Moxtet GPIO expander");
 MODULE_LICENSE("GPL v2");
index 41952bb818ad5266520cefa8a23998708715a669..56152263ab38f8956dd8648fa6598d874a99a375 100644 (file)
@@ -29,6 +29,7 @@
 #define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
 
 struct gpio_regs {
+       u32 sysconfig;
        u32 irqenable1;
        u32 irqenable2;
        u32 wake_en;
@@ -1069,6 +1070,7 @@ static void omap_gpio_init_context(struct gpio_bank *p)
        const struct omap_gpio_reg_offs *regs = p->regs;
        void __iomem *base = p->base;
 
+       p->context.sysconfig    = readl_relaxed(base + regs->sysconfig);
        p->context.ctrl         = readl_relaxed(base + regs->ctrl);
        p->context.oe           = readl_relaxed(base + regs->direction);
        p->context.wake_en      = readl_relaxed(base + regs->wkup_en);
@@ -1088,6 +1090,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
        const struct omap_gpio_reg_offs *regs = bank->regs;
        void __iomem *base = bank->base;
 
+       writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
        writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
        writel_relaxed(bank->context.ctrl, base + regs->ctrl);
        writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
@@ -1115,6 +1118,10 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
 
        bank->saved_datain = readl_relaxed(base + bank->regs->datain);
 
+       /* Save syconfig, it's runtime value can be different from init value */
+       if (bank->loses_context)
+               bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);
+
        if (!bank->enabled_non_wakeup_gpios)
                goto update_gpio_context_count;
 
@@ -1279,6 +1286,7 @@ out_unlock:
 
 static const struct omap_gpio_reg_offs omap2_gpio_regs = {
        .revision =             OMAP24XX_GPIO_REVISION,
+       .sysconfig =            OMAP24XX_GPIO_SYSCONFIG,
        .direction =            OMAP24XX_GPIO_OE,
        .datain =               OMAP24XX_GPIO_DATAIN,
        .dataout =              OMAP24XX_GPIO_DATAOUT,
@@ -1302,6 +1310,7 @@ static const struct omap_gpio_reg_offs omap2_gpio_regs = {
 
 static const struct omap_gpio_reg_offs omap4_gpio_regs = {
        .revision =             OMAP4_GPIO_REVISION,
+       .sysconfig =            OMAP4_GPIO_SYSCONFIG,
        .direction =            OMAP4_GPIO_OE,
        .datain =               OMAP4_GPIO_DATAIN,
        .dataout =              OMAP4_GPIO_DATAOUT,
index 26c5466b81799f39fc7c0b1add4a5fe586e0909c..ae49bb23c6ed1eb0dbfa311b64ff1f500f1ab12f 100644 (file)
@@ -458,6 +458,8 @@ static ssize_t export_store(struct class *class,
        long                    gpio;
        struct gpio_desc        *desc;
        int                     status;
+       struct gpio_chip        *gc;
+       int                     offset;
 
        status = kstrtol(buf, 0, &gpio);
        if (status < 0)
@@ -469,6 +471,12 @@ static ssize_t export_store(struct class *class,
                pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
                return -EINVAL;
        }
+       gc = desc->gdev->chip;
+       offset = gpio_chip_hwgpio(desc);
+       if (!gpiochip_line_is_valid(gc, offset)) {
+               pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
+               return -EINVAL;
+       }
 
        /* No extra locking here; FLAG_SYSFS just signifies that the
         * request and export were done by on behalf of userspace, so
index 9fd2157b133accc9e71894fcdc9515c764270948..5efa331e3ee8242772d8c037382eb195c549fb05 100644 (file)
@@ -906,7 +906,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
 
        /* Allocate an SG array and squash pages into it */
        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
-                                     ttm->num_pages << PAGE_SHIFT,
+                                     (u64)ttm->num_pages << PAGE_SHIFT,
                                      GFP_KERNEL);
        if (r)
                goto release_sg;
index 7d2c8b1698279cddb8e480401684ea62bf58e848..326dae31b675d042ecfc17329f5b4599fe95e43c 100644 (file)
@@ -3300,7 +3300,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
        struct amdgpu_bo *root;
        uint64_t value, flags;
        struct amdgpu_vm *vm;
-       long r;
+       int r;
 
        spin_lock(&adev->vm_manager.pasid_lock);
        vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
@@ -3349,6 +3349,12 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                value = 0;
        }
 
+       r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+       if (r) {
+               pr_debug("failed %d to reserve fence slot\n", r);
+               goto error_unlock;
+       }
+
        r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
                                        addr, flags, value, NULL, NULL,
                                        NULL);
@@ -3360,7 +3366,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 error_unlock:
        amdgpu_bo_unreserve(root);
        if (r < 0)
-               DRM_ERROR("Can't handle page fault (%ld)\n", r);
+               DRM_ERROR("Can't handle page fault (%d)\n", r);
 
 error_unref:
        amdgpu_bo_unref(&root);
index 45d1172b7bff93e8ef38e87859467c5944e93e0c..63691deb7df3c637c57a8a073f8948623dcfec3f 100644 (file)
@@ -3280,7 +3280,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000280, 0x00000280),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07800000, 0x00800000),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x00001d00, 0x00000500),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x00001d00, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003c0000, 0x00280400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
index 573cf17262da4e116154dc0bb199ef2fc9de4add..d699a5cf6c11846988e217ba4d69e599c6caa370 100644 (file)
@@ -4071,13 +4071,6 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
        if (modifier == DRM_FORMAT_MOD_LINEAR)
                return true;
 
-       /*
-        * The arbitrary tiling support for multiplane formats has not been hooked
-        * up.
-        */
-       if (info->num_planes > 1)
-               return false;
-
        /*
         * For D swizzle the canonical modifier depends on the bpp, so check
         * it here.
@@ -4096,6 +4089,10 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
                /* Per radeonsi comments 16/64 bpp are more complicated. */
                if (info->cpp[0] != 4)
                        return false;
+               /* We support multi-planar formats, but not when combined with
+                * additional DCC metadata planes. */
+               if (info->num_planes > 1)
+                       return false;
        }
 
        return true;
@@ -4296,7 +4293,7 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
-                   AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+                   AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
 
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
@@ -4308,7 +4305,7 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
                    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
                    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
-                   AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+                   AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
 
        add_modifier(mods, size, capacity, AMD_FMT_MOD |
                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
index 705fbfc375029f7c1d089f0dca60b4da9b27f6fc..8a32772d4e91af4b673fe0953ea4d9786d2f58c9 100644 (file)
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
+       HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
index d0ec83881fc59e7775a55ef062c91b221bbc0da8..c0565a932a124d12101c7c198802041745e37d94 100644 (file)
@@ -1224,7 +1224,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                    (hwmgr->chip_id == CHIP_POLARIS10) ||
                    (hwmgr->chip_id == CHIP_POLARIS11) ||
                    (hwmgr->chip_id == CHIP_POLARIS12) ||
-                   (hwmgr->chip_id == CHIP_TONGA))
+                   (hwmgr->chip_id == CHIP_TONGA) ||
+                   (hwmgr->chip_id == CHIP_TOPAZ))
                        PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
 
 
index e21fb14d5e07b8cb4c3d5377ac4babecbaf9d57f..833d0c1be4f1d5583ffe8cdc6cb956750a3d3f8a 100644 (file)
@@ -84,13 +84,31 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle)
                return;
        }
 
+       if (!pkg->package.count) {
+               DRM_DEBUG_DRIVER("no connection in _DSM\n");
+               return;
+       }
+
        connector_count = &pkg->package.elements[0];
        DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
                  (unsigned long long)connector_count->integer.value);
        for (i = 1; i < pkg->package.count; i++) {
                union acpi_object *obj = &pkg->package.elements[i];
-               union acpi_object *connector_id = &obj->package.elements[0];
-               union acpi_object *info = &obj->package.elements[1];
+               union acpi_object *connector_id;
+               union acpi_object *info;
+
+               if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
+                       DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i);
+                       continue;
+               }
+
+               connector_id = &obj->package.elements[0];
+               info = &obj->package.elements[1];
+               if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) {
+                       DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i);
+                       continue;
+               }
+
                DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
                          (unsigned long long)connector_id->integer.value);
                DRM_DEBUG_DRIVER("  port id: %s\n",
index 6518843901376bcff82a1b493a467968a57e9e72..4f8337c7fd2e0ca67193d66b90e7a26403c4acd1 100644 (file)
@@ -646,7 +646,6 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
                        break;
                case INTEL_BACKLIGHT_DISPLAY_DDI:
                        try_intel_interface = true;
-                       try_vesa_interface = true;
                        break;
                default:
                        return -ENODEV;
index be6ac0dd846e8acf046defa4f7e66d285adc0f50..2ed309534e97a938ce198c9f7b3d8b559ad347ad 100644 (file)
@@ -848,7 +848,8 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
        int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
 
        if (lttpr_count < 0)
-               return;
+               /* Still continue with enabling the port and link training. */
+               lttpr_count = 0;
 
        if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
                intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
index f94025ec603a6d132b0024b05afad604834fad02..a9a8ba1d3aba93d6a136cb202335e1705a1dada6 100644 (file)
@@ -992,14 +992,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
         * FIXME As we do with eDP, just make a note of the time here
         * and perform the wait before the next panel power on.
         */
-       intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
+       msleep(intel_dsi->panel_pwr_cycle_delay);
 }
 
 static void intel_dsi_shutdown(struct intel_encoder *encoder)
 {
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
-       intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
+       msleep(intel_dsi->panel_pwr_cycle_delay);
 }
 
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
index fef1e857cefc09000d67673fbe5045039c1237c6..01c1d1b36acdb25b8eee7de912b1a612c1bced2d 100644 (file)
@@ -916,19 +916,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 
        if (!strncmp(cmd, "srm", 3) ||
                        !strncmp(cmd, "lrm", 3)) {
-               if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) &&
-                               offset != 0x21f0) {
+               if (offset == i915_mmio_reg_offset(GEN8_L3SQCREG4) ||
+                   offset == 0x21f0 ||
+                   (IS_BROADWELL(gvt->gt->i915) &&
+                    offset == i915_mmio_reg_offset(INSTPM)))
+                       return 0;
+               else {
                        gvt_vgpu_err("%s access to register (%x)\n",
                                        cmd, offset);
                        return -EPERM;
-               } else
-                       return 0;
+               }
        }
 
        if (!strncmp(cmd, "lrr-src", 7) ||
                        !strncmp(cmd, "lrr-dst", 7)) {
-               gvt_vgpu_err("not allowed cmd %s\n", cmd);
-               return -EPERM;
+               if (IS_BROADWELL(gvt->gt->i915) && offset == 0x215c)
+                       return 0;
+               else {
+                       gvt_vgpu_err("not allowed cmd %s reg (%x)\n", cmd, offset);
+                       return -EPERM;
+               }
        }
 
        if (!strncmp(cmd, "pipe_ctrl", 9)) {
index e622aee6e4be90a599f29ae3496108714580f67a..440c35f1abc9e130fdd2053ac399f6b907698d50 100644 (file)
@@ -105,7 +105,7 @@ static inline bool tasklet_is_locked(const struct tasklet_struct *t)
 static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
 {
        if (!atomic_fetch_inc(&t->count))
-               tasklet_unlock_wait(t);
+               tasklet_unlock_spin_wait(t);
 }
 
 static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
index 97b57acc02e272f4819b24e3175e679992171ed0..4b4d8d034782fc6e63761832542dfd1b30b5c1bf 100644 (file)
@@ -5471,12 +5471,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
        struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
        int ret;
 
-       memset(wm, 0, sizeof(*wm));
-
        /* Watermarks calculated in master */
        if (plane_state->planar_slave)
                return 0;
 
+       memset(wm, 0, sizeof(*wm));
+
        if (plane_state->planar_linked_plane) {
                const struct drm_framebuffer *fb = plane_state->hw.fb;
                enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
index 7e553d3efeb20fcbdcd739f076f6622c777a55bd..ce13d49e615b48c24eed63d4cb2368a5fc0824bd 100644 (file)
@@ -1386,8 +1386,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
 
 static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
-       *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
-               REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+       *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+               REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
 
        return 0;
 }
index 690409ca8a18681dc78a71c9a9448480457b024a..d553f62f4eeb8e4f6ec899d6f58c3b64be6bd547 100644 (file)
@@ -567,17 +567,17 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
        }  else {
                /*
                 * a650 tier targets don't need whereami but still need to be
-                * equal to or newer than 1.95 for other security fixes
+                * equal to or newer than 0.95 for other security fixes
                 */
                if (adreno_is_a650(adreno_gpu)) {
-                       if ((buf[0] & 0xfff) >= 0x195) {
+                       if ((buf[0] & 0xfff) >= 0x095) {
                                ret = true;
                                goto out;
                        }
 
                        DRM_DEV_ERROR(&gpu->pdev->dev,
                                "a650 SQE ucode is too old. Have version %x need at least %x\n",
-                               buf[0] & 0xfff, 0x195);
+                               buf[0] & 0xfff, 0x095);
                }
 
                /*
@@ -1228,8 +1228,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
        /* Force the GPU power on so we can read this register */
        a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
 
-       *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
-               REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+       *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+               REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
 
        a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
        mutex_unlock(&perfcounter_oob);
@@ -1406,7 +1406,13 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
        int ret;
 
        ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
-       if (ret) {
+       /*
+        * -ENOENT means that the platform doesn't support speedbin which is
+        * fine
+        */
+       if (ret == -ENOENT) {
+               return 0;
+       } else if (ret) {
                DRM_DEV_ERROR(dev,
                              "failed to read speed-bin (%d). Some OPPs may not be supported by hardware",
                              ret);
index 8981cfa9dbc3722c757fabc4da1f90caa759d3a2..92e6f1b94738682c54e6b1fe6f085a6868846f01 100644 (file)
@@ -496,7 +496,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
 
        DPU_REG_WRITE(c, CTL_TOP, mode_sel);
        DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
-       DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0));
+       if (cfg->merge_3d)
+               DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+                             BIT(cfg->merge_3d - MERGE_3D_0));
 }
 
 static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
index a5c6b8c233366d698e436bc70108e5a38cc2a621..196907689c82e6d25b0a269d449ed1a4fbff1067 100644 (file)
@@ -570,6 +570,7 @@ err_free_priv:
        kfree(priv);
 err_put_drm_dev:
        drm_dev_put(ddev);
+       platform_set_drvdata(pdev, NULL);
        return ret;
 }
 
index af381d756ac1f6c196f998999194a58858b4f0b4..5fbfb71ca3d91f8677ebb04612937c19ecef4839 100644 (file)
@@ -37,6 +37,7 @@ struct dsic_panel_data {
        u32 height_mm;
        u32 max_hs_rate;
        u32 max_lp_rate;
+       bool te_support;
 };
 
 struct panel_drv_data {
@@ -334,9 +335,11 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
        if (r)
                goto err;
 
-       r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
-       if (r)
-               goto err;
+       if (ddata->panel_data->te_support) {
+               r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+               if (r)
+                       goto err;
+       }
 
        /* possible panel bug */
        msleep(100);
@@ -619,6 +622,7 @@ static const struct dsic_panel_data taal_data = {
        .height_mm = 0,
        .max_hs_rate = 300000000,
        .max_lp_rate = 10000000,
+       .te_support = true,
 };
 
 static const struct dsic_panel_data himalaya_data = {
@@ -629,6 +633,7 @@ static const struct dsic_panel_data himalaya_data = {
        .height_mm = 88,
        .max_hs_rate = 300000000,
        .max_lp_rate = 10000000,
+       .te_support = false,
 };
 
 static const struct dsic_panel_data droid4_data = {
@@ -639,6 +644,7 @@ static const struct dsic_panel_data droid4_data = {
        .height_mm = 89,
        .max_hs_rate = 300000000,
        .max_lp_rate = 10000000,
+       .te_support = false,
 };
 
 static const struct of_device_id dsicm_of_match[] = {
index e8c66d10478f03bed887e97caa8886d67fc57c81..78893bea85aede438b9a6818034c9dcb9b960c8f 100644 (file)
@@ -364,7 +364,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
        if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
                /* check that we only pin down anonymous memory
                   to prevent problems with writeback */
-               unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+               unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;
                struct vm_area_struct *vma;
                vma = find_vma(gtt->usermm, gtt->userptr);
                if (!vma || vma->vm_file || vma->vm_end < end)
@@ -386,7 +386,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
        } while (pinned < ttm->num_pages);
 
        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
-                                     ttm->num_pages << PAGE_SHIFT,
+                                     (u64)ttm->num_pages << PAGE_SHIFT,
                                      GFP_KERNEL);
        if (r)
                goto release_sg;
index 269390bc586ed12b5a18ad3350a5ee8bcc52fcdd..76657dcdf9b00d07291dd9eb536ff3295aea3983 100644 (file)
@@ -210,6 +210,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
 {
        const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
        const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
+       struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev);
        u32 fifo_len_bytes = pv_data->fifo_depth;
 
        /*
@@ -238,6 +239,22 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
                if (crtc_data->hvs_output == 5)
                        return 32;
 
+               /*
+                * It looks like in some situations, we will overflow
+                * the PixelValve FIFO (with the bit 10 of PV stat being
+                * set) and stall the HVS / PV, eventually resulting in
+                * a page flip timeout.
+                *
+                * Displaying the video overlay during a playback with
+                * Kodi on an RPi3 seems to be a great solution with a
+                * failure rate around 50%.
+                *
+                * Removing 1 from the FIFO full level however
+                * seems to completely remove that issue.
+                */
+               if (!vc4->hvs->hvs5)
+                       return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
+
                return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
        }
 }
index 7322169c0682fe8564f014ed1e56b5916aab2c2c..1e9c84cf614a140474723c5d3debad6a7259a304 100644 (file)
@@ -1146,7 +1146,6 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
        plane->state->src_y = state->src_y;
        plane->state->src_w = state->src_w;
        plane->state->src_h = state->src_h;
-       plane->state->src_h = state->src_h;
        plane->state->alpha = state->alpha;
        plane->state->pixel_blend_mode = state->pixel_blend_mode;
        plane->state->rotation = state->rotation;
index ba658fa9cf6c6eb29c17978d4fa90d42336296f4..183571c387b758545ec014044e1bb08ae681286e 100644 (file)
@@ -481,11 +481,15 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
        vmw_bo_unreference(&old_buf);
        res->id = vcotbl->type;
 
+       /* Release the pin acquired in vmw_bo_init */
+       ttm_bo_unpin(bo);
+
        return 0;
 
 out_map_new:
        ttm_bo_kunmap(&old_map);
 out_wait:
+       ttm_bo_unpin(bo);
        ttm_bo_unreserve(bo);
        vmw_bo_unreference(&buf);
 
index dd69b51c40e418a4c7ca90656bb42f61b41552e0..6fa24645fbbf6973264e771a9a7dce1fccd224ba 100644 (file)
@@ -712,17 +712,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
        dev_priv->last_read_seqno = (uint32_t) -100;
        dev_priv->drm.dev_private = dev_priv;
 
-       ret = vmw_setup_pci_resources(dev_priv, pci_id);
-       if (ret)
-               return ret;
-       ret = vmw_detect_version(dev_priv);
-       if (ret)
-               goto out_no_pci_or_version;
-
        mutex_init(&dev_priv->cmdbuf_mutex);
-       mutex_init(&dev_priv->release_mutex);
        mutex_init(&dev_priv->binding_mutex);
-       mutex_init(&dev_priv->global_kms_state_mutex);
        ttm_lock_init(&dev_priv->reservation_sem);
        spin_lock_init(&dev_priv->resource_lock);
        spin_lock_init(&dev_priv->hw_lock);
@@ -730,6 +721,14 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
        spin_lock_init(&dev_priv->cap_lock);
        spin_lock_init(&dev_priv->cursor_lock);
 
+       ret = vmw_setup_pci_resources(dev_priv, pci_id);
+       if (ret)
+               return ret;
+       ret = vmw_detect_version(dev_priv);
+       if (ret)
+               goto out_no_pci_or_version;
+
+
        for (i = vmw_res_context; i < vmw_res_max; ++i) {
                idr_init(&dev_priv->res_idr[i]);
                INIT_LIST_HEAD(&dev_priv->res_lru[i]);
index 5fa5bcd20cc5c1abf7a30fa7cc8f05bd29d5549d..eb76a6b9ebcae1bd029be5d5917e5bfc4436e46d 100644 (file)
@@ -529,7 +529,6 @@ struct vmw_private {
        struct vmw_overlay *overlay_priv;
        struct drm_property *hotplug_mode_update_property;
        struct drm_property *implicit_placement_property;
-       struct mutex global_kms_state_mutex;
        spinlock_t cursor_lock;
        struct drm_atomic_state *suspend_state;
 
@@ -592,7 +591,6 @@ struct vmw_private {
        bool refuse_hibernation;
        bool suspend_locked;
 
-       struct mutex release_mutex;
        atomic_t num_fifo_resources;
 
        /*
@@ -1524,9 +1522,8 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
        struct vmw_buffer_object *tmp_buf = *buf;
 
        *buf = NULL;
-       if (tmp_buf != NULL) {
+       if (tmp_buf != NULL)
                ttm_bo_put(&tmp_buf->base);
-       }
 }
 
 static inline struct vmw_buffer_object *
index a372980fe6a54f18b5bf2e6f0ee37d0f37fd56c6..f2d6254154585826b57c909d2d22c6d20e70af9e 100644 (file)
@@ -94,6 +94,16 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
                             struct vmw_piter data_iter,
                             unsigned long num_data_pages);
 
+
+static inline void vmw_bo_unpin_unlocked(struct ttm_buffer_object *bo)
+{
+       int ret = ttm_bo_reserve(bo, false, true, NULL);
+       BUG_ON(ret != 0);
+       ttm_bo_unpin(bo);
+       ttm_bo_unreserve(bo);
+}
+
+
 /*
  * vmw_setup_otable_base - Issue an object table base setup command to
  * the device
@@ -277,6 +287,7 @@ out_no_setup:
                                                 &batch->otables[i]);
        }
 
+       vmw_bo_unpin_unlocked(batch->otable_bo);
        ttm_bo_put(batch->otable_bo);
        batch->otable_bo = NULL;
        return ret;
@@ -340,6 +351,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
        BUG_ON(ret != 0);
 
        vmw_bo_fence_single(bo, NULL);
+       ttm_bo_unpin(bo);
        ttm_bo_unreserve(bo);
 
        ttm_bo_put(batch->otable_bo);
@@ -528,6 +540,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
 void vmw_mob_destroy(struct vmw_mob *mob)
 {
        if (mob->pt_bo) {
+               vmw_bo_unpin_unlocked(mob->pt_bo);
                ttm_bo_put(mob->pt_bo);
                mob->pt_bo = NULL;
        }
@@ -643,6 +656,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 out_no_cmd_space:
        vmw_fifo_resource_dec(dev_priv);
        if (pt_set_up) {
+               vmw_bo_unpin_unlocked(mob->pt_bo);
                ttm_bo_put(mob->pt_bo);
                mob->pt_bo = NULL;
        }
index 30d9adf31c8442d256042bcc7eb765bcd8d28e9a..9f14d99c763c2b359b47b78dc9c76787b5ba97ab 100644 (file)
@@ -521,7 +521,7 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
        drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
        if (IS_ERR(drm_dev)) {
                ret = PTR_ERR(drm_dev);
-               goto fail;
+               goto fail_dev;
        }
 
        drm_info->drm_dev = drm_dev;
@@ -551,8 +551,10 @@ fail_modeset:
        drm_kms_helper_poll_fini(drm_dev);
        drm_mode_config_cleanup(drm_dev);
        drm_dev_put(drm_dev);
-fail:
+fail_dev:
        kfree(drm_info);
+       front_info->drm_info = NULL;
+fail:
        return ret;
 }
 
index 3adacba9a23bfb2712b17f7a4094029d83cdb895..e5f4314899ee7044968bb0101dd36ebecd91254e 100644 (file)
@@ -16,7 +16,6 @@
 struct drm_connector;
 struct xen_drm_front_drm_info;
 
-struct xen_drm_front_drm_info;
 
 int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
                            struct drm_connector *connector);
index 48ad154df3a78c88092807787bb3d5db65d3a334..15661c7f363365a38e962975f1bb4158dbe7d92a 100644 (file)
@@ -72,11 +72,11 @@ struct es2_cport_in {
 };
 
 /**
- * es2_ap_dev - ES2 USB Bridge to AP structure
+ * struct es2_ap_dev - ES2 USB Bridge to AP structure
  * @usb_dev: pointer to the USB device we are.
  * @usb_intf: pointer to the USB interface we are bound to.
  * @hd: pointer to our gb_host_device structure
-
+ *
  * @cport_in: endpoint, urbs and buffer for cport in messages
  * @cport_out_endpoint: endpoint for for cport out messages
  * @cport_out_urb: array of urbs for the CPort out messages
@@ -85,7 +85,7 @@ struct es2_cport_in {
  * @cport_out_urb_cancelled: array of flags indicating whether the
  *                     corresponding @cport_out_urb is being cancelled
  * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
- *
+ * @cdsi1_in_use: true if cport CDSI1 is in use
  * @apb_log_task: task pointer for logging thread
  * @apb_log_dentry: file system entry for the log file interface
  * @apb_log_enable_dentry: file system entry for enabling logging
@@ -1171,7 +1171,7 @@ static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
        char tmp_buf[3];
 
        sprintf(tmp_buf, "%d\n", enable);
-       return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
+       return simple_read_from_buffer(buf, count, ppos, tmp_buf, 2);
 }
 
 static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
index dbac1664166277ba2117b2dde68677699200cfd6..ddecc84fd6f0d62d939df5fbe51bfb27ac051cb8 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmi.h>
 #include <linux/interrupt.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/module.h>
 
 #define ACEL_EN                BIT(0)
 #define GYRO_EN                BIT(1)
-#define MAGNO_EN               BIT(2)
+#define MAGNO_EN       BIT(2)
 #define ALS_EN         BIT(19)
 
+static int sensor_mask_override = -1;
+module_param_named(sensor_mask, sensor_mask_override, int, 0444);
+MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
+
 void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
 {
        union sfh_cmd_param cmd_param;
@@ -73,12 +78,41 @@ void amd_stop_all_sensors(struct amd_mp2_dev *privdata)
        writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
 }
 
+static const struct dmi_system_id dmi_sensor_mask_overrides[] = {
+       {
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 13-ag0xxx"),
+               },
+               .driver_data = (void *)(ACEL_EN | MAGNO_EN),
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 15-cp0xxx"),
+               },
+               .driver_data = (void *)(ACEL_EN | MAGNO_EN),
+       },
+       { }
+};
+
 int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id)
 {
        int activestatus, num_of_sensors = 0;
+       const struct dmi_system_id *dmi_id;
+       u32 activecontrolstatus;
+
+       if (sensor_mask_override == -1) {
+               dmi_id = dmi_first_match(dmi_sensor_mask_overrides);
+               if (dmi_id)
+                       sensor_mask_override = (long)dmi_id->driver_data;
+       }
+
+       if (sensor_mask_override >= 0) {
+               activestatus = sensor_mask_override;
+       } else {
+               activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
+               activestatus = activecontrolstatus >> 4;
+       }
 
-       privdata->activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
-       activestatus = privdata->activecontrolstatus >> 4;
        if (ACEL_EN  & activestatus)
                sensor_id[num_of_sensors++] = accel_idx;
 
index 8f8d19b2cfe5ba611ac62ea0fdf63327abe74a97..489415f7c22ca1eb3c2713f5e207e37c48ae18c3 100644 (file)
@@ -61,7 +61,6 @@ struct amd_mp2_dev {
        struct pci_dev *pdev;
        struct amdtp_cl_data *cl_data;
        void __iomem *mmio;
-       u32 activecontrolstatus;
 };
 
 struct amd_mp2_sensor_info {
index 3feaece13ade06b566aa3da7c530a9fdeca44a79..6b665931147dfe2fe350b13e1a754b66a9f3a2a6 100644 (file)
@@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
 
                if (input_register_device(data->input2)) {
                        input_free_device(input2);
+                       ret = -ENOENT;
                        goto exit;
                }
        }
index 1dfe184ebf5a13eb88d4ac7e2a8f139c702cd874..2ab22b92594182816883da368b7c3209b81aba56 100644 (file)
@@ -1221,6 +1221,9 @@ static const struct hid_device_id asus_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
            USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD),
          QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+           USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2),
+         QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
          QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
index 21e15627a46145a5b0a6e1a44b2268b5922d99a7..477baa30889ccdfec9f9e0a8df124cc5fcf6f8ac 100644 (file)
@@ -161,6 +161,7 @@ struct cp2112_device {
        atomic_t read_avail;
        atomic_t xfer_avail;
        struct gpio_chip gc;
+       struct irq_chip irq;
        u8 *in_out_buffer;
        struct mutex lock;
 
@@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
        return 0;
 }
 
-static struct irq_chip cp2112_gpio_irqchip = {
-       .name = "cp2112-gpio",
-       .irq_startup = cp2112_gpio_irq_startup,
-       .irq_shutdown = cp2112_gpio_irq_shutdown,
-       .irq_ack = cp2112_gpio_irq_ack,
-       .irq_mask = cp2112_gpio_irq_mask,
-       .irq_unmask = cp2112_gpio_irq_unmask,
-       .irq_set_type = cp2112_gpio_irq_type,
-};
-
 static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
                                              int pin)
 {
@@ -1339,8 +1330,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
        dev->gc.can_sleep               = 1;
        dev->gc.parent                  = &hdev->dev;
 
+       dev->irq.name = "cp2112-gpio";
+       dev->irq.irq_startup = cp2112_gpio_irq_startup;
+       dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
+       dev->irq.irq_ack = cp2112_gpio_irq_ack;
+       dev->irq.irq_mask = cp2112_gpio_irq_mask;
+       dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
+       dev->irq.irq_set_type = cp2112_gpio_irq_type;
+       dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
+
        girq = &dev->gc.irq;
-       girq->chip = &cp2112_gpio_irqchip;
+       girq->chip = &dev->irq;
        /* The event comes from the outside so no parent handler */
        girq->parent_handler = NULL;
        girq->num_parents = 0;
index d9319622da44b2021abb4d03b9ad3ba5caa1f4f8..e60c31dd05ffb5f3fb427c8dca23ebe938c1def6 100644 (file)
@@ -573,6 +573,8 @@ static void hammer_remove(struct hid_device *hdev)
 }
 
 static const struct hid_device_id hammer_devices[] = {
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
index e42aaae3138fecdbd198a38f807367422430dcb6..67fd8a2f5aba3a64d7f9b9da3cac9cde696cfbcf 100644 (file)
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
 #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD        0x1866
+#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2       0x19b6
 #define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869
 
 #define USB_VENDOR_ID_ATEN             0x0557
 #define USB_DEVICE_ID_GOOGLE_MASTERBALL        0x503c
 #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
 #define USB_DEVICE_ID_GOOGLE_MOONBALL  0x5044
+#define USB_DEVICE_ID_GOOGLE_DON       0x5050
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
index 44d715c12f6ab77f800b68d917cce1c455ac3c15..2d70dc4bea654bc9bfbf3df81bb15990ffc8c11a 100644 (file)
@@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
            !wacom_wac->shared->is_touch_on) {
                if (!wacom_wac->shared->touch_down)
                        return;
-               prox = 0;
+               prox = false;
        }
 
        wacom_wac->hid_data.num_received++;
@@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
 {
        struct wacom_features *features = &wacom_wac->features;
 
-       input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
        if (!(features->device_type & WACOM_DEVICETYPE_PEN))
                return -ENODEV;
 
@@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
                return 0;
        }
 
+       input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
        __set_bit(BTN_TOUCH, input_dev->keybit);
        __set_bit(ABS_MISC, input_dev->absbit);
 
@@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
 {
        struct wacom_features *features = &wacom_wac->features;
 
-       input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
        if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
                return -ENODEV;
 
@@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
                /* setup has already been done */
                return 0;
 
+       input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
        __set_bit(BTN_TOUCH, input_dev->keybit);
 
        if (features->touch_max == 1) {
index 79e5356a737a2ad6b9fb19a498613eba20aa9761..66c794d92391fc73376934f5617652fcd8213b23 100644 (file)
@@ -23,6 +23,7 @@ config HYPERV_UTILS
 config HYPERV_BALLOON
        tristate "Microsoft Hyper-V Balloon driver"
        depends on HYPERV
+       select PAGE_REPORTING
        help
          Select this option to enable Hyper-V Balloon driver.
 
index 0bd202de79600393062817ffa160222cd737074d..c2635e913a92f97d8d506f15b1424adc0ca59eba 100644 (file)
@@ -209,31 +209,96 @@ int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
 }
 EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
 
+static int send_modifychannel_without_ack(struct vmbus_channel *channel, u32 target_vp)
+{
+       struct vmbus_channel_modifychannel msg;
+       int ret;
+
+       memset(&msg, 0, sizeof(msg));
+       msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
+       msg.child_relid = channel->offermsg.child_relid;
+       msg.target_vp = target_vp;
+
+       ret = vmbus_post_msg(&msg, sizeof(msg), true);
+       trace_vmbus_send_modifychannel(&msg, ret);
+
+       return ret;
+}
+
+static int send_modifychannel_with_ack(struct vmbus_channel *channel, u32 target_vp)
+{
+       struct vmbus_channel_modifychannel *msg;
+       struct vmbus_channel_msginfo *info;
+       unsigned long flags;
+       int ret;
+
+       info = kzalloc(sizeof(struct vmbus_channel_msginfo) +
+                               sizeof(struct vmbus_channel_modifychannel),
+                      GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       init_completion(&info->waitevent);
+       info->waiting_channel = channel;
+
+       msg = (struct vmbus_channel_modifychannel *)info->msg;
+       msg->header.msgtype = CHANNELMSG_MODIFYCHANNEL;
+       msg->child_relid = channel->offermsg.child_relid;
+       msg->target_vp = target_vp;
+
+       spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+       list_add_tail(&info->msglistentry, &vmbus_connection.chn_msg_list);
+       spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+       ret = vmbus_post_msg(msg, sizeof(*msg), true);
+       trace_vmbus_send_modifychannel(msg, ret);
+       if (ret != 0) {
+               spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+               list_del(&info->msglistentry);
+               spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+               goto free_info;
+       }
+
+       /*
+        * Release channel_mutex; otherwise, vmbus_onoffer_rescind() could block on
+        * the mutex and be unable to signal the completion.
+        *
+        * See the caller target_cpu_store() for information about the usage of the
+        * mutex.
+        */
+       mutex_unlock(&vmbus_connection.channel_mutex);
+       wait_for_completion(&info->waitevent);
+       mutex_lock(&vmbus_connection.channel_mutex);
+
+       spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+       list_del(&info->msglistentry);
+       spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+       if (info->response.modify_response.status)
+               ret = -EAGAIN;
+
+free_info:
+       kfree(info);
+       return ret;
+}
+
 /*
  * Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
  *
- * CHANNELMSG_MODIFYCHANNEL messages are aynchronous.  Also, Hyper-V does not
- * ACK such messages.  IOW we can't know when the host will stop interrupting
- * the "old" vCPU and start interrupting the "new" vCPU for the given channel.
+ * CHANNELMSG_MODIFYCHANNEL messages are aynchronous.  When VMbus version 5.3
+ * or later is negotiated, Hyper-V always sends an ACK in response to such a
+ * message.  For VMbus version 5.2 and earlier, it never sends an ACK.  With-
+ * out an ACK, we can not know when the host will stop interrupting the "old"
+ * vCPU and start interrupting the "new" vCPU for the given channel.
  *
  * The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
  * VERSION_WIN10_V4_1.
  */
-int vmbus_send_modifychannel(u32 child_relid, u32 target_vp)
+int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp)
 {
-       struct vmbus_channel_modifychannel conn_msg;
-       int ret;
-
-       memset(&conn_msg, 0, sizeof(conn_msg));
-       conn_msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
-       conn_msg.child_relid = child_relid;
-       conn_msg.target_vp = target_vp;
-
-       ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
-
-       trace_vmbus_send_modifychannel(&conn_msg, ret);
-
-       return ret;
+       if (vmbus_proto_version >= VERSION_WIN10_V5_3)
+               return send_modifychannel_with_ack(channel, target_vp);
+       return send_modifychannel_without_ack(channel, target_vp);
 }
 EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
 
@@ -385,7 +450,7 @@ nomem:
  * @kbuffer: from kmalloc or vmalloc
  * @size: page-size multiple
  * @send_offset: the offset (in bytes) where the send ring buffer starts,
- *              should be 0 for BUFFER type gpadl
+ *              should be 0 for BUFFER type gpadl
  * @gpadl_handle: some funky thing
  */
 static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
@@ -653,7 +718,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
 
        if (newchannel->rescind) {
                err = -ENODEV;
-               goto error_free_info;
+               goto error_clean_msglist;
        }
 
        err = vmbus_post_msg(open_msg,
index f0ed730e2e4e40355764db79f6d77254fe455162..caf6d0c4bc1b1d72efeb1e0d0b2bd2c81321241d 100644 (file)
@@ -333,7 +333,6 @@ fw_error:
        negop->icversion_data[1].minor = icmsg_minor;
        return found_match;
 }
-
 EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
 
 /*
@@ -593,10 +592,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
         * CPUS_READ_UNLOCK             CPUS_WRITE_UNLOCK
         *
         * Forbids: CPU1's LOAD from *not* seeing CPU2's STORE &&
-        *              CPU2's SEARCH from *not* seeing CPU1's INSERT
+        *              CPU2's SEARCH from *not* seeing CPU1's INSERT
         *
         * Forbids: CPU2's SEARCH from seeing CPU1's INSERT &&
-        *              CPU2's LOAD from *not* seeing CPU1's STORE
+        *              CPU2's LOAD from *not* seeing CPU1's STORE
         */
        cpus_read_lock();
 
@@ -756,6 +755,12 @@ static void init_vp_index(struct vmbus_channel *channel)
        free_cpumask_var(available_mask);
 }
 
+#define UNLOAD_DELAY_UNIT_MS   10              /* 10 milliseconds */
+#define UNLOAD_WAIT_MS         (100*1000)      /* 100 seconds */
+#define UNLOAD_WAIT_LOOPS      (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
+#define UNLOAD_MSG_MS          (5*1000)        /* Every 5 seconds */
+#define UNLOAD_MSG_LOOPS       (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
+
 static void vmbus_wait_for_unload(void)
 {
        int cpu;
@@ -773,12 +778,17 @@ static void vmbus_wait_for_unload(void)
         * vmbus_connection.unload_event. If not, the last thing we can do is
         * read message pages for all CPUs directly.
         *
-        * Wait no more than 10 seconds so that the panic path can't get
-        * hung forever in case the response message isn't seen.
+        * Wait up to 100 seconds since an Azure host must writeback any dirty
+        * data in its disk cache before the VMbus UNLOAD request will
+        * complete. This flushing has been empirically observed to take up
+        * to 50 seconds in cases with a lot of dirty data, so allow additional
+        * leeway and for inaccuracies in mdelay(). But eventually time out so
+        * that the panic path can't get hung forever in case the response
+        * message isn't seen.
         */
-       for (i = 0; i < 1000; i++) {
+       for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
                if (completion_done(&vmbus_connection.unload_event))
-                       break;
+                       goto completed;
 
                for_each_online_cpu(cpu) {
                        struct hv_per_cpu_context *hv_cpu
@@ -801,9 +811,18 @@ static void vmbus_wait_for_unload(void)
                        vmbus_signal_eom(msg, message_type);
                }
 
-               mdelay(10);
+               /*
+                * Give a notice periodically so someone watching the
+                * serial output won't think it is completely hung.
+                */
+               if (!(i % UNLOAD_MSG_LOOPS))
+                       pr_notice("Waiting for VMBus UNLOAD to complete\n");
+
+               mdelay(UNLOAD_DELAY_UNIT_MS);
        }
+       pr_err("Continuing even though VMBus UNLOAD did not complete\n");
 
+completed:
        /*
         * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
         * maybe-pending messages on all CPUs to be able to receive new
@@ -827,6 +846,11 @@ static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
        /*
         * This is a global event; just wakeup the waiting thread.
         * Once we successfully unload, we can cleanup the monitor state.
+        *
+        * NB.  A malicious or compromised Hyper-V could send a spurious
+        * message of type CHANNELMSG_UNLOAD_RESPONSE, and trigger a call
+        * of the complete() below.  Make sure that unload_event has been
+        * initialized by the time this complete() is executed.
         */
        complete(&vmbus_connection.unload_event);
 }
@@ -842,7 +866,7 @@ void vmbus_initiate_unload(bool crash)
        if (vmbus_proto_version < VERSION_WIN8_1)
                return;
 
-       init_completion(&vmbus_connection.unload_event);
+       reinit_completion(&vmbus_connection.unload_event);
        memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
        hdr.msgtype = CHANNELMSG_UNLOAD;
        vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
@@ -980,7 +1004,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
                 *                                      UNLOCK channel_mutex
                 *
                 * Forbids: r1 == valid_relid &&
-                *              channels[valid_relid] == channel
+                *              channels[valid_relid] == channel
                 *
                 * Note.  r1 can be INVALID_RELID only for an hv_sock channel.
                 * None of the hv_sock channels which were present before the
@@ -1312,6 +1336,46 @@ static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 }
 
+/*
+ * vmbus_onmodifychannel_response - Modify Channel response handler.
+ *
+ * This is invoked when we receive a response to our channel modify request.
+ * Find the matching request, copy the response and signal the requesting thread.
+ */
+static void vmbus_onmodifychannel_response(struct vmbus_channel_message_header *hdr)
+{
+       struct vmbus_channel_modifychannel_response *response;
+       struct vmbus_channel_msginfo *msginfo;
+       unsigned long flags;
+
+       response = (struct vmbus_channel_modifychannel_response *)hdr;
+
+       trace_vmbus_onmodifychannel_response(response);
+
+       /*
+        * Find the modify msg, copy the response and signal/unblock the wait event.
+        */
+       spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+
+       list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, msglistentry) {
+               struct vmbus_channel_message_header *responseheader =
+                               (struct vmbus_channel_message_header *)msginfo->msg;
+
+               if (responseheader->msgtype == CHANNELMSG_MODIFYCHANNEL) {
+                       struct vmbus_channel_modifychannel *modifymsg;
+
+                       modifymsg = (struct vmbus_channel_modifychannel *)msginfo->msg;
+                       if (modifymsg->child_relid == response->child_relid) {
+                               memcpy(&msginfo->response.modify_response, response,
+                                      sizeof(*response));
+                               complete(&msginfo->waitevent);
+                               break;
+                       }
+               }
+       }
+       spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+}
+
 /*
  * vmbus_ongpadl_torndown - GPADL torndown handler.
  *
@@ -1429,6 +1493,8 @@ channel_message_table[CHANNELMSG_COUNT] = {
        { CHANNELMSG_TL_CONNECT_REQUEST,        0, NULL, 0},
        { CHANNELMSG_MODIFYCHANNEL,             0, NULL, 0},
        { CHANNELMSG_TL_CONNECT_RESULT,         0, NULL, 0},
+       { CHANNELMSG_MODIFYCHANNEL_RESPONSE,    1, vmbus_onmodifychannel_response,
+               sizeof(struct vmbus_channel_modifychannel_response)},
 };
 
 /*
index c83612cddb99508e330cc60155bca724e85c300f..311cd005b3be666e17d36ac98475d76d17953036 100644 (file)
 
 struct vmbus_connection vmbus_connection = {
        .conn_state             = DISCONNECTED,
+       .unload_event           = COMPLETION_INITIALIZER(
+                                 vmbus_connection.unload_event),
        .next_gpadl_handle      = ATOMIC_INIT(0xE1E10),
 
-       .ready_for_suspend_event= COMPLETION_INITIALIZER(
+       .ready_for_suspend_event = COMPLETION_INITIALIZER(
                                  vmbus_connection.ready_for_suspend_event),
        .ready_for_resume_event = COMPLETION_INITIALIZER(
                                  vmbus_connection.ready_for_resume_event),
@@ -45,6 +47,7 @@ EXPORT_SYMBOL_GPL(vmbus_proto_version);
  * Table of VMBus versions listed from newest to oldest.
  */
 static __u32 vmbus_versions[] = {
+       VERSION_WIN10_V5_3,
        VERSION_WIN10_V5_2,
        VERSION_WIN10_V5_1,
        VERSION_WIN10_V5,
@@ -60,7 +63,7 @@ static __u32 vmbus_versions[] = {
  * Maximal VMBus protocol version guests can negotiate.  Useful to cap the
  * VMBus version for testing and debugging purpose.
  */
-static uint max_version = VERSION_WIN10_V5_2;
+static uint max_version = VERSION_WIN10_V5_3;
 
 module_param(max_version, uint, S_IRUGO);
 MODULE_PARM_DESC(max_version,
index f202ac7f4b3d47c254f1bc90e1f5facaa2d20260..e83507f49676ddd221abda1cdb9e1d8d28af5edf 100644 (file)
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/hyperv.h>
-#include <linux/version.h>
 #include <linux/random.h>
 #include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
 #include <clocksource/hyperv_timer.h>
 #include <asm/mshyperv.h>
 #include "hyperv_vmbus.h"
@@ -36,6 +37,42 @@ int hv_init(void)
        return 0;
 }
 
+/*
+ * Functions for allocating and freeing memory with size and
+ * alignment HV_HYP_PAGE_SIZE. These functions are needed because
+ * the guest page size may not be the same as the Hyper-V page
+ * size. We depend upon kmalloc() aligning power-of-two size
+ * allocations to the allocation size boundary, so that the
+ * allocated memory appears to Hyper-V as a page of the size
+ * it expects.
+ */
+
+void *hv_alloc_hyperv_page(void)
+{
+       BUILD_BUG_ON(PAGE_SIZE <  HV_HYP_PAGE_SIZE);
+
+       if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
+               return (void *)__get_free_page(GFP_KERNEL);
+       else
+               return kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
+}
+
+void *hv_alloc_hyperv_zeroed_page(void)
+{
+       if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
+               return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+       else
+               return kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
+}
+
+void hv_free_hyperv_page(unsigned long addr)
+{
+       if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
+               free_page(addr);
+       else
+               kfree((void *)addr);
+}
+
 /*
  * hv_post_message - Post a message using the hypervisor message IPC.
  *
@@ -68,7 +105,7 @@ int hv_post_message(union hv_connection_id connection_id,
         */
        put_cpu_ptr(hv_cpu);
 
-       return status & 0xFFFF;
+       return hv_result(status);
 }
 
 int hv_synic_alloc(void)
@@ -162,34 +199,48 @@ void hv_synic_enable_regs(unsigned int cpu)
        union hv_synic_scontrol sctrl;
 
        /* Setup the Synic's message page */
-       hv_get_simp(simp.as_uint64);
+       simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
        simp.simp_enabled = 1;
        simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
                >> HV_HYP_PAGE_SHIFT;
 
-       hv_set_simp(simp.as_uint64);
+       hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
 
        /* Setup the Synic's event page */
-       hv_get_siefp(siefp.as_uint64);
+       siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
        siefp.siefp_enabled = 1;
        siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
                >> HV_HYP_PAGE_SHIFT;
 
-       hv_set_siefp(siefp.as_uint64);
+       hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
 
        /* Setup the shared SINT. */
-       hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+       if (vmbus_irq != -1)
+               enable_percpu_irq(vmbus_irq, 0);
+       shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
+                                       VMBUS_MESSAGE_SINT);
 
-       shared_sint.vector = hv_get_vector();
+       shared_sint.vector = vmbus_interrupt;
        shared_sint.masked = false;
-       shared_sint.auto_eoi = hv_recommend_using_aeoi();
-       hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+
+       /*
+        * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
+        * it doesn't provide a recommendation flag and AEOI must be disabled.
+        */
+#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
+       shared_sint.auto_eoi =
+                       !(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
+#else
+       shared_sint.auto_eoi = 0;
+#endif
+       hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
+                               shared_sint.as_uint64);
 
        /* Enable the global synic bit */
-       hv_get_synic_state(sctrl.as_uint64);
+       sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
        sctrl.enable = 1;
 
-       hv_set_synic_state(sctrl.as_uint64);
+       hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
 }
 
 int hv_synic_init(unsigned int cpu)
@@ -211,30 +262,71 @@ void hv_synic_disable_regs(unsigned int cpu)
        union hv_synic_siefp siefp;
        union hv_synic_scontrol sctrl;
 
-       hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+       shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
+                                       VMBUS_MESSAGE_SINT);
 
        shared_sint.masked = 1;
 
        /* Need to correctly cleanup in the case of SMP!!! */
        /* Disable the interrupt */
-       hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+       hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
+                               shared_sint.as_uint64);
 
-       hv_get_simp(simp.as_uint64);
+       simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
        simp.simp_enabled = 0;
        simp.base_simp_gpa = 0;
 
-       hv_set_simp(simp.as_uint64);
+       hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
 
-       hv_get_siefp(siefp.as_uint64);
+       siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
        siefp.siefp_enabled = 0;
        siefp.base_siefp_gpa = 0;
 
-       hv_set_siefp(siefp.as_uint64);
+       hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
 
        /* Disable the global synic bit */
-       hv_get_synic_state(sctrl.as_uint64);
+       sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
        sctrl.enable = 0;
-       hv_set_synic_state(sctrl.as_uint64);
+       hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
+
+       if (vmbus_irq != -1)
+               disable_percpu_irq(vmbus_irq);
+}
+
+#define HV_MAX_TRIES 3
+/*
+ * Scan the event flags page of 'this' CPU looking for any bit that is set.  If we find one
+ * bit set, then wait for a few milliseconds.  Repeat these steps for a maximum of 3 times.
+ * Return 'true', if there is still any set bit after this operation; 'false', otherwise.
+ *
+ * If a bit is set, that means there is a pending channel interrupt.  The expectation is
+ * that the normal interrupt handling mechanism will find and process the channel interrupt
+ * "very soon", and in the process clear the bit.
+ */
+static bool hv_synic_event_pending(void)
+{
+       struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
+       union hv_synic_event_flags *event =
+               (union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT;
+       unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
+       bool pending;
+       u32 relid;
+       int tries = 0;
+
+retry:
+       pending = false;
+       for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
+               /* Special case - VMBus channel protocol messages */
+               if (relid == 0)
+                       continue;
+               pending = true;
+               break;
+       }
+       if (pending && tries++ < HV_MAX_TRIES) {
+               usleep_range(10000, 20000);
+               goto retry;
+       }
+       return pending;
 }
 
 int hv_synic_cleanup(unsigned int cpu)
@@ -242,6 +334,9 @@ int hv_synic_cleanup(unsigned int cpu)
        struct vmbus_channel *channel, *sc;
        bool channel_found = false;
 
+       if (vmbus_connection.conn_state != CONNECTED)
+               goto always_cleanup;
+
        /*
         * Hyper-V does not provide a way to change the connect CPU once
         * it is set; we must prevent the connect CPU from going offline
@@ -249,8 +344,7 @@ int hv_synic_cleanup(unsigned int cpu)
         * path where the vmbus is already disconnected, the CPU must be
         * allowed to shut down.
         */
-       if (cpu == VMBUS_CONNECT_CPU &&
-           vmbus_connection.conn_state == CONNECTED)
+       if (cpu == VMBUS_CONNECT_CPU)
                return -EBUSY;
 
        /*
@@ -277,9 +371,21 @@ int hv_synic_cleanup(unsigned int cpu)
        }
        mutex_unlock(&vmbus_connection.channel_mutex);
 
-       if (channel_found && vmbus_connection.conn_state == CONNECTED)
+       if (channel_found)
+               return -EBUSY;
+
+       /*
+        * channel_found == false means that any channels that were previously
+        * assigned to the CPU have been reassigned elsewhere with a call of
+        * vmbus_send_modifychannel().  Scan the event flags page looking for
+        * bits that are set and waiting with a timeout for vmbus_chan_sched()
+        * to process such bits.  If bits are still set after this operation
+        * and VMBus is connected, fail the CPU offlining operation.
+        */
+       if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
                return -EBUSY;
 
+always_cleanup:
        hv_stimer_legacy_cleanup(cpu);
 
        hv_synic_disable_regs(cpu);
index 2f776d78e3c1a27740d4517363d8525bd19e89f9..58af84e30144b864b952862297c8c62684d6ddb2 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/memory.h>
 #include <linux/notifier.h>
 #include <linux/percpu_counter.h>
+#include <linux/page_reporting.h>
 
 #include <linux/hyperv.h>
 #include <asm/hyperv-tlfs.h>
@@ -563,6 +564,8 @@ struct hv_dynmem_device {
         * The negotiated version agreed by host.
         */
        __u32 version;
+
+       struct page_reporting_dev_info pr_dev_info;
 };
 
 static struct hv_dynmem_device dm_device;
@@ -1568,6 +1571,89 @@ static void balloon_onchannelcallback(void *context)
 
 }
 
+/* Hyper-V only supports reporting 2MB pages or higher */
+#define HV_MIN_PAGE_REPORTING_ORDER    9
+#define HV_MIN_PAGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << HV_MIN_PAGE_REPORTING_ORDER)
+static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
+                   struct scatterlist *sgl, unsigned int nents)
+{
+       unsigned long flags;
+       struct hv_memory_hint *hint;
+       int i;
+       u64 status;
+       struct scatterlist *sg;
+
+       WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
+       WARN_ON_ONCE(sgl->length < HV_MIN_PAGE_REPORTING_LEN);
+       local_irq_save(flags);
+       hint = *(struct hv_memory_hint **)this_cpu_ptr(hyperv_pcpu_input_arg);
+       if (!hint) {
+               local_irq_restore(flags);
+               return -ENOSPC;
+       }
+
+       hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD;
+       hint->reserved = 0;
+       for_each_sg(sgl, sg, nents, i) {
+               union hv_gpa_page_range *range;
+
+               range = &hint->ranges[i];
+               range->address_space = 0;
+               /* page reporting only reports 2MB pages or higher */
+               range->page.largepage = 1;
+               range->page.additional_pages =
+                       (sg->length / HV_MIN_PAGE_REPORTING_LEN) - 1;
+               range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
+               range->base_large_pfn =
+                       page_to_hvpfn(sg_page(sg)) >> HV_MIN_PAGE_REPORTING_ORDER;
+       }
+
+       status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
+                                    hint, NULL);
+       local_irq_restore(flags);
+       if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) {
+               pr_err("Cold memory discard hypercall failed with status %llx\n",
+                       status);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void enable_page_reporting(void)
+{
+       int ret;
+
+       /* Essentially, validating 'PAGE_REPORTING_MIN_ORDER' is big enough. */
+       if (pageblock_order < HV_MIN_PAGE_REPORTING_ORDER) {
+               pr_debug("Cold memory discard is only supported on 2MB pages and above\n");
+               return;
+       }
+
+       if (!hv_query_ext_cap(HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT)) {
+               pr_debug("Cold memory discard hint not supported by Hyper-V\n");
+               return;
+       }
+
+       BUILD_BUG_ON(PAGE_REPORTING_CAPACITY > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
+       dm_device.pr_dev_info.report = hv_free_page_report;
+       ret = page_reporting_register(&dm_device.pr_dev_info);
+       if (ret < 0) {
+               dm_device.pr_dev_info.report = NULL;
+               pr_err("Failed to enable cold memory discard: %d\n", ret);
+       } else {
+               pr_info("Cold memory discard hint enabled\n");
+       }
+}
+
+static void disable_page_reporting(void)
+{
+       if (dm_device.pr_dev_info.report) {
+               page_reporting_unregister(&dm_device.pr_dev_info);
+               dm_device.pr_dev_info.report = NULL;
+       }
+}
+
 static int balloon_connect_vsp(struct hv_device *dev)
 {
        struct dm_version_request version_req;
@@ -1713,6 +1799,7 @@ static int balloon_probe(struct hv_device *dev,
        if (ret != 0)
                return ret;
 
+       enable_page_reporting();
        dm_device.state = DM_INITIALIZED;
 
        dm_device.thread =
@@ -1727,6 +1814,7 @@ static int balloon_probe(struct hv_device *dev,
 probe_error:
        dm_device.state = DM_INIT_ERROR;
        dm_device.thread  = NULL;
+       disable_page_reporting();
        vmbus_close(dev->channel);
 #ifdef CONFIG_MEMORY_HOTPLUG
        unregister_memory_notifier(&hv_memory_nb);
@@ -1749,6 +1837,7 @@ static int balloon_remove(struct hv_device *dev)
        cancel_work_sync(&dm->ha_wrk.wrk);
 
        kthread_stop(dm->thread);
+       disable_page_reporting();
        vmbus_close(dev->channel);
 #ifdef CONFIG_MEMORY_HOTPLUG
        unregister_memory_notifier(&hv_memory_nb);
index 6063bb21bb137198f83a637ccdcbdc505671f8e2..c02a1719e92f2042ab265df2b395114458455440 100644 (file)
@@ -103,6 +103,21 @@ TRACE_EVENT(vmbus_ongpadl_created,
                    )
        );
 
+TRACE_EVENT(vmbus_onmodifychannel_response,
+           TP_PROTO(const struct vmbus_channel_modifychannel_response *response),
+           TP_ARGS(response),
+           TP_STRUCT__entry(
+                   __field(u32, child_relid)
+                   __field(u32, status)
+                   ),
+           TP_fast_assign(__entry->child_relid = response->child_relid;
+                          __entry->status = response->status;
+                   ),
+           TP_printk("child_relid 0x%x, status %d",
+                     __entry->child_relid,  __entry->status
+                   )
+       );
+
 TRACE_EVENT(vmbus_ongpadl_torndown,
            TP_PROTO(const struct vmbus_channel_gpadl_torndown *gpadltorndown),
            TP_ARGS(gpadltorndown),
index 35833d4d1a1dcac54e136291d5f8b755c7a65044..374f8afbf8a58c6ebefb56630136a6a38cdf9fec 100644 (file)
@@ -84,15 +84,6 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
        ring_info->ring_buffer->write_index = next_write_location;
 }
 
-/* Set the next read location for the specified ring buffer. */
-static inline void
-hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
-                   u32 next_read_location)
-{
-       ring_info->ring_buffer->read_index = next_read_location;
-       ring_info->priv_read_index = next_read_location;
-}
-
 /* Get the size of the ring buffer. */
 static inline u32
 hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
@@ -313,7 +304,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
                rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
                if (rqst_id == VMBUS_RQST_ERROR) {
                        spin_unlock_irqrestore(&outring_info->ring_lock, flags);
-                       pr_err("No request id available\n");
                        return -EAGAIN;
                }
        }
index 10dce9f9121689eb32e4c941c6ee8d88927bc5fc..b12d6827b222baca27b6821a5b77e2481f23b74b 100644 (file)
@@ -48,8 +48,10 @@ static int hyperv_cpuhp_online;
 
 static void *hv_panic_page;
 
+static long __percpu *vmbus_evt;
+
 /* Values parsed from ACPI DSDT */
-static int vmbus_irq;
+int vmbus_irq;
 int vmbus_interrupt;
 
 /*
@@ -1381,7 +1383,13 @@ static void vmbus_isr(void)
                        tasklet_schedule(&hv_cpu->msg_dpc);
        }
 
-       add_interrupt_randomness(hv_get_vector(), 0);
+       add_interrupt_randomness(vmbus_interrupt, 0);
+}
+
+static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
+{
+       vmbus_isr();
+       return IRQ_HANDLED;
 }
 
 /*
@@ -1392,22 +1400,36 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
                         enum kmsg_dump_reason reason)
 {
        size_t bytes_written;
-       phys_addr_t panic_pa;
 
        /* We are only interested in panics. */
        if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
                return;
 
-       panic_pa = virt_to_phys(hv_panic_page);
-
        /*
         * Write dump contents to the page. No need to synchronize; panic should
         * be single-threaded.
         */
        kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
                             &bytes_written);
-       if (bytes_written)
-               hyperv_report_panic_msg(panic_pa, bytes_written);
+       if (!bytes_written)
+               return;
+       /*
+        * P3 contains the physical address of the panic page and P4 contains
+        * the size of the panic data in that page. The rest of the registers
+        * are a no-op when the NOTIFY_MSG flag is set.
+        */
+       hv_set_register(HV_REGISTER_CRASH_P0, 0);
+       hv_set_register(HV_REGISTER_CRASH_P1, 0);
+       hv_set_register(HV_REGISTER_CRASH_P2, 0);
+       hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page));
+       hv_set_register(HV_REGISTER_CRASH_P4, bytes_written);
+
+       /*
+        * Let Hyper-V know there is crash data available along with
+        * the panic message.
+        */
+       hv_set_register(HV_REGISTER_CRASH_CTL,
+              (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
 }
 
 static struct kmsg_dumper hv_kmsg_dumper = {
@@ -1482,9 +1504,28 @@ static int vmbus_bus_init(void)
        if (ret)
                return ret;
 
-       ret = hv_setup_vmbus_irq(vmbus_irq, vmbus_isr);
-       if (ret)
-               goto err_setup;
+       /*
+        * VMbus interrupts are best modeled as per-cpu interrupts. If
+        * on an architecture with support for per-cpu IRQs (e.g. ARM64),
+        * allocate a per-cpu IRQ using standard Linux kernel functionality.
+        * If not on such an architecture (e.g., x86/x64), then rely on
+        * code in the arch-specific portion of the code tree to connect
+        * the VMbus interrupt handler.
+        */
+
+       if (vmbus_irq == -1) {
+               hv_setup_vmbus_handler(vmbus_isr);
+       } else {
+               vmbus_evt = alloc_percpu(long);
+               ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
+                               "Hyper-V VMbus", vmbus_evt);
+               if (ret) {
+                       pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
+                                       vmbus_irq, ret);
+                       free_percpu(vmbus_evt);
+                       goto err_setup;
+               }
+       }
 
        ret = hv_synic_alloc();
        if (ret)
@@ -1521,7 +1562,7 @@ static int vmbus_bus_init(void)
                 * Register for panic kmsg callback only if the right
                 * capability is supported by the hypervisor.
                 */
-               hv_get_crash_ctl(hyperv_crash_ctl);
+               hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
                if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
                        hv_kmsg_dump_register();
 
@@ -1545,7 +1586,12 @@ err_connect:
 err_cpuhp:
        hv_synic_free();
 err_alloc:
-       hv_remove_vmbus_irq();
+       if (vmbus_irq == -1) {
+               hv_remove_vmbus_handler();
+       } else {
+               free_percpu_irq(vmbus_irq, vmbus_evt);
+               free_percpu(vmbus_evt);
+       }
 err_setup:
        bus_unregister(&hv_bus);
        unregister_sysctl_table(hv_ctl_table_hdr);
@@ -1802,13 +1848,15 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
        if (target_cpu == origin_cpu)
                goto cpu_store_unlock;
 
-       if (vmbus_send_modifychannel(channel->offermsg.child_relid,
+       if (vmbus_send_modifychannel(channel,
                                     hv_cpu_number_to_vp_number(target_cpu))) {
                ret = -EIO;
                goto cpu_store_unlock;
        }
 
        /*
+        * For version before VERSION_WIN10_V5_3, the following warning holds:
+        *
         * Warning.  At this point, there is *no* guarantee that the host will
         * have successfully processed the vmbus_send_modifychannel() request.
         * See the header comment of vmbus_send_modifychannel() for more info.
@@ -2663,6 +2711,18 @@ static int __init hv_acpi_init(void)
                ret = -ETIMEDOUT;
                goto cleanup;
        }
+
+       /*
+        * If we're on an architecture with a hardcoded hypervisor
+        * vector (i.e. x86/x64), override the VMbus interrupt found
+        * in the ACPI tables. Ensure vmbus_irq is not set since the
+        * normal Linux IRQ mechanism is not used in this case.
+        */
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+       vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
+       vmbus_irq = -1;
+#endif
+
        hv_debug_init();
 
        ret = vmbus_bus_init();
@@ -2693,7 +2753,12 @@ static void __exit vmbus_exit(void)
        vmbus_connection.conn_state = DISCONNECTED;
        hv_stimer_global_cleanup();
        vmbus_disconnect();
-       hv_remove_vmbus_irq();
+       if (vmbus_irq == -1) {
+               hv_remove_vmbus_handler();
+       } else {
+               free_percpu_irq(vmbus_irq, vmbus_evt);
+               free_percpu(vmbus_evt);
+       }
        for_each_online_cpu(cpu) {
                struct hv_per_cpu_context *hv_cpu
                        = per_cpu_ptr(hv_context.cpu_context, cpu);
index d3a64a35f7a9af2d11fe31de0d37b5abb7c2301a..805d396aa81b0feba50850521b1288d757545a03 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright (C) 2018 Stefan Wahren <stefan.wahren@i2se.com>
  */
 #include <linux/device.h>
+#include <linux/devm-helpers.h>
 #include <linux/err.h>
 #include <linux/hwmon.h>
 #include <linux/module.h>
@@ -106,6 +107,7 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct rpi_hwmon_data *data;
+       int ret;
 
        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
@@ -119,7 +121,10 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
                                                               &rpi_chip_info,
                                                               NULL);
 
-       INIT_DELAYED_WORK(&data->get_values_poll_work, get_values_poll);
+       ret = devm_delayed_work_autocancel(dev, &data->get_values_poll_work,
+                                          get_values_poll);
+       if (ret)
+               return ret;
        platform_set_drvdata(pdev, data);
 
        if (!PTR_ERR_OR_ZERO(data->hwmon_dev))
@@ -128,18 +133,8 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
        return PTR_ERR_OR_ZERO(data->hwmon_dev);
 }
 
-static int rpi_hwmon_remove(struct platform_device *pdev)
-{
-       struct rpi_hwmon_data *data = platform_get_drvdata(pdev);
-
-       cancel_delayed_work_sync(&data->get_values_poll_work);
-
-       return 0;
-}
-
 static struct platform_driver rpi_hwmon_driver = {
        .probe = rpi_hwmon_probe,
-       .remove = rpi_hwmon_remove,
        .driver = {
                .name = "raspberrypi-hwmon",
        },
index 0062c893565300ac7524c0319ba4829159087e32..b57bea16710274d4a7801f05a9821e0e18109bf6 100644 (file)
@@ -86,7 +86,7 @@ static int coresight_id_match(struct device *dev, void *data)
            i_csdev->type != CORESIGHT_DEV_TYPE_SOURCE)
                return 0;
 
-       /* Get the source ID for both compoment */
+       /* Get the source ID for both components */
        trace_id = source_ops(csdev)->trace_id(csdev);
        i_trace_id = source_ops(i_csdev)->trace_id(i_csdev);
 
index 0f603b4094f2214960d0b823052f1e73629b855c..c1bec2ad391191e6545902b8ab04bebbda9cde25 100644 (file)
@@ -52,13 +52,13 @@ static ssize_t format_attr_contextid_show(struct device *dev,
 {
        int pid_fmt = ETM_OPT_CTXTID;
 
-#if defined(CONFIG_CORESIGHT_SOURCE_ETM4X)
+#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
        pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
 #endif
        return sprintf(page, "config:%d\n", pid_fmt);
 }
 
-struct device_attribute format_attr_contextid =
+static struct device_attribute format_attr_contextid =
        __ATTR(contextid, 0444, format_attr_contextid_show, NULL);
 
 static struct attribute *etm_config_formats_attr[] = {
index 15016f757828e6326f6d6d0163264aaa11f01ff8..a5b13a7779c39edfa6ffa52c6798020c1d8ca590 100644 (file)
@@ -1951,6 +1951,7 @@ static const struct amba_id etm4_ids[] = {
        CS_AMBA_UCI_ID(0x000bbd05, uci_id_etm4),/* Cortex-A55 */
        CS_AMBA_UCI_ID(0x000bbd0a, uci_id_etm4),/* Cortex-A75 */
        CS_AMBA_UCI_ID(0x000bbd0c, uci_id_etm4),/* Neoverse N1 */
+       CS_AMBA_UCI_ID(0x000bbd41, uci_id_etm4),/* Cortex-A78 */
        CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
        CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
        CS_AMBA_UCI_ID(0x000bb802, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A55 */
index c9ac3dc651135d156915f425ce9c232c94e568ad..24d0c974bfd55d400c8b7362152fde49f33a427b 100644 (file)
@@ -844,7 +844,7 @@ static irqreturn_t intel_th_irq(int irq, void *data)
  * @irq:       irq number
  */
 struct intel_th *
-intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
+intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata,
               struct resource *devres, unsigned int ndevres)
 {
        int err, r, nr_mmios = 0;
index f72803a0239109a4c0249f27f45d26247948edbe..28509b02a0b563ab81b2229880d57a533fdbf8e3 100644 (file)
@@ -543,7 +543,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
        output->active = false;
 
        for_each_set_bit(master, gth->output[output->port].master,
-                        TH_CONFIGURABLE_MASTERS) {
+                        TH_CONFIGURABLE_MASTERS + 1) {
                gth_master_set(gth, master, -1);
        }
        spin_unlock(&gth->gth_lock);
@@ -697,7 +697,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
        othdev->output.port = -1;
        othdev->output.active = false;
        gth->output[port].output = NULL;
-       for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
+       for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
                if (gth->master[master] == port)
                        gth->master[master] = -1;
        spin_unlock(&gth->gth_lock);
index 5fe694708b7a3117991900ea89028b366ae18810..89c67e0e1d348e7184fcbe18df676ca4e2d97bf9 100644 (file)
@@ -74,7 +74,7 @@ struct intel_th_drvdata {
  */
 struct intel_th_device {
        struct device           dev;
-       struct intel_th_drvdata *drvdata;
+       const struct intel_th_drvdata *drvdata;
        struct resource         *resource;
        unsigned int            num_resources;
        unsigned int            type;
@@ -178,7 +178,7 @@ struct intel_th_driver {
        /* file_operations for those who want a device node */
        const struct file_operations *fops;
        /* optional attributes */
-       struct attribute_group  *attr_group;
+       const struct attribute_group *attr_group;
 
        /* source ops */
        int                     (*set_output)(struct intel_th_device *thdev,
@@ -224,7 +224,7 @@ static inline struct intel_th *to_intel_th(struct intel_th_device *thdev)
 }
 
 struct intel_th *
-intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
+intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata,
               struct resource *devres, unsigned int ndevres);
 void intel_th_free(struct intel_th *th);
 
@@ -272,7 +272,7 @@ struct intel_th {
 
        struct intel_th_device  *thdev[TH_SUBDEVICE_MAX];
        struct intel_th_device  *hub;
-       struct intel_th_drvdata *drvdata;
+       const struct intel_th_drvdata   *drvdata;
 
        struct resource         resource[TH_MMIO_END];
        int                     (*activate)(struct intel_th *);
index 7d95242db900f0fa4151280cfcf9fa3cfe2147bc..2edc4666633d0bc974bbe1274cad1a186b2382ad 100644 (file)
@@ -2095,7 +2095,7 @@ static struct attribute *msc_output_attrs[] = {
        NULL,
 };
 
-static struct attribute_group msc_output_group = {
+static const struct attribute_group msc_output_group = {
        .attrs  = msc_output_attrs,
 };
 
index 251e75c9ba9d0dda26e17d2558715a00a7a923bb..7da4f298ed01e4b2b07064170d8bf40792200f43 100644 (file)
@@ -71,7 +71,7 @@ static void intel_th_pci_deactivate(struct intel_th *th)
 static int intel_th_pci_probe(struct pci_dev *pdev,
                              const struct pci_device_id *id)
 {
-       struct intel_th_drvdata *drvdata = (void *)id->driver_data;
+       const struct intel_th_drvdata *drvdata = (void *)id->driver_data;
        struct resource resource[TH_MMIO_END + TH_NVEC_MAX] = {
                [TH_MMIO_CONFIG]        = pdev->resource[TH_PCI_CONFIG_BAR],
                [TH_MMIO_SW]            = pdev->resource[TH_PCI_STH_SW_BAR],
@@ -273,11 +273,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Alder Lake-M */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        {
                /* Alder Lake CPU */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Rocket Lake CPU */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        { 0 },
 };
 
index 0da6b787f55367ff0a1e07b07e4da673c2656be4..09132ab8bc23265a083597f44a0f44b5bd77d0c4 100644 (file)
@@ -142,7 +142,7 @@ static struct attribute *pti_output_attrs[] = {
        NULL,
 };
 
-static struct attribute_group pti_output_group = {
+static const struct attribute_group pti_output_group = {
        .attrs  = pti_output_attrs,
 };
 
@@ -295,7 +295,7 @@ static struct attribute *lpp_output_attrs[] = {
        NULL,
 };
 
-static struct attribute_group lpp_output_group = {
+static const struct attribute_group lpp_output_group = {
        .attrs  = lpp_output_attrs,
 };
 
index 360b5c03df95b87e600843340ae24d6cfb9f0b99..8254971c02e70eec9fd4dad08824a0bfa8cb4ae3 100644 (file)
@@ -92,7 +92,7 @@ static void sys_t_policy_node_init(void *priv)
 {
        struct sys_t_policy_node *pn = priv;
 
-       generate_random_uuid(pn->uuid.b);
+       uuid_gen(&pn->uuid);
 }
 
 static int sys_t_output_open(void *priv, struct stm_output *output)
@@ -292,6 +292,7 @@ static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output,
        unsigned int m = output->master;
        const unsigned char nil = 0;
        u32 header = DATA_HEADER;
+       u8 uuid[UUID_SIZE];
        ssize_t sz;
 
        /* We require an existing policy node to proceed */
@@ -322,7 +323,8 @@ static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output,
                return sz;
 
        /* GUID */
-       sz = stm_data_write(data, m, c, false, op->node.uuid.b, UUID_SIZE);
+       export_uuid(uuid, &op->node.uuid);
+       sz = stm_data_write(data, m, c, false, uuid, sizeof(op->node.uuid));
        if (sz <= 0)
                return sz;
 
index 603b4a9969d3a2622f8ad5b2912f64583105d7c1..42103c3a177f92f85011de8ce638c2994ecaa898 100644 (file)
@@ -57,11 +57,6 @@ void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
        *cend   = policy_node->last_channel;
 }
 
-static inline char *stp_policy_node_name(struct stp_policy_node *policy_node)
-{
-       return policy_node->group.cg_item.ci_name ? : "<none>";
-}
-
 static inline struct stp_policy *to_stp_policy(struct config_item *item)
 {
        return item ?
index dd27b9dbe9319fcf97b8539c58366340cd349a7e..873ef38eb1c87eda049bbbc5fc5a482241ea3ede 100644 (file)
@@ -129,6 +129,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
                if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
                        != DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
                        dev_err(dev->dev, "High Speed not supported!\n");
+                       t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
                        dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
                        dev->master_cfg |= DW_IC_CON_SPEED_FAST;
                        dev->hs_hcnt = 0;
index 5ac30d95650cc523af54a5c2bd98703cb8edcdbf..97d4f3ac0abd39ce7731bec2e78d0fec20d19ece 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * i2c-exynos5.c - Samsung Exynos5 I2C Controller Driver
  *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
index c45f226c2b85cfd3013d0a0c5b8df5e257cd0450..aa00ba8bcb70fe33952b6f4c71353aa471643ced 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) 2014 Linaro Ltd.
- * Copyright (c) 2014 Hisilicon Limited.
+ * Copyright (c) 2014 HiSilicon Limited.
  *
  * Now only support 7 bit address.
  */
index 8509c5f11356f63fbf046d645e85231fe3adc0a7..55177eb21d7b14534e63005a82cc0f9e9c7c6007 100644 (file)
@@ -525,8 +525,8 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
                                i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
                                data = *i2c->wbuf;
                                data &= ~JZ4780_I2C_DC_READ;
-                               if ((!i2c->stop_hold) && (i2c->cdata->version >=
-                                               ID_X1000))
+                               if ((i2c->wt_len == 1) && (!i2c->stop_hold) &&
+                                               (i2c->cdata->version >= ID_X1000))
                                        data |= X1000_I2C_DC_STOP;
                                jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data);
                                i2c->wbuf++;
index c590d36b5fd165134ccf20a0a785f237c939f363..5c8e94b6cdb5a8e6bff0cc5481860ef5764fb0bd 100644 (file)
@@ -221,6 +221,10 @@ mv64xxx_i2c_hw_init(struct mv64xxx_i2c_data *drv_data)
        writel(0, drv_data->reg_base + drv_data->reg_offsets.ext_addr);
        writel(MV64XXX_I2C_REG_CONTROL_TWSIEN | MV64XXX_I2C_REG_CONTROL_STOP,
                drv_data->reg_base + drv_data->reg_offsets.control);
+
+       if (drv_data->errata_delay)
+               udelay(5);
+
        drv_data->state = MV64XXX_I2C_STATE_IDLE;
 }
 
index 937c2c8fd3490b97138cfa8a03828cc957600cd0..4933fc8ce3fd1ce0d8300005352def6b06b880a9 100644 (file)
@@ -534,7 +534,7 @@ static void stm32f4_i2c_handle_rx_addr(struct stm32f4_i2c_dev *i2c_dev)
        default:
                /*
                 * N-byte reception:
-                * Enable ACK, reset POS (ACK postion) and clear ADDR flag.
+                * Enable ACK, reset POS (ACK position) and clear ADDR flag.
                 * In that way, ACK will be sent as soon as the current byte
                 * will be received in the shift register
                 */
index 63ebf722a42484d0c2e54a9a92a30638c9850f37..f21362355973e9738a41ecb049d02a36537a6beb 100644 (file)
@@ -378,7 +378,7 @@ static int i2c_gpio_init_recovery(struct i2c_adapter *adap)
 static int i2c_init_recovery(struct i2c_adapter *adap)
 {
        struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
-       char *err_str;
+       char *err_str, *err_level = KERN_ERR;
 
        if (!bri)
                return 0;
@@ -387,7 +387,8 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
                return -EPROBE_DEFER;
 
        if (!bri->recover_bus) {
-               err_str = "no recover_bus() found";
+               err_str = "no suitable method provided";
+               err_level = KERN_DEBUG;
                goto err;
        }
 
@@ -414,7 +415,7 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
 
        return 0;
  err:
-       dev_err(&adap->dev, "Not using recovery: %s\n", err_str);
+       dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str);
        adap->bus_recovery_info = NULL;
 
        return -EINVAL;
index 0abce004a9591e7cd3dd56d33705dae3cc0ee459..65e3e7df8a4b07f06c0760cd61f2f26fc906b018 100644 (file)
@@ -76,7 +76,9 @@ static struct workqueue_struct *addr_wq;
 
 static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
        [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
-               .len = sizeof(struct rdma_nla_ls_gid)},
+               .len = sizeof(struct rdma_nla_ls_gid),
+               .validation_type = NLA_VALIDATE_MIN,
+               .min = sizeof(struct rdma_nla_ls_gid)},
 };
 
 static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
index 81903749d24156ae227fd12705f2bfe915afeec8..e42c812e74c3c963bf9923d82a77c43bf0a357e1 100644 (file)
@@ -3616,7 +3616,8 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
                c4iw_init_wr_wait(ep->com.wr_waitp);
                err = cxgb4_remove_server(
                                ep->com.dev->rdev.lldi.ports[0], ep->stid,
-                               ep->com.dev->rdev.lldi.rxq_ids[0], true);
+                               ep->com.dev->rdev.lldi.rxq_ids[0],
+                               ep->com.local_addr.ss_family == AF_INET6);
                if (err)
                        goto done;
                err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
index 2a91b8d95e12fd58e6ef2f7e8ddf4173bab9bd32..04b1e8f021f642b1044a793e6c1babaca735c11f 100644 (file)
@@ -632,22 +632,11 @@ static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
  */
 int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
 {
-       int node = pcibus_to_node(dd->pcidev->bus);
        struct hfi1_affinity_node *entry;
        const struct cpumask *local_mask;
        int curr_cpu, possible, i, ret;
        bool new_entry = false;
 
-       /*
-        * If the BIOS does not have the NUMA node information set, select
-        * NUMA 0 so we get consistent performance.
-        */
-       if (node < 0) {
-               dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
-               node = 0;
-       }
-       dd->node = node;
-
        local_mask = cpumask_of_node(dd->node);
        if (cpumask_first(local_mask) >= nr_cpu_ids)
                local_mask = topology_core_cpumask(0);
@@ -660,7 +649,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
         * create an entry in the global affinity structure and initialize it.
         */
        if (!entry) {
-               entry = node_affinity_allocate(node);
+               entry = node_affinity_allocate(dd->node);
                if (!entry) {
                        dd_dev_err(dd,
                                   "Unable to allocate global affinity node\n");
@@ -751,6 +740,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
        if (new_entry)
                node_affinity_add_tail(entry);
 
+       dd->affinity_entry = entry;
        mutex_unlock(&node_affinity.lock);
 
        return 0;
@@ -766,10 +756,9 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
 {
        struct hfi1_affinity_node *entry;
 
-       if (dd->node < 0)
-               return;
-
        mutex_lock(&node_affinity.lock);
+       if (!dd->affinity_entry)
+               goto unlock;
        entry = node_affinity_lookup(dd->node);
        if (!entry)
                goto unlock;
@@ -780,8 +769,8 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
         */
        _dev_comp_vect_cpu_mask_clean_up(dd, entry);
 unlock:
+       dd->affinity_entry = NULL;
        mutex_unlock(&node_affinity.lock);
-       dd->node = NUMA_NO_NODE;
 }
 
 /*
index e09e8244a94c4e2201ce73ea79fd26d04414e349..2a9a040569ebb7b5220b17166e70847b2478123e 100644 (file)
@@ -1409,6 +1409,7 @@ struct hfi1_devdata {
        spinlock_t irq_src_lock;
        int vnic_num_vports;
        struct net_device *dummy_netdev;
+       struct hfi1_affinity_node *affinity_entry;
 
        /* Keeps track of IPoIB RSM rule users */
        atomic_t ipoib_rsm_usr_num;
index cb7ad12888219774925200f22723ff381332aa81..786c6316273f74964e74650229d193a702dfeaba 100644 (file)
@@ -1277,7 +1277,6 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
        dd->pport = (struct hfi1_pportdata *)(dd + 1);
        dd->pcidev = pdev;
        pci_set_drvdata(pdev, dd);
-       dd->node = NUMA_NO_NODE;
 
        ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
                        GFP_KERNEL);
@@ -1287,6 +1286,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
                goto bail;
        }
        rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
+       /*
+        * If the BIOS does not have the NUMA node information set, select
+        * NUMA 0 so we get consistent performance.
+        */
+       dd->node = pcibus_to_node(pdev->bus);
+       if (dd->node == NUMA_NO_NODE) {
+               dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
+               dd->node = 0;
+       }
 
        /*
         * Initialize all locks for the device. This needs to be as early as
index 1fb6e1a0e4e1d64d6d60adb240c560b2ed2705e8..1bcab992ac266dbb51e1a28740d6b9cd84b4d54f 100644 (file)
@@ -173,8 +173,7 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
                return 0;
        }
 
-       cpumask_and(node_cpu_mask, cpu_mask,
-                   cpumask_of_node(pcibus_to_node(dd->pcidev->bus)));
+       cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));
 
        available_cpus = cpumask_weight(node_cpu_mask);
 
index 0eb6a7a618e0752d5ac5cec0f20130a4e964ac36..9ea542270ed4a3ecbc518eb02b2981c8ac8e26b5 100644 (file)
@@ -1244,7 +1244,8 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
         * TGT QP isn't associated with RQ/SQ
         */
        if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
-           (attrs->qp_type != IB_QPT_XRC_TGT)) {
+           (attrs->qp_type != IB_QPT_XRC_TGT) &&
+           (attrs->qp_type != IB_QPT_XRC_INI)) {
                struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
                struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
 
index 0a08b4b742a3d080ddf363f2b85ed486824026b1..6734329cca33264a3ce1d85820d92f1641e81a64 100644 (file)
@@ -2720,8 +2720,8 @@ void rtrs_clt_close(struct rtrs_clt *clt)
 
        /* Now it is safe to iterate over all paths without locks */
        list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
-               rtrs_clt_destroy_sess_files(sess, NULL);
                rtrs_clt_close_conns(sess, true);
+               rtrs_clt_destroy_sess_files(sess, NULL);
                kobject_put(&sess->kobj);
        }
        free_clt(clt);
index 8bcc529942bc3e2d7adc42c1e0246e3e0ffbd093..9dbca366613e575983f785a1d2459f1d68a7f65d 100644 (file)
@@ -252,8 +252,8 @@ static int __init n64joy_probe(struct platform_device *pdev)
        mutex_init(&priv->n64joy_mutex);
 
        priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
-       if (!priv->reg_base) {
-               err = -EINVAL;
+       if (IS_ERR(priv->reg_base)) {
+               err = PTR_ERR(priv->reg_base);
                goto fail;
        }
 
index 63d5e488137dc037d7fc1bab44a66c2a8480308c..e9fa1423f136090ebeabafb4e74810c56a558f92 100644 (file)
@@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
+static int nspire_keypad_open(struct input_dev *input)
 {
+       struct nspire_keypad *keypad = input_get_drvdata(input);
        unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles;
+       int error;
+
+       error = clk_prepare_enable(keypad->clk);
+       if (error)
+               return error;
 
        cycles_per_us = (clk_get_rate(keypad->clk) / 1000000);
        if (cycles_per_us == 0)
@@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
        keypad->int_mask = 1 << 1;
        writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK);
 
-       /* Disable GPIO interrupts to prevent hanging on touchpad */
-       /* Possibly used to detect touchpad events */
-       writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
-       /* Acknowledge existing interrupts */
-       writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
-
-       return 0;
-}
-
-static int nspire_keypad_open(struct input_dev *input)
-{
-       struct nspire_keypad *keypad = input_get_drvdata(input);
-       int error;
-
-       error = clk_prepare_enable(keypad->clk);
-       if (error)
-               return error;
-
-       error = nspire_keypad_chip_init(keypad);
-       if (error) {
-               clk_disable_unprepare(keypad->clk);
-               return error;
-       }
-
        return 0;
 }
 
@@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input)
 {
        struct nspire_keypad *keypad = input_get_drvdata(input);
 
+       /* Disable interrupts */
+       writel(0, keypad->reg_base + KEYPAD_INTMSK);
+       /* Acknowledge existing interrupts */
+       writel(~0, keypad->reg_base + KEYPAD_INT);
+
        clk_disable_unprepare(keypad->clk);
 }
 
@@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       error = clk_prepare_enable(keypad->clk);
+       if (error) {
+               dev_err(&pdev->dev, "failed to enable clock\n");
+               return error;
+       }
+
+       /* Disable interrupts */
+       writel(0, keypad->reg_base + KEYPAD_INTMSK);
+       /* Acknowledge existing interrupts */
+       writel(~0, keypad->reg_base + KEYPAD_INT);
+
+       /* Disable GPIO interrupts to prevent hanging on touchpad */
+       /* Possibly used to detect touchpad events */
+       writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
+       /* Acknowledge existing GPIO interrupts */
+       writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
+
+       clk_disable_unprepare(keypad->clk);
+
        input_set_drvdata(input, keypad);
 
        input->id.bustype = BUS_HOST;
index 9119e12a577844ee0b760112b3d805a50c7a8e53..a5a0035536462aa5e34fd7fa310f85b0be16aae9 100644 (file)
@@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
                },
+       }, {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
index 4c2b579f6c8bf35a07a22ca0d95db620defff79a..5f7706febcb0934e57852ad474e9b741a37b0ae4 100644 (file)
@@ -1441,7 +1441,7 @@ static int elants_i2c_probe(struct i2c_client *client,
 
        touchscreen_parse_properties(ts->input, true, &ts->prop);
 
-       if (ts->chip_id == EKTF3624) {
+       if (ts->chip_id == EKTF3624 && ts->phy_x && ts->phy_y) {
                /* calculate resolution from size */
                ts->x_res = DIV_ROUND_CLOSEST(ts->prop.max_x, ts->phy_x);
                ts->y_res = DIV_ROUND_CLOSEST(ts->prop.max_y, ts->phy_y);
@@ -1449,8 +1449,7 @@ static int elants_i2c_probe(struct i2c_client *client,
 
        input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
        input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
-       if (ts->major_res > 0)
-               input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
+       input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
 
        error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
                                    INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
index b63d7fdf0cd20d3c971fca88079e450d188a8199..85a1f465c097ea10621f7e2860b0cb777f84bf89 100644 (file)
@@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
        u8 major = event[4];
        u8 minor = event[5];
        u8 z = event[6] & S6SY761_MASK_Z;
-       u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
-       u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
+       u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
+       u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);
 
        input_mt_slot(sdata->input, tid);
 
index ca52647f8955077b8670cd85959f09c63f793dfd..cdb3e11462c66b0a1b89bf0b7019ffbc469cac6f 100644 (file)
@@ -74,6 +74,15 @@ config INTERCONNECT_QCOM_SC7180
          This is a driver for the Qualcomm Network-on-Chip on sc7180-based
          platforms.
 
+config INTERCONNECT_QCOM_SDM660
+       tristate "Qualcomm SDM660 interconnect driver"
+       depends on INTERCONNECT_QCOM
+       depends on QCOM_SMD_RPM
+       select INTERCONNECT_QCOM_SMD_RPM
+       help
+         This is a driver for the Qualcomm Network-on-Chip on sdm660-based
+         platforms.
+
 config INTERCONNECT_QCOM_SDM845
        tristate "Qualcomm SDM845 interconnect driver"
        depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
@@ -110,5 +119,14 @@ config INTERCONNECT_QCOM_SM8250
          This is a driver for the Qualcomm Network-on-Chip on sm8250-based
          platforms.
 
+config INTERCONNECT_QCOM_SM8350
+       tristate "Qualcomm SM8350 interconnect driver"
+       depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+       select INTERCONNECT_QCOM_RPMH
+       select INTERCONNECT_QCOM_BCM_VOTER
+       help
+         This is a driver for the Qualcomm Network-on-Chip on SM8350-based
+         platforms.
+
 config INTERCONNECT_QCOM_SMD_RPM
        tristate
index c6a735df067ecefd49040ba29a3101f97fe9cdfe..46fc62447156bec2900e0bc6816de4fdbd5a51bd 100644 (file)
@@ -8,10 +8,12 @@ icc-osm-l3-objs                               := osm-l3.o
 qnoc-qcs404-objs                       := qcs404.o
 icc-rpmh-obj                           := icc-rpmh.o
 qnoc-sc7180-objs                       := sc7180.o
+qnoc-sdm660-objs                       := sdm660.o
 qnoc-sdm845-objs                       := sdm845.o
 qnoc-sdx55-objs                                := sdx55.o
 qnoc-sm8150-objs                       := sm8150.o
 qnoc-sm8250-objs                       := sm8250.o
+qnoc-sm8350-objs                       := sm8350.o
 icc-smd-rpm-objs                       := smd-rpm.o icc-rpm.o
 
 obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
@@ -22,8 +24,10 @@ obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o
 obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
 obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SDM660) += qnoc-sdm660.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
index cc6095492cbeacd6dac07e57efff0dc9f2905179..54de49ca7808a861ec9f4857d954776634965b50 100644 (file)
@@ -59,8 +59,8 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
                                            qn->slv_rpm_id,
                                            sum_bw);
                if (ret) {
-                       pr_err("qcom_icc_rpm_smd_send slv error %d\n",
-                              ret);
+                       pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
+                              qn->slv_rpm_id, ret);
                        return ret;
                }
        }
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
new file mode 100644 (file)
index 0000000..632dbdd
--- /dev/null
@@ -0,0 +1,923 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm SDM630/SDM636/SDM660 Network-on-Chip (NoC) QoS driver
+ * Copyright (C) 2020, AngeloGioacchino Del Regno <kholk11@gmail.com>
+ */
+
+#include <dt-bindings/interconnect/qcom,sdm660.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "smd-rpm.h"
+
+#define RPM_BUS_MASTER_REQ     0x73616d62
+#define RPM_BUS_SLAVE_REQ      0x766c7362
+
/* BIMC QoS */
#define M_BKE_REG_BASE(n)		(0x300 + (0x4000 * (n)))
#define M_BKE_EN_ADDR(n)		(M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n)	(M_BKE_REG_BASE(n) + 0x40 + (0x4 * (i)))

#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK	0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK	0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK	0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT	0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f

#define M_BKE_EN_EN_BMASK		0x1

/* Valid for both NoC and BIMC */
#define NOC_QOS_MODE_FIXED		0x0
#define NOC_QOS_MODE_LIMITER		0x1
#define NOC_QOS_MODE_BYPASS		0x2

/* NoC QoS */
#define NOC_PERM_MODE_FIXED		1
#define NOC_PERM_MODE_BYPASS		(1 << NOC_QOS_MODE_BYPASS)

/* Macro arguments are parenthesized so expression arguments expand safely */
#define NOC_QOS_PRIORITYn_ADDR(n)	(0x8 + ((n) * 0x1000))
#define NOC_QOS_PRIORITY_MASK		0xf
#define NOC_QOS_PRIORITY_P1_SHIFT	0x2
#define NOC_QOS_PRIORITY_P0_SHIFT	0x3

#define NOC_QOS_MODEn_ADDR(n)		(0xc + ((n) * 0x1000))
#define NOC_QOS_MODEn_MASK		0x3
+
+enum {
+       SDM660_MASTER_IPA = 1,
+       SDM660_MASTER_CNOC_A2NOC,
+       SDM660_MASTER_SDCC_1,
+       SDM660_MASTER_SDCC_2,
+       SDM660_MASTER_BLSP_1,
+       SDM660_MASTER_BLSP_2,
+       SDM660_MASTER_UFS,
+       SDM660_MASTER_USB_HS,
+       SDM660_MASTER_USB3,
+       SDM660_MASTER_CRYPTO_C0,
+       SDM660_MASTER_GNOC_BIMC,
+       SDM660_MASTER_OXILI,
+       SDM660_MASTER_MNOC_BIMC,
+       SDM660_MASTER_SNOC_BIMC,
+       SDM660_MASTER_PIMEM,
+       SDM660_MASTER_SNOC_CNOC,
+       SDM660_MASTER_QDSS_DAP,
+       SDM660_MASTER_APPS_PROC,
+       SDM660_MASTER_CNOC_MNOC_MMSS_CFG,
+       SDM660_MASTER_CNOC_MNOC_CFG,
+       SDM660_MASTER_CPP,
+       SDM660_MASTER_JPEG,
+       SDM660_MASTER_MDP_P0,
+       SDM660_MASTER_MDP_P1,
+       SDM660_MASTER_VENUS,
+       SDM660_MASTER_VFE,
+       SDM660_MASTER_QDSS_ETR,
+       SDM660_MASTER_QDSS_BAM,
+       SDM660_MASTER_SNOC_CFG,
+       SDM660_MASTER_BIMC_SNOC,
+       SDM660_MASTER_A2NOC_SNOC,
+       SDM660_MASTER_GNOC_SNOC,
+
+       SDM660_SLAVE_A2NOC_SNOC,
+       SDM660_SLAVE_EBI,
+       SDM660_SLAVE_HMSS_L3,
+       SDM660_SLAVE_BIMC_SNOC,
+       SDM660_SLAVE_CNOC_A2NOC,
+       SDM660_SLAVE_MPM,
+       SDM660_SLAVE_PMIC_ARB,
+       SDM660_SLAVE_TLMM_NORTH,
+       SDM660_SLAVE_TCSR,
+       SDM660_SLAVE_PIMEM_CFG,
+       SDM660_SLAVE_IMEM_CFG,
+       SDM660_SLAVE_MESSAGE_RAM,
+       SDM660_SLAVE_GLM,
+       SDM660_SLAVE_BIMC_CFG,
+       SDM660_SLAVE_PRNG,
+       SDM660_SLAVE_SPDM,
+       SDM660_SLAVE_QDSS_CFG,
+       SDM660_SLAVE_CNOC_MNOC_CFG,
+       SDM660_SLAVE_SNOC_CFG,
+       SDM660_SLAVE_QM_CFG,
+       SDM660_SLAVE_CLK_CTL,
+       SDM660_SLAVE_MSS_CFG,
+       SDM660_SLAVE_TLMM_SOUTH,
+       SDM660_SLAVE_UFS_CFG,
+       SDM660_SLAVE_A2NOC_CFG,
+       SDM660_SLAVE_A2NOC_SMMU_CFG,
+       SDM660_SLAVE_GPUSS_CFG,
+       SDM660_SLAVE_AHB2PHY,
+       SDM660_SLAVE_BLSP_1,
+       SDM660_SLAVE_SDCC_1,
+       SDM660_SLAVE_SDCC_2,
+       SDM660_SLAVE_TLMM_CENTER,
+       SDM660_SLAVE_BLSP_2,
+       SDM660_SLAVE_PDM,
+       SDM660_SLAVE_CNOC_MNOC_MMSS_CFG,
+       SDM660_SLAVE_USB_HS,
+       SDM660_SLAVE_USB3_0,
+       SDM660_SLAVE_SRVC_CNOC,
+       SDM660_SLAVE_GNOC_BIMC,
+       SDM660_SLAVE_GNOC_SNOC,
+       SDM660_SLAVE_CAMERA_CFG,
+       SDM660_SLAVE_CAMERA_THROTTLE_CFG,
+       SDM660_SLAVE_MISC_CFG,
+       SDM660_SLAVE_VENUS_THROTTLE_CFG,
+       SDM660_SLAVE_VENUS_CFG,
+       SDM660_SLAVE_MMSS_CLK_XPU_CFG,
+       SDM660_SLAVE_MMSS_CLK_CFG,
+       SDM660_SLAVE_MNOC_MPU_CFG,
+       SDM660_SLAVE_DISPLAY_CFG,
+       SDM660_SLAVE_CSI_PHY_CFG,
+       SDM660_SLAVE_DISPLAY_THROTTLE_CFG,
+       SDM660_SLAVE_SMMU_CFG,
+       SDM660_SLAVE_MNOC_BIMC,
+       SDM660_SLAVE_SRVC_MNOC,
+       SDM660_SLAVE_HMSS,
+       SDM660_SLAVE_LPASS,
+       SDM660_SLAVE_WLAN,
+       SDM660_SLAVE_CDSP,
+       SDM660_SLAVE_IPA,
+       SDM660_SLAVE_SNOC_BIMC,
+       SDM660_SLAVE_SNOC_CNOC,
+       SDM660_SLAVE_IMEM,
+       SDM660_SLAVE_PIMEM,
+       SDM660_SLAVE_QDSS_STM,
+       SDM660_SLAVE_SRVC_SNOC,
+
+       SDM660_A2NOC,
+       SDM660_BIMC,
+       SDM660_CNOC,
+       SDM660_GNOC,
+       SDM660_MNOC,
+       SDM660_SNOC,
+};
+
+#define to_qcom_provider(_provider) \
+       container_of(_provider, struct qcom_icc_provider, provider)
+
+static const struct clk_bulk_data bus_clocks[] = {
+       { .id = "bus" },
+       { .id = "bus_a" },
+};
+
+static const struct clk_bulk_data bus_mm_clocks[] = {
+       { .id = "bus" },
+       { .id = "bus_a" },
+       { .id = "iface" },
+};
+
+/**
+ * struct qcom_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @bus_clks: the clk_bulk_data table of bus clocks
+ * @num_clks: the total number of clk_bulk_data entries
+ * @is_bimc_node: indicates whether to use bimc specific setting
+ * @regmap: regmap for QoS registers read/write access
+ * @mmio: NoC base iospace
+ */
+struct qcom_icc_provider {
+       struct icc_provider provider;
+       struct clk_bulk_data *bus_clks;
+       int num_clks;
+       bool is_bimc_node;
+       struct regmap *regmap;
+       void __iomem *mmio;
+};
+
+#define SDM660_MAX_LINKS       34
+
+/**
+ * struct qcom_icc_qos - Qualcomm specific interconnect QoS parameters
+ * @areq_prio: node requests priority
+ * @prio_level: priority level for bus communication
+ * @limit_commands: activate/deactivate limiter mode during runtime
+ * @ap_owned: indicates if the node is owned by the AP or by the RPM
+ * @qos_mode: default qos mode for this node
+ * @qos_port: qos port number for finding qos registers of this node
+ */
+struct qcom_icc_qos {
+       u32 areq_prio;
+       u32 prio_level;
+       bool limit_commands;
+       bool ap_owned;
+       int qos_mode;
+       int qos_port;
+};
+
+/**
+ * struct qcom_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @mas_rpm_id: RPM id for devices that are bus masters
+ * @slv_rpm_id: RPM id for devices that are bus slaves
+ * @qos: NoC QoS setting parameters
+ * @rate: current bus clock rate in Hz
+ */
+struct qcom_icc_node {
+       unsigned char *name;
+       u16 id;
+       u16 links[SDM660_MAX_LINKS];
+       u16 num_links;
+       u16 buswidth;
+       int mas_rpm_id;
+       int slv_rpm_id;
+       struct qcom_icc_qos qos;
+       u64 rate;
+};
+
/**
 * struct qcom_icc_desc - descriptor for one NoC instance (driver match data)
 * @nodes: array of pointers to the interconnect nodes on this NoC
 * @num_nodes: number of entries in @nodes
 * @regmap_cfg: regmap configuration for this NoC's register space
 */
struct qcom_icc_desc {
	struct qcom_icc_node **nodes;
	size_t num_nodes;
	const struct regmap_config *regmap_cfg;
};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id,  \
+                    _ap_owned, _qos_mode, _qos_prio, _qos_port, ...)   \
+               static struct qcom_icc_node _name = {                   \
+               .name = #_name,                                         \
+               .id = _id,                                              \
+               .buswidth = _buswidth,                                  \
+               .mas_rpm_id = _mas_rpm_id,                              \
+               .slv_rpm_id = _slv_rpm_id,                              \
+               .qos.ap_owned = _ap_owned,                              \
+               .qos.qos_mode = _qos_mode,                              \
+               .qos.areq_prio = _qos_prio,                             \
+               .qos.prio_level = _qos_prio,                            \
+               .qos.qos_port = _qos_port,                              \
+               .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })),      \
+               .links = { __VA_ARGS__ },                               \
+       }
+
+DEFINE_QNODE(mas_ipa, SDM660_MASTER_IPA, 8, 59, -1, true, NOC_QOS_MODE_FIXED, 1, 3, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_cnoc_a2noc, SDM660_MASTER_CNOC_A2NOC, 8, 146, -1, true, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_sdcc_1, SDM660_MASTER_SDCC_1, 8, 33, -1, false, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_sdcc_2, SDM660_MASTER_SDCC_2, 8, 35, -1, false, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_blsp_1, SDM660_MASTER_BLSP_1, 4, 41, -1, false, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_blsp_2, SDM660_MASTER_BLSP_2, 4, 39, -1, false, -1, 0, -1, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_ufs, SDM660_MASTER_UFS, 8, 68, -1, true, NOC_QOS_MODE_FIXED, 1, 4, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_usb_hs, SDM660_MASTER_USB_HS, 8, 42, -1, true, NOC_QOS_MODE_FIXED, 1, 1, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_usb3, SDM660_MASTER_USB3, 8, 32, -1, true, NOC_QOS_MODE_FIXED, 1, 2, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_crypto, SDM660_MASTER_CRYPTO_C0, 8, 23, -1, true, NOC_QOS_MODE_FIXED, 1, 11, SDM660_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(mas_gnoc_bimc, SDM660_MASTER_GNOC_BIMC, 4, 144, -1, true, NOC_QOS_MODE_FIXED, 0, 0, SDM660_SLAVE_EBI);
+DEFINE_QNODE(mas_oxili, SDM660_MASTER_OXILI, 4, 6, -1, true, NOC_QOS_MODE_BYPASS, 0, 1, SDM660_SLAVE_HMSS_L3, SDM660_SLAVE_EBI, SDM660_SLAVE_BIMC_SNOC);
+DEFINE_QNODE(mas_mnoc_bimc, SDM660_MASTER_MNOC_BIMC, 4, 2, -1, true, NOC_QOS_MODE_BYPASS, 0, 2, SDM660_SLAVE_HMSS_L3, SDM660_SLAVE_EBI, SDM660_SLAVE_BIMC_SNOC);
+DEFINE_QNODE(mas_snoc_bimc, SDM660_MASTER_SNOC_BIMC, 4, 3, -1, false, -1, 0, -1, SDM660_SLAVE_HMSS_L3, SDM660_SLAVE_EBI);
+DEFINE_QNODE(mas_pimem, SDM660_MASTER_PIMEM, 4, 113, -1, true, NOC_QOS_MODE_FIXED, 1, 4, SDM660_SLAVE_HMSS_L3, SDM660_SLAVE_EBI);
+DEFINE_QNODE(mas_snoc_cnoc, SDM660_MASTER_SNOC_CNOC, 8, 52, -1, true, -1, 0, -1, SDM660_SLAVE_CLK_CTL, SDM660_SLAVE_QDSS_CFG, SDM660_SLAVE_QM_CFG, SDM660_SLAVE_SRVC_CNOC, SDM660_SLAVE_UFS_CFG, SDM660_SLAVE_TCSR, SDM660_SLAVE_A2NOC_SMMU_CFG, SDM660_SLAVE_SNOC_CFG, SDM660_SLAVE_TLMM_SOUTH, SDM660_SLAVE_MPM, SDM660_SLAVE_CNOC_MNOC_MMSS_CFG, SDM660_SLAVE_SDCC_2, SDM660_SLAVE_SDCC_1, SDM660_SLAVE_SPDM, SDM660_SLAVE_PMIC_ARB, SDM660_SLAVE_PRNG, SDM660_SLAVE_MSS_CFG, SDM660_SLAVE_GPUSS_CFG, SDM660_SLAVE_IMEM_CFG, SDM660_SLAVE_USB3_0, SDM660_SLAVE_A2NOC_CFG, SDM660_SLAVE_TLMM_NORTH, SDM660_SLAVE_USB_HS, SDM660_SLAVE_PDM, SDM660_SLAVE_TLMM_CENTER, SDM660_SLAVE_AHB2PHY, SDM660_SLAVE_BLSP_2, SDM660_SLAVE_BLSP_1, SDM660_SLAVE_PIMEM_CFG, SDM660_SLAVE_GLM, SDM660_SLAVE_MESSAGE_RAM, SDM660_SLAVE_BIMC_CFG, SDM660_SLAVE_CNOC_MNOC_CFG);
+DEFINE_QNODE(mas_qdss_dap, SDM660_MASTER_QDSS_DAP, 8, 49, -1, true, -1, 0, -1, SDM660_SLAVE_CLK_CTL, SDM660_SLAVE_QDSS_CFG, SDM660_SLAVE_QM_CFG, SDM660_SLAVE_SRVC_CNOC, SDM660_SLAVE_UFS_CFG, SDM660_SLAVE_TCSR, SDM660_SLAVE_A2NOC_SMMU_CFG, SDM660_SLAVE_SNOC_CFG, SDM660_SLAVE_TLMM_SOUTH, SDM660_SLAVE_MPM, SDM660_SLAVE_CNOC_MNOC_MMSS_CFG, SDM660_SLAVE_SDCC_2, SDM660_SLAVE_SDCC_1, SDM660_SLAVE_SPDM, SDM660_SLAVE_PMIC_ARB, SDM660_SLAVE_PRNG, SDM660_SLAVE_MSS_CFG, SDM660_SLAVE_GPUSS_CFG, SDM660_SLAVE_IMEM_CFG, SDM660_SLAVE_USB3_0, SDM660_SLAVE_A2NOC_CFG, SDM660_SLAVE_TLMM_NORTH, SDM660_SLAVE_USB_HS, SDM660_SLAVE_PDM, SDM660_SLAVE_TLMM_CENTER, SDM660_SLAVE_AHB2PHY, SDM660_SLAVE_BLSP_2, SDM660_SLAVE_BLSP_1, SDM660_SLAVE_PIMEM_CFG, SDM660_SLAVE_GLM, SDM660_SLAVE_MESSAGE_RAM, SDM660_SLAVE_CNOC_A2NOC, SDM660_SLAVE_BIMC_CFG, SDM660_SLAVE_CNOC_MNOC_CFG);
+DEFINE_QNODE(mas_apss_proc, SDM660_MASTER_APPS_PROC, 16, 0, -1, true, -1, 0, -1, SDM660_SLAVE_GNOC_SNOC, SDM660_SLAVE_GNOC_BIMC);
+DEFINE_QNODE(mas_cnoc_mnoc_mmss_cfg, SDM660_MASTER_CNOC_MNOC_MMSS_CFG, 8, 4, -1, true, -1, 0, -1, SDM660_SLAVE_VENUS_THROTTLE_CFG, SDM660_SLAVE_VENUS_CFG, SDM660_SLAVE_CAMERA_THROTTLE_CFG, SDM660_SLAVE_SMMU_CFG, SDM660_SLAVE_CAMERA_CFG, SDM660_SLAVE_CSI_PHY_CFG, SDM660_SLAVE_DISPLAY_THROTTLE_CFG, SDM660_SLAVE_DISPLAY_CFG, SDM660_SLAVE_MMSS_CLK_CFG, SDM660_SLAVE_MNOC_MPU_CFG, SDM660_SLAVE_MISC_CFG, SDM660_SLAVE_MMSS_CLK_XPU_CFG);
+DEFINE_QNODE(mas_cnoc_mnoc_cfg, SDM660_MASTER_CNOC_MNOC_CFG, 4, 5, -1, true, -1, 0, -1, SDM660_SLAVE_SRVC_MNOC);
+DEFINE_QNODE(mas_cpp, SDM660_MASTER_CPP, 16, 115, -1, true, NOC_QOS_MODE_BYPASS, 0, 4, SDM660_SLAVE_MNOC_BIMC);
+DEFINE_QNODE(mas_jpeg, SDM660_MASTER_JPEG, 16, 7, -1, true, NOC_QOS_MODE_BYPASS, 0, 6, SDM660_SLAVE_MNOC_BIMC);
+DEFINE_QNODE(mas_mdp_p0, SDM660_MASTER_MDP_P0, 16, 8, -1, true, NOC_QOS_MODE_BYPASS, 0, 0, SDM660_SLAVE_MNOC_BIMC); /* vrail-comp???? */
+DEFINE_QNODE(mas_mdp_p1, SDM660_MASTER_MDP_P1, 16, 61, -1, true, NOC_QOS_MODE_BYPASS, 0, 1, SDM660_SLAVE_MNOC_BIMC); /* vrail-comp??? */
+DEFINE_QNODE(mas_venus, SDM660_MASTER_VENUS, 16, 9, -1, true, NOC_QOS_MODE_BYPASS, 0, 1, SDM660_SLAVE_MNOC_BIMC);
+DEFINE_QNODE(mas_vfe, SDM660_MASTER_VFE, 16, 11, -1, true, NOC_QOS_MODE_BYPASS, 0, 5, SDM660_SLAVE_MNOC_BIMC);
+DEFINE_QNODE(mas_qdss_etr, SDM660_MASTER_QDSS_ETR, 8, 31, -1, true, NOC_QOS_MODE_FIXED, 1, 1, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IMEM, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_SNOC_BIMC);
+DEFINE_QNODE(mas_qdss_bam, SDM660_MASTER_QDSS_BAM, 4, 19, -1, true, NOC_QOS_MODE_FIXED, 1, 0, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IMEM, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_SNOC_BIMC);
+DEFINE_QNODE(mas_snoc_cfg, SDM660_MASTER_SNOC_CFG, 4, 20, -1, false, -1, 0, -1, SDM660_SLAVE_SRVC_SNOC);
+DEFINE_QNODE(mas_bimc_snoc, SDM660_MASTER_BIMC_SNOC, 8, 21, -1, false, -1, 0, -1, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IPA, SDM660_SLAVE_QDSS_STM, SDM660_SLAVE_LPASS, SDM660_SLAVE_HMSS, SDM660_SLAVE_CDSP, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_WLAN, SDM660_SLAVE_IMEM);
+DEFINE_QNODE(mas_gnoc_snoc, SDM660_MASTER_GNOC_SNOC, 8, 150, -1, false, -1, 0, -1, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IPA, SDM660_SLAVE_QDSS_STM, SDM660_SLAVE_LPASS, SDM660_SLAVE_HMSS, SDM660_SLAVE_CDSP, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_WLAN, SDM660_SLAVE_IMEM);
+DEFINE_QNODE(mas_a2noc_snoc, SDM660_MASTER_A2NOC_SNOC, 16, 112, -1, false, -1, 0, -1, SDM660_SLAVE_PIMEM, SDM660_SLAVE_IPA, SDM660_SLAVE_QDSS_STM, SDM660_SLAVE_LPASS, SDM660_SLAVE_HMSS, SDM660_SLAVE_SNOC_BIMC, SDM660_SLAVE_CDSP, SDM660_SLAVE_SNOC_CNOC, SDM660_SLAVE_WLAN, SDM660_SLAVE_IMEM);
+DEFINE_QNODE(slv_a2noc_snoc, SDM660_SLAVE_A2NOC_SNOC, 16, -1, 143, false, -1, 0, -1, SDM660_MASTER_A2NOC_SNOC);
+DEFINE_QNODE(slv_ebi, SDM660_SLAVE_EBI, 4, -1, 0, false, -1, 0, -1, 0);
+DEFINE_QNODE(slv_hmss_l3, SDM660_SLAVE_HMSS_L3, 4, -1, 160, false, -1, 0, -1, 0);
+DEFINE_QNODE(slv_bimc_snoc, SDM660_SLAVE_BIMC_SNOC, 4, -1, 2, false, -1, 0, -1, SDM660_MASTER_BIMC_SNOC);
+DEFINE_QNODE(slv_cnoc_a2noc, SDM660_SLAVE_CNOC_A2NOC, 8, -1, 208, true, -1, 0, -1, SDM660_MASTER_CNOC_A2NOC);
+DEFINE_QNODE(slv_mpm, SDM660_SLAVE_MPM, 4, -1, 62, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_pmic_arb, SDM660_SLAVE_PMIC_ARB, 4, -1, 59, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_tlmm_north, SDM660_SLAVE_TLMM_NORTH, 8, -1, 214, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_tcsr, SDM660_SLAVE_TCSR, 4, -1, 50, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_pimem_cfg, SDM660_SLAVE_PIMEM_CFG, 4, -1, 167, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_imem_cfg, SDM660_SLAVE_IMEM_CFG, 4, -1, 54, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_message_ram, SDM660_SLAVE_MESSAGE_RAM, 4, -1, 55, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_glm, SDM660_SLAVE_GLM, 4, -1, 209, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_bimc_cfg, SDM660_SLAVE_BIMC_CFG, 4, -1, 56, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_prng, SDM660_SLAVE_PRNG, 4, -1, 44, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_spdm, SDM660_SLAVE_SPDM, 4, -1, 60, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_qdss_cfg, SDM660_SLAVE_QDSS_CFG, 4, -1, 63, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_BLSP_1, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(slv_snoc_cfg, SDM660_SLAVE_SNOC_CFG, 4, -1, 70, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_qm_cfg, SDM660_SLAVE_QM_CFG, 4, -1, 212, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_clk_ctl, SDM660_SLAVE_CLK_CTL, 4, -1, 47, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_mss_cfg, SDM660_SLAVE_MSS_CFG, 4, -1, 48, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_tlmm_south, SDM660_SLAVE_TLMM_SOUTH, 4, -1, 217, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_ufs_cfg, SDM660_SLAVE_UFS_CFG, 4, -1, 92, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_a2noc_cfg, SDM660_SLAVE_A2NOC_CFG, 4, -1, 150, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_a2noc_smmu_cfg, SDM660_SLAVE_A2NOC_SMMU_CFG, 8, -1, 152, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_gpuss_cfg, SDM660_SLAVE_GPUSS_CFG, 8, -1, 11, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_ahb2phy, SDM660_SLAVE_AHB2PHY, 4, -1, 163, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_blsp_1, SDM660_SLAVE_BLSP_1, 4, -1, 39, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_sdcc_1, SDM660_SLAVE_SDCC_1, 4, -1, 31, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_sdcc_2, SDM660_SLAVE_SDCC_2, 4, -1, 33, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_tlmm_center, SDM660_SLAVE_TLMM_CENTER, 4, -1, 218, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_blsp_2, SDM660_SLAVE_BLSP_2, 4, -1, 37, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_pdm, SDM660_SLAVE_PDM, 4, -1, 41, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_cnoc_mnoc_mmss_cfg, SDM660_SLAVE_CNOC_MNOC_MMSS_CFG, 8, -1, 58, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_MMSS_CFG);
+DEFINE_QNODE(slv_usb_hs, SDM660_SLAVE_USB_HS, 4, -1, 40, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_usb3_0, SDM660_SLAVE_USB3_0, 4, -1, 22, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_srvc_cnoc, SDM660_SLAVE_SRVC_CNOC, 4, -1, 76, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_gnoc_bimc, SDM660_SLAVE_GNOC_BIMC, 16, -1, 210, true, -1, 0, -1, SDM660_MASTER_GNOC_BIMC);
+DEFINE_QNODE(slv_gnoc_snoc, SDM660_SLAVE_GNOC_SNOC, 8, -1, 211, true, -1, 0, -1, SDM660_MASTER_GNOC_SNOC);
+DEFINE_QNODE(slv_camera_cfg, SDM660_SLAVE_CAMERA_CFG, 4, -1, 3, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_camera_throttle_cfg, SDM660_SLAVE_CAMERA_THROTTLE_CFG, 4, -1, 154, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_misc_cfg, SDM660_SLAVE_MISC_CFG, 4, -1, 8, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_venus_throttle_cfg, SDM660_SLAVE_VENUS_THROTTLE_CFG, 4, -1, 178, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_venus_cfg, SDM660_SLAVE_VENUS_CFG, 4, -1, 10, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_mmss_clk_xpu_cfg, SDM660_SLAVE_MMSS_CLK_XPU_CFG, 4, -1, 13, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_mmss_clk_cfg, SDM660_SLAVE_MMSS_CLK_CFG, 4, -1, 12, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_mnoc_mpu_cfg, SDM660_SLAVE_MNOC_MPU_CFG, 4, -1, 14, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_display_cfg, SDM660_SLAVE_DISPLAY_CFG, 4, -1, 4, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_csi_phy_cfg, SDM660_SLAVE_CSI_PHY_CFG, 4, -1, 224, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_display_throttle_cfg, SDM660_SLAVE_DISPLAY_THROTTLE_CFG, 4, -1, 156, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_smmu_cfg, SDM660_SLAVE_SMMU_CFG, 8, -1, 205, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_mnoc_bimc, SDM660_SLAVE_MNOC_BIMC, 16, -1, 16, true, -1, 0, -1, SDM660_MASTER_MNOC_BIMC);
+DEFINE_QNODE(slv_srvc_mnoc, SDM660_SLAVE_SRVC_MNOC, 8, -1, 17, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_hmss, SDM660_SLAVE_HMSS, 8, -1, 20, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_lpass, SDM660_SLAVE_LPASS, 4, -1, 21, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_wlan, SDM660_SLAVE_WLAN, 4, -1, 206, false, -1, 0, -1, 0);
+DEFINE_QNODE(slv_cdsp, SDM660_SLAVE_CDSP, 4, -1, 221, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_ipa, SDM660_SLAVE_IPA, 4, -1, 183, true, -1, 0, -1, 0);
+DEFINE_QNODE(slv_snoc_bimc, SDM660_SLAVE_SNOC_BIMC, 16, -1, 24, false, -1, 0, -1, SDM660_MASTER_SNOC_BIMC);
+DEFINE_QNODE(slv_snoc_cnoc, SDM660_SLAVE_SNOC_CNOC, 8, -1, 25, false, -1, 0, -1, SDM660_MASTER_SNOC_CNOC);
+DEFINE_QNODE(slv_imem, SDM660_SLAVE_IMEM, 8, -1, 26, false, -1, 0, -1, 0);
+DEFINE_QNODE(slv_pimem, SDM660_SLAVE_PIMEM, 8, -1, 166, false, -1, 0, -1, 0);
+DEFINE_QNODE(slv_qdss_stm, SDM660_SLAVE_QDSS_STM, 4, -1, 30, false, -1, 0, -1, 0);
+DEFINE_QNODE(slv_srvc_snoc, SDM660_SLAVE_SRVC_SNOC, 16, -1, 29, false, -1, 0, -1, 0);
+
+static struct qcom_icc_node *sdm660_a2noc_nodes[] = {
+       [MASTER_IPA] = &mas_ipa,
+       [MASTER_CNOC_A2NOC] = &mas_cnoc_a2noc,
+       [MASTER_SDCC_1] = &mas_sdcc_1,
+       [MASTER_SDCC_2] = &mas_sdcc_2,
+       [MASTER_BLSP_1] = &mas_blsp_1,
+       [MASTER_BLSP_2] = &mas_blsp_2,
+       [MASTER_UFS] = &mas_ufs,
+       [MASTER_USB_HS] = &mas_usb_hs,
+       [MASTER_USB3] = &mas_usb3,
+       [MASTER_CRYPTO_C0] = &mas_crypto,
+       [SLAVE_A2NOC_SNOC] = &slv_a2noc_snoc,
+};
+
+static const struct regmap_config sdm660_a2noc_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x20000,
+       .fast_io        = true,
+};
+
+static struct qcom_icc_desc sdm660_a2noc = {
+       .nodes = sdm660_a2noc_nodes,
+       .num_nodes = ARRAY_SIZE(sdm660_a2noc_nodes),
+       .regmap_cfg = &sdm660_a2noc_regmap_config,
+};
+
+static struct qcom_icc_node *sdm660_bimc_nodes[] = {
+       [MASTER_GNOC_BIMC] = &mas_gnoc_bimc,
+       [MASTER_OXILI] = &mas_oxili,
+       [MASTER_MNOC_BIMC] = &mas_mnoc_bimc,
+       [MASTER_SNOC_BIMC] = &mas_snoc_bimc,
+       [MASTER_PIMEM] = &mas_pimem,
+       [SLAVE_EBI] = &slv_ebi,
+       [SLAVE_HMSS_L3] = &slv_hmss_l3,
+       [SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
+};
+
+static const struct regmap_config sdm660_bimc_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x80000,
+       .fast_io        = true,
+};
+
+static struct qcom_icc_desc sdm660_bimc = {
+       .nodes = sdm660_bimc_nodes,
+       .num_nodes = ARRAY_SIZE(sdm660_bimc_nodes),
+       .regmap_cfg = &sdm660_bimc_regmap_config,
+};
+
+static struct qcom_icc_node *sdm660_cnoc_nodes[] = {
+       [MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
+       [MASTER_QDSS_DAP] = &mas_qdss_dap,
+       [SLAVE_CNOC_A2NOC] = &slv_cnoc_a2noc,
+       [SLAVE_MPM] = &slv_mpm,
+       [SLAVE_PMIC_ARB] = &slv_pmic_arb,
+       [SLAVE_TLMM_NORTH] = &slv_tlmm_north,
+       [SLAVE_TCSR] = &slv_tcsr,
+       [SLAVE_PIMEM_CFG] = &slv_pimem_cfg,
+       [SLAVE_IMEM_CFG] = &slv_imem_cfg,
+       [SLAVE_MESSAGE_RAM] = &slv_message_ram,
+       [SLAVE_GLM] = &slv_glm,
+       [SLAVE_BIMC_CFG] = &slv_bimc_cfg,
+       [SLAVE_PRNG] = &slv_prng,
+       [SLAVE_SPDM] = &slv_spdm,
+       [SLAVE_QDSS_CFG] = &slv_qdss_cfg,
+       [SLAVE_CNOC_MNOC_CFG] = &slv_cnoc_mnoc_cfg,
+       [SLAVE_SNOC_CFG] = &slv_snoc_cfg,
+       [SLAVE_QM_CFG] = &slv_qm_cfg,
+       [SLAVE_CLK_CTL] = &slv_clk_ctl,
+       [SLAVE_MSS_CFG] = &slv_mss_cfg,
+       [SLAVE_TLMM_SOUTH] = &slv_tlmm_south,
+       [SLAVE_UFS_CFG] = &slv_ufs_cfg,
+       [SLAVE_A2NOC_CFG] = &slv_a2noc_cfg,
+       [SLAVE_A2NOC_SMMU_CFG] = &slv_a2noc_smmu_cfg,
+       [SLAVE_GPUSS_CFG] = &slv_gpuss_cfg,
+       [SLAVE_AHB2PHY] = &slv_ahb2phy,
+       [SLAVE_BLSP_1] = &slv_blsp_1,
+       [SLAVE_SDCC_1] = &slv_sdcc_1,
+       [SLAVE_SDCC_2] = &slv_sdcc_2,
+       [SLAVE_TLMM_CENTER] = &slv_tlmm_center,
+       [SLAVE_BLSP_2] = &slv_blsp_2,
+       [SLAVE_PDM] = &slv_pdm,
+       [SLAVE_CNOC_MNOC_MMSS_CFG] = &slv_cnoc_mnoc_mmss_cfg,
+       [SLAVE_USB_HS] = &slv_usb_hs,
+       [SLAVE_USB3_0] = &slv_usb3_0,
+       [SLAVE_SRVC_CNOC] = &slv_srvc_cnoc,
+};
+
+static const struct regmap_config sdm660_cnoc_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x10000,
+       .fast_io        = true,
+};
+
+static struct qcom_icc_desc sdm660_cnoc = {
+       .nodes = sdm660_cnoc_nodes,
+       .num_nodes = ARRAY_SIZE(sdm660_cnoc_nodes),
+       .regmap_cfg = &sdm660_cnoc_regmap_config,
+};
+
+static struct qcom_icc_node *sdm660_gnoc_nodes[] = {
+       [MASTER_APSS_PROC] = &mas_apss_proc,
+       [SLAVE_GNOC_BIMC] = &slv_gnoc_bimc,
+       [SLAVE_GNOC_SNOC] = &slv_gnoc_snoc,
+};
+
+static const struct regmap_config sdm660_gnoc_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0xe000,
+       .fast_io        = true,
+};
+
+static struct qcom_icc_desc sdm660_gnoc = {
+       .nodes = sdm660_gnoc_nodes,
+       .num_nodes = ARRAY_SIZE(sdm660_gnoc_nodes),
+       .regmap_cfg = &sdm660_gnoc_regmap_config,
+};
+
+static struct qcom_icc_node *sdm660_mnoc_nodes[] = {
+       [MASTER_CPP] = &mas_cpp,
+       [MASTER_JPEG] = &mas_jpeg,
+       [MASTER_MDP_P0] = &mas_mdp_p0,
+       [MASTER_MDP_P1] = &mas_mdp_p1,
+       [MASTER_VENUS] = &mas_venus,
+       [MASTER_VFE] = &mas_vfe,
+       [MASTER_CNOC_MNOC_MMSS_CFG] = &mas_cnoc_mnoc_mmss_cfg,
+       [MASTER_CNOC_MNOC_CFG] = &mas_cnoc_mnoc_cfg,
+       [SLAVE_CAMERA_CFG] = &slv_camera_cfg,
+       [SLAVE_CAMERA_THROTTLE_CFG] = &slv_camera_throttle_cfg,
+       [SLAVE_MISC_CFG] = &slv_misc_cfg,
+       [SLAVE_VENUS_THROTTLE_CFG] = &slv_venus_throttle_cfg,
+       [SLAVE_VENUS_CFG] = &slv_venus_cfg,
+       [SLAVE_MMSS_CLK_XPU_CFG] = &slv_mmss_clk_xpu_cfg,
+       [SLAVE_MMSS_CLK_CFG] = &slv_mmss_clk_cfg,
+       [SLAVE_MNOC_MPU_CFG] = &slv_mnoc_mpu_cfg,
+       [SLAVE_DISPLAY_CFG] = &slv_display_cfg,
+       [SLAVE_CSI_PHY_CFG] = &slv_csi_phy_cfg,
+       [SLAVE_DISPLAY_THROTTLE_CFG] = &slv_display_throttle_cfg,
+       [SLAVE_SMMU_CFG] = &slv_smmu_cfg,
+       [SLAVE_SRVC_MNOC] = &slv_srvc_mnoc,
+       [SLAVE_MNOC_BIMC] = &slv_mnoc_bimc,
+};
+
+static const struct regmap_config sdm660_mnoc_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x10000,
+       .fast_io        = true,
+};
+
+static struct qcom_icc_desc sdm660_mnoc = {
+       .nodes = sdm660_mnoc_nodes,
+       .num_nodes = ARRAY_SIZE(sdm660_mnoc_nodes),
+       .regmap_cfg = &sdm660_mnoc_regmap_config,
+};
+
+static struct qcom_icc_node *sdm660_snoc_nodes[] = {
+       [MASTER_QDSS_ETR] = &mas_qdss_etr,
+       [MASTER_QDSS_BAM] = &mas_qdss_bam,
+       [MASTER_SNOC_CFG] = &mas_snoc_cfg,
+       [MASTER_BIMC_SNOC] = &mas_bimc_snoc,
+       [MASTER_A2NOC_SNOC] = &mas_a2noc_snoc,
+       [MASTER_GNOC_SNOC] = &mas_gnoc_snoc,
+       [SLAVE_HMSS] = &slv_hmss,
+       [SLAVE_LPASS] = &slv_lpass,
+       [SLAVE_WLAN] = &slv_wlan,
+       [SLAVE_CDSP] = &slv_cdsp,
+       [SLAVE_IPA] = &slv_ipa,
+       [SLAVE_SNOC_BIMC] = &slv_snoc_bimc,
+       [SLAVE_SNOC_CNOC] = &slv_snoc_cnoc,
+       [SLAVE_IMEM] = &slv_imem,
+       [SLAVE_PIMEM] = &slv_pimem,
+       [SLAVE_QDSS_STM] = &slv_qdss_stm,
+       [SLAVE_SRVC_SNOC] = &slv_srvc_snoc,
+};
+
+static const struct regmap_config sdm660_snoc_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x20000,
+       .fast_io        = true,
+};
+
+static struct qcom_icc_desc sdm660_snoc = {
+       .nodes = sdm660_snoc_nodes,
+       .num_nodes = ARRAY_SIZE(sdm660_snoc_nodes),
+       .regmap_cfg = &sdm660_snoc_regmap_config,
+};
+
+static int qcom_icc_bimc_set_qos_health(struct regmap *rmap,
+                                       struct qcom_icc_qos *qos,
+                                       int regnum)
+{
+       u32 val;
+       u32 mask;
+
+       val = qos->prio_level;
+       mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;
+
+       val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
+       mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;
+
+       /* LIMITCMDS is not present on M_BKE_HEALTH_3 */
+       if (regnum != 3) {
+               val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
+               mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
+       }
+
+       return regmap_update_bits(rmap,
+                                 M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
+                                 mask, val);
+}
+
/*
 * qcom_icc_set_bimc_qos() - apply QoS configuration for a BIMC master port
 * @src: interconnect node whose QoS parameters are programmed
 * @max_bw: aggregated bandwidth (currently unused in this function)
 * @bypass_mode: caller's bypass hint (currently unused; the effective mode is
 *		 taken from @src's qos.qos_mode instead)
 *
 * Returns 0 on success or a negative regmap error code.
 */
static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw,
				 bool bypass_mode)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	u32 val = 0;
	int i, rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	/* qos_mode == -1 means "no mode configured": default to bypass */
	if (qn->qos.qos_mode != -1)
		mode = qn->qos.qos_mode;

	/* QoS Priority: The QoS Health parameters are getting considered
	 * only if we are NOT in Bypass Mode.
	 */
	if (mode != NOC_QOS_MODE_BYPASS) {
		/* Program all four M_BKE_HEALTH_n registers, highest first */
		for (i = 3; i >= 0; i--) {
			rc = qcom_icc_bimc_set_qos_health(qp->regmap,
							  &qn->qos, i);
			if (rc)
				return rc;
		}

		/* Set BKE_EN to 1 when Fixed, Regulator or Limiter Mode */
		val = 1;
	}

	/* In bypass mode val stays 0, i.e. BKE is disabled */
	return regmap_update_bits(qp->regmap, M_BKE_EN_ADDR(qn->qos.qos_port),
				  M_BKE_EN_EN_BMASK, val);
}
+
+static int qcom_icc_noc_set_qos_priority(struct regmap *rmap,
+                                        struct qcom_icc_qos *qos)
+{
+       u32 val;
+       int rc;
+
+       /* Must be updated one at a time, P1 first, P0 last */
+       val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
+       rc = regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
+                               NOC_QOS_PRIORITY_MASK, val);
+       if (rc)
+               return rc;
+
+       val = qos->prio_level << NOC_QOS_PRIORITY_P0_SHIFT;
+       return regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
+                                 NOC_QOS_PRIORITY_MASK, val);
+}
+
/*
 * qcom_icc_set_noc_qos() - apply QoS configuration for a NoC master port
 * @src: interconnect node whose QoS parameters are programmed
 * @max_bw: aggregated bandwidth (currently unused in this function)
 *
 * Nodes without a QoS port (qos_port < 0) are skipped; their bandwidth is
 * aggregated on the parent node. Returns 0 on success or a regmap error.
 */
static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	int rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_port < 0) {
		dev_dbg(src->provider->dev,
			"NoC QoS: Skipping %s: vote aggregated on parent.\n",
			qn->name);
		return 0;
	}

	/* qos_mode == -1 means "no mode configured": default to bypass */
	if (qn->qos.qos_mode != -1)
		mode = qn->qos.qos_mode;

	if (mode == NOC_QOS_MODE_FIXED) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n",
			qn->name);
		/* Fixed mode additionally needs the priority fields written */
		rc = qcom_icc_noc_set_qos_priority(qp->regmap, &qn->qos);
		if (rc)
			return rc;
	} else if (mode == NOC_QOS_MODE_BYPASS) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n",
			qn->name);
	}

	/* Finally latch the selected mode into the port's MODE register */
	return regmap_update_bits(qp->regmap,
				  NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
				  NOC_QOS_MODEn_MASK, mode);
}
+
+static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
+{
+       struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
+       struct qcom_icc_node *qn = node->data;
+
+       dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);
+
+       if (qp->is_bimc_node)
+               return qcom_icc_set_bimc_qos(node, sum_bw,
+                               (qn->qos.qos_mode == NOC_QOS_MODE_BYPASS));
+
+       return qcom_icc_set_noc_qos(node, sum_bw);
+}
+
+/*
+ * qcom_icc_rpm_set - send bandwidth votes to the RPM over SMD.
+ * @mas_rpm_id: RPM id of the master endpoint, or -1 to skip the master vote
+ * @slv_rpm_id: RPM id of the slave endpoint, or -1 to skip the slave vote
+ * @sum_bw: aggregated bandwidth value to vote
+ *
+ * Returns 0 on success or the first qcom_icc_rpm_smd_send() error.
+ */
+static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
+{
+       int ret = 0;
+
+       if (mas_rpm_id != -1) {
+               ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+                                           RPM_BUS_MASTER_REQ,
+                                           mas_rpm_id,
+                                           sum_bw);
+               if (ret) {
+                       pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
+                              mas_rpm_id, ret);
+                       return ret;
+               }
+       }
+
+       if (slv_rpm_id != -1) {
+               ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+                                           RPM_BUS_SLAVE_REQ,
+                                           slv_rpm_id,
+                                           sum_bw);
+               if (ret) {
+                       pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
+                              slv_rpm_id, ret);
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * qcom_icc_set - icc_provider .set callback.
+ * @src: source node of the path being set
+ * @dst: destination node (unused here; aggregation walks all nodes)
+ *
+ * Re-aggregates bandwidth over every node of this provider, then either
+ * votes the result to the RPM (non-AP-owned nodes) or programs QoS
+ * registers directly (AP-owned nodes with a valid qos_mode), and finally
+ * scales the provider's bus clocks to match the requested rate.
+ */
+static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+       struct qcom_icc_provider *qp;
+       struct qcom_icc_node *qn;
+       struct icc_provider *provider;
+       struct icc_node *n;
+       u64 sum_bw;
+       u64 max_peak_bw;
+       u64 rate;
+       u32 agg_avg = 0;
+       u32 agg_peak = 0;
+       int ret, i;
+
+       qn = src->data;
+       provider = src->provider;
+       qp = to_qcom_provider(provider);
+
+       /* Fold avg/peak requests of all nodes on this bus into one vote */
+       list_for_each_entry(n, &provider->nodes, node_list)
+               provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+                                   &agg_avg, &agg_peak);
+
+       sum_bw = icc_units_to_bps(agg_avg);
+       max_peak_bw = icc_units_to_bps(agg_peak);
+
+       if (!qn->qos.ap_owned) {
+               /* send bandwidth request message to the RPM processor */
+               ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
+               if (ret)
+                       return ret;
+       } else if (qn->qos.qos_mode != -1) {
+               /* set bandwidth directly from the AP */
+               ret = qcom_icc_qos_set(src, sum_bw);
+               if (ret)
+                       return ret;
+       }
+
+       /* Clock rate = bandwidth / bus width (do_div modifies rate in place) */
+       rate = max(sum_bw, max_peak_bw);
+
+       do_div(rate, qn->buswidth);
+
+       /* Avoid redundant clk_set_rate calls when the rate is unchanged */
+       if (qn->rate == rate)
+               return 0;
+
+       for (i = 0; i < qp->num_clks; i++) {
+               ret = clk_set_rate(qp->bus_clks[i].clk, rate);
+               if (ret) {
+                       pr_err("%s clk_set_rate error: %d\n",
+                              qp->bus_clks[i].id, ret);
+                       return ret;
+               }
+       }
+
+       /* Cache the applied rate for the early-out check above */
+       qn->rate = rate;
+
+       return 0;
+}
+
+/*
+ * qnoc_probe - platform probe for the SDM660 NoC providers.
+ *
+ * Defers until the RPM SMD proxy is up, picks the clock set based on the
+ * compatible (the MNOC uses the multimedia clocks; the BIMC additionally
+ * sets is_bimc_node), maps and regmaps the bus registers, enables the bus
+ * clocks, registers the icc provider and creates one icc node (plus its
+ * links) per entry in the match data's node table.
+ */
+static int qnoc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       const struct qcom_icc_desc *desc;
+       struct icc_onecell_data *data;
+       struct icc_provider *provider;
+       struct qcom_icc_node **qnodes;
+       struct qcom_icc_provider *qp;
+       struct icc_node *node;
+       struct resource *res;
+       size_t num_nodes, i;
+       int ret;
+
+       /* wait for the RPM proxy */
+       if (!qcom_icc_rpm_smd_available())
+               return -EPROBE_DEFER;
+
+       desc = of_device_get_match_data(dev);
+       if (!desc)
+               return -EINVAL;
+
+       qnodes = desc->nodes;
+       num_nodes = desc->num_nodes;
+
+       qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+       if (!qp)
+               return -ENOMEM;
+
+       data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+                           GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       /* MNOC needs the multimedia clock set; everything else the core set */
+       if (of_device_is_compatible(dev->of_node, "qcom,sdm660-mnoc")) {
+               qp->bus_clks = devm_kmemdup(dev, bus_mm_clocks,
+                                           sizeof(bus_mm_clocks), GFP_KERNEL);
+               qp->num_clks = ARRAY_SIZE(bus_mm_clocks);
+       } else {
+               if (of_device_is_compatible(dev->of_node, "qcom,sdm660-bimc"))
+                       qp->is_bimc_node = true;
+
+               qp->bus_clks = devm_kmemdup(dev, bus_clocks, sizeof(bus_clocks),
+                                           GFP_KERNEL);
+               qp->num_clks = ARRAY_SIZE(bus_clocks);
+       }
+       if (!qp->bus_clks)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENODEV;
+
+       qp->mmio = devm_ioremap_resource(dev, res);
+       if (IS_ERR(qp->mmio)) {
+               dev_err(dev, "Cannot ioremap interconnect bus resource\n");
+               return PTR_ERR(qp->mmio);
+       }
+
+       qp->regmap = devm_regmap_init_mmio(dev, qp->mmio, desc->regmap_cfg);
+       if (IS_ERR(qp->regmap)) {
+               dev_err(dev, "Cannot regmap interconnect bus resource\n");
+               return PTR_ERR(qp->regmap);
+       }
+
+       ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+       if (ret)
+               return ret;
+
+       /* Clocks are enabled here and disabled in qnoc_remove()/err path */
+       ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+       if (ret)
+               return ret;
+
+       provider = &qp->provider;
+       INIT_LIST_HEAD(&provider->nodes);
+       provider->dev = dev;
+       provider->set = qcom_icc_set;
+       provider->aggregate = icc_std_aggregate;
+       provider->xlate = of_icc_xlate_onecell;
+       provider->data = data;
+
+       ret = icc_provider_add(provider);
+       if (ret) {
+               dev_err(dev, "error adding interconnect provider: %d\n", ret);
+               clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+               return ret;
+       }
+
+       for (i = 0; i < num_nodes; i++) {
+               size_t j;
+
+               node = icc_node_create(qnodes[i]->id);
+               if (IS_ERR(node)) {
+                       ret = PTR_ERR(node);
+                       goto err;
+               }
+
+               node->name = qnodes[i]->name;
+               node->data = qnodes[i];
+               icc_node_add(node, provider);
+
+               for (j = 0; j < qnodes[i]->num_links; j++)
+                       icc_link_create(node, qnodes[i]->links[j]);
+
+               data->nodes[i] = node;
+       }
+       data->num_nodes = num_nodes;
+       platform_set_drvdata(pdev, qp);
+
+       return 0;
+err:
+       /* Undo in reverse: nodes, clocks, then the provider registration */
+       icc_nodes_remove(provider);
+       clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+       icc_provider_del(provider);
+
+       return ret;
+}
+
+/* qnoc_remove - tear down nodes, bus clocks and the icc provider. */
+static int qnoc_remove(struct platform_device *pdev)
+{
+       struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+       icc_nodes_remove(&qp->provider);
+       clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+       return icc_provider_del(&qp->provider);
+}
+
+/* DT match table: one entry per SDM660 NoC, with its node descriptor. */
+static const struct of_device_id sdm660_noc_of_match[] = {
+       { .compatible = "qcom,sdm660-a2noc", .data = &sdm660_a2noc },
+       { .compatible = "qcom,sdm660-bimc", .data = &sdm660_bimc },
+       { .compatible = "qcom,sdm660-cnoc", .data = &sdm660_cnoc },
+       { .compatible = "qcom,sdm660-gnoc", .data = &sdm660_gnoc },
+       { .compatible = "qcom,sdm660-mnoc", .data = &sdm660_mnoc },
+       { .compatible = "qcom,sdm660-snoc", .data = &sdm660_snoc },
+       { },
+};
+MODULE_DEVICE_TABLE(of, sdm660_noc_of_match);
+
+/* Platform driver registration and module metadata. */
+static struct platform_driver sdm660_noc_driver = {
+       .probe = qnoc_probe,
+       .remove = qnoc_remove,
+       .driver = {
+               .name = "qnoc-sdm660",
+               .of_match_table = sdm660_noc_of_match,
+       },
+};
+module_platform_driver(sdm660_noc_driver);
+MODULE_DESCRIPTION("Qualcomm sdm660 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
new file mode 100644 (file)
index 0000000..579b6ce
--- /dev/null
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ *
+ */
+
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <dt-bindings/interconnect/qcom,sm8350.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sm8350.h"
+
+/*
+ * SM8350 master node definitions for the aggre1/aggre2 NoCs.
+ * NOTE(review): argument order is presumably (name, id, channels,
+ * buswidth, link ids...) per DEFINE_QNODE in icc-rpmh.h — confirm
+ * against that header.
+ */
+DEFINE_QNODE(qhm_qspi, SM8350_MASTER_QSPI_0, 1, 4, SM8350_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_qup0, SM8350_MASTER_QUP_0, 1, 4, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qhm_qup1, SM8350_MASTER_QUP_1, 1, 4, SM8350_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_qup2, SM8350_MASTER_QUP_2, 1, 4, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qnm_a1noc_cfg, SM8350_MASTER_A1NOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(xm_sdc4, SM8350_MASTER_SDCC_4, 1, 8, SM8350_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_mem, SM8350_MASTER_UFS_MEM, 1, 8, SM8350_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_usb3_0, SM8350_MASTER_USB3_0, 1, 8, SM8350_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_usb3_1, SM8350_MASTER_USB3_1, 1, 8, SM8350_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_qdss_bam, SM8350_MASTER_QDSS_BAM, 1, 4, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qnm_a2noc_cfg, SM8350_MASTER_A2NOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qxm_crypto, SM8350_MASTER_CRYPTO, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_ipa, SM8350_MASTER_IPA, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_pcie3_0, SM8350_MASTER_PCIE_0, 1, 8, SM8350_SLAVE_ANOC_PCIE_GEM_NOC);
+DEFINE_QNODE(xm_pcie3_1, SM8350_MASTER_PCIE_1, 1, 8, SM8350_SLAVE_ANOC_PCIE_GEM_NOC);
+DEFINE_QNODE(xm_qdss_etr, SM8350_MASTER_QDSS_ETR, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_sdc2, SM8350_MASTER_SDCC_2, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_ufs_card, SM8350_MASTER_UFS_CARD, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
+/* Config-NoC masters fanning out to the full set of CNOC config slaves. */
+DEFINE_QNODE(qnm_gemnoc_cnoc, SM8350_MASTER_GEM_NOC_CNOC, 1, 16, SM8350_SLAVE_AHB2PHY_SOUTH, SM8350_SLAVE_AHB2PHY_NORTH, SM8350_SLAVE_AOSS, SM8350_SLAVE_APPSS, SM8350_SLAVE_CAMERA_CFG, SM8350_SLAVE_CLK_CTL, SM8350_SLAVE_CDSP_CFG, SM8350_SLAVE_RBCPR_CX_CFG, SM8350_SLAVE_RBCPR_MMCX_CFG, SM8350_SLAVE_RBCPR_MX_CFG, SM8350_SLAVE_CRYPTO_0_CFG, SM8350_SLAVE_CX_RDPM, SM8350_SLAVE_DCC_CFG, SM8350_SLAVE_DISPLAY_CFG, SM8350_SLAVE_GFX3D_CFG, SM8350_SLAVE_HWKM, SM8350_SLAVE_IMEM_CFG, SM8350_SLAVE_IPA_CFG, SM8350_SLAVE_IPC_ROUTER_CFG, SM8350_SLAVE_LPASS, SM8350_SLAVE_CNOC_MSS, SM8350_SLAVE_MX_RDPM, SM8350_SLAVE_PCIE_0_CFG, SM8350_SLAVE_PCIE_1_CFG, SM8350_SLAVE_PDM, SM8350_SLAVE_PIMEM_CFG, SM8350_SLAVE_PKA_WRAPPER_CFG, SM8350_SLAVE_PMU_WRAPPER_CFG, SM8350_SLAVE_QDSS_CFG, SM8350_SLAVE_QSPI_0, SM8350_SLAVE_QUP_0, SM8350_SLAVE_QUP_1, SM8350_SLAVE_QUP_2, SM8350_SLAVE_SDCC_2, SM8350_SLAVE_SDCC_4, SM8350_SLAVE_SECURITY, SM8350_SLAVE_SPSS_CFG, SM8350_SLAVE_TCSR, SM8350_SLAVE_TLMM, SM8350_SLAVE_UFS_CARD_CFG, SM8350_SLAVE_UFS_MEM_CFG, SM8350_SLAVE_USB3_0, SM8350_SLAVE_USB3_1, SM8350_SLAVE_VENUS_CFG, SM8350_SLAVE_VSENSE_CTRL_CFG, SM8350_SLAVE_A1NOC_CFG, SM8350_SLAVE_A2NOC_CFG, SM8350_SLAVE_DDRSS_CFG, SM8350_SLAVE_CNOC_MNOC_CFG, SM8350_SLAVE_SNOC_CFG, SM8350_SLAVE_BOOT_IMEM, SM8350_SLAVE_IMEM, SM8350_SLAVE_PIMEM, SM8350_SLAVE_SERVICE_CNOC, SM8350_SLAVE_QDSS_STM, SM8350_SLAVE_TCU);
+DEFINE_QNODE(qnm_gemnoc_pcie, SM8350_MASTER_GEM_NOC_PCIE_SNOC, 1, 8, SM8350_SLAVE_PCIE_0, SM8350_SLAVE_PCIE_1);
+DEFINE_QNODE(xm_qdss_dap, SM8350_MASTER_QDSS_DAP, 1, 8, SM8350_SLAVE_AHB2PHY_SOUTH, SM8350_SLAVE_AHB2PHY_NORTH, SM8350_SLAVE_AOSS, SM8350_SLAVE_APPSS, SM8350_SLAVE_CAMERA_CFG, SM8350_SLAVE_CLK_CTL, SM8350_SLAVE_CDSP_CFG, SM8350_SLAVE_RBCPR_CX_CFG, SM8350_SLAVE_RBCPR_MMCX_CFG, SM8350_SLAVE_RBCPR_MX_CFG, SM8350_SLAVE_CRYPTO_0_CFG, SM8350_SLAVE_CX_RDPM, SM8350_SLAVE_DCC_CFG, SM8350_SLAVE_DISPLAY_CFG, SM8350_SLAVE_GFX3D_CFG, SM8350_SLAVE_HWKM, SM8350_SLAVE_IMEM_CFG, SM8350_SLAVE_IPA_CFG, SM8350_SLAVE_IPC_ROUTER_CFG, SM8350_SLAVE_LPASS, SM8350_SLAVE_CNOC_MSS, SM8350_SLAVE_MX_RDPM, SM8350_SLAVE_PCIE_0_CFG, SM8350_SLAVE_PCIE_1_CFG, SM8350_SLAVE_PDM, SM8350_SLAVE_PIMEM_CFG, SM8350_SLAVE_PKA_WRAPPER_CFG, SM8350_SLAVE_PMU_WRAPPER_CFG, SM8350_SLAVE_QDSS_CFG, SM8350_SLAVE_QSPI_0, SM8350_SLAVE_QUP_0, SM8350_SLAVE_QUP_1, SM8350_SLAVE_QUP_2, SM8350_SLAVE_SDCC_2, SM8350_SLAVE_SDCC_4, SM8350_SLAVE_SECURITY, SM8350_SLAVE_SPSS_CFG, SM8350_SLAVE_TCSR, SM8350_SLAVE_TLMM, SM8350_SLAVE_UFS_CARD_CFG, SM8350_SLAVE_UFS_MEM_CFG, SM8350_SLAVE_USB3_0, SM8350_SLAVE_USB3_1, SM8350_SLAVE_VENUS_CFG, SM8350_SLAVE_VSENSE_CTRL_CFG, SM8350_SLAVE_A1NOC_CFG, SM8350_SLAVE_A2NOC_CFG, SM8350_SLAVE_DDRSS_CFG, SM8350_SLAVE_CNOC_MNOC_CFG, SM8350_SLAVE_SNOC_CFG, SM8350_SLAVE_BOOT_IMEM, SM8350_SLAVE_IMEM, SM8350_SLAVE_PIMEM, SM8350_SLAVE_SERVICE_CNOC, SM8350_SLAVE_QDSS_STM, SM8350_SLAVE_TCU);
+/* GEM NoC, LPASS, MC, MMSS, NSP and SNOC masters (incl. _disp duplicates
+ * that presumably model the display path — confirm against icc-rpmh docs). */
+DEFINE_QNODE(qnm_cnoc_dc_noc, SM8350_MASTER_CNOC_DC_NOC, 1, 4, SM8350_SLAVE_LLCC_CFG, SM8350_SLAVE_GEM_NOC_CFG);
+DEFINE_QNODE(alm_gpu_tcu, SM8350_MASTER_GPU_TCU, 1, 8, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(alm_sys_tcu, SM8350_MASTER_SYS_TCU, 1, 8, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(chm_apps, SM8350_MASTER_APPSS_PROC, 2, 32, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC, SM8350_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qnm_cmpnoc, SM8350_MASTER_COMPUTE_NOC, 2, 32, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(qnm_gemnoc_cfg, SM8350_MASTER_GEM_NOC_CFG, 1, 4, SM8350_SLAVE_MSS_PROC_MS_MPU_CFG, SM8350_SLAVE_MCDMA_MS_MPU_CFG, SM8350_SLAVE_SERVICE_GEM_NOC_1, SM8350_SLAVE_SERVICE_GEM_NOC_2, SM8350_SLAVE_SERVICE_GEM_NOC);
+DEFINE_QNODE(qnm_gpu, SM8350_MASTER_GFX3D, 2, 32, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_hf, SM8350_MASTER_MNOC_HF_MEM_NOC, 2, 32, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_sf, SM8350_MASTER_MNOC_SF_MEM_NOC, 2, 32, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(qnm_pcie, SM8350_MASTER_ANOC_PCIE_GEM_NOC, 1, 16, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_gc, SM8350_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM8350_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, SM8350_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC, SM8350_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qhm_config_noc, SM8350_MASTER_CNOC_LPASS_AG_NOC, 1, 4, SM8350_SLAVE_LPASS_CORE_CFG, SM8350_SLAVE_LPASS_LPI_CFG, SM8350_SLAVE_LPASS_MPU_CFG, SM8350_SLAVE_LPASS_TOP_CFG, SM8350_SLAVE_SERVICES_LPASS_AML_NOC, SM8350_SLAVE_SERVICE_LPASS_AG_NOC);
+DEFINE_QNODE(llcc_mc, SM8350_MASTER_LLCC, 4, 4, SM8350_SLAVE_EBI1);
+DEFINE_QNODE(qnm_camnoc_hf, SM8350_MASTER_CAMNOC_HF, 2, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qnm_camnoc_icp, SM8350_MASTER_CAMNOC_ICP, 1, 8, SM8350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qnm_camnoc_sf, SM8350_MASTER_CAMNOC_SF, 2, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qnm_mnoc_cfg, SM8350_MASTER_CNOC_MNOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qnm_video0, SM8350_MASTER_VIDEO_P0, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qnm_video1, SM8350_MASTER_VIDEO_P1, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qnm_video_cvp, SM8350_MASTER_VIDEO_PROC, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, SM8350_MASTER_MDP0, 1, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp1, SM8350_MASTER_MDP1, 1, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_rot, SM8350_MASTER_ROTATOR, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qhm_nsp_noc_config, SM8350_MASTER_CDSP_NOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_NSP_NOC);
+DEFINE_QNODE(qxm_nsp, SM8350_MASTER_CDSP_PROC, 2, 32, SM8350_SLAVE_CDSP_MEM_NOC);
+DEFINE_QNODE(qnm_aggre1_noc, SM8350_MASTER_A1NOC_SNOC, 1, 16, SM8350_SLAVE_SNOC_GEM_NOC_SF);
+DEFINE_QNODE(qnm_aggre2_noc, SM8350_MASTER_A2NOC_SNOC, 1, 16, SM8350_SLAVE_SNOC_GEM_NOC_SF);
+DEFINE_QNODE(qnm_snoc_cfg, SM8350_MASTER_SNOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qxm_pimem, SM8350_MASTER_PIMEM, 1, 8, SM8350_SLAVE_SNOC_GEM_NOC_GC);
+DEFINE_QNODE(xm_gic, SM8350_MASTER_GIC, 1, 8, SM8350_SLAVE_SNOC_GEM_NOC_GC);
+DEFINE_QNODE(qnm_mnoc_hf_disp, SM8350_MASTER_MNOC_HF_MEM_NOC_DISP, 2, 32, SM8350_SLAVE_LLCC_DISP);
+DEFINE_QNODE(qnm_mnoc_sf_disp, SM8350_MASTER_MNOC_SF_MEM_NOC_DISP, 2, 32, SM8350_SLAVE_LLCC_DISP);
+DEFINE_QNODE(llcc_mc_disp, SM8350_MASTER_LLCC_DISP, 4, 4, SM8350_SLAVE_EBI1_DISP);
+DEFINE_QNODE(qxm_mdp0_disp, SM8350_MASTER_MDP0_DISP, 1, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP);
+DEFINE_QNODE(qxm_mdp1_disp, SM8350_MASTER_MDP1_DISP, 1, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP);
+DEFINE_QNODE(qxm_rot_disp, SM8350_MASTER_ROTATOR_DISP, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP);
+/* Slave node definitions.  Slaves that forward onto another bus list that
+ * bus's master id as their link; leaf config slaves have no links. */
+DEFINE_QNODE(qns_a1noc_snoc, SM8350_SLAVE_A1NOC_SNOC, 1, 16, SM8350_MASTER_A1NOC_SNOC);
+DEFINE_QNODE(srvc_aggre1_noc, SM8350_SLAVE_SERVICE_A1NOC, 1, 4);
+DEFINE_QNODE(qns_a2noc_snoc, SM8350_SLAVE_A2NOC_SNOC, 1, 16, SM8350_MASTER_A2NOC_SNOC);
+DEFINE_QNODE(qns_pcie_mem_noc, SM8350_SLAVE_ANOC_PCIE_GEM_NOC, 1, 16, SM8350_MASTER_ANOC_PCIE_GEM_NOC);
+DEFINE_QNODE(srvc_aggre2_noc, SM8350_SLAVE_SERVICE_A2NOC, 1, 4);
+DEFINE_QNODE(qhs_ahb2phy0, SM8350_SLAVE_AHB2PHY_SOUTH, 1, 4);
+DEFINE_QNODE(qhs_ahb2phy1, SM8350_SLAVE_AHB2PHY_NORTH, 1, 4);
+DEFINE_QNODE(qhs_aoss, SM8350_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_apss, SM8350_SLAVE_APPSS, 1, 8);
+DEFINE_QNODE(qhs_camera_cfg, SM8350_SLAVE_CAMERA_CFG, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SM8350_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_compute_cfg, SM8350_SLAVE_CDSP_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_cx, SM8350_SLAVE_RBCPR_CX_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_mmcx, SM8350_SLAVE_RBCPR_MMCX_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_mx, SM8350_SLAVE_RBCPR_MX_CFG, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SM8350_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_cx_rdpm, SM8350_SLAVE_CX_RDPM, 1, 4);
+DEFINE_QNODE(qhs_dcc_cfg, SM8350_SLAVE_DCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_display_cfg, SM8350_SLAVE_DISPLAY_CFG, 1, 4);
+DEFINE_QNODE(qhs_gpuss_cfg, SM8350_SLAVE_GFX3D_CFG, 1, 8);
+DEFINE_QNODE(qhs_hwkm, SM8350_SLAVE_HWKM, 1, 4);
+DEFINE_QNODE(qhs_imem_cfg, SM8350_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SM8350_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipc_router, SM8350_SLAVE_IPC_ROUTER_CFG, 1, 4);
+DEFINE_QNODE(qhs_lpass_cfg, SM8350_SLAVE_LPASS, 1, 4, SM8350_MASTER_CNOC_LPASS_AG_NOC);
+DEFINE_QNODE(qhs_mss_cfg, SM8350_SLAVE_CNOC_MSS, 1, 4);
+DEFINE_QNODE(qhs_mx_rdpm, SM8350_SLAVE_MX_RDPM, 1, 4);
+DEFINE_QNODE(qhs_pcie0_cfg, SM8350_SLAVE_PCIE_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_pcie1_cfg, SM8350_SLAVE_PCIE_1_CFG, 1, 4);
+DEFINE_QNODE(qhs_pdm, SM8350_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_pimem_cfg, SM8350_SLAVE_PIMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_pka_wrapper_cfg, SM8350_SLAVE_PKA_WRAPPER_CFG, 1, 4);
+DEFINE_QNODE(qhs_pmu_wrapper_cfg, SM8350_SLAVE_PMU_WRAPPER_CFG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SM8350_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qspi, SM8350_SLAVE_QSPI_0, 1, 4);
+DEFINE_QNODE(qhs_qup0, SM8350_SLAVE_QUP_0, 1, 4);
+DEFINE_QNODE(qhs_qup1, SM8350_SLAVE_QUP_1, 1, 4);
+DEFINE_QNODE(qhs_qup2, SM8350_SLAVE_QUP_2, 1, 4);
+DEFINE_QNODE(qhs_sdc2, SM8350_SLAVE_SDCC_2, 1, 4);
+DEFINE_QNODE(qhs_sdc4, SM8350_SLAVE_SDCC_4, 1, 4);
+DEFINE_QNODE(qhs_security, SM8350_SLAVE_SECURITY, 1, 4);
+DEFINE_QNODE(qhs_spss_cfg, SM8350_SLAVE_SPSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_tcsr, SM8350_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm, SM8350_SLAVE_TLMM, 1, 4);
+DEFINE_QNODE(qhs_ufs_card_cfg, SM8350_SLAVE_UFS_CARD_CFG, 1, 4);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SM8350_SLAVE_UFS_MEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_usb3_0, SM8350_SLAVE_USB3_0, 1, 4);
+DEFINE_QNODE(qhs_usb3_1, SM8350_SLAVE_USB3_1, 1, 4);
+DEFINE_QNODE(qhs_venus_cfg, SM8350_SLAVE_VENUS_CFG, 1, 4);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SM8350_SLAVE_VSENSE_CTRL_CFG, 1, 4);
+DEFINE_QNODE(qns_a1_noc_cfg, SM8350_SLAVE_A1NOC_CFG, 1, 4);
+DEFINE_QNODE(qns_a2_noc_cfg, SM8350_SLAVE_A2NOC_CFG, 1, 4);
+DEFINE_QNODE(qns_ddrss_cfg, SM8350_SLAVE_DDRSS_CFG, 1, 4);
+DEFINE_QNODE(qns_mnoc_cfg, SM8350_SLAVE_CNOC_MNOC_CFG, 1, 4);
+DEFINE_QNODE(qns_snoc_cfg, SM8350_SLAVE_SNOC_CFG, 1, 4);
+DEFINE_QNODE(qxs_boot_imem, SM8350_SLAVE_BOOT_IMEM, 1, 8);
+DEFINE_QNODE(qxs_imem, SM8350_SLAVE_IMEM, 1, 8);
+DEFINE_QNODE(qxs_pimem, SM8350_SLAVE_PIMEM, 1, 8);
+DEFINE_QNODE(srvc_cnoc, SM8350_SLAVE_SERVICE_CNOC, 1, 4);
+DEFINE_QNODE(xs_pcie_0, SM8350_SLAVE_PCIE_0, 1, 8);
+DEFINE_QNODE(xs_pcie_1, SM8350_SLAVE_PCIE_1, 1, 8);
+DEFINE_QNODE(xs_qdss_stm, SM8350_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SM8350_SLAVE_TCU, 1, 8);
+DEFINE_QNODE(qhs_llcc, SM8350_SLAVE_LLCC_CFG, 1, 4);
+DEFINE_QNODE(qns_gemnoc, SM8350_SLAVE_GEM_NOC_CFG, 1, 4);
+DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SM8350_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qhs_modem_ms_mpu_cfg, SM8350_SLAVE_MCDMA_MS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qns_gem_noc_cnoc, SM8350_SLAVE_GEM_NOC_CNOC, 1, 16, SM8350_MASTER_GEM_NOC_CNOC);
+DEFINE_QNODE(qns_llcc, SM8350_SLAVE_LLCC, 4, 16, SM8350_MASTER_LLCC);
+DEFINE_QNODE(qns_pcie, SM8350_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8);
+DEFINE_QNODE(srvc_even_gemnoc, SM8350_SLAVE_SERVICE_GEM_NOC_1, 1, 4);
+DEFINE_QNODE(srvc_odd_gemnoc, SM8350_SLAVE_SERVICE_GEM_NOC_2, 1, 4);
+DEFINE_QNODE(srvc_sys_gemnoc, SM8350_SLAVE_SERVICE_GEM_NOC, 1, 4);
+DEFINE_QNODE(qhs_lpass_core, SM8350_SLAVE_LPASS_CORE_CFG, 1, 4);
+DEFINE_QNODE(qhs_lpass_lpi, SM8350_SLAVE_LPASS_LPI_CFG, 1, 4);
+DEFINE_QNODE(qhs_lpass_mpu, SM8350_SLAVE_LPASS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qhs_lpass_top, SM8350_SLAVE_LPASS_TOP_CFG, 1, 4);
+DEFINE_QNODE(srvc_niu_aml_noc, SM8350_SLAVE_SERVICES_LPASS_AML_NOC, 1, 4);
+DEFINE_QNODE(srvc_niu_lpass_agnoc, SM8350_SLAVE_SERVICE_LPASS_AG_NOC, 1, 4);
+DEFINE_QNODE(ebi, SM8350_SLAVE_EBI1, 4, 4);
+DEFINE_QNODE(qns_mem_noc_hf, SM8350_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SM8350_MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_sf, SM8350_SLAVE_MNOC_SF_MEM_NOC, 2, 32, SM8350_MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SM8350_SLAVE_SERVICE_MNOC, 1, 4);
+DEFINE_QNODE(qns_nsp_gemnoc, SM8350_SLAVE_CDSP_MEM_NOC, 2, 32, SM8350_MASTER_COMPUTE_NOC);
+DEFINE_QNODE(service_nsp_noc, SM8350_SLAVE_SERVICE_NSP_NOC, 1, 4);
+DEFINE_QNODE(qns_gemnoc_gc, SM8350_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SM8350_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_gemnoc_sf, SM8350_SLAVE_SNOC_GEM_NOC_SF, 1, 16, SM8350_MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(srvc_snoc, SM8350_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(qns_llcc_disp, SM8350_SLAVE_LLCC_DISP, 4, 16, SM8350_MASTER_LLCC_DISP);
+DEFINE_QNODE(ebi_disp, SM8350_SLAVE_EBI1_DISP, 4, 4);
+DEFINE_QNODE(qns_mem_noc_hf_disp, SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP, 2, 32, SM8350_MASTER_MNOC_HF_MEM_NOC_DISP);
+DEFINE_QNODE(qns_mem_noc_sf_disp, SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP, 2, 32, SM8350_MASTER_MNOC_SF_MEM_NOC_DISP);
+
+/* Bus Clock Manager (BCM) definitions: DEFINE_QBCM(var, "NAME", keepalive,
+ * member nodes...).  keepalive == true presumably keeps a minimum vote
+ * active — confirm against DEFINE_QBCM in icc-rpmh.h. */
+DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie);
+/* CN1 covers nearly every config-NoC slave; *_disp variants reuse the same
+ * hardware BCM names for the display path. */
+DEFINE_QBCM(bcm_cn1, "CN1", false, &xm_qdss_dap, &qhs_ahb2phy0, &qhs_ahb2phy1, &qhs_aoss, &qhs_apss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_cfg, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_cx_rdpm, &qhs_dcc_cfg, &qhs_display_cfg, &qhs_gpuss_cfg, &qhs_hwkm, &qhs_imem_cfg, &qhs_ipa, &qhs_ipc_router, &qhs_mss_cfg, &qhs_mx_rdpm, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_pimem_cfg, &qhs_pka_wrapper_cfg, &qhs_pmu_wrapper_cfg, &qhs_qdss_cfg, &qhs_qup0, &qhs_qup1, &qhs_qup2, &qhs_security, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_a1_noc_cfg, &qns_a2_noc_cfg, &qns_ddrss_cfg, &qns_mnoc_cfg, &qns_snoc_cfg, &srvc_cnoc);
+DEFINE_QBCM(bcm_cn2, "CN2", false, &qhs_lpass_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2, &qhs_sdc4);
+DEFINE_QBCM(bcm_co0, "CO0", false, &qns_nsp_gemnoc);
+DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_nsp);
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_mm1, "MM1", false, &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1);
+DEFINE_QBCM(bcm_mm4, "MM4", false, &qns_mem_noc_sf);
+DEFINE_QBCM(bcm_mm5, "MM5", false, &qnm_camnoc_icp, &qnm_camnoc_sf, &qnm_video0, &qnm_video1, &qnm_video_cvp, &qxm_rot);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_sh2, "SH2", false, &alm_gpu_tcu, &alm_sys_tcu);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+DEFINE_QBCM(bcm_sh4, "SH4", false, &chm_apps);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn5, "SN5", false, &xm_pcie3_0);
+DEFINE_QBCM(bcm_sn6, "SN6", false, &xm_pcie3_1);
+DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
+DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_aggre2_noc);
+DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
+DEFINE_QBCM(bcm_acv_disp, "ACV", false, &ebi_disp);
+DEFINE_QBCM(bcm_mc0_disp, "MC0", false, &ebi_disp);
+DEFINE_QBCM(bcm_mm0_disp, "MM0", false, &qns_mem_noc_hf_disp);
+DEFINE_QBCM(bcm_mm1_disp, "MM1", false, &qxm_mdp0_disp, &qxm_mdp1_disp);
+DEFINE_QBCM(bcm_mm4_disp, "MM4", false, &qns_mem_noc_sf_disp);
+DEFINE_QBCM(bcm_mm5_disp, "MM5", false, &qxm_rot_disp);
+DEFINE_QBCM(bcm_sh0_disp, "SH0", false, &qns_llcc_disp);
+
+/* Aggre1 NoC: no dedicated BCMs; indices come from qcom,sm8350.h bindings. */
+static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *aggre1_noc_nodes[] = {
+       [MASTER_QSPI_0] = &qhm_qspi,
+       [MASTER_QUP_1] = &qhm_qup1,
+       [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
+       [MASTER_SDCC_4] = &xm_sdc4,
+       [MASTER_UFS_MEM] = &xm_ufs_mem,
+       [MASTER_USB3_0] = &xm_usb3_0,
+       [MASTER_USB3_1] = &xm_usb3_1,
+       [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+       [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static struct qcom_icc_desc sm8350_aggre1_noc = {
+       .nodes = aggre1_noc_nodes,
+       .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+       .bcms = aggre1_noc_bcms,
+       .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+/* Aggre2 NoC: crypto and PCIe traffic, with their BCM votes. */
+static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+       &bcm_ce0,
+       &bcm_sn5,
+       &bcm_sn6,
+       &bcm_sn14,
+};
+
+static struct qcom_icc_node *aggre2_noc_nodes[] = {
+       [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+       [MASTER_QUP_0] = &qhm_qup0,
+       [MASTER_QUP_2] = &qhm_qup2,
+       [MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
+       [MASTER_CRYPTO] = &qxm_crypto,
+       [MASTER_IPA] = &qxm_ipa,
+       [MASTER_PCIE_0] = &xm_pcie3_0,
+       [MASTER_PCIE_1] = &xm_pcie3_1,
+       [MASTER_QDSS_ETR] = &xm_qdss_etr,
+       [MASTER_SDCC_2] = &xm_sdc2,
+       [MASTER_UFS_CARD] = &xm_ufs_card,
+       [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+       [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+       [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static struct qcom_icc_desc sm8350_aggre2_noc = {
+       .nodes = aggre2_noc_nodes,
+       .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+       .bcms = aggre2_noc_bcms,
+       .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+/* Config NoC: all register-configuration slaves reachable from the AP. */
+static struct qcom_icc_bcm *config_noc_bcms[] = {
+       &bcm_cn0,
+       &bcm_cn1,
+       &bcm_cn2,
+       &bcm_sn3,
+       &bcm_sn4,
+};
+
+static struct qcom_icc_node *config_noc_nodes[] = {
+       [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+       [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+       [MASTER_QDSS_DAP] = &xm_qdss_dap,
+       [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+       [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+       [SLAVE_AOSS] = &qhs_aoss,
+       [SLAVE_APPSS] = &qhs_apss,
+       [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+       [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+       [SLAVE_CDSP_CFG] = &qhs_compute_cfg,
+       [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+       [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
+       [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+       [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+       [SLAVE_CX_RDPM] = &qhs_cx_rdpm,
+       [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+       [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+       [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+       [SLAVE_HWKM] = &qhs_hwkm,
+       [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+       [SLAVE_IPA_CFG] = &qhs_ipa,
+       [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+       [SLAVE_LPASS] = &qhs_lpass_cfg,
+       [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+       [SLAVE_MX_RDPM] = &qhs_mx_rdpm,
+       [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+       [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+       [SLAVE_PDM] = &qhs_pdm,
+       [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+       [SLAVE_PKA_WRAPPER_CFG] = &qhs_pka_wrapper_cfg,
+       [SLAVE_PMU_WRAPPER_CFG] = &qhs_pmu_wrapper_cfg,
+       [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+       [SLAVE_QSPI_0] = &qhs_qspi,
+       [SLAVE_QUP_0] = &qhs_qup0,
+       [SLAVE_QUP_1] = &qhs_qup1,
+       [SLAVE_QUP_2] = &qhs_qup2,
+       [SLAVE_SDCC_2] = &qhs_sdc2,
+       [SLAVE_SDCC_4] = &qhs_sdc4,
+       [SLAVE_SECURITY] = &qhs_security,
+       [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+       [SLAVE_TCSR] = &qhs_tcsr,
+       [SLAVE_TLMM] = &qhs_tlmm,
+       [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
+       [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+       [SLAVE_USB3_0] = &qhs_usb3_0,
+       [SLAVE_USB3_1] = &qhs_usb3_1,
+       [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+       [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+       [SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg,
+       [SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg,
+       [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
+       [SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg,
+       [SLAVE_SNOC_CFG] = &qns_snoc_cfg,
+       [SLAVE_BOOT_IMEM] = &qxs_boot_imem,
+       [SLAVE_IMEM] = &qxs_imem,
+       [SLAVE_PIMEM] = &qxs_pimem,
+       [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+       [SLAVE_PCIE_0] = &xs_pcie_0,
+       [SLAVE_PCIE_1] = &xs_pcie_1,
+       [SLAVE_QDSS_STM] = &xs_qdss_stm,
+       [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_desc sm8350_config_noc = {
+       .nodes = config_noc_nodes,
+       .num_nodes = ARRAY_SIZE(config_noc_nodes),
+       .bcms = config_noc_bcms,
+       .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+/* DC NoC: path from the config NoC to LLCC/GEM NoC configuration. */
+static struct qcom_icc_bcm *dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *dc_noc_nodes[] = {
+       [MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
+       [SLAVE_LLCC_CFG] = &qhs_llcc,
+       [SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
+};
+
+static struct qcom_icc_desc sm8350_dc_noc = {
+       .nodes = dc_noc_nodes,
+       .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+       .bcms = dc_noc_bcms,
+       .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+/* GEM NoC: memory-side interconnect feeding LLCC, incl. display path. */
+static struct qcom_icc_bcm *gem_noc_bcms[] = {
+       &bcm_sh0,
+       &bcm_sh2,
+       &bcm_sh3,
+       &bcm_sh4,
+       &bcm_sh0_disp,
+};
+
+static struct qcom_icc_node *gem_noc_nodes[] = {
+       [MASTER_GPU_TCU] = &alm_gpu_tcu,
+       [MASTER_SYS_TCU] = &alm_sys_tcu,
+       [MASTER_APPSS_PROC] = &chm_apps,
+       [MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
+       [MASTER_GEM_NOC_CFG] = &qnm_gemnoc_cfg,
+       [MASTER_GFX3D] = &qnm_gpu,
+       [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+       [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+       [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+       [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+       [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+       [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+       [SLAVE_MCDMA_MS_MPU_CFG] = &qhs_modem_ms_mpu_cfg,
+       [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+       [SLAVE_LLCC] = &qns_llcc,
+       [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+       [SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
+       [SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
+       [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
+       [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
+       [MASTER_MNOC_SF_MEM_NOC_DISP] = &qnm_mnoc_sf_disp,
+       [SLAVE_LLCC_DISP] = &qns_llcc_disp,
+};
+
+static struct qcom_icc_desc sm8350_gem_noc = {
+       .nodes = gem_noc_nodes,
+       .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+       .bcms = gem_noc_bcms,
+       .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+/* LPASS AG NoC: audio subsystem configuration path, no BCMs. */
+static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+       [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
+       [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
+       [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
+       [SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
+       [SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
+       [SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
+       [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
+};
+
+static struct qcom_icc_desc sm8350_lpass_ag_noc = {
+       .nodes = lpass_ag_noc_nodes,
+       .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+       .bcms = lpass_ag_noc_bcms,
+       .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
+};
+
+static struct qcom_icc_bcm *mc_virt_bcms[] = {
+       &bcm_acv,
+       &bcm_mc0,
+       &bcm_acv_disp,
+       &bcm_mc0_disp,
+};
+
+static struct qcom_icc_node *mc_virt_nodes[] = {
+       [MASTER_LLCC] = &llcc_mc,
+       [SLAVE_EBI1] = &ebi,
+       [MASTER_LLCC_DISP] = &llcc_mc_disp,
+       [SLAVE_EBI1_DISP] = &ebi_disp,
+};
+
+static struct qcom_icc_desc sm8350_mc_virt = {
+       .nodes = mc_virt_nodes,
+       .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+       .bcms = mc_virt_bcms,
+       .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+       &bcm_mm0,
+       &bcm_mm1,
+       &bcm_mm4,
+       &bcm_mm5,
+       &bcm_mm0_disp,
+       &bcm_mm1_disp,
+       &bcm_mm4_disp,
+       &bcm_mm5_disp,
+};
+
+static struct qcom_icc_node *mmss_noc_nodes[] = {
+       [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+       [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+       [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+       [MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
+       [MASTER_VIDEO_P0] = &qnm_video0,
+       [MASTER_VIDEO_P1] = &qnm_video1,
+       [MASTER_VIDEO_PROC] = &qnm_video_cvp,
+       [MASTER_MDP0] = &qxm_mdp0,
+       [MASTER_MDP1] = &qxm_mdp1,
+       [MASTER_ROTATOR] = &qxm_rot,
+       [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+       [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+       [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+       [MASTER_MDP0_DISP] = &qxm_mdp0_disp,
+       [MASTER_MDP1_DISP] = &qxm_mdp1_disp,
+       [MASTER_ROTATOR_DISP] = &qxm_rot_disp,
+       [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
+       [SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
+};
+
+static struct qcom_icc_desc sm8350_mmss_noc = {
+       .nodes = mmss_noc_nodes,
+       .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+       .bcms = mmss_noc_bcms,
+       .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+       &bcm_co0,
+       &bcm_co3,
+};
+
+static struct qcom_icc_node *nsp_noc_nodes[] = {
+       [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
+       [MASTER_CDSP_PROC] = &qxm_nsp,
+       [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+       [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
+};
+
+static struct qcom_icc_desc sm8350_compute_noc = {
+       .nodes = nsp_noc_nodes,
+       .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+       .bcms = nsp_noc_bcms,
+       .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
+       &bcm_sn0,
+       &bcm_sn2,
+       &bcm_sn7,
+       &bcm_sn8,
+};
+
+static struct qcom_icc_node *system_noc_nodes[] = {
+       [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+       [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+       [MASTER_SNOC_CFG] = &qnm_snoc_cfg,
+       [MASTER_PIMEM] = &qxm_pimem,
+       [MASTER_GIC] = &xm_gic,
+       [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+       [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+       [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+};
+
+static struct qcom_icc_desc sm8350_system_noc = {
+       .nodes = system_noc_nodes,
+       .num_nodes = ARRAY_SIZE(system_noc_nodes),
+       .bcms = system_noc_bcms,
+       .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+       const struct qcom_icc_desc *desc;
+       struct icc_onecell_data *data;
+       struct icc_provider *provider;
+       struct qcom_icc_node **qnodes;
+       struct qcom_icc_provider *qp;
+       struct icc_node *node;
+       size_t num_nodes, i;
+       int ret;
+
+       desc = of_device_get_match_data(&pdev->dev);
+       if (!desc)
+               return -EINVAL;
+
+       qnodes = desc->nodes;
+       num_nodes = desc->num_nodes;
+
+       qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+       if (!qp)
+               return -ENOMEM;
+
+       data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       provider = &qp->provider;
+       provider->dev = &pdev->dev;
+       provider->set = qcom_icc_set;
+       provider->pre_aggregate = qcom_icc_pre_aggregate;
+       provider->aggregate = qcom_icc_aggregate;
+       provider->xlate = of_icc_xlate_onecell;
+       INIT_LIST_HEAD(&provider->nodes);
+       provider->data = data;
+
+       qp->dev = &pdev->dev;
+       qp->bcms = desc->bcms;
+       qp->num_bcms = desc->num_bcms;
+
+       qp->voter = of_bcm_voter_get(qp->dev, NULL);
+       if (IS_ERR(qp->voter))
+               return PTR_ERR(qp->voter);
+
+       ret = icc_provider_add(provider);
+       if (ret) {
+               dev_err(&pdev->dev, "error adding interconnect provider\n");
+               return ret;
+       }
+
+       for (i = 0; i < qp->num_bcms; i++)
+               qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+       for (i = 0; i < num_nodes; i++) {
+               size_t j;
+
+               if (!qnodes[i])
+                       continue;
+
+               node = icc_node_create(qnodes[i]->id);
+               if (IS_ERR(node)) {
+                       ret = PTR_ERR(node);
+                       goto err;
+               }
+
+               node->name = qnodes[i]->name;
+               node->data = qnodes[i];
+               icc_node_add(node, provider);
+
+               for (j = 0; j < qnodes[i]->num_links; j++)
+                       icc_link_create(node, qnodes[i]->links[j]);
+
+               data->nodes[i] = node;
+       }
+       data->num_nodes = num_nodes;
+
+       platform_set_drvdata(pdev, qp);
+
+       return ret;
+
+err:
+       icc_nodes_remove(provider);
+       icc_provider_del(provider);
+       return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+       struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+       icc_nodes_remove(&qp->provider);
+       return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+       { .compatible = "qcom,sm8350-aggre1-noc", .data = &sm8350_aggre1_noc},
+       { .compatible = "qcom,sm8350-aggre2-noc", .data = &sm8350_aggre2_noc},
+       { .compatible = "qcom,sm8350-config-noc", .data = &sm8350_config_noc},
+       { .compatible = "qcom,sm8350-dc-noc", .data = &sm8350_dc_noc},
+       { .compatible = "qcom,sm8350-gem-noc", .data = &sm8350_gem_noc},
+       { .compatible = "qcom,sm8350-lpass-ag-noc", .data = &sm8350_lpass_ag_noc},
+       { .compatible = "qcom,sm8350-mc-virt", .data = &sm8350_mc_virt},
+       { .compatible = "qcom,sm8350-mmss-noc", .data = &sm8350_mmss_noc},
+       { .compatible = "qcom,sm8350-compute-noc", .data = &sm8350_compute_noc},
+       { .compatible = "qcom,sm8350-system-noc", .data = &sm8350_system_noc},
+       { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+       .probe = qnoc_probe,
+       .remove = qnoc_remove,
+       .driver = {
+               .name = "qnoc-sm8350",
+               .of_match_table = qnoc_of_match,
+               .sync_state = icc_sync_state,
+       },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("SM8350 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sm8350.h b/drivers/interconnect/qcom/sm8350.h
new file mode 100644 (file)
index 0000000..328d152
--- /dev/null
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SM8350 interconnect IDs
+ *
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8350_H
+#define __DRIVERS_INTERCONNECT_QCOM_SM8350_H
+
+#define SM8350_MASTER_GPU_TCU                          0
+#define SM8350_MASTER_SYS_TCU                          1
+#define SM8350_MASTER_APPSS_PROC                       2
+#define SM8350_MASTER_LLCC                             3
+#define SM8350_MASTER_CNOC_LPASS_AG_NOC                        4
+#define SM8350_MASTER_CDSP_NOC_CFG                     5
+#define SM8350_MASTER_QDSS_BAM                         6
+#define SM8350_MASTER_QSPI_0                           7
+#define SM8350_MASTER_QUP_0                            8
+#define SM8350_MASTER_QUP_1                            9
+#define SM8350_MASTER_QUP_2                            10
+#define SM8350_MASTER_A1NOC_CFG                                11
+#define SM8350_MASTER_A2NOC_CFG                                12
+#define SM8350_MASTER_A1NOC_SNOC                       13
+#define SM8350_MASTER_A2NOC_SNOC                       14
+#define SM8350_MASTER_CAMNOC_HF                                15
+#define SM8350_MASTER_CAMNOC_ICP                       16
+#define SM8350_MASTER_CAMNOC_SF                                17
+#define SM8350_MASTER_COMPUTE_NOC                      18
+#define SM8350_MASTER_CNOC_DC_NOC                      19
+#define SM8350_MASTER_GEM_NOC_CFG                      20
+#define SM8350_MASTER_GEM_NOC_CNOC                     21
+#define SM8350_MASTER_GEM_NOC_PCIE_SNOC                        22
+#define SM8350_MASTER_GFX3D                            23
+#define SM8350_MASTER_CNOC_MNOC_CFG                    24
+#define SM8350_MASTER_MNOC_HF_MEM_NOC                  25
+#define SM8350_MASTER_MNOC_SF_MEM_NOC                  26
+#define SM8350_MASTER_ANOC_PCIE_GEM_NOC                        27
+#define SM8350_MASTER_SNOC_CFG                         28
+#define SM8350_MASTER_SNOC_GC_MEM_NOC                  29
+#define SM8350_MASTER_SNOC_SF_MEM_NOC                  30
+#define SM8350_MASTER_VIDEO_P0                         31
+#define SM8350_MASTER_VIDEO_P1                         32
+#define SM8350_MASTER_VIDEO_PROC                       33
+#define SM8350_MASTER_QUP_CORE_0                       34
+#define SM8350_MASTER_QUP_CORE_1                       35
+#define SM8350_MASTER_QUP_CORE_2                       36
+#define SM8350_MASTER_CRYPTO                           37
+#define SM8350_MASTER_IPA                              38
+#define SM8350_MASTER_MDP0                             39
+#define SM8350_MASTER_MDP1                             40
+#define SM8350_MASTER_CDSP_PROC                                41
+#define SM8350_MASTER_PIMEM                            42
+#define SM8350_MASTER_ROTATOR                          43
+#define SM8350_MASTER_GIC                              44
+#define SM8350_MASTER_PCIE_0                           45
+#define SM8350_MASTER_PCIE_1                           46
+#define SM8350_MASTER_QDSS_DAP                         47
+#define SM8350_MASTER_QDSS_ETR                         48
+#define SM8350_MASTER_SDCC_2                           49
+#define SM8350_MASTER_SDCC_4                           50
+#define SM8350_MASTER_UFS_CARD                         51
+#define SM8350_MASTER_UFS_MEM                          52
+#define SM8350_MASTER_USB3_0                           53
+#define SM8350_MASTER_USB3_1                           54
+#define SM8350_SLAVE_EBI1                              55
+#define SM8350_SLAVE_AHB2PHY_SOUTH                     56
+#define SM8350_SLAVE_AHB2PHY_NORTH                     57
+#define SM8350_SLAVE_AOSS                              58
+#define SM8350_SLAVE_APPSS                             59
+#define SM8350_SLAVE_CAMERA_CFG                                60
+#define SM8350_SLAVE_CLK_CTL                           61
+#define SM8350_SLAVE_CDSP_CFG                          62
+#define SM8350_SLAVE_RBCPR_CX_CFG                      63
+#define SM8350_SLAVE_RBCPR_MMCX_CFG                    64
+#define SM8350_SLAVE_RBCPR_MX_CFG                      65
+#define SM8350_SLAVE_CRYPTO_0_CFG                      66
+#define SM8350_SLAVE_CX_RDPM                           67
+#define SM8350_SLAVE_DCC_CFG                           68
+#define SM8350_SLAVE_DISPLAY_CFG                       69
+#define SM8350_SLAVE_GFX3D_CFG                         70
+#define SM8350_SLAVE_HWKM                              71
+#define SM8350_SLAVE_IMEM_CFG                          72
+#define SM8350_SLAVE_IPA_CFG                           73
+#define SM8350_SLAVE_IPC_ROUTER_CFG                    74
+#define SM8350_SLAVE_LLCC_CFG                          75
+#define SM8350_SLAVE_LPASS                             76
+#define SM8350_SLAVE_LPASS_CORE_CFG                    77
+#define SM8350_SLAVE_LPASS_LPI_CFG                     78
+#define SM8350_SLAVE_LPASS_MPU_CFG                     79
+#define SM8350_SLAVE_LPASS_TOP_CFG                     80
+#define SM8350_SLAVE_MSS_PROC_MS_MPU_CFG               81
+#define SM8350_SLAVE_MCDMA_MS_MPU_CFG                  82
+#define SM8350_SLAVE_CNOC_MSS                          83
+#define SM8350_SLAVE_MX_RDPM                           84
+#define SM8350_SLAVE_PCIE_0_CFG                                85
+#define SM8350_SLAVE_PCIE_1_CFG                                86
+#define SM8350_SLAVE_PDM                               87
+#define SM8350_SLAVE_PIMEM_CFG                         88
+#define SM8350_SLAVE_PKA_WRAPPER_CFG                   89
+#define SM8350_SLAVE_PMU_WRAPPER_CFG                   90
+#define SM8350_SLAVE_QDSS_CFG                          91
+#define SM8350_SLAVE_QSPI_0                            92
+#define SM8350_SLAVE_QUP_0                             93
+#define SM8350_SLAVE_QUP_1                             94
+#define SM8350_SLAVE_QUP_2                             95
+#define SM8350_SLAVE_SDCC_2                            96
+#define SM8350_SLAVE_SDCC_4                            97
+#define SM8350_SLAVE_SECURITY                          98
+#define SM8350_SLAVE_SPSS_CFG                          99
+#define SM8350_SLAVE_TCSR                              100
+#define SM8350_SLAVE_TLMM                              101
+#define SM8350_SLAVE_UFS_CARD_CFG                      102
+#define SM8350_SLAVE_UFS_MEM_CFG                       103
+#define SM8350_SLAVE_USB3_0                            104
+#define SM8350_SLAVE_USB3_1                            105
+#define SM8350_SLAVE_VENUS_CFG                         106
+#define SM8350_SLAVE_VSENSE_CTRL_CFG                   107
+#define SM8350_SLAVE_A1NOC_CFG                         108
+#define SM8350_SLAVE_A1NOC_SNOC                                109
+#define SM8350_SLAVE_A2NOC_CFG                         110
+#define SM8350_SLAVE_A2NOC_SNOC                                111
+#define SM8350_SLAVE_DDRSS_CFG                         112
+#define SM8350_SLAVE_GEM_NOC_CNOC                      113
+#define SM8350_SLAVE_GEM_NOC_CFG                       114
+#define SM8350_SLAVE_SNOC_GEM_NOC_GC                   115
+#define SM8350_SLAVE_SNOC_GEM_NOC_SF                   116
+#define SM8350_SLAVE_LLCC                              117
+#define SM8350_SLAVE_MNOC_HF_MEM_NOC                   118
+#define SM8350_SLAVE_MNOC_SF_MEM_NOC                   119
+#define SM8350_SLAVE_CNOC_MNOC_CFG                     120
+#define SM8350_SLAVE_CDSP_MEM_NOC                      121
+#define SM8350_SLAVE_MEM_NOC_PCIE_SNOC                 122
+#define SM8350_SLAVE_ANOC_PCIE_GEM_NOC                 123
+#define SM8350_SLAVE_SNOC_CFG                          124
+#define SM8350_SLAVE_QUP_CORE_0                                125
+#define SM8350_SLAVE_QUP_CORE_1                                126
+#define SM8350_SLAVE_QUP_CORE_2                                127
+#define SM8350_SLAVE_BOOT_IMEM                         128
+#define SM8350_SLAVE_IMEM                              129
+#define SM8350_SLAVE_PIMEM                             130
+#define SM8350_SLAVE_SERVICE_NSP_NOC                   131
+#define SM8350_SLAVE_SERVICE_A1NOC                     132
+#define SM8350_SLAVE_SERVICE_A2NOC                     133
+#define SM8350_SLAVE_SERVICE_CNOC                      134
+#define SM8350_SLAVE_SERVICE_GEM_NOC_1                 135
+#define SM8350_SLAVE_SERVICE_MNOC                      136
+#define SM8350_SLAVE_SERVICES_LPASS_AML_NOC            137
+#define SM8350_SLAVE_SERVICE_LPASS_AG_NOC              138
+#define SM8350_SLAVE_SERVICE_GEM_NOC_2                 139
+#define SM8350_SLAVE_SERVICE_SNOC                      140
+#define SM8350_SLAVE_SERVICE_GEM_NOC                   141
+#define SM8350_SLAVE_PCIE_0                            142
+#define SM8350_SLAVE_PCIE_1                            143
+#define SM8350_SLAVE_QDSS_STM                          144
+#define SM8350_SLAVE_TCU                               145
+#define SM8350_MASTER_LLCC_DISP                                146
+#define SM8350_MASTER_MNOC_HF_MEM_NOC_DISP             147
+#define SM8350_MASTER_MNOC_SF_MEM_NOC_DISP             148
+#define SM8350_MASTER_MDP0_DISP                                149
+#define SM8350_MASTER_MDP1_DISP                                150
+#define SM8350_MASTER_ROTATOR_DISP                     151
+#define SM8350_SLAVE_EBI1_DISP                         152
+#define SM8350_SLAVE_LLCC_DISP                         153
+#define SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP              154
+#define SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP              155
+
+#endif
index 15536e321df55605694049e74b30c4cc454e23cf..c8f57e3e058dcd35eedad723296d1e2d701ad9c2 100644 (file)
@@ -279,8 +279,13 @@ config XTENSA_MX
        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config XILINX_INTC
-       bool
+       bool "Xilinx Interrupt Controller IP"
+       depends on MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP
        select IRQ_DOMAIN
+       help
+         Support for the Xilinx Interrupt Controller IP core.
+         This is used as a primary controller with MicroBlaze and can also
+         be used as a secondary chained controller on other platforms.
 
 config IRQ_CROSSBAR
        bool
@@ -577,4 +582,15 @@ config MST_IRQ
        help
          Support MStar Interrupt Controller.
 
+config WPCM450_AIC
+       bool "Nuvoton WPCM450 Advanced Interrupt Controller"
+       depends on ARCH_WPCM450
+       help
+         Support for the interrupt controller in the Nuvoton WPCM450 BMC SoC.
+
+config IRQ_IDT3243X
+       bool
+       select GENERIC_IRQ_CHIP
+       select IRQ_DOMAIN
+
 endmenu
index c59b95a0532c9af77c4bf5bf489074ec4ef35dc7..18573602a939b0861586f4129806f644a158bf34 100644 (file)
@@ -113,3 +113,5 @@ obj-$(CONFIG_LOONGSON_PCH_MSI)              += irq-loongson-pch-msi.o
 obj-$(CONFIG_MST_IRQ)                  += irq-mst-intc.o
 obj-$(CONFIG_SL28CPLD_INTC)            += irq-sl28cpld.o
 obj-$(CONFIG_MACH_REALTEK_RTL)         += irq-realtek-rtl.o
+obj-$(CONFIG_WPCM450_AIC)              += irq-wpcm450-aic.o
+obj-$(CONFIG_IRQ_IDT3243X)             += irq-idt3243x.o
index 6567ed782f82c95ae08c470b5f9c01ba78806c16..58717cd44f99f1545fab75a56bb4fecad43f1906 100644 (file)
@@ -71,7 +71,7 @@ static void vic_init_hw(struct aspeed_vic *vic)
        writel(0, vic->base + AVIC_INT_SELECT);
        writel(0, vic->base + AVIC_INT_SELECT + 4);
 
-       /* Some interrupts have a programable high/low level trigger
+       /* Some interrupts have a programmable high/low level trigger
         * (4 GPIO direct inputs), for now we assume this was configured
         * by firmware. We read which ones are edge now.
         */
@@ -203,7 +203,7 @@ static int __init avic_of_init(struct device_node *node,
        }
        vic->base = regs;
 
-       /* Initialize soures, all masked */
+       /* Initialize sources, all masked */
        vic_init_hw(vic);
 
        /* Ready to receive interrupts */
index c7c9e976acbb9f06e32cb8246a0bfda05073f122..ad59656ccc282871f8f31cc05cf3f83c803801e7 100644 (file)
@@ -309,7 +309,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
 
                if (data->can_wake) {
                        /* This IRQ chip can wake the system, set all
-                        * relevant child interupts in wake_enabled mask
+                        * relevant child interrupts in wake_enabled mask
                         */
                        gc->wake_enabled = 0xffffffff;
                        gc->wake_enabled &= ~gc->unused;
index 5a2ec43b7ddd49311be11d71fd8dc998e926934e..ab91afa867557bf9c13221091ef185d66f2b12c3 100644 (file)
@@ -176,7 +176,7 @@ gx_intc_init(struct device_node *node, struct device_node *parent)
        writel(0x0, reg_base + GX_INTC_NEN63_32);
 
        /*
-        * Initial mask reg with all unmasked, because we only use enalbe reg
+        * Initial mask reg with all unmasked, because we only use enable reg
         */
        writel(0x0, reg_base + GX_INTC_NMASK31_00);
        writel(0x0, reg_base + GX_INTC_NMASK63_32);
index fbec07d634ad28855d870d97ec51e51e100e3ffe..4116b48e60aff8710ccf7e4472c6a6e79a4f0724 100644 (file)
@@ -371,7 +371,7 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
         * the MSI data is the absolute value within the range from
         * spi_start to (spi_start + num_spis).
         *
-        * Broadom NS2 GICv2m implementation has an erratum where the MSI data
+        * Broadcom NS2 GICv2m implementation has an erratum where the MSI data
         * is 'spi_number - 32'
         *
         * Reading that register fails on the Graviton implementation
index ed46e6057e3350fbc31cb381270010b1d56ee746..c3485b230d70ea7e90d5dfbb7523fa3f33625b49 100644 (file)
@@ -1492,7 +1492,7 @@ static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
         *
         * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
         * value or to 1023, depending on the enable bit. But that
-        * would be issueing a mapping for an /existing/ DevID+EventID
+        * would be issuing a mapping for an /existing/ DevID+EventID
         * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
         * to the /same/ vPE, using this opportunity to adjust the
         * doorbell. Mouahahahaha. We loves it, Precious.
@@ -3122,7 +3122,7 @@ static void its_cpu_init_lpis(void)
 
                /*
                 * It's possible for CPU to receive VLPIs before it is
-                * sheduled as a vPE, especially for the first CPU, and the
+                * scheduled as a vPE, especially for the first CPU, and the
                 * VLPI with INTID larger than 2^(IDbits+1) will be considered
                 * as out of range and dropped by GIC.
                 * So we initialize IDbits to known value to avoid VLPI drop.
@@ -3616,7 +3616,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
 
        /*
         * If all interrupts have been freed, start mopping the
-        * floor. This is conditionned on the device not being shared.
+        * floor. This is conditioned on the device not being shared.
         */
        if (!its_dev->shared &&
            bitmap_empty(its_dev->event_map.lpi_map,
@@ -4194,7 +4194,7 @@ static int its_sgi_set_affinity(struct irq_data *d,
 {
        /*
         * There is no notion of affinity for virtual SGIs, at least
-        * not on the host (since they can only be targetting a vPE).
+        * not on the host (since they can only be targeting a vPE).
         * Tell the kernel we've done whatever it asked for.
         */
        irq_data_update_effective_affinity(d, mask_val);
@@ -4239,7 +4239,7 @@ static int its_sgi_get_irqchip_state(struct irq_data *d,
        /*
         * Locking galore! We can race against two different events:
         *
-        * - Concurent vPE affinity change: we must make sure it cannot
+        * - Concurrent vPE affinity change: we must make sure it cannot
         *   happen, or we'll talk to the wrong redistributor. This is
         *   identical to what happens with vLPIs.
         *
index 563a9b36629415db6365d4baceb770b528c75b41..e81e89a81cb5bbdd29b05939570890bd93f21b75 100644 (file)
@@ -303,7 +303,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
        reg = of_get_property(np, "mbi-alias", NULL);
        if (reg) {
                mbi_phys_base = of_translate_address(np, reg);
-               if (mbi_phys_base == OF_BAD_ADDR) {
+               if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) {
                        ret = -ENXIO;
                        goto err_free_mbi;
                }
index eb0ee356a62944313e3ef03a4226909e4da00079..37a23aa6de37c13f5dbb2ea1bbec185bcc49e530 100644 (file)
@@ -648,6 +648,10 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
 
        irqnr = gic_read_iar();
 
+       /* Check for special IDs first */
+       if ((irqnr >= 1020 && irqnr <= 1023))
+               return;
+
        if (gic_supports_nmi() &&
            unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
                gic_handle_nmi(irqnr, regs);
@@ -659,10 +663,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
                gic_arch_enable_irqs();
        }
 
-       /* Check for special IDs first */
-       if ((irqnr >= 1020 && irqnr <= 1023))
-               return;
-
        if (static_branch_likely(&supports_deactivate_key))
                gic_write_eoir(irqnr);
        else
@@ -1379,7 +1379,7 @@ static int gic_irq_domain_translate(struct irq_domain *d,
 
                /*
                 * Make it clear that broken DTs are... broken.
-                * Partitionned PPIs are an unfortunate exception.
+                * Partitioned PPIs are an unfortunate exception.
                 */
                WARN_ON(*type == IRQ_TYPE_NONE &&
                        fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
index 5d1dc9915272b1892459e5cd1c4de64c1b6b0c13..4ea71b28f9f5f21ec0c8979bd91f07bf215c75cf 100644 (file)
@@ -87,17 +87,40 @@ static struct irq_domain *gic_domain;
 static const struct irq_domain_ops *vpe_domain_ops;
 static const struct irq_domain_ops *sgi_domain_ops;
 
+#ifdef CONFIG_ARM64
+#include <asm/cpufeature.h>
+
+bool gic_cpuif_has_vsgi(void)
+{
+       unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+       fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT);
+
+       return fld >= 0x3;
+}
+#else
+bool gic_cpuif_has_vsgi(void)
+{
+       return false;
+}
+#endif
+
 static bool has_v4_1(void)
 {
        return !!sgi_domain_ops;
 }
 
+static bool has_v4_1_sgi(void)
+{
+       return has_v4_1() && gic_cpuif_has_vsgi();
+}
+
 static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
 {
        char *name;
        int sgi_base;
 
-       if (!has_v4_1())
+       if (!has_v4_1_sgi())
                return 0;
 
        name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
@@ -182,7 +205,7 @@ static void its_free_sgi_irqs(struct its_vm *vm)
 {
        int i;
 
-       if (!has_v4_1())
+       if (!has_v4_1_sgi())
                return;
 
        for (i = 0; i < vm->nr_vpes; i++) {
index a6ed877d9dd39e88f137209fe309259bc2dab755..058ebaebe2c40e5d0c402f8c7068a5b9cc966e11 100644 (file)
@@ -1,9 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Hisilicon HiP04 INTC
+ * HiSilicon HiP04 INTC
  *
  * Copyright (C) 2002-2014 ARM Limited.
- * Copyright (c) 2013-2014 Hisilicon Ltd.
+ * Copyright (c) 2013-2014 HiSilicon Ltd.
  * Copyright (c) 2013-2014 Linaro Ltd.
  *
  * Interrupt architecture for the HIP04 INTC:
diff --git a/drivers/irqchip/irq-idt3243x.c b/drivers/irqchip/irq-idt3243x.c
new file mode 100644 (file)
index 0000000..f099682
--- /dev/null
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for IDT/Renesas 79RC3243x Interrupt Controller.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define IDT_PIC_NR_IRQS                32
+
+#define IDT_PIC_IRQ_PEND               0x00
+#define IDT_PIC_IRQ_MASK               0x08
+
+struct idt_pic_data {
+       void __iomem *base;
+       struct irq_domain *irq_domain;
+       struct irq_chip_generic *gc;
+};
+
+static void idt_irq_dispatch(struct irq_desc *desc)
+{
+       struct idt_pic_data *idtpic = irq_desc_get_handler_data(desc);
+       struct irq_chip *host_chip = irq_desc_get_chip(desc);
+       u32 pending, hwirq, virq;
+
+       chained_irq_enter(host_chip, desc);
+
+       pending = irq_reg_readl(idtpic->gc, IDT_PIC_IRQ_PEND);
+       pending &= ~idtpic->gc->mask_cache;
+       while (pending) {
+               hwirq = __fls(pending);
+               virq = irq_linear_revmap(idtpic->irq_domain, hwirq);
+               if (virq)
+                       generic_handle_irq(virq);
+               pending &= ~(1 << hwirq);
+       }
+
+       chained_irq_exit(host_chip, desc);
+}
+
+static int idt_pic_init(struct device_node *of_node, struct device_node *parent)
+{
+       struct irq_domain *domain;
+       struct idt_pic_data *idtpic;
+       struct irq_chip_generic *gc;
+       struct irq_chip_type *ct;
+       unsigned int parent_irq;
+       int ret = 0;
+
+       idtpic = kzalloc(sizeof(*idtpic), GFP_KERNEL);
+       if (!idtpic) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       parent_irq = irq_of_parse_and_map(of_node, 0);
+       if (!parent_irq) {
+               pr_err("Failed to map parent IRQ!\n");
+               ret = -EINVAL;
+               goto out_free;
+       }
+
+       idtpic->base = of_iomap(of_node, 0);
+       if (!idtpic->base) {
+               pr_err("Failed to map base address!\n");
+               ret = -ENOMEM;
+               goto out_unmap_irq;
+       }
+
+       domain = irq_domain_add_linear(of_node, IDT_PIC_NR_IRQS,
+                                      &irq_generic_chip_ops, NULL);
+       if (!domain) {
+               pr_err("Failed to add irqdomain!\n");
+               ret = -ENOMEM;
+               goto out_iounmap;
+       }
+       idtpic->irq_domain = domain;
+
+       ret = irq_alloc_domain_generic_chips(domain, 32, 1, "IDTPIC",
+                                            handle_level_irq, 0,
+                                            IRQ_NOPROBE | IRQ_LEVEL, 0);
+       if (ret)
+               goto out_domain_remove;
+
+       gc = irq_get_domain_generic_chip(domain, 0);
+       gc->reg_base = idtpic->base;
+       gc->private = idtpic;
+
+       ct = gc->chip_types;
+       ct->regs.mask = IDT_PIC_IRQ_MASK;
+       ct->chip.irq_mask = irq_gc_mask_set_bit;
+       ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+       idtpic->gc = gc;
+
+       /* Mask interrupts. */
+       writel(0xffffffff, idtpic->base + IDT_PIC_IRQ_MASK);
+       gc->mask_cache = 0xffffffff;
+
+       irq_set_chained_handler_and_data(parent_irq,
+                                        idt_irq_dispatch, idtpic);
+
+       return 0;
+
+out_domain_remove:
+       irq_domain_remove(domain);
+out_iounmap:
+       iounmap(idtpic->base);
+out_unmap_irq:
+       irq_dispose_mapping(parent_irq);
+out_free:
+       kfree(idtpic);
+out_err:
+       pr_err("Failed to initialize! (errno = %d)\n", ret);
+       return ret;
+}
+
+IRQCHIP_DECLARE(idt_pic, "idt,32434-pic", idt_pic_init);
index 033bccb41455c46c32d5473fa46940c2770973e9..5f47d8ee4ae39e04964a461626b3138fadd11d18 100644 (file)
@@ -100,11 +100,11 @@ static int __init aic_irq_of_init(struct device_node *node,
        jcore_aic.irq_unmask = noop;
        jcore_aic.name = "AIC";
 
-       domain = irq_domain_add_linear(node, dom_sz, &jcore_aic_irqdomain_ops,
+       domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq,
+                                      &jcore_aic_irqdomain_ops,
                                       &jcore_aic);
        if (!domain)
                return -ENOMEM;
-       irq_create_strict_mappings(domain, min_irq, min_irq, dom_sz - min_irq);
 
        return 0;
 }
index 9bf6b9a5f73485cec4942147a8707bf1ec7d0fcf..f790ca6d78aa4c534723a145931e6ece45933523 100644 (file)
@@ -163,7 +163,7 @@ static void pch_pic_reset(struct pch_pic *priv)
        int i;
 
        for (i = 0; i < PIC_COUNT; i++) {
-               /* Write vectore ID */
+               /* Write vectored ID */
                writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i));
                /* Hardcode route to HT0 Lo */
                writeb(1, priv->base + PCH_INT_ROUTE(i));
index ff7627b577726e6e887df1de1ef88137a5f5c4d0..2cb45c6b85011479cc25433b2b4662eaf883e453 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2015 Hisilicon Limited, All Rights Reserved.
+ * Copyright (C) 2015 HiSilicon Limited, All Rights Reserved.
  * Author: Jun Ma <majun258@huawei.com>
  * Author: Yun Wu <wuyun.wu@huawei.com>
  */
@@ -390,4 +390,4 @@ module_platform_driver(mbigen_platform_driver);
 MODULE_AUTHOR("Jun Ma <majun258@huawei.com>");
 MODULE_AUTHOR("Yun Wu <wuyun.wu@huawei.com>");
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Hisilicon MBI Generator driver");
+MODULE_DESCRIPTION("HiSilicon MBI Generator driver");
index bc7aebcc96e9cdd1f31eecc435ca9454a40acacc..e50676ce2ec84b3b59dc6b5a1562c82fde87151c 100644 (file)
@@ -227,7 +227,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
 
        /*
         * Get the hwirq number assigned to this channel through
-        * a pointer the channel_irq table. The added benifit of this
+        * a pointer the channel_irq table. The added benefit of this
         * method is that we can also retrieve the channel index with
         * it, using the table base.
         */
index 143657b0cf281aa57b0876d1c167a536ff419d7e..f6133ae28155667f064cecc1b108168d2d63fd3a 100644 (file)
 #include <linux/of_irq.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
 
-#define INTC_MASK      0x0
-#define INTC_EOI       0x20
+#define MST_INTC_MAX_IRQS      64
+
+#define INTC_MASK              0x0
+#define INTC_REV_POLARITY      0x10
+#define INTC_EOI               0x20
+
+#ifdef CONFIG_PM_SLEEP
+static LIST_HEAD(mst_intc_list);
+#endif
 
 struct mst_intc_chip_data {
        raw_spinlock_t  lock;
        unsigned int    irq_start, nr_irqs;
        void __iomem    *base;
        bool            no_eoi;
+#ifdef CONFIG_PM_SLEEP
+       struct list_head entry;
+       u16 saved_polarity_conf[DIV_ROUND_UP(MST_INTC_MAX_IRQS, 16)];
+#endif
 };
 
 static void mst_set_irq(struct irq_data *d, u32 offset)
@@ -78,6 +90,24 @@ static void mst_intc_eoi_irq(struct irq_data *d)
        irq_chip_eoi_parent(d);
 }
 
+/*
+ * Set the trigger type of an interrupt source.
+ *
+ * Active-low level and falling-edge sources are inverted through the
+ * REV_POLARITY register so that the signal forwarded to the parent is
+ * always active high; the parent's type is therefore forced to
+ * IRQ_TYPE_LEVEL_HIGH regardless of the requested type.
+ * Returns -EINVAL for unsupported trigger types.
+ */
+static int mst_irq_chip_set_type(struct irq_data *data, unsigned int type)
+{
+       switch (type) {
+       case IRQ_TYPE_LEVEL_LOW:
+       case IRQ_TYPE_EDGE_FALLING:
+               /* Invert the polarity of this source */
+               mst_set_irq(data, INTC_REV_POLARITY);
+               break;
+       case IRQ_TYPE_LEVEL_HIGH:
+       case IRQ_TYPE_EDGE_RISING:
+               /* Pass the signal through unmodified */
+               mst_clear_irq(data, INTC_REV_POLARITY);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return irq_chip_set_type_parent(data, IRQ_TYPE_LEVEL_HIGH);
+}
+
 static struct irq_chip mst_intc_chip = {
        .name                   = "mst-intc",
        .irq_mask               = mst_intc_mask_irq,
@@ -87,13 +117,62 @@ static struct irq_chip mst_intc_chip = {
        .irq_set_irqchip_state  = irq_chip_set_parent_state,
        .irq_set_affinity       = irq_chip_set_affinity_parent,
        .irq_set_vcpu_affinity  = irq_chip_set_vcpu_affinity_parent,
-       .irq_set_type           = irq_chip_set_type_parent,
+       .irq_set_type           = mst_irq_chip_set_type,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .flags                  = IRQCHIP_SET_TYPE_MASKED |
                                  IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_MASK_ON_SUSPEND,
 };
 
+#ifdef CONFIG_PM_SLEEP
+/*
+ * Save the polarity configuration of one controller instance before
+ * suspend. The REV_POLARITY registers are 16 bits wide, spaced 4 bytes
+ * apart, with one bit per interrupt source.
+ */
+static void mst_intc_polarity_save(struct mst_intc_chip_data *cd)
+{
+       int i;
+       void __iomem *addr = cd->base + INTC_REV_POLARITY;
+
+       for (i = 0; i < DIV_ROUND_UP(cd->nr_irqs, 16); i++)
+               cd->saved_polarity_conf[i] = readw_relaxed(addr + i * 4);
+}
+
+/*
+ * Restore the polarity configuration saved by mst_intc_polarity_save()
+ * after resume, writing back one 16-bit register per 16 sources.
+ */
+static void mst_intc_polarity_restore(struct mst_intc_chip_data *cd)
+{
+       int i;
+       void __iomem *addr = cd->base + INTC_REV_POLARITY;
+
+       for (i = 0; i < DIV_ROUND_UP(cd->nr_irqs, 16); i++)
+               writew_relaxed(cd->saved_polarity_conf[i], addr + i * 4);
+}
+
+/* syscore resume: restore polarity settings on every registered instance. */
+static void mst_irq_resume(void)
+{
+       struct mst_intc_chip_data *cd;
+
+       list_for_each_entry(cd, &mst_intc_list, entry)
+               mst_intc_polarity_restore(cd);
+}
+
+/* syscore suspend: save polarity settings on every registered instance. */
+static int mst_irq_suspend(void)
+{
+       struct mst_intc_chip_data *cd;
+
+       list_for_each_entry(cd, &mst_intc_list, entry)
+               mst_intc_polarity_save(cd);
+       return 0;
+}
+
+static struct syscore_ops mst_irq_syscore_ops = {
+       .suspend        = mst_irq_suspend,
+       .resume         = mst_irq_resume,
+};
+
+/*
+ * Register the suspend/resume hooks once at late_initcall time, after all
+ * controller instances have been probed and added to mst_intc_list.
+ */
+static int __init mst_irq_pm_init(void)
+{
+       register_syscore_ops(&mst_irq_syscore_ops);
+       return 0;
+}
+late_initcall(mst_irq_pm_init);
+#endif
+
 static int mst_intc_domain_translate(struct irq_domain *d,
                                     struct irq_fwspec *fwspec,
                                     unsigned long *hwirq,
@@ -145,6 +224,15 @@ static int mst_intc_domain_alloc(struct irq_domain *domain, unsigned int virq,
        parent_fwspec = *fwspec;
        parent_fwspec.fwnode = domain->parent->fwnode;
        parent_fwspec.param[1] = cd->irq_start + hwirq;
+
+       /*
+        * mst-intc latches the interrupt request if it's edge triggered,
+        * so the output signal to parent GIC is always level sensitive.
+        * And if the irq signal is active low, configure it to active high
+        * to meet GIC SPI spec in mst_irq_chip_set_type via REV_POLARITY bit.
+        */
+       parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
+
        return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_fwspec);
 }
 
@@ -193,6 +281,10 @@ static int __init mst_intc_of_init(struct device_node *dn,
                return -ENOMEM;
        }
 
+#ifdef CONFIG_PM_SLEEP
+       INIT_LIST_HEAD(&cd->entry);
+       list_add_tail(&cd->entry, &mst_intc_list);
+#endif
        return 0;
 }
 
index 69ba8ce3c17856be68c9330323f59acb00d55f78..9bca0918078e88cb242ec0d8f873dc9e25a15101 100644 (file)
@@ -217,7 +217,7 @@ static void mtk_cirq_resume(void)
 {
        u32 value;
 
-       /* flush recored interrupts, will send signals to parent controller */
+       /* flush recorded interrupts, will send signals to parent controller */
        value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
        writel_relaxed(value | CIRQ_FLUSH, cirq_data->base + CIRQ_CONTROL);
 
index a671938fd97f6fd25cb2055db3ee52d44c64a3cd..d1f5740cd5755cbc4858e9ff4dbd2d09e86f3eb1 100644 (file)
@@ -58,7 +58,7 @@ struct icoll_priv {
 static struct icoll_priv icoll_priv;
 static struct irq_domain *icoll_domain;
 
-/* calculate bit offset depending on number of intterupt per register */
+/* calculate bit offset depending on number of interrupt per register */
 static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
 {
        /*
@@ -68,7 +68,7 @@ static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit)
        return bit << ((d->hwirq & 3) << 3);
 }
 
-/* calculate mem offset depending on number of intterupt per register */
+/* calculate mem offset depending on number of interrupt per register */
 static void __iomem *icoll_intr_reg(struct irq_data *d)
 {
        /* offset = hwirq / intr_per_reg * 0x10 */
index 6f432d2a5cebdc388f5c7d88e1c807f4d83fc28f..97d4d04b0a80eb57346227da018b2f5cdee8ccdc 100644 (file)
@@ -77,8 +77,8 @@ struct plic_handler {
        void __iomem            *enable_base;
        struct plic_priv        *priv;
 };
-static int plic_parent_irq;
-static bool plic_cpuhp_setup_done;
+static int plic_parent_irq __ro_after_init;
+static bool plic_cpuhp_setup_done __ro_after_init;
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
 
 static inline void plic_toggle(struct plic_handler *handler,
index 8662d7b7b2625af83c70235569e8f5da6e603d5e..b9db90c4aa566f186622fe05b50c07b8bbc5e986 100644 (file)
@@ -193,7 +193,14 @@ static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
        { .exti = 23, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct },
        { .exti = 24, .irq_parent = 95, .chip = &stm32_exti_h_chip_direct },
        { .exti = 25, .irq_parent = 107, .chip = &stm32_exti_h_chip_direct },
+       { .exti = 26, .irq_parent = 37, .chip = &stm32_exti_h_chip_direct },
+       { .exti = 27, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct },
+       { .exti = 28, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct },
+       { .exti = 29, .irq_parent = 71, .chip = &stm32_exti_h_chip_direct },
        { .exti = 30, .irq_parent = 52, .chip = &stm32_exti_h_chip_direct },
+       { .exti = 31, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct },
+       { .exti = 32, .irq_parent = 82, .chip = &stm32_exti_h_chip_direct },
+       { .exti = 33, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct },
        { .exti = 47, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct },
        { .exti = 48, .irq_parent = 138, .chip = &stm32_exti_h_chip_direct },
        { .exti = 50, .irq_parent = 139, .chip = &stm32_exti_h_chip_direct },
index fb78d6623556c15f065b2053275caa45b838673e..9ea94456b178ccce9fa39ae12d54f7ff50c10a60 100644 (file)
@@ -189,7 +189,7 @@ static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
         * 3) spurious irq
         * So if we immediately get a reading of 0, check the irq-pending reg
         * to differentiate between 2 and 3. We only do this once to avoid
-        * the extra check in the common case of 1 hapening after having
+        * the extra check in the common case of 1 happening after having
         * read the vector-reg once.
         */
        hwirq = readl(irq_ic_data->irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
index 9e456497c1c48a7ea2b2dce884e7733cff72edc3..9a63b02b817643991adbb86783263d14aeed4e49 100644 (file)
@@ -60,6 +60,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
                break;
        case IRQ_TYPE_NONE:
                flow_type = IRQ_TYPE_LEVEL_LOW;
+               fallthrough;
        case IRQ_TYPE_LEVEL_LOW:
                mod ^= im;
                pol ^= im;
index 532d0ae172d9f8f47b9ae7f398f7087a3c8f74be..ca1f593f4d13aa2fd39db911cb230d818fa365ec 100644 (file)
@@ -78,7 +78,7 @@ struct ti_sci_inta_vint_desc {
  * struct ti_sci_inta_irq_domain - Structure representing a TISCI based
  *                                Interrupt Aggregator IRQ domain.
  * @sci:               Pointer to TISCI handle
- * @vint:              TISCI resource pointer representing IA inerrupts.
+ * @vint:              TISCI resource pointer representing IA interrupts.
  * @global_event:      TISCI resource pointer representing global events.
  * @vint_list:         List of the vints active in the system
  * @vint_mutex:                Mutex to protect vint_list
index e46036374227297ed9aa1185724df3a735d8bbec..62f3d29f90420e56674f79b10a8f78e1ce5acecd 100644 (file)
@@ -163,7 +163,7 @@ static struct syscore_ops vic_syscore_ops = {
 };
 
 /**
- * vic_pm_init - initicall to register VIC pm
+ * vic_pm_init - initcall to register VIC pm
  *
  * This is called via late_initcall() to register
  * the resources for the VICs due to the early
@@ -397,7 +397,7 @@ static void __init vic_clear_interrupts(void __iomem *base)
 /*
  * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
  * The original cell has 32 interrupts, while the modified one has 64,
- * replocating two blocks 0x00..0x1f in 0x20..0x3f. In that case
+ * replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case
  * the probe function is called twice, with base set to offset 000
  *  and 020 within the page. We call this "second block".
  */
diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c
new file mode 100644 (file)
index 0000000..f3ac392
--- /dev/null
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2021 Jonathan Neuschäfer
+
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/printk.h>
+
+#include <asm/exception.h>
+
+#define AIC_SCR(x)     ((x)*4) /* Source control registers */
+#define AIC_GEN                0x84    /* Interrupt group enable control register */
+#define AIC_GRSR       0x88    /* Interrupt group raw status register */
+#define AIC_IRSR       0x100   /* Interrupt raw status register */
+#define AIC_IASR       0x104   /* Interrupt active status register */
+#define AIC_ISR                0x108   /* Interrupt status register */
+#define AIC_IPER       0x10c   /* Interrupt priority encoding register */
+#define AIC_ISNR       0x110   /* Interrupt source number register */
+#define AIC_IMR                0x114   /* Interrupt mask register */
+#define AIC_OISR       0x118   /* Output interrupt status register */
+#define AIC_MECR       0x120   /* Mask enable command register */
+#define AIC_MDCR       0x124   /* Mask disable command register */
+#define AIC_SSCR       0x128   /* Source set command register */
+#define AIC_SCCR       0x12c   /* Source clear command register */
+#define AIC_EOSCR      0x130   /* End of service command register */
+
+#define AIC_SCR_SRCTYPE_LOW_LEVEL      (0 << 6)
+#define AIC_SCR_SRCTYPE_HIGH_LEVEL     (1 << 6)
+#define AIC_SCR_SRCTYPE_NEG_EDGE       (2 << 6)
+#define AIC_SCR_SRCTYPE_POS_EDGE       (3 << 6)
+#define AIC_SCR_PRIORITY(x)            (x)
+#define AIC_SCR_PRIORITY_MASK          0x7
+
+#define AIC_NUM_IRQS           32
+
+struct wpcm450_aic {
+       void __iomem *regs;
+       struct irq_domain *domain;
+};
+
+static struct wpcm450_aic *aic;
+
+/*
+ * Bring the AIC into a known state: all sources masked, any pending
+ * service cycle completed, and every source set to high-level trigger
+ * at the same priority.
+ */
+static void wpcm450_aic_init_hw(void)
+{
+       int i;
+
+       /* Disable (mask) all interrupts */
+       writel(0xffffffff, aic->regs + AIC_MDCR);
+
+       /*
+        * Make sure the interrupt controller is ready to serve new interrupts.
+        * Reading from IPER indicates that the nIRQ signal may be deasserted,
+        * and writing to EOSCR indicates that interrupt handling has finished.
+        */
+       readl(aic->regs + AIC_IPER);
+       writel(0, aic->regs + AIC_EOSCR);
+
+       /* Initialize trigger mode and priority of each interrupt source */
+       for (i = 0; i < AIC_NUM_IRQS; i++)
+               writel(AIC_SCR_SRCTYPE_HIGH_LEVEL | AIC_SCR_PRIORITY(7),
+                      aic->regs + AIC_SCR(i));
+}
+
+/*
+ * Top-level IRQ entry point installed via set_handle_irq().
+ * IPER holds the active source number multiplied by 4 (hence the
+ * division below); the read also tells the AIC that nIRQ may be
+ * de-asserted.
+ */
+static void __exception_irq_entry wpcm450_aic_handle_irq(struct pt_regs *regs)
+{
+       int hwirq;
+
+       /* Determine the interrupt source */
+       /* Read IPER to signal that nIRQ can be de-asserted */
+       hwirq = readl(aic->regs + AIC_IPER) / 4;
+
+       handle_domain_irq(aic->domain, hwirq, regs);
+}
+
+/* End-of-interrupt callback: tell the AIC the current service is done. */
+static void wpcm450_aic_eoi(struct irq_data *d)
+{
+       /* Signal end-of-service */
+       writel(0, aic->regs + AIC_EOSCR);
+}
+
+/* Mask one source via the write-1-to-disable MDCR command register. */
+static void wpcm450_aic_mask(struct irq_data *d)
+{
+       unsigned int mask = BIT(d->hwirq);
+
+       /* Disable (mask) the interrupt */
+       writel(mask, aic->regs + AIC_MDCR);
+}
+
+/* Unmask one source via the write-1-to-enable MECR command register. */
+static void wpcm450_aic_unmask(struct irq_data *d)
+{
+       unsigned int mask = BIT(d->hwirq);
+
+       /* Enable (unmask) the interrupt */
+       writel(mask, aic->regs + AIC_MECR);
+}
+
+/*
+ * Validate the requested trigger type. Only high-level mode is accepted
+ * for now (see comment below); no register is written because
+ * wpcm450_aic_init_hw() already programmed every source to
+ * AIC_SCR_SRCTYPE_HIGH_LEVEL.
+ */
+static int wpcm450_aic_set_type(struct irq_data *d, unsigned int flow_type)
+{
+       /*
+        * The hardware supports high/low level, as well as rising/falling edge
+        * modes, and the DT binding accommodates for that, but as long as
+        * other modes than high level mode are not used and can't be tested,
+        * they are rejected in this driver.
+        */
+       if ((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH)
+               return -EINVAL;
+
+       return 0;
+}
+
+static struct irq_chip wpcm450_aic_chip = {
+       .name = "wpcm450-aic",
+       .irq_eoi = wpcm450_aic_eoi,
+       .irq_mask = wpcm450_aic_mask,
+       .irq_unmask = wpcm450_aic_unmask,
+       .irq_set_type = wpcm450_aic_set_type,
+};
+
+/*
+ * irq_domain .map callback: bind a virq to one of the 32 hardware
+ * sources, using the fasteoi flow to match the EOSCR-based EOI handling.
+ * Out-of-range hwirqs are rejected with -EPERM.
+ */
+static int wpcm450_aic_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq)
+{
+       if (hwirq >= AIC_NUM_IRQS)
+               return -EPERM;
+
+       irq_set_chip_and_handler(irq, &wpcm450_aic_chip, handle_fasteoi_irq);
+       irq_set_chip_data(irq, aic);
+       irq_set_probe(irq);
+
+       return 0;
+}
+
+static const struct irq_domain_ops wpcm450_aic_ops = {
+       .map = wpcm450_aic_map,
+       .xlate = irq_domain_xlate_twocell,
+};
+
+/*
+ * Probe the WPCM450 AIC from its device-tree node: map the register
+ * window, reset the hardware, create the linear IRQ domain and install
+ * the top-level IRQ handler.
+ *
+ * Returns 0 on success, -EINVAL if a parent irqchip is specified (the
+ * AIC is a root controller), or -ENOMEM on allocation/mapping failure.
+ */
+static int __init wpcm450_aic_of_init(struct device_node *node,
+                                     struct device_node *parent)
+{
+       if (parent)
+               return -EINVAL;
+
+       aic = kzalloc(sizeof(*aic), GFP_KERNEL);
+       if (!aic)
+               return -ENOMEM;
+
+       aic->regs = of_iomap(node, 0);
+       if (!aic->regs) {
+               pr_err("Failed to map WPCM450 AIC registers\n");
+               /* Don't leak the chip data on the error path */
+               kfree(aic);
+               aic = NULL;
+               return -ENOMEM;
+       }
+
+       wpcm450_aic_init_hw();
+
+       /*
+        * Create the domain before installing the handler so that a failure
+        * here can be unwound without leaving a handler that would
+        * dereference a NULL aic->domain.
+        */
+       aic->domain = irq_domain_add_linear(node, AIC_NUM_IRQS, &wpcm450_aic_ops, aic);
+       if (!aic->domain) {
+               pr_err("Failed to add WPCM450 AIC irq domain\n");
+               iounmap(aic->regs);
+               kfree(aic);
+               aic = NULL;
+               return -ENOMEM;
+       }
+
+       set_handle_irq(wpcm450_aic_handle_irq);
+
+       return 0;
+}
+
+IRQCHIP_DECLARE(wpcm450_aic, "nuvoton,wpcm450-aic", wpcm450_aic_of_init);
index 1d3d273309bd3a561510298093f9d3b67703d62a..8cd1bfc7305722f1777502f2c21f88b4be205508 100644 (file)
@@ -210,7 +210,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
 
        /*
         * Disable all external interrupts until they are
-        * explicity requested.
+        * explicitly requested.
         */
        xintc_write(irqc, IER, 0);
 
index 7b2f4d0ae3fe90bc69efc57bec01e56196b6d3c3..2f9a289ab2456e7b38d263cf5cd208373782d134 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * CZ.NIC's Turris Omnia LEDs driver
  *
- * 2020 by Marek Behun <marek.behun@nic.cz>
+ * 2020 by Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/i2c.h>
@@ -287,6 +287,6 @@ static struct i2c_driver omnia_leds_driver = {
 
 module_i2c_driver(omnia_leds_driver);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("CZ.NIC's Turris Omnia LEDs");
 MODULE_LICENSE("GPL v2");
index 9f2ce7f03c677971b886978c7b62d990ce614b38..456a117a65fdf3e6cb17279d831a0aa8fe719627 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * rWTM BIU Mailbox driver for Armada 37xx
  *
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/device.h>
@@ -203,4 +203,4 @@ module_platform_driver(armada_37xx_mbox_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx");
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
index 66f4c6398f6708d11cec3f8113d1922521445b71..cea2b37897367c1fb55153f1ca70811f61f49211 100644 (file)
@@ -65,7 +65,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
        u8 *res;
 
        position = (index + rsb) * v->fec->roots;
-       block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
+       block = div64_u64_rem(position, v->fec->io_size, &rem);
        *offset = (unsigned)rem;
 
        res = dm_bufio_read(v->fec->bufio, block, buf);
@@ -154,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
 
                /* read the next block when we run out of parity bytes */
                offset += v->fec->roots;
-               if (offset >= v->fec->roots << SECTOR_SHIFT) {
+               if (offset >= v->fec->io_size) {
                        dm_bufio_release(buf);
 
                        par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
@@ -742,8 +742,13 @@ int verity_fec_ctr(struct dm_verity *v)
                return -E2BIG;
        }
 
+       if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
+               f->io_size = 1 << v->data_dev_block_bits;
+       else
+               f->io_size = v->fec->roots << SECTOR_SHIFT;
+
        f->bufio = dm_bufio_client_create(f->dev->bdev,
-                                         f->roots << SECTOR_SHIFT,
+                                         f->io_size,
                                          1, 0, NULL, NULL);
        if (IS_ERR(f->bufio)) {
                ti->error = "Cannot initialize FEC bufio client";
index 42fbd3a7fc9f16e2e25574fd14da6a2628197eb0..3c46c8d6188332bdff2d4459c2b7600440d1ea27 100644 (file)
@@ -36,6 +36,7 @@ struct dm_verity_fec {
        struct dm_dev *dev;     /* parity data device */
        struct dm_bufio_client *data_bufio;     /* for data dev access */
        struct dm_bufio_client *bufio;          /* for parity data access */
+       size_t io_size;         /* IO size for roots */
        sector_t start;         /* parity data start in blocks */
        sector_t blocks;        /* number of blocks covered */
        sector_t rounds;        /* number of interleaving rounds */
index c2199042d3db593b8d4f6bf045b791cfb6055f28..e8511787c1e439a72588e9f8609a965372e20bec 100644 (file)
@@ -79,8 +79,8 @@ static void cio2_bridge_create_fwnode_properties(
 {
        sensor->prop_names = prop_names;
 
-       sensor->local_ref[0].node = &sensor->swnodes[SWNODE_CIO2_ENDPOINT];
-       sensor->remote_ref[0].node = &sensor->swnodes[SWNODE_SENSOR_ENDPOINT];
+       sensor->local_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_CIO2_ENDPOINT]);
+       sensor->remote_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_SENSOR_ENDPOINT]);
 
        sensor->dev_properties[0] = PROPERTY_ENTRY_U32(
                                        sensor->prop_names.clock_frequency,
index 744b230cdccaa4fa096cb3b74b30c9a936eeb940..dd7eb614c28e47d56fd88fb80cf501e0e67fd12b 100644 (file)
@@ -49,10 +49,14 @@ enum pmt_quirks {
 
        /* Use shift instead of mask to read discovery table offset */
        PMT_QUIRK_TABLE_SHIFT   = BIT(2),
+
+       /* DVSEC not present (provided in driver data) */
+       PMT_QUIRK_NO_DVSEC      = BIT(3),
 };
 
 struct pmt_platform_info {
        unsigned long quirks;
+       struct intel_dvsec_header **capabilities;
 };
 
 static const struct pmt_platform_info tgl_info = {
@@ -60,6 +64,26 @@ static const struct pmt_platform_info tgl_info = {
                  PMT_QUIRK_TABLE_SHIFT,
 };
 
+/* DG1 platform with DVSEC quirk */
+static struct intel_dvsec_header dg1_telemetry = {
+       .length = 0x10,
+       .id = 2,
+       .num_entries = 1,
+       .entry_size = 3,
+       .tbir = 0,
+       .offset = 0x466000,
+};
+
+static struct intel_dvsec_header *dg1_capabilities[] = {
+       &dg1_telemetry,
+       NULL
+};
+
+static const struct pmt_platform_info dg1_info = {
+       .quirks = PMT_QUIRK_NO_DVSEC,
+       .capabilities = dg1_capabilities,
+};
+
 static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
                       unsigned long quirks)
 {
@@ -79,19 +103,18 @@ static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
        case DVSEC_INTEL_ID_WATCHER:
                if (quirks & PMT_QUIRK_NO_WATCHER) {
                        dev_info(dev, "Watcher not supported\n");
-                       return 0;
+                       return -EINVAL;
                }
                name = "pmt_watcher";
                break;
        case DVSEC_INTEL_ID_CRASHLOG:
                if (quirks & PMT_QUIRK_NO_CRASHLOG) {
                        dev_info(dev, "Crashlog not supported\n");
-                       return 0;
+                       return -EINVAL;
                }
                name = "pmt_crashlog";
                break;
        default:
-               dev_err(dev, "Unrecognized PMT capability: %d\n", id);
                return -EINVAL;
        }
 
@@ -148,41 +171,54 @@ static int pmt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (info)
                quirks = info->quirks;
 
-       do {
-               struct intel_dvsec_header header;
-               u32 table;
-               u16 vid;
-
-               pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
-               if (!pos)
-                       break;
-
-               pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vid);
-               if (vid != PCI_VENDOR_ID_INTEL)
-                       continue;
-
-               pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2,
-                                    &header.id);
-               pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES,
-                                    &header.num_entries);
-               pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE,
-                                    &header.entry_size);
-               pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE,
-                                     &table);
-
-               header.tbir = INTEL_DVSEC_TABLE_BAR(table);
-               header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
-
-               ret = pmt_add_dev(pdev, &header, quirks);
-               if (ret) {
-                       dev_warn(&pdev->dev,
-                                "Failed to add device for DVSEC id %d\n",
-                                header.id);
-                       continue;
-               }
+       if (info && (info->quirks & PMT_QUIRK_NO_DVSEC)) {
+               struct intel_dvsec_header **header;
+
+               header = info->capabilities;
+               while (*header) {
+                       ret = pmt_add_dev(pdev, *header, quirks);
+                       if (ret)
+                               dev_warn(&pdev->dev,
+                                        "Failed to add device for DVSEC id %d\n",
+                                        (*header)->id);
+                       else
+                               found_devices = true;
 
-               found_devices = true;
-       } while (true);
+                       ++header;
+               }
+       } else {
+               do {
+                       struct intel_dvsec_header header;
+                       u32 table;
+                       u16 vid;
+
+                       pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
+                       if (!pos)
+                               break;
+
+                       pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vid);
+                       if (vid != PCI_VENDOR_ID_INTEL)
+                               continue;
+
+                       pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2,
+                                            &header.id);
+                       pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES,
+                                            &header.num_entries);
+                       pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE,
+                                            &header.entry_size);
+                       pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE,
+                                             &table);
+
+                       header.tbir = INTEL_DVSEC_TABLE_BAR(table);
+                       header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+
+                       ret = pmt_add_dev(pdev, &header, quirks);
+                       if (ret)
+                               continue;
+
+                       found_devices = true;
+               } while (true);
+       }
 
        if (!found_devices)
                return -ENODEV;
@@ -200,10 +236,12 @@ static void pmt_pci_remove(struct pci_dev *pdev)
 }
 
 #define PCI_DEVICE_ID_INTEL_PMT_ADL    0x467d
+#define PCI_DEVICE_ID_INTEL_PMT_DG1    0x490e
 #define PCI_DEVICE_ID_INTEL_PMT_OOBMSM 0x09a7
 #define PCI_DEVICE_ID_INTEL_PMT_TGL    0x9a0d
 static const struct pci_device_id pmt_pci_ids[] = {
        { PCI_DEVICE_DATA(INTEL, PMT_ADL, &tgl_info) },
+       { PCI_DEVICE_DATA(INTEL, PMT_DG1, &dg1_info) },
        { PCI_DEVICE_DATA(INTEL, PMT_OOBMSM, NULL) },
        { PCI_DEVICE_DATA(INTEL, PMT_TGL, &tgl_info) },
        { }
index f532c59bb59bca5f501fdfecde0d48242aafaf5f..f4fb5c52b8633e6cbbaebd474ab079d75da999e0 100644 (file)
@@ -402,6 +402,16 @@ config SRAM
 config SRAM_EXEC
        bool
 
+config DW_XDATA_PCIE
+       depends on PCI
+       tristate "Synopsys DesignWare xData PCIe driver"
+       help
+         This driver allows controlling Synopsys DesignWare PCIe traffic
+         generator IP also known as xData, present in Synopsys DesignWare
+         PCIe Endpoint prototype.
+
+         If unsure, say N.
+
 config PCI_ENDPOINT_TEST
        depends on PCI
        select CRC32
@@ -427,14 +437,6 @@ config MISC_RTSX
        tristate
        default MISC_RTSX_PCI || MISC_RTSX_USB
 
-config PVPANIC
-       tristate "pvpanic device support"
-       depends on HAS_IOMEM && (ACPI || OF)
-       help
-         This driver provides support for the pvpanic device.  pvpanic is
-         a paravirtualized device provided by QEMU; it lets a virtual machine
-         (guest) communicate panic events to the host.
-
 config HISI_HIKEY_USB
        tristate "USB GPIO Hub on HiSilicon Hikey 960/970 Platform"
        depends on (OF && GPIOLIB) || COMPILE_TEST
@@ -461,4 +463,5 @@ source "drivers/misc/bcm-vk/Kconfig"
 source "drivers/misc/cardreader/Kconfig"
 source "drivers/misc/habanalabs/Kconfig"
 source "drivers/misc/uacce/Kconfig"
+source "drivers/misc/pvpanic/Kconfig"
 endmenu
index 99b6f15a3c707e2e3b02654a183146cf82b0b605..e92a56d4442f8dc6c64af39898cc7d568e330dfc 100644 (file)
@@ -47,11 +47,12 @@ obj-$(CONFIG_SRAM_EXEC)             += sram-exec.o
 obj-$(CONFIG_GENWQE)           += genwqe/
 obj-$(CONFIG_ECHO)             += echo/
 obj-$(CONFIG_CXL_BASE)         += cxl/
+obj-$(CONFIG_DW_XDATA_PCIE)    += dw-xdata-pcie.o
 obj-$(CONFIG_PCI_ENDPOINT_TEST)        += pci_endpoint_test.o
 obj-$(CONFIG_OCXL)             += ocxl/
 obj-$(CONFIG_BCM_VK)           += bcm-vk/
 obj-y                          += cardreader/
-obj-$(CONFIG_PVPANIC)          += pvpanic.o
+obj-$(CONFIG_PVPANIC)          += pvpanic/
 obj-$(CONFIG_HABANA_AI)                += habanalabs/
 obj-$(CONFIG_UACCE)            += uacce/
 obj-$(CONFIG_XILINX_SDFEC)     += xilinx_sdfec.o
index 6f164522b02823d9ceb803a88c0bc1cfff6d9e6e..5d8f3f6a95f2a8914bbd72a48b30741cf29851dc 100644 (file)
@@ -139,6 +139,9 @@ static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
                        value = dpot_read_r8d8(dpot,
                                DPOT_AD5291_READ_RDAC << 2);
 
+                       if (value < 0)
+                               return value;
+
                        if (dpot->uid == DPOT_UID(AD5291_ID))
                                value = value >> 2;
 
index fb2eff69e449d29aa2670070361c40690bee85c2..e627b405662395eec33405b942e9ba4ad196d456 100644 (file)
@@ -52,7 +52,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
                 * can always access it when dereferenced from IDR. For the same
                 * reason, the segment table is only destroyed after the context is
                 * removed from the IDR.  Access to this in the IOCTL is protected by
-                * Linux filesytem symantics (can't IOCTL until open is complete).
+                * Linux filesystem semantics (can't IOCTL until open is complete).
                 */
                i = cxl_alloc_sst(ctx);
                if (i)
index 01153b74334a1b394a1a9e5dba7fffed5921807d..60c829113299bd771580030b1d616cf272d1daf3 100644 (file)
@@ -200,7 +200,7 @@ static struct mm_struct *get_mem_context(struct cxl_context *ctx)
        if (ctx->mm == NULL)
                return NULL;
 
-       if (!atomic_inc_not_zero(&ctx->mm->mm_users))
+       if (!mmget_not_zero(ctx->mm))
                return NULL;
 
        return ctx->mm;
diff --git a/drivers/misc/dw-xdata-pcie.c b/drivers/misc/dw-xdata-pcie.c
new file mode 100644 (file)
index 0000000..257c25d
--- /dev/null
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare xData driver
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/bitfield.h>
+#include <linux/pci-epf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#define DW_XDATA_DRIVER_NAME           "dw-xdata-pcie"
+
+#define DW_XDATA_EP_MEM_OFFSET         0x8000000
+
+static DEFINE_IDA(xdata_ida);
+
+#define STATUS_DONE                    BIT(0)
+
+#define CONTROL_DOORBELL               BIT(0)
+#define CONTROL_IS_WRITE               BIT(1)
+#define CONTROL_LENGTH(a)              FIELD_PREP(GENMASK(13, 2), a)
+#define CONTROL_PATTERN_INC            BIT(16)
+#define CONTROL_NO_ADDR_INC            BIT(18)
+
+#define XPERF_CONTROL_ENABLE           BIT(5)
+
+#define BURST_REPEAT                   BIT(31)
+#define BURST_VALUE                    0x1001
+
+#define PATTERN_VALUE                  0x0
+
+struct dw_xdata_regs {
+       u32 addr_lsb;                                   /* 0x000 */
+       u32 addr_msb;                                   /* 0x004 */
+       u32 burst_cnt;                                  /* 0x008 */
+       u32 control;                                    /* 0x00c */
+       u32 pattern;                                    /* 0x010 */
+       u32 status;                                     /* 0x014 */
+       u32 RAM_addr;                                   /* 0x018 */
+       u32 RAM_port;                                   /* 0x01c */
+       u32 _reserved0[14];                             /* 0x020..0x054 */
+       u32 perf_control;                               /* 0x058 */
+       u32 _reserved1[41];                             /* 0x05c..0x0fc */
+       u32 wr_cnt_lsb;                                 /* 0x100 */
+       u32 wr_cnt_msb;                                 /* 0x104 */
+       u32 rd_cnt_lsb;                                 /* 0x108 */
+       u32 rd_cnt_msb;                                 /* 0x10c */
+} __packed;
+
+struct dw_xdata_region {
+       phys_addr_t paddr;                              /* physical address */
+       void __iomem *vaddr;                            /* virtual address */
+};
+
+struct dw_xdata {
+       struct dw_xdata_region rg_region;               /* registers */
+       size_t max_wr_len;                              /* max wr xfer len */
+       size_t max_rd_len;                              /* max rd xfer len */
+       struct mutex mutex;
+       struct pci_dev *pdev;
+       struct miscdevice misc_dev;
+};
+
+static inline struct dw_xdata_regs __iomem *__dw_regs(struct dw_xdata *dw)
+{
+       return dw->rg_region.vaddr;
+}
+
+static void dw_xdata_stop(struct dw_xdata *dw)
+{
+       u32 burst;
+
+       mutex_lock(&dw->mutex);
+
+       burst = readl(&(__dw_regs(dw)->burst_cnt));
+
+       if (burst & BURST_REPEAT) {
+               burst &= ~(u32)BURST_REPEAT;
+               writel(burst, &(__dw_regs(dw)->burst_cnt));
+       }
+
+       mutex_unlock(&dw->mutex);
+}
+
+static void dw_xdata_start(struct dw_xdata *dw, bool write)
+{
+       struct device *dev = &dw->pdev->dev;
+       u32 control, status;
+
+       /* Stop first if xfer in progress */
+       dw_xdata_stop(dw);
+
+       mutex_lock(&dw->mutex);
+
+       /* Clear status register */
+       writel(0x0, &(__dw_regs(dw)->status));
+
+       /* Burst count register set for continuous until stopped */
+       writel(BURST_REPEAT | BURST_VALUE, &(__dw_regs(dw)->burst_cnt));
+
+       /* Pattern register */
+       writel(PATTERN_VALUE, &(__dw_regs(dw)->pattern));
+
+       /* Control register */
+       control = CONTROL_DOORBELL | CONTROL_PATTERN_INC | CONTROL_NO_ADDR_INC;
+       if (write) {
+               control |= CONTROL_IS_WRITE;
+               control |= CONTROL_LENGTH(dw->max_wr_len);
+       } else {
+               control |= CONTROL_LENGTH(dw->max_rd_len);
+       }
+       writel(control, &(__dw_regs(dw)->control));
+
+       /*
+        * The xData HW block needs about 100 ms to initiate the traffic
+        * generation according this HW block datasheet.
+        */
+       usleep_range(100, 150);
+
+       status = readl(&(__dw_regs(dw)->status));
+
+       mutex_unlock(&dw->mutex);
+
+       if (!(status & STATUS_DONE))
+               dev_dbg(dev, "xData: started %s direction\n",
+                       write ? "write" : "read");
+}
+
+static void dw_xdata_perf_meas(struct dw_xdata *dw, u64 *data, bool write)
+{
+       if (write) {
+               *data = readl(&(__dw_regs(dw)->wr_cnt_msb));
+               *data <<= 32;
+               *data |= readl(&(__dw_regs(dw)->wr_cnt_lsb));
+       } else {
+               *data = readl(&(__dw_regs(dw)->rd_cnt_msb));
+               *data <<= 32;
+               *data |= readl(&(__dw_regs(dw)->rd_cnt_lsb));
+       }
+}
+
+static u64 dw_xdata_perf_diff(u64 *m1, u64 *m2, u64 time)
+{
+       u64 rate = (*m1 - *m2);
+
+       rate *= (1000 * 1000 * 1000);
+       rate >>= 20;
+       rate = DIV_ROUND_CLOSEST_ULL(rate, time);
+
+       return rate;
+}
+
+static void dw_xdata_perf(struct dw_xdata *dw, u64 *rate, bool write)
+{
+       struct device *dev = &dw->pdev->dev;
+       u64 data[2], time[2], diff;
+
+       mutex_lock(&dw->mutex);
+
+       /* First acquisition of current count frames */
+       writel(0x0, &(__dw_regs(dw)->perf_control));
+       dw_xdata_perf_meas(dw, &data[0], write);
+       time[0] = jiffies;
+       writel((u32)XPERF_CONTROL_ENABLE, &(__dw_regs(dw)->perf_control));
+
+       /*
+        * Wait 100ms between the 1st count frame acquisition and the 2nd
+        * count frame acquisition, in order to calculate the speed later
+        */
+       mdelay(100);
+
+       /* Second acquisition of current count frames */
+       writel(0x0, &(__dw_regs(dw)->perf_control));
+       dw_xdata_perf_meas(dw, &data[1], write);
+       time[1] = jiffies;
+       writel((u32)XPERF_CONTROL_ENABLE, &(__dw_regs(dw)->perf_control));
+
+       /*
+        * Speed calculation
+        *
+        * rate = (2nd count frames - 1st count frames) / (time elapsed)
+        */
+       diff = jiffies_to_nsecs(time[1] - time[0]);
+       *rate = dw_xdata_perf_diff(&data[1], &data[0], diff);
+
+       mutex_unlock(&dw->mutex);
+
+       dev_dbg(dev, "xData: time=%llu us, %s=%llu MB/s\n",
+               diff, write ? "write" : "read", *rate);
+}
+
+static struct dw_xdata *misc_dev_to_dw(struct miscdevice *misc_dev)
+{
+       return container_of(misc_dev, struct dw_xdata, misc_dev);
+}
+
+static ssize_t write_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct miscdevice *misc_dev = dev_get_drvdata(dev);
+       struct dw_xdata *dw = misc_dev_to_dw(misc_dev);
+       u64 rate;
+
+       dw_xdata_perf(dw, &rate, true);
+
+       return sysfs_emit(buf, "%llu\n", rate);
+}
+
+static ssize_t write_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t size)
+{
+       struct miscdevice *misc_dev = dev_get_drvdata(dev);
+       struct dw_xdata *dw = misc_dev_to_dw(misc_dev);
+       bool enabled;
+       int ret;
+
+       ret = kstrtobool(buf, &enabled);
+       if (ret < 0)
+               return ret;
+
+       if (enabled) {
+               dev_dbg(dev, "xData: requested write transfer\n");
+               dw_xdata_start(dw, true);
+       } else {
+               dev_dbg(dev, "xData: requested stop transfer\n");
+               dw_xdata_stop(dw);
+       }
+
+       return size;
+}
+
+static DEVICE_ATTR_RW(write);
+
+static ssize_t read_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct miscdevice *misc_dev = dev_get_drvdata(dev);
+       struct dw_xdata *dw = misc_dev_to_dw(misc_dev);
+       u64 rate;
+
+       dw_xdata_perf(dw, &rate, false);
+
+       return sysfs_emit(buf, "%llu\n", rate);
+}
+
+static ssize_t read_store(struct device *dev, struct device_attribute *attr,
+                         const char *buf, size_t size)
+{
+       struct miscdevice *misc_dev = dev_get_drvdata(dev);
+       struct dw_xdata *dw = misc_dev_to_dw(misc_dev);
+       bool enabled;
+       int ret;
+
+       ret = kstrtobool(buf, &enabled);
+       if (ret < 0)
+               return ret;
+
+       if (enabled) {
+               dev_dbg(dev, "xData: requested read transfer\n");
+               dw_xdata_start(dw, false);
+       } else {
+               dev_dbg(dev, "xData: requested stop transfer\n");
+               dw_xdata_stop(dw);
+       }
+
+       return size;
+}
+
+static DEVICE_ATTR_RW(read);
+
+static struct attribute *xdata_attrs[] = {
+       &dev_attr_write.attr,
+       &dev_attr_read.attr,
+       NULL,
+};
+
+ATTRIBUTE_GROUPS(xdata);
+
+static int dw_xdata_pcie_probe(struct pci_dev *pdev,
+                              const struct pci_device_id *pid)
+{
+       struct device *dev = &pdev->dev;
+       struct dw_xdata *dw;
+       char name[24];
+       u64 addr;
+       int err;
+       int id;
+
+       /* Enable PCI device */
+       err = pcim_enable_device(pdev);
+       if (err) {
+               dev_err(dev, "enabling device failed\n");
+               return err;
+       }
+
+       /* Mapping PCI BAR regions */
+       err = pcim_iomap_regions(pdev, BIT(BAR_0), pci_name(pdev));
+       if (err) {
+               dev_err(dev, "xData BAR I/O remapping failed\n");
+               return err;
+       }
+
+       pci_set_master(pdev);
+
+       /* Allocate memory */
+       dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
+       if (!dw)
+               return -ENOMEM;
+
+       /* Data structure initialization */
+       mutex_init(&dw->mutex);
+
+       dw->rg_region.vaddr = pcim_iomap_table(pdev)[BAR_0];
+       if (!dw->rg_region.vaddr)
+               return -ENOMEM;
+
+       dw->rg_region.paddr = pdev->resource[BAR_0].start;
+
+       dw->max_wr_len = pcie_get_mps(pdev);
+       dw->max_wr_len >>= 2;
+
+       dw->max_rd_len = pcie_get_readrq(pdev);
+       dw->max_rd_len >>= 2;
+
+       dw->pdev = pdev;
+
+       id = ida_simple_get(&xdata_ida, 0, 0, GFP_KERNEL);
+       if (id < 0) {
+               dev_err(dev, "xData: unable to get id\n");
+               return id;
+       }
+
+       snprintf(name, sizeof(name), DW_XDATA_DRIVER_NAME ".%d", id);
+       dw->misc_dev.name = kstrdup(name, GFP_KERNEL);
+       if (!dw->misc_dev.name) {
+               err = -ENOMEM;
+               goto err_ida_remove;
+       }
+
+       dw->misc_dev.minor = MISC_DYNAMIC_MINOR;
+       dw->misc_dev.parent = dev;
+       dw->misc_dev.groups = xdata_groups;
+
+       writel(0x0, &(__dw_regs(dw)->RAM_addr));
+       writel(0x0, &(__dw_regs(dw)->RAM_port));
+
+       addr = dw->rg_region.paddr + DW_XDATA_EP_MEM_OFFSET;
+       writel(lower_32_bits(addr), &(__dw_regs(dw)->addr_lsb));
+       writel(upper_32_bits(addr), &(__dw_regs(dw)->addr_msb));
+       dev_dbg(dev, "xData: target address = 0x%.16llx\n", addr);
+
+       dev_dbg(dev, "xData: wr_len = %zu, rd_len = %zu\n",
+               dw->max_wr_len * 4, dw->max_rd_len * 4);
+
+       /* Saving data structure reference */
+       pci_set_drvdata(pdev, dw);
+
+       /* Register misc device */
+       err = misc_register(&dw->misc_dev);
+       if (err) {
+               dev_err(dev, "xData: failed to register device\n");
+               goto err_kfree_name;
+       }
+
+       return 0;
+
+err_kfree_name:
+       kfree(dw->misc_dev.name);
+
+err_ida_remove:
+       ida_simple_remove(&xdata_ida, id);
+
+       return err;
+}
+
+static void dw_xdata_pcie_remove(struct pci_dev *pdev)
+{
+       struct dw_xdata *dw = pci_get_drvdata(pdev);
+       int id;
+
+       if (sscanf(dw->misc_dev.name, DW_XDATA_DRIVER_NAME ".%d", &id) != 1)
+               return;
+
+       if (id < 0)
+               return;
+
+       dw_xdata_stop(dw);
+       misc_deregister(&dw->misc_dev);
+       kfree(dw->misc_dev.name);
+       ida_simple_remove(&xdata_ida, id);
+}
+
+static const struct pci_device_id dw_xdata_pcie_id_table[] = {
+       { PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, dw_xdata_pcie_id_table);
+
+static struct pci_driver dw_xdata_pcie_driver = {
+       .name           = DW_XDATA_DRIVER_NAME,
+       .id_table       = dw_xdata_pcie_id_table,
+       .probe          = dw_xdata_pcie_probe,
+       .remove         = dw_xdata_pcie_remove,
+};
+
+module_pci_driver(dw_xdata_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare xData PCIe driver");
+MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");
+
index 0db4000dedf203aef289d9c040ae4fb4730df50e..500b1feaf1f6f5a30ec204abb428705a9bec417a 100644 (file)
@@ -316,7 +316,7 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
 
 /**
  * copy_ddcb_results() - Copy output state from real DDCB to request
- * @req:        pointer to requsted DDCB parameters
+ * @req:        pointer to requested DDCB parameters
  * @ddcb_no:    pointer to ddcb number being tapped
  *
  * Copy DDCB ASV to request struct. There is no endian
@@ -356,7 +356,7 @@ static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no)
 }
 
 /**
- * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work equests.
+ * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests.
  * @cd:         pointer to genwqe device descriptor
  * @queue:     queue to be checked
  *
@@ -498,7 +498,7 @@ int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
 
        /*
         * We need to distinguish 3 cases here:
-        *   1. rc == 0              timeout occured
+        *   1. rc == 0              timeout occurred
         *   2. rc == -ERESTARTSYS   signal received
         *   3. rc > 0               remaining jiffies condition is true
         */
@@ -982,7 +982,7 @@ static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
 
        spin_lock_irqsave(&queue->ddcb_lock, flags);
 
-       if (queue_empty(queue)) { /* emtpy queue */
+       if (queue_empty(queue)) { /* empty queue */
                spin_unlock_irqrestore(&queue->ddcb_lock, flags);
                return 0;
        }
@@ -1002,7 +1002,7 @@ static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
  * @cd:         pointer to genwqe device descriptor
  *
  * Keep track on the number of DDCBs which ware currently in the
- * queue. This is needed for statistics as well as conditon if we want
+ * queue. This is needed for statistics as well as condition if we want
  * to wait or better do polling in case of no interrupts available.
  */
 int genwqe_ddcbs_in_flight(struct genwqe_dev *cd)
index d9adb9a5e4d879a8b097b0ea3366f9ab970f7a71..719168c980a45b3aa41626b37d8771ef253a9dcd 100644 (file)
@@ -181,7 +181,7 @@ static void cb_release(struct kref *ref)
 static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
                                        int ctx_id, bool internal_cb)
 {
-       struct hl_cb *cb;
+       struct hl_cb *cb = NULL;
        u32 cb_offset;
        void *p;
 
@@ -193,9 +193,10 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
         * the kernel's copy. Hence, we must never sleep in this code section
         * and must use GFP_ATOMIC for all memory allocations.
         */
-       if (ctx_id == HL_KERNEL_ASID_ID)
+       if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
                cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
-       else
+
+       if (!cb)
                cb = kzalloc(sizeof(*cb), GFP_KERNEL);
 
        if (!cb)
@@ -214,6 +215,9 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
        } else if (ctx_id == HL_KERNEL_ASID_ID) {
                p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
                                                &cb->bus_address, GFP_ATOMIC);
+               if (!p)
+                       p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
+                                       cb_size, &cb->bus_address, GFP_KERNEL);
        } else {
                p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
                                                &cb->bus_address,
@@ -310,6 +314,8 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
 
        spin_lock(&mgr->cb_lock);
        rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
+       if (rc < 0)
+               rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_KERNEL);
        spin_unlock(&mgr->cb_lock);
 
        if (rc < 0) {
index 7bd4a03b34291e686ffa20b75f8eba8f8e481dfc..ff8791a651fd1030b7091b709141f367771c2bd3 100644 (file)
@@ -84,6 +84,38 @@ int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
        return 0;
 }
 
+static void sob_reset_work(struct work_struct *work)
+{
+       struct hl_cs_compl *hl_cs_cmpl =
+               container_of(work, struct hl_cs_compl, sob_reset_work);
+       struct hl_device *hdev = hl_cs_cmpl->hdev;
+
+       /*
+        * A signal CS can get completion while the corresponding wait
+        * for signal CS is on its way to the PQ. The wait for signal CS
+        * will get stuck if the signal CS incremented the SOB to its
+        * max value and there are no pending (submitted) waits on this
+        * SOB.
+        * We do the following to void this situation:
+        * 1. The wait for signal CS must get a ref for the signal CS as
+        *    soon as possible in cs_ioctl_signal_wait() and put it
+        *    before being submitted to the PQ but after it incremented
+        *    the SOB refcnt in init_signal_wait_cs().
+        * 2. Signal/Wait for signal CS will decrement the SOB refcnt
+        *    here.
+        * These two measures guarantee that the wait for signal CS will
+        * reset the SOB upon completion rather than the signal CS and
+        * hence the above scenario is avoided.
+        */
+       kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
+
+       if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
+               hdev->asic_funcs->reset_sob_group(hdev,
+                               hl_cs_cmpl->sob_group);
+
+       kfree(hl_cs_cmpl);
+}
+
 static void hl_fence_release(struct kref *kref)
 {
        struct hl_fence *fence =
@@ -109,28 +141,9 @@ static void hl_fence_release(struct kref *kref)
                        hl_cs_cmpl->hw_sob->sob_id,
                        hl_cs_cmpl->sob_val);
 
-               /*
-                * A signal CS can get completion while the corresponding wait
-                * for signal CS is on its way to the PQ. The wait for signal CS
-                * will get stuck if the signal CS incremented the SOB to its
-                * max value and there are no pending (submitted) waits on this
-                * SOB.
-                * We do the following to void this situation:
-                * 1. The wait for signal CS must get a ref for the signal CS as
-                *    soon as possible in cs_ioctl_signal_wait() and put it
-                *    before being submitted to the PQ but after it incremented
-                *    the SOB refcnt in init_signal_wait_cs().
-                * 2. Signal/Wait for signal CS will decrement the SOB refcnt
-                *    here.
-                * These two measures guarantee that the wait for signal CS will
-                * reset the SOB upon completion rather than the signal CS and
-                * hence the above scenario is avoided.
-                */
-               kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
+               queue_work(hdev->sob_reset_wq, &hl_cs_cmpl->sob_reset_work);
 
-               if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
-                       hdev->asic_funcs->reset_sob_group(hdev,
-                                       hl_cs_cmpl->sob_group);
+               return;
        }
 
 free:
@@ -454,8 +467,7 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
 
        if (next_entry_found && !next->tdr_active) {
                next->tdr_active = true;
-               schedule_delayed_work(&next->work_tdr,
-                                       hdev->timeout_jiffies);
+               schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
        }
 
        spin_unlock(&hdev->cs_mirror_lock);
@@ -492,24 +504,6 @@ static void cs_do_release(struct kref *ref)
                goto out;
        }
 
-       hdev->asic_funcs->hw_queues_lock(hdev);
-
-       hdev->cs_active_cnt--;
-       if (!hdev->cs_active_cnt) {
-               struct hl_device_idle_busy_ts *ts;
-
-               ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
-               ts->busy_to_idle_ts = ktime_get();
-
-               if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
-                       hdev->idle_busy_ts_idx = 0;
-       } else if (hdev->cs_active_cnt < 0) {
-               dev_crit(hdev->dev, "CS active cnt %d is negative\n",
-                       hdev->cs_active_cnt);
-       }
-
-       hdev->asic_funcs->hw_queues_unlock(hdev);
-
        /* Need to update CI for all queue jobs that does not get completion */
        hl_hw_queue_update_ci(cs);
 
@@ -620,14 +614,14 @@ static void cs_timedout(struct work_struct *work)
        cs_put(cs);
 
        if (hdev->reset_on_lockup)
-               hl_device_reset(hdev, false, false);
+               hl_device_reset(hdev, 0);
        else
                hdev->needs_reset = true;
 }
 
 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
                        enum hl_cs_type cs_type, u64 user_sequence,
-                       struct hl_cs **cs_new)
+                       struct hl_cs **cs_new, u32 flags, u32 timeout)
 {
        struct hl_cs_counters_atomic *cntr;
        struct hl_fence *other = NULL;
@@ -638,6 +632,9 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
        cntr = &hdev->aggregated_cs_counters;
 
        cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
+       if (!cs)
+               cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+
        if (!cs) {
                atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
                atomic64_inc(&cntr->out_of_mem_drop_cnt);
@@ -651,12 +648,17 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
        cs->submitted = false;
        cs->completed = false;
        cs->type = cs_type;
+       cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
+       cs->timeout_jiffies = timeout;
        INIT_LIST_HEAD(&cs->job_list);
        INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
        kref_init(&cs->refcount);
        spin_lock_init(&cs->job_lock);
 
        cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
+       if (!cs_cmpl)
+               cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_KERNEL);
+
        if (!cs_cmpl) {
                atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
                atomic64_inc(&cntr->out_of_mem_drop_cnt);
@@ -664,9 +666,23 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
                goto free_cs;
        }
 
+       cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
+                       sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
+       if (!cs->jobs_in_queue_cnt)
+               cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
+                               sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);
+
+       if (!cs->jobs_in_queue_cnt) {
+               atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+               atomic64_inc(&cntr->out_of_mem_drop_cnt);
+               rc = -ENOMEM;
+               goto free_cs_cmpl;
+       }
+
        cs_cmpl->hdev = hdev;
        cs_cmpl->type = cs->type;
        spin_lock_init(&cs_cmpl->lock);
+       INIT_WORK(&cs_cmpl->sob_reset_work, sob_reset_work);
        cs->fence = &cs_cmpl->base_fence;
 
        spin_lock(&ctx->cs_lock);
@@ -696,15 +712,6 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
                goto free_fence;
        }
 
-       cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
-                       sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
-       if (!cs->jobs_in_queue_cnt) {
-               atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
-               atomic64_inc(&cntr->out_of_mem_drop_cnt);
-               rc = -ENOMEM;
-               goto free_fence;
-       }
-
        /* init hl_fence */
        hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
 
@@ -727,6 +734,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
 
 free_fence:
        spin_unlock(&ctx->cs_lock);
+       kfree(cs->jobs_in_queue_cnt);
+free_cs_cmpl:
        kfree(cs_cmpl);
 free_cs:
        kfree(cs);
@@ -749,6 +758,8 @@ void hl_cs_rollback_all(struct hl_device *hdev)
        int i;
        struct hl_cs *cs, *tmp;
 
+       flush_workqueue(hdev->sob_reset_wq);
+
        /* flush all completions before iterating over the CS mirror list in
         * order to avoid a race with the release functions
         */
@@ -778,6 +789,44 @@ void hl_pending_cb_list_flush(struct hl_ctx *ctx)
        }
 }
 
+static void
+wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
+{
+       struct hl_user_pending_interrupt *pend;
+
+       spin_lock(&interrupt->wait_list_lock);
+       list_for_each_entry(pend, &interrupt->wait_list_head, wait_list_node) {
+               pend->fence.error = -EIO;
+               complete_all(&pend->fence.completion);
+       }
+       spin_unlock(&interrupt->wait_list_lock);
+}
+
+void hl_release_pending_user_interrupts(struct hl_device *hdev)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       struct hl_user_interrupt *interrupt;
+       int i;
+
+       if (!prop->user_interrupt_count)
+               return;
+
+       /* We iterate through the user interrupt requests and waking up all
+        * user threads waiting for interrupt completion. We iterate the
+        * list under a lock, this is why all user threads, once awake,
+        * will wait on the same lock and will release the waiting object upon
+        * unlock.
+        */
+
+       for (i = 0 ; i < prop->user_interrupt_count ; i++) {
+               interrupt = &hdev->user_interrupt[i];
+               wake_pending_user_interrupt_threads(interrupt);
+       }
+
+       interrupt = &hdev->common_user_interrupt;
+       wake_pending_user_interrupt_threads(interrupt);
+}
+
 static void job_wq_completion(struct work_struct *work)
 {
        struct hl_cs_job *job = container_of(work, struct hl_cs_job,
@@ -889,6 +938,9 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
        struct hl_cs_job *job;
 
        job = kzalloc(sizeof(*job), GFP_ATOMIC);
+       if (!job)
+               job = kzalloc(sizeof(*job), GFP_KERNEL);
+
        if (!job)
                return NULL;
 
@@ -991,6 +1043,9 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
 
        *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
                                        GFP_ATOMIC);
+       if (!*cs_chunk_array)
+               *cs_chunk_array = kmalloc_array(num_chunks,
+                                       sizeof(**cs_chunk_array), GFP_KERNEL);
        if (!*cs_chunk_array) {
                atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
                atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
@@ -1038,7 +1093,8 @@ static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
 }
 
 static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
-                               u32 num_chunks, u64 *cs_seq, u32 flags)
+                               u32 num_chunks, u64 *cs_seq, u32 flags,
+                               u32 timeout)
 {
        bool staged_mid, int_queues_only = true;
        struct hl_device *hdev = hpriv->hdev;
@@ -1067,11 +1123,11 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
                staged_mid = false;
 
        rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
-                       staged_mid ? user_sequence : ULLONG_MAX, &cs);
+                       staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
+                       timeout);
        if (rc)
                goto free_cs_chunk_array;
 
-       cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
        *cs_seq = cs->sequence;
 
        hl_debugfs_add_cs(cs);
@@ -1269,7 +1325,8 @@ static int hl_submit_pending_cb(struct hl_fpriv *hpriv)
                list_move_tail(&pending_cb->cb_node, &local_cb_list);
        spin_unlock(&ctx->pending_cb_lock);
 
-       rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT, ULLONG_MAX, &cs);
+       rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT, ULLONG_MAX, &cs, 0,
+                               hdev->timeout_jiffies);
        if (rc)
                goto add_list_elements;
 
@@ -1370,7 +1427,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
                        rc = 0;
                } else {
                        rc = cs_ioctl_default(hpriv, chunks, num_chunks,
-                                                               cs_seq, 0);
+                                       cs_seq, 0, hdev->timeout_jiffies);
                }
 
                mutex_unlock(&hpriv->restore_phase_mutex);
@@ -1419,7 +1476,7 @@ wait_again:
 
 out:
        if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
-               hl_device_reset(hdev, false, false);
+               hl_device_reset(hdev, 0);
 
        return rc;
 }
@@ -1445,6 +1502,10 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
        signal_seq_arr = kmalloc_array(signal_seq_arr_len,
                                        sizeof(*signal_seq_arr),
                                        GFP_ATOMIC);
+       if (!signal_seq_arr)
+               signal_seq_arr = kmalloc_array(signal_seq_arr_len,
+                                       sizeof(*signal_seq_arr),
+                                       GFP_KERNEL);
        if (!signal_seq_arr) {
                atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
                atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
@@ -1536,7 +1597,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
 
 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
                                void __user *chunks, u32 num_chunks,
-                               u64 *cs_seq, bool timestamp)
+                               u64 *cs_seq, u32 flags, u32 timeout)
 {
        struct hl_cs_chunk *cs_chunk_array, *chunk;
        struct hw_queue_properties *hw_queue_prop;
@@ -1642,7 +1703,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
                }
        }
 
-       rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs);
+       rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
        if (rc) {
                if (cs_type == CS_TYPE_WAIT ||
                        cs_type == CS_TYPE_COLLECTIVE_WAIT)
@@ -1650,8 +1711,6 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
                goto free_cs_chunk_array;
        }
 
-       cs->timestamp = !!timestamp;
-
        /*
         * Save the signal CS fence for later initialization right before
         * hanging the wait CS on the queue.
@@ -1709,7 +1768,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
        enum hl_cs_type cs_type;
        u64 cs_seq = ULONG_MAX;
        void __user *chunks;
-       u32 num_chunks, flags;
+       u32 num_chunks, flags, timeout;
        int rc;
 
        rc = hl_cs_sanity_checks(hpriv, args);
@@ -1735,16 +1794,20 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
                        !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
                cs_seq = args->in.seq;
 
+       timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
+                       ? msecs_to_jiffies(args->in.timeout * 1000)
+                       : hpriv->hdev->timeout_jiffies;
+
        switch (cs_type) {
        case CS_TYPE_SIGNAL:
        case CS_TYPE_WAIT:
        case CS_TYPE_COLLECTIVE_WAIT:
                rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
-                       &cs_seq, args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP);
+                                       &cs_seq, args->in.cs_flags, timeout);
                break;
        default:
                rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
-                                                       args->in.cs_flags);
+                                               args->in.cs_flags, timeout);
                break;
        }
 
@@ -1818,7 +1881,7 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
        return rc;
 }
 
-int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
+static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
 {
        struct hl_device *hdev = hpriv->hdev;
        union hl_wait_cs_args *args = data;
@@ -1873,3 +1936,176 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
 
        return 0;
 }
+
+/* Wait for the 32-bit user value at @user_address to become >= @target_value.
+ * The wait is armed on the user interrupt selected by @interrupt_offset and
+ * bounded by @timeout_us (U32_MAX = effectively unbounded).
+ * Returns 0 with *status set (COMPLETED/BUSY), or a negative errno.
+ */
+static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
+                               u32 timeout_us, u64 user_address,
+                               u32 target_value, u16 interrupt_offset,
+                               enum hl_cs_wait_status *status)
+{
+       struct hl_user_pending_interrupt *pend;
+       struct hl_user_interrupt *interrupt;
+       unsigned long timeout;
+       long completion_rc;
+       u32 completion_value;
+       int rc = 0;
+
+       /* NOTE(review): U32_MAX is used directly as a jiffies count to mean
+        * "wait forever"; MAX_SCHEDULE_TIMEOUT would be the explicit idiom.
+        */
+       if (timeout_us == U32_MAX)
+               timeout = timeout_us;
+       else
+               timeout = usecs_to_jiffies(timeout_us);
+
+       /* Hold the context for as long as a pending interrupt references it */
+       hl_ctx_get(hdev, ctx);
+
+       pend = kmalloc(sizeof(*pend), GFP_KERNEL);
+       if (!pend) {
+               hl_ctx_put(ctx);
+               return -ENOMEM;
+       }
+
+       hl_fence_init(&pend->fence, ULONG_MAX);
+
+       if (interrupt_offset == HL_COMMON_USER_INTERRUPT_ID)
+               interrupt = &hdev->common_user_interrupt;
+       else
+               interrupt = &hdev->user_interrupt[interrupt_offset];
+
+       spin_lock(&interrupt->wait_list_lock);
+       if (!hl_device_operational(hdev, NULL)) {
+               rc = -EPERM;
+               goto unlock_and_free_fence;
+       }
+
+       if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
+               dev_err(hdev->dev,
+                       "Failed to copy completion value from user\n");
+               rc = -EFAULT;
+               goto unlock_and_free_fence;
+       }
+
+       if (completion_value >= target_value)
+               *status = CS_WAIT_STATUS_COMPLETED;
+       else
+               *status = CS_WAIT_STATUS_BUSY;
+
+       if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
+               goto unlock_and_free_fence;
+
+       /* Add pending user interrupt to relevant list for the interrupt
+        * handler to monitor
+        */
+       list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
+       spin_unlock(&interrupt->wait_list_lock);
+
+wait_again:
+       /* Wait for interrupt handler to signal completion */
+       completion_rc =
+               wait_for_completion_interruptible_timeout(
+                               &pend->fence.completion, timeout);
+
+       /* If timeout did not expire we need to perform the comparison.
+        * If comparison fails, keep waiting until timeout expires
+        */
+       if (completion_rc > 0) {
+               if (copy_from_user(&completion_value,
+                               u64_to_user_ptr(user_address), 4)) {
+                       dev_err(hdev->dev,
+                               "Failed to copy completion value from user\n");
+                       rc = -EFAULT;
+                       goto remove_pending_user_interrupt;
+               }
+
+               if (completion_value >= target_value) {
+                       *status = CS_WAIT_STATUS_COMPLETED;
+               } else {
+                       /* BUGFIX: completion_rc is the *remaining* timeout in
+                        * jiffies; the previous code subtracted a microseconds
+                        * value (jiffies_to_usecs()) from a jiffies value.
+                        * Re-arm the wait with the remaining jiffies.
+                        */
+                       timeout = completion_rc;
+                       goto wait_again;
+               }
+       } else if (completion_rc == -ERESTARTSYS) {
+               /* BUGFIX: a pending signal was silently reported as BUSY.
+                * Propagate -EINTR so userspace can tell an interrupted wait
+                * apart from a timeout.
+                */
+               rc = -EINTR;
+       } else {
+               /* Timeout expired without the value being reached */
+               *status = CS_WAIT_STATUS_BUSY;
+       }
+
+remove_pending_user_interrupt:
+       spin_lock(&interrupt->wait_list_lock);
+       list_del(&pend->wait_list_node);
+
+unlock_and_free_fence:
+       spin_unlock(&interrupt->wait_list_lock);
+       kfree(pend);
+       hl_ctx_put(ctx);
+
+       return rc;
+}
+
+/* IOCTL front-end for waiting on a user interrupt: validates the requested
+ * interrupt id against the ASIC's available MSI-X range, translates it to an
+ * internal offset and delegates to _hl_interrupt_wait_ioctl().
+ */
+static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+       u16 interrupt_id, interrupt_offset, first_interrupt, last_interrupt;
+       struct hl_device *hdev = hpriv->hdev;
+       struct asic_fixed_properties *prop;
+       union hl_wait_cs_args *args = data;
+       enum hl_cs_wait_status status;
+       int rc;
+
+       prop = &hdev->asic_prop;
+
+       if (!prop->user_interrupt_count) {
+               dev_err(hdev->dev, "no user interrupts allowed");
+               return -EPERM;
+       }
+
+       /* Interrupt id is encoded in the flags field of the ioctl args */
+       interrupt_id =
+               FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
+
+       first_interrupt = prop->first_available_user_msix_interrupt;
+       last_interrupt = prop->first_available_user_msix_interrupt +
+                                               prop->user_interrupt_count - 1;
+
+       /* The common (device-wide) interrupt id is valid outside the range */
+       if ((interrupt_id < first_interrupt || interrupt_id > last_interrupt) &&
+                       interrupt_id != HL_COMMON_USER_INTERRUPT_ID) {
+               dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id);
+               return -EINVAL;
+       }
+
+       if (interrupt_id == HL_COMMON_USER_INTERRUPT_ID)
+               interrupt_offset = HL_COMMON_USER_INTERRUPT_ID;
+       else
+               interrupt_offset = interrupt_id - first_interrupt;
+
+       rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx,
+                               args->in.interrupt_timeout_us, args->in.addr,
+                               args->in.target, interrupt_offset, &status);
+
+       /* args is a union; clear the input before filling the output */
+       memset(args, 0, sizeof(*args));
+
+       if (rc) {
+               dev_err_ratelimited(hdev->dev,
+                       "interrupt_wait_ioctl failed (%d)\n", rc);
+
+               return rc;
+       }
+
+       switch (status) {
+       case CS_WAIT_STATUS_COMPLETED:
+               args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
+               break;
+       case CS_WAIT_STATUS_BUSY:
+       default:
+               args->out.status = HL_WAIT_CS_STATUS_BUSY;
+               break;
+       }
+
+       return 0;
+}
+
+/* Top-level HL_WAIT ioctl entry point: dispatches to the user-interrupt wait
+ * path when HL_WAIT_CS_FLAGS_INTERRUPT is set, otherwise to the classic
+ * command-submission wait.
+ */
+int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+       union hl_wait_cs_args *args = data;
+       u32 flags = args->in.flags;
+       int rc;
+
+       if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
+               rc = hl_interrupt_wait_ioctl(hpriv, data);
+       else
+               rc = hl_cs_wait_ioctl(hpriv, data);
+
+       return rc;
+}
index cda871afb8f4224531fd29679ef8d4ea60a6e35d..62d705889ca87d941f83eccd3946bb74dcff302c 100644 (file)
@@ -20,6 +20,11 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
         */
        hl_pending_cb_list_flush(ctx);
 
+       /* Release all allocated HW block mapped list entries and destroy
+        * the mutex.
+        */
+       hl_hw_block_mem_fini(ctx);
+
        /*
         * If we arrived here, there are no jobs waiting for this context
         * on its queues so we can safely remove it.
@@ -160,13 +165,15 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
        if (!ctx->cs_pending)
                return -ENOMEM;
 
+       hl_hw_block_mem_init(ctx);
+
        if (is_kernel_ctx) {
                ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
                rc = hl_vm_ctx_init(ctx);
                if (rc) {
                        dev_err(hdev->dev, "Failed to init mem ctx module\n");
                        rc = -ENOMEM;
-                       goto err_free_cs_pending;
+                       goto err_hw_block_mem_fini;
                }
 
                rc = hdev->asic_funcs->ctx_init(ctx);
@@ -179,7 +186,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
                if (!ctx->asid) {
                        dev_err(hdev->dev, "No free ASID, failed to create context\n");
                        rc = -ENOMEM;
-                       goto err_free_cs_pending;
+                       goto err_hw_block_mem_fini;
                }
 
                rc = hl_vm_ctx_init(ctx);
@@ -214,7 +221,8 @@ err_vm_ctx_fini:
 err_asid_free:
        if (ctx->asid != HL_KERNEL_ASID_ID)
                hl_asid_free(hdev, ctx->asid);
-err_free_cs_pending:
+err_hw_block_mem_fini:
+       hl_hw_block_mem_fini(ctx);
        kfree(ctx->cs_pending);
 
        return rc;
index 9f19bee7b592214e1404efda1b93a3734d9b16b5..8381155578a0875dd990208022d6c38620fbf053 100644 (file)
@@ -9,8 +9,8 @@
 #include "../include/hw_ip/mmu/mmu_general.h"
 
 #include <linux/pci.h>
-#include <linux/debugfs.h>
 #include <linux/uaccess.h>
+#include <linux/vmalloc.h>
 
 #define MMU_ADDR_BUF_SIZE      40
 #define MMU_ASID_BUF_SIZE      10
@@ -229,6 +229,7 @@ static int vm_show(struct seq_file *s, void *data)
 {
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+       struct hl_vm_hw_block_list_node *lnode;
        struct hl_ctx *ctx;
        struct hl_vm *vm;
        struct hl_vm_hash_node *hnode;
@@ -272,6 +273,21 @@ static int vm_show(struct seq_file *s, void *data)
                }
                mutex_unlock(&ctx->mem_hash_lock);
 
+               if (ctx->asid != HL_KERNEL_ASID_ID &&
+                   !list_empty(&ctx->hw_block_mem_list)) {
+                       seq_puts(s, "\nhw_block mappings:\n\n");
+                       seq_puts(s, "    virtual address    size    HW block id\n");
+                       seq_puts(s, "-------------------------------------------\n");
+                       mutex_lock(&ctx->hw_block_list_lock);
+                       list_for_each_entry(lnode, &ctx->hw_block_mem_list,
+                                           node) {
+                               seq_printf(s,
+                                       "    0x%-14lx   %-6u      %-9u\n",
+                                       lnode->vaddr, lnode->size, lnode->id);
+                       }
+                       mutex_unlock(&ctx->hw_block_list_lock);
+               }
+
                vm = &ctx->hdev->vm;
                spin_lock(&vm->idr_lock);
 
@@ -441,21 +457,86 @@ out:
        return false;
 }
 
-static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
-                               u64 *phys_addr)
+/* Return true if [addr, addr + size) lies entirely inside device-internal
+ * memory (DRAM or SRAM). Used to validate debugfs DMA-read requests.
+ */
+static bool hl_is_device_internal_memory_va(struct hl_device *hdev, u64 addr,
+                                               u32 size)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       u64 dram_start_addr, dram_end_addr;
+
+       /* Without an MMU there are no device virtual addresses to match */
+       if (!hdev->mmu_enable)
+               return false;
+
+       /* DRAM range is either the DRAM-MMU virtual range or the physical one */
+       if (prop->dram_supports_virtual_memory) {
+               dram_start_addr = prop->dmmu.start_addr;
+               dram_end_addr = prop->dmmu.end_addr;
+       } else {
+               dram_start_addr = prop->dram_base_address;
+               dram_end_addr = prop->dram_end_address;
+       }
+
+       if (hl_mem_area_inside_range(addr, size, dram_start_addr,
+                                       dram_end_addr))
+               return true;
+
+       if (hl_mem_area_inside_range(addr, size, prop->sram_base_address,
+                                       prop->sram_end_address))
+               return true;
+
+       return false;
+}
+
+static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
+                       u64 *phys_addr)
+{
+       struct hl_vm_phys_pg_pack *phys_pg_pack;
        struct hl_ctx *ctx = hdev->compute_ctx;
-       int rc = 0;
+       struct hl_vm_hash_node *hnode;
+       struct hl_userptr *userptr;
+       enum vm_type_t *vm_type;
+       bool valid = false;
+       u64 end_address;
+       u32 range_size;
+       int i, rc = 0;
 
        if (!ctx) {
                dev_err(hdev->dev, "no ctx available\n");
                return -EINVAL;
        }
 
+       /* Verify address is mapped */
+       mutex_lock(&ctx->mem_hash_lock);
+       hash_for_each(ctx->mem_hash, i, hnode, node) {
+               vm_type = hnode->ptr;
+
+               if (*vm_type == VM_TYPE_USERPTR) {
+                       userptr = hnode->ptr;
+                       range_size = userptr->size;
+               } else {
+                       phys_pg_pack = hnode->ptr;
+                       range_size = phys_pg_pack->total_size;
+               }
+
+               end_address = virt_addr + size;
+               if ((virt_addr >= hnode->vaddr) &&
+                               (end_address <= hnode->vaddr + range_size)) {
+                       valid = true;
+                       break;
+               }
+       }
+       mutex_unlock(&ctx->mem_hash_lock);
+
+       if (!valid) {
+               dev_err(hdev->dev,
+                       "virt addr 0x%llx is not mapped\n",
+                       virt_addr);
+               return -EINVAL;
+       }
+
        rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr);
        if (rc) {
-               dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
-                               virt_addr);
+               dev_err(hdev->dev,
+                       "virt addr 0x%llx is not mapped to phys addr\n",
+                       virt_addr);
                rc = -EINVAL;
        }
 
@@ -467,10 +548,11 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
 {
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
-       char tmp_buf[32];
        u64 addr = entry->addr;
-       u32 val;
+       bool user_address;
+       char tmp_buf[32];
        ssize_t rc;
+       u32 val;
 
        if (atomic_read(&hdev->in_reset)) {
                dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
@@ -480,13 +562,14 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
        if (*ppos)
                return 0;
 
-       if (hl_is_device_va(hdev, addr)) {
-               rc = device_va_to_pa(hdev, addr, &addr);
+       user_address = hl_is_device_va(hdev, addr);
+       if (user_address) {
+               rc = device_va_to_pa(hdev, addr, sizeof(val), &addr);
                if (rc)
                        return rc;
        }
 
-       rc = hdev->asic_funcs->debugfs_read32(hdev, addr, &val);
+       rc = hdev->asic_funcs->debugfs_read32(hdev, addr, user_address, &val);
        if (rc) {
                dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
                return rc;
@@ -503,6 +586,7 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 addr = entry->addr;
+       bool user_address;
        u32 value;
        ssize_t rc;
 
@@ -515,13 +599,14 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
        if (rc)
                return rc;
 
-       if (hl_is_device_va(hdev, addr)) {
-               rc = device_va_to_pa(hdev, addr, &addr);
+       user_address = hl_is_device_va(hdev, addr);
+       if (user_address) {
+               rc = device_va_to_pa(hdev, addr, sizeof(value), &addr);
                if (rc)
                        return rc;
        }
 
-       rc = hdev->asic_funcs->debugfs_write32(hdev, addr, value);
+       rc = hdev->asic_funcs->debugfs_write32(hdev, addr, user_address, value);
        if (rc) {
                dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
                        value, addr);
@@ -536,21 +621,28 @@ static ssize_t hl_data_read64(struct file *f, char __user *buf,
 {
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
-       char tmp_buf[32];
        u64 addr = entry->addr;
-       u64 val;
+       bool user_address;
+       char tmp_buf[32];
        ssize_t rc;
+       u64 val;
+
+       if (atomic_read(&hdev->in_reset)) {
+               dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
+               return 0;
+       }
 
        if (*ppos)
                return 0;
 
-       if (hl_is_device_va(hdev, addr)) {
-               rc = device_va_to_pa(hdev, addr, &addr);
+       user_address = hl_is_device_va(hdev, addr);
+       if (user_address) {
+               rc = device_va_to_pa(hdev, addr, sizeof(val), &addr);
                if (rc)
                        return rc;
        }
 
-       rc = hdev->asic_funcs->debugfs_read64(hdev, addr, &val);
+       rc = hdev->asic_funcs->debugfs_read64(hdev, addr, user_address, &val);
        if (rc) {
                dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
                return rc;
@@ -567,20 +659,27 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf,
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 addr = entry->addr;
+       bool user_address;
        u64 value;
        ssize_t rc;
 
+       if (atomic_read(&hdev->in_reset)) {
+               dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
+               return 0;
+       }
+
        rc = kstrtoull_from_user(buf, count, 16, &value);
        if (rc)
                return rc;
 
-       if (hl_is_device_va(hdev, addr)) {
-               rc = device_va_to_pa(hdev, addr, &addr);
+       user_address = hl_is_device_va(hdev, addr);
+       if (user_address) {
+               rc = device_va_to_pa(hdev, addr, sizeof(value), &addr);
                if (rc)
                        return rc;
        }
 
-       rc = hdev->asic_funcs->debugfs_write64(hdev, addr, value);
+       rc = hdev->asic_funcs->debugfs_write64(hdev, addr, user_address, value);
        if (rc) {
                dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n",
                        value, addr);
@@ -590,6 +689,63 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf,
        return count;
 }
 
+/* debugfs "dma_size" write handler: parses a hex size, validates it against
+ * the current debugfs address, DMA-reads that many bytes from device memory
+ * into the "data_dma" blob for later readback.
+ */
+static ssize_t hl_dma_size_write(struct file *f, const char __user *buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+       struct hl_device *hdev = entry->hdev;
+       u64 addr = entry->addr;
+       ssize_t rc;
+       u32 size;
+
+       if (atomic_read(&hdev->in_reset)) {
+               dev_warn_ratelimited(hdev->dev, "Can't DMA during reset\n");
+               return 0;
+       }
+       rc = kstrtouint_from_user(buf, count, 16, &size);
+       if (rc)
+               return rc;
+
+       if (!size) {
+               dev_err(hdev->dev, "DMA read failed. size can't be 0\n");
+               return -EINVAL;
+       }
+
+       /* Cap the vmalloc'ed blob to a sane upper bound */
+       if (size > SZ_128M) {
+               dev_err(hdev->dev,
+                       "DMA read failed. size can't be larger than 128MB\n");
+               return -EINVAL;
+       }
+
+       /* Only device-internal (DRAM/SRAM) ranges may be DMA-read */
+       if (!hl_is_device_internal_memory_va(hdev, addr, size)) {
+               dev_err(hdev->dev,
+                       "DMA read failed. Invalid 0x%010llx + 0x%08x\n",
+                       addr, size);
+               return -EINVAL;
+       }
+
+       /* Free the previous allocation, if there was any */
+       entry->blob_desc.size = 0;
+       vfree(entry->blob_desc.data);
+
+       /* On vmalloc failure blob_desc.data becomes NULL, so no dangling ptr */
+       entry->blob_desc.data = vmalloc(size);
+       if (!entry->blob_desc.data)
+               return -ENOMEM;
+
+       rc = hdev->asic_funcs->debugfs_read_dma(hdev, addr, size,
+                                               entry->blob_desc.data);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to DMA from 0x%010llx\n", addr);
+               vfree(entry->blob_desc.data);
+               entry->blob_desc.data = NULL;
+               return -EIO;
+       }
+
+       /* Publish the size only after the data is valid */
+       entry->blob_desc.size = size;
+
+       return count;
+}
+
 static ssize_t hl_get_power_state(struct file *f, char __user *buf,
                size_t count, loff_t *ppos)
 {
@@ -871,7 +1027,7 @@ static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
 
        hdev->stop_on_err = value ? 1 : 0;
 
-       hl_device_reset(hdev, false, false);
+       hl_device_reset(hdev, 0);
 
        return count;
 }
@@ -899,6 +1055,11 @@ static const struct file_operations hl_data64b_fops = {
        .write = hl_data_write64
 };
 
+static const struct file_operations hl_dma_size_fops = {
+       .owner = THIS_MODULE,
+       .write = hl_dma_size_write
+};
+
 static const struct file_operations hl_i2c_data_fops = {
        .owner = THIS_MODULE,
        .read = hl_i2c_data_read,
@@ -1001,6 +1162,9 @@ void hl_debugfs_add_device(struct hl_device *hdev)
        if (!dev_entry->entry_arr)
                return;
 
+       dev_entry->blob_desc.size = 0;
+       dev_entry->blob_desc.data = NULL;
+
        INIT_LIST_HEAD(&dev_entry->file_list);
        INIT_LIST_HEAD(&dev_entry->cb_list);
        INIT_LIST_HEAD(&dev_entry->cs_list);
@@ -1103,6 +1267,17 @@ void hl_debugfs_add_device(struct hl_device *hdev)
                                dev_entry,
                                &hl_security_violations_fops);
 
+       debugfs_create_file("dma_size",
+                               0200,
+                               dev_entry->root,
+                               dev_entry,
+                               &hl_dma_size_fops);
+
+       debugfs_create_blob("data_dma",
+                               0400,
+                               dev_entry->root,
+                               &dev_entry->blob_desc);
+
        for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
                debugfs_create_file(hl_debugfs_list[i].name,
                                        0444,
@@ -1121,6 +1296,9 @@ void hl_debugfs_remove_device(struct hl_device *hdev)
        debugfs_remove_recursive(entry->root);
 
        mutex_destroy(&entry->file_mutex);
+
+       vfree(entry->blob_desc.data);
+
        kfree(entry->entry_arr);
 }
 
index 334009e83823677b3779b805add6134c6783889e..00e92b6788284eaf293f8e2d55bc966c7f3d2528 100644 (file)
@@ -70,6 +70,9 @@ static void hpriv_release(struct kref *ref)
        mutex_unlock(&hdev->fpriv_list_lock);
 
        kfree(hpriv);
+
+       if (hdev->reset_upon_device_release)
+               hl_device_reset(hdev, 0);
 }
 
 void hl_hpriv_get(struct hl_fpriv *hpriv)
@@ -77,9 +80,9 @@ void hl_hpriv_get(struct hl_fpriv *hpriv)
        kref_get(&hpriv->refcount);
 }
 
-void hl_hpriv_put(struct hl_fpriv *hpriv)
+int hl_hpriv_put(struct hl_fpriv *hpriv)
 {
-       kref_put(&hpriv->refcount, hpriv_release);
+       return kref_put(&hpriv->refcount, hpriv_release);
 }
 
 /*
@@ -103,10 +106,17 @@ static int hl_device_release(struct inode *inode, struct file *filp)
                return 0;
        }
 
-       hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
-       hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+       /* Each pending user interrupt holds the user's context, hence we
+        * must release them all before calling hl_ctx_mgr_fini().
+        */
+       hl_release_pending_user_interrupts(hpriv->hdev);
+
+       hl_cb_mgr_fini(hdev, &hpriv->cb_mgr);
+       hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
 
-       hl_hpriv_put(hpriv);
+       if (!hl_hpriv_put(hpriv))
+               dev_warn(hdev->dev,
+                       "Device is still in use because there are live CS and/or memory mappings\n");
 
        return 0;
 }
@@ -283,7 +293,7 @@ static void device_hard_reset_pending(struct work_struct *work)
        struct hl_device *hdev = device_reset_work->hdev;
        int rc;
 
-       rc = hl_device_reset(hdev, true, true);
+       rc = hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FROM_RESET_THREAD);
        if ((rc == -EBUSY) && !hdev->device_fini_pending) {
                dev_info(hdev->dev,
                        "Could not reset device. will try again in %u seconds",
@@ -311,11 +321,15 @@ static int device_early_init(struct hl_device *hdev)
        switch (hdev->asic_type) {
        case ASIC_GOYA:
                goya_set_asic_funcs(hdev);
-               strlcpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
+               strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
                break;
        case ASIC_GAUDI:
                gaudi_set_asic_funcs(hdev);
-               sprintf(hdev->asic_name, "GAUDI");
+               strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
+               break;
+       case ASIC_GAUDI_SEC:
+               gaudi_set_asic_funcs(hdev);
+               strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
                break;
        default:
                dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
@@ -334,7 +348,7 @@ static int device_early_init(struct hl_device *hdev)
        if (hdev->asic_prop.completion_queues_count) {
                hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
                                sizeof(*hdev->cq_wq),
-                               GFP_ATOMIC);
+                               GFP_KERNEL);
                if (!hdev->cq_wq) {
                        rc = -ENOMEM;
                        goto asid_fini;
@@ -358,24 +372,24 @@ static int device_early_init(struct hl_device *hdev)
                goto free_cq_wq;
        }
 
-       hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
-                                       GFP_KERNEL);
-       if (!hdev->hl_chip_info) {
+       hdev->sob_reset_wq = alloc_workqueue("hl-sob-reset", WQ_UNBOUND, 0);
+       if (!hdev->sob_reset_wq) {
+               dev_err(hdev->dev,
+                       "Failed to allocate SOB reset workqueue\n");
                rc = -ENOMEM;
                goto free_eq_wq;
        }
 
-       hdev->idle_busy_ts_arr = kmalloc_array(HL_IDLE_BUSY_TS_ARR_SIZE,
-                                       sizeof(struct hl_device_idle_busy_ts),
-                                       (GFP_KERNEL | __GFP_ZERO));
-       if (!hdev->idle_busy_ts_arr) {
+       hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
+                                       GFP_KERNEL);
+       if (!hdev->hl_chip_info) {
                rc = -ENOMEM;
-               goto free_chip_info;
+               goto free_sob_reset_wq;
        }
 
        rc = hl_mmu_if_set_funcs(hdev);
        if (rc)
-               goto free_idle_busy_ts_arr;
+               goto free_chip_info;
 
        hl_cb_mgr_init(&hdev->kernel_cb_mgr);
 
@@ -404,10 +418,10 @@ static int device_early_init(struct hl_device *hdev)
 
 free_cb_mgr:
        hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
-free_idle_busy_ts_arr:
-       kfree(hdev->idle_busy_ts_arr);
 free_chip_info:
        kfree(hdev->hl_chip_info);
+free_sob_reset_wq:
+       destroy_workqueue(hdev->sob_reset_wq);
 free_eq_wq:
        destroy_workqueue(hdev->eq_wq);
 free_cq_wq:
@@ -441,9 +455,9 @@ static void device_early_fini(struct hl_device *hdev)
 
        hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
 
-       kfree(hdev->idle_busy_ts_arr);
        kfree(hdev->hl_chip_info);
 
+       destroy_workqueue(hdev->sob_reset_wq);
        destroy_workqueue(hdev->eq_wq);
        destroy_workqueue(hdev->device_reset_work.wq);
 
@@ -485,7 +499,7 @@ static void hl_device_heartbeat(struct work_struct *work)
                goto reschedule;
 
        dev_err(hdev->dev, "Device heartbeat failed!\n");
-       hl_device_reset(hdev, true, false);
+       hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_HEARTBEAT);
 
        return;
 
@@ -561,100 +575,24 @@ static void device_late_fini(struct hl_device *hdev)
        hdev->late_init_done = false;
 }
 
-uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms)
+int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
 {
-       struct hl_device_idle_busy_ts *ts;
-       ktime_t zero_ktime, curr = ktime_get();
-       u32 overlap_cnt = 0, last_index = hdev->idle_busy_ts_idx;
-       s64 period_us, last_start_us, last_end_us, last_busy_time_us,
-               total_busy_time_us = 0, total_busy_time_ms;
-
-       zero_ktime = ktime_set(0, 0);
-       period_us = period_ms * USEC_PER_MSEC;
-       ts = &hdev->idle_busy_ts_arr[last_index];
-
-       /* check case that device is currently in idle */
-       if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime) &&
-                       !ktime_compare(ts->idle_to_busy_ts, zero_ktime)) {
-
-               last_index--;
-               /* Handle case idle_busy_ts_idx was 0 */
-               if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
-                       last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;
-
-               ts = &hdev->idle_busy_ts_arr[last_index];
-       }
-
-       while (overlap_cnt < HL_IDLE_BUSY_TS_ARR_SIZE) {
-               /* Check if we are in last sample case. i.e. if the sample
-                * begun before the sampling period. This could be a real
-                * sample or 0 so need to handle both cases
-                */
-               last_start_us = ktime_to_us(
-                               ktime_sub(curr, ts->idle_to_busy_ts));
-
-               if (last_start_us > period_us) {
-
-                       /* First check two cases:
-                        * 1. If the device is currently busy
-                        * 2. If the device was idle during the whole sampling
-                        *    period
-                        */
-
-                       if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime)) {
-                               /* Check if the device is currently busy */
-                               if (ktime_compare(ts->idle_to_busy_ts,
-                                               zero_ktime))
-                                       return 100;
-
-                               /* We either didn't have any activity or we
-                                * reached an entry which is 0. Either way,
-                                * exit and return what was accumulated so far
-                                */
-                               break;
-                       }
-
-                       /* If sample has finished, check it is relevant */
-                       last_end_us = ktime_to_us(
-                                       ktime_sub(curr, ts->busy_to_idle_ts));
-
-                       if (last_end_us > period_us)
-                               break;
-
-                       /* It is relevant so add it but with adjustment */
-                       last_busy_time_us = ktime_to_us(
-                                               ktime_sub(ts->busy_to_idle_ts,
-                                               ts->idle_to_busy_ts));
-                       total_busy_time_us += last_busy_time_us -
-                                       (last_start_us - period_us);
-                       break;
-               }
-
-               /* Check if the sample is finished or still open */
-               if (ktime_compare(ts->busy_to_idle_ts, zero_ktime))
-                       last_busy_time_us = ktime_to_us(
-                                               ktime_sub(ts->busy_to_idle_ts,
-                                               ts->idle_to_busy_ts));
-               else
-                       last_busy_time_us = ktime_to_us(
-                                       ktime_sub(curr, ts->idle_to_busy_ts));
-
-               total_busy_time_us += last_busy_time_us;
+       u64 max_power, curr_power, dc_power, dividend;
+       int rc;
 
-               last_index--;
-               /* Handle case idle_busy_ts_idx was 0 */
-               if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
-                       last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;
+       max_power = hdev->asic_prop.max_power_default;
+       dc_power = hdev->asic_prop.dc_power_default;
+       rc = hl_fw_cpucp_power_get(hdev, &curr_power);
 
-               ts = &hdev->idle_busy_ts_arr[last_index];
+       if (rc)
+               return rc;
 
-               overlap_cnt++;
-       }
+       curr_power = clamp(curr_power, dc_power, max_power);
 
-       total_busy_time_ms = DIV_ROUND_UP_ULL(total_busy_time_us,
-                                               USEC_PER_MSEC);
+       dividend = (curr_power - dc_power) * 100;
+       *utilization = (u32) div_u64(dividend, (max_power - dc_power));
 
-       return DIV_ROUND_UP_ULL(total_busy_time_ms * 100, period_ms);
+       return 0;
 }
 
 /*
@@ -809,7 +747,7 @@ int hl_device_resume(struct hl_device *hdev)
        hdev->disabled = false;
        atomic_set(&hdev->in_reset, 0);
 
-       rc = hl_device_reset(hdev, true, false);
+       rc = hl_device_reset(hdev, HL_RESET_HARD);
        if (rc) {
                dev_err(hdev->dev, "Failed to reset device during resume\n");
                goto disable_device;
@@ -915,9 +853,7 @@ static void device_disable_open_processes(struct hl_device *hdev)
  * hl_device_reset - reset the device
  *
  * @hdev: pointer to habanalabs device structure
- * @hard_reset: should we do hard reset to all engines or just reset the
- *              compute/dma engines
- * @from_hard_reset_thread: is the caller the hard-reset thread
+ * @flags: reset flags.
  *
  * Block future CS and wait for pending CS to be enqueued
  * Call ASIC H/W fini
@@ -929,9 +865,10 @@ static void device_disable_open_processes(struct hl_device *hdev)
  *
  * Returns 0 for success or an error on failure.
  */
-int hl_device_reset(struct hl_device *hdev, bool hard_reset,
-                       bool from_hard_reset_thread)
+int hl_device_reset(struct hl_device *hdev, u32 flags)
 {
+       u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
+       bool hard_reset, from_hard_reset_thread;
        int i, rc;
 
        if (!hdev->init_done) {
@@ -940,6 +877,9 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
                return 0;
        }
 
+       hard_reset = (flags & HL_RESET_HARD) != 0;
+       from_hard_reset_thread = (flags & HL_RESET_FROM_RESET_THREAD) != 0;
+
        if ((!hard_reset) && (!hdev->supports_soft_reset)) {
                dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n");
                hard_reset = true;
@@ -960,7 +900,11 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
                if (rc)
                        return 0;
 
-               if (hard_reset) {
+               /*
+                * if reset is due to heartbeat, device CPU is no responsive in
+                * which case no point sending PCI disable message to it
+                */
+               if (hard_reset && !(flags & HL_RESET_HEARTBEAT)) {
                        /* Disable PCI access from device F/W so he won't send
                         * us additional interrupts. We disable MSI/MSI-X at
                         * the halt_engines function and we can't have the F/W
@@ -1030,6 +974,11 @@ again:
        /* Go over all the queues, release all CS and their jobs */
        hl_cs_rollback_all(hdev);
 
+       /* Release all pending user interrupts, each pending user interrupt
+        * holds a reference to user context
+        */
+       hl_release_pending_user_interrupts(hdev);
+
 kill_processes:
        if (hard_reset) {
                /* Kill processes here after CS rollback. This is because the
@@ -1078,14 +1027,6 @@ kill_processes:
        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                hl_cq_reset(hdev, &hdev->completion_queue[i]);
 
-       hdev->idle_busy_ts_idx = 0;
-       hdev->idle_busy_ts_arr[0].busy_to_idle_ts = ktime_set(0, 0);
-       hdev->idle_busy_ts_arr[0].idle_to_busy_ts = ktime_set(0, 0);
-
-       if (hdev->cs_active_cnt)
-               dev_crit(hdev->dev, "CS active cnt %d is not 0 during reset\n",
-                       hdev->cs_active_cnt);
-
        mutex_lock(&hdev->fpriv_list_lock);
 
        /* Make sure the context switch phase will run again */
@@ -1151,6 +1092,16 @@ kill_processes:
                goto out_err;
        }
 
+       /* If device is not idle fail the reset process */
+       if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
+                       HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
+               dev_err(hdev->dev,
+                       "device is not idle (mask %#llx %#llx) after reset\n",
+                       idle_mask[0], idle_mask[1]);
+               rc = -EIO;
+               goto out_err;
+       }
+
        /* Check that the communication with the device is working */
        rc = hdev->asic_funcs->test_queues(hdev);
        if (rc) {
@@ -1235,7 +1186,7 @@ out_err:
  */
 int hl_device_init(struct hl_device *hdev, struct class *hclass)
 {
-       int i, rc, cq_cnt, cq_ready_cnt;
+       int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
        char *name;
        bool add_cdev_sysfs_on_err = false;
 
@@ -1274,13 +1225,26 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
        if (rc)
                goto free_dev_ctrl;
 
+       user_interrupt_cnt = hdev->asic_prop.user_interrupt_count;
+
+       if (user_interrupt_cnt) {
+               hdev->user_interrupt = kcalloc(user_interrupt_cnt,
+                               sizeof(*hdev->user_interrupt),
+                               GFP_KERNEL);
+
+               if (!hdev->user_interrupt) {
+                       rc = -ENOMEM;
+                       goto early_fini;
+               }
+       }
+
        /*
         * Start calling ASIC initialization. First S/W then H/W and finally
         * late init
         */
        rc = hdev->asic_funcs->sw_init(hdev);
        if (rc)
-               goto early_fini;
+               goto user_interrupts_fini;
 
        /*
         * Initialize the H/W queues. Must be done before hw_init, because
@@ -1478,6 +1442,8 @@ hw_queues_destroy:
        hl_hw_queues_destroy(hdev);
 sw_fini:
        hdev->asic_funcs->sw_fini(hdev);
+user_interrupts_fini:
+       kfree(hdev->user_interrupt);
 early_fini:
        device_early_fini(hdev);
 free_dev_ctrl:
@@ -1609,6 +1575,7 @@ void hl_device_fini(struct hl_device *hdev)
        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                hl_cq_fini(hdev, &hdev->completion_queue[i]);
        kfree(hdev->completion_queue);
+       kfree(hdev->user_interrupt);
 
        hl_hw_queues_destroy(hdev);
 
index 09706c571e9503192efbe57ddf71b01de460cd9d..832dd5c5bb0653952e682f4cf5385c2abd4aa3c0 100644 (file)
@@ -293,6 +293,7 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
                u32 cpu_security_boot_status_reg)
 {
        u32 err_val, security_val;
+       bool err_exists = false;
 
        /* Some of the firmware status codes are deprecated in newer f/w
         * versions. In those versions, the errors are reported
@@ -307,48 +308,102 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
        if (!(err_val & CPU_BOOT_ERR0_ENABLED))
                return 0;
 
-       if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
+       if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL) {
                dev_err(hdev->dev,
                        "Device boot error - DRAM initialization failed\n");
-       if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED)
+               err_exists = true;
+       }
+
+       if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED) {
                dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
-       if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL)
+               err_exists = true;
+       }
+
+       if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL) {
                dev_err(hdev->dev,
                        "Device boot error - Thermal Sensor initialization failed\n");
-       if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED)
+               err_exists = true;
+       }
+
+       if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
                dev_warn(hdev->dev,
                        "Device boot warning - Skipped DRAM initialization\n");
+               /* This is a warning so we don't want it to disable the
+                * device
+                */
+               err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED;
+       }
 
        if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
-               if (hdev->bmc_enable)
-                       dev_warn(hdev->dev,
+               if (hdev->bmc_enable) {
+                       dev_err(hdev->dev,
                                "Device boot error - Skipped waiting for BMC\n");
-               else
+                       err_exists = true;
+               } else {
+                       dev_info(hdev->dev,
+                               "Device boot message - Skipped waiting for BMC\n");
+                       /* This is an info so we don't want it to disable the
+                        * device
+                        */
                        err_val &= ~CPU_BOOT_ERR0_BMC_WAIT_SKIPPED;
+               }
        }
 
-       if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY)
+       if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY) {
                dev_err(hdev->dev,
                        "Device boot error - Serdes data from BMC not available\n");
-       if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL)
+               err_exists = true;
+       }
+
+       if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) {
                dev_err(hdev->dev,
                        "Device boot error - NIC F/W initialization failed\n");
-       if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY)
+               err_exists = true;
+       }
+
+       if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) {
                dev_warn(hdev->dev,
                        "Device boot warning - security not ready\n");
-       if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL)
+               /* This is a warning so we don't want it to disable the
+                * device
+                */
+               err_val &= ~CPU_BOOT_ERR0_SECURITY_NOT_RDY;
+       }
+
+       if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) {
                dev_err(hdev->dev, "Device boot error - security failure\n");
-       if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL)
+               err_exists = true;
+       }
+
+       if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL) {
                dev_err(hdev->dev, "Device boot error - eFuse failure\n");
-       if (err_val & CPU_BOOT_ERR0_PLL_FAIL)
+               err_exists = true;
+       }
+
+       if (err_val & CPU_BOOT_ERR0_PLL_FAIL) {
                dev_err(hdev->dev, "Device boot error - PLL failure\n");
+               err_exists = true;
+       }
+
+       if (err_val & CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL) {
+               dev_err(hdev->dev,
+                       "Device boot error - device unusable\n");
+               err_exists = true;
+       }
 
        security_val = RREG32(cpu_security_boot_status_reg);
        if (security_val & CPU_BOOT_DEV_STS0_ENABLED)
                dev_dbg(hdev->dev, "Device security status %#x\n",
                                security_val);
 
-       if (err_val & ~CPU_BOOT_ERR0_ENABLED)
+       if (!err_exists && (err_val & ~CPU_BOOT_ERR0_ENABLED)) {
+               dev_err(hdev->dev,
+                       "Device boot error - unknown error 0x%08x\n",
+                       err_val);
+               err_exists = true;
+       }
+
+       if (err_exists)
                return -EIO;
 
        return 0;
@@ -419,6 +474,73 @@ out:
        return rc;
 }
 
+static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
+{
+       struct cpucp_array_data_packet *pkt;
+       size_t total_pkt_size, data_size;
+       u64 result;
+       int rc;
+
+       /* skip sending this info for unsupported ASICs */
+       if (!hdev->asic_funcs->get_msi_info)
+               return 0;
+
+       data_size = CPUCP_NUM_OF_MSI_TYPES * sizeof(u32);
+       total_pkt_size = sizeof(struct cpucp_array_data_packet) + data_size;
+
+       /* data should be aligned to 8 bytes in order to CPU-CP to copy it */
+       total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
+
+       /* total_pkt_size is casted to u16 later on */
+       if (total_pkt_size > USHRT_MAX) {
+               dev_err(hdev->dev, "CPUCP array data is too big\n");
+               return -EINVAL;
+       }
+
+       pkt = kzalloc(total_pkt_size, GFP_KERNEL);
+       if (!pkt)
+               return -ENOMEM;
+
+       pkt->length = cpu_to_le32(CPUCP_NUM_OF_MSI_TYPES);
+
+       hdev->asic_funcs->get_msi_info((u32 *)&pkt->data);
+
+       pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_MSI_INFO_SET <<
+                                               CPUCP_PKT_CTL_OPCODE_SHIFT);
+
+       rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)pkt,
+                                               total_pkt_size, 0, &result);
+
+       /*
+        * in case packet result is invalid it means that FW does not support
+        * this feature and will use default/hard coded MSI values. no reason
+        * to stop the boot
+        */
+       if (rc && result == cpucp_packet_invalid)
+               rc = 0;
+
+       if (rc)
+               dev_err(hdev->dev, "failed to send CPUCP array data\n");
+
+       kfree(pkt);
+
+       return rc;
+}
+
+int hl_fw_cpucp_handshake(struct hl_device *hdev,
+                       u32 cpu_security_boot_status_reg,
+                       u32 boot_err0_reg)
+{
+       int rc;
+
+       rc = hl_fw_cpucp_info_get(hdev, cpu_security_boot_status_reg,
+                                       boot_err0_reg);
+       if (rc)
+               return rc;
+
+       return hl_fw_send_msi_info_msg(hdev);
+}
+
 int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
 {
        struct cpucp_packet pkt = {};
@@ -539,18 +661,63 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
        return rc;
 }
 
-int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index,
+int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
+                                               enum pll_index *pll_index)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       u8 pll_byte, pll_bit_off;
+       bool dynamic_pll;
+
+       if (input_pll_index >= PLL_MAX) {
+               dev_err(hdev->dev, "PLL index %d is out of range\n",
+                                                       input_pll_index);
+               return -EINVAL;
+       }
+
+       dynamic_pll = prop->fw_security_status_valid &&
+               (prop->fw_app_security_map & CPU_BOOT_DEV_STS0_DYN_PLL_EN);
+
+       if (!dynamic_pll) {
+               /*
+                * in case we are working with legacy FW (each asic has unique
+                * PLL numbering) extract the legacy numbering
+                */
+               *pll_index = hdev->legacy_pll_map[input_pll_index];
+               return 0;
+       }
+
+       /* PLL map is a u8 array */
+       pll_byte = prop->cpucp_info.pll_map[input_pll_index >> 3];
+       pll_bit_off = input_pll_index & 0x7;
+
+       if (!(pll_byte & BIT(pll_bit_off))) {
+               dev_err(hdev->dev, "PLL index %d is not supported\n",
+                                                       input_pll_index);
+               return -EINVAL;
+       }
+
+       *pll_index = input_pll_index;
+
+       return 0;
+}
+
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, enum pll_index pll_index,
                u16 *pll_freq_arr)
 {
        struct cpucp_packet pkt;
+       enum pll_index used_pll_idx;
        u64 result;
        int rc;
 
+       rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
+       if (rc)
+               return rc;
+
        memset(&pkt, 0, sizeof(pkt));
 
        pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_INFO_GET <<
                                CPUCP_PKT_CTL_OPCODE_SHIFT);
-       pkt.pll_type = __cpu_to_le16(pll_index);
+       pkt.pll_type = __cpu_to_le16((u16)used_pll_idx);
 
        rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
                        HL_CPUCP_INFO_TIMEOUT_USEC, &result);
@@ -565,6 +732,29 @@ int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index,
        return rc;
 }
 
+int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
+{
+       struct cpucp_packet pkt;
+       u64 result;
+       int rc;
+
+       memset(&pkt, 0, sizeof(pkt));
+
+       pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
+                               CPUCP_PKT_CTL_OPCODE_SHIFT);
+
+       rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+                       HL_CPUCP_INFO_TIMEOUT_USEC, &result);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
+               return rc;
+       }
+
+       *power = result;
+
+       return rc;
+}
+
 static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
 {
        /* Some of the status codes below are deprecated in newer f/w
@@ -623,7 +813,11 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
        u32 status, security_status;
        int rc;
 
-       if (!hdev->cpu_enable)
+       /* pldm was added for cases in which we use preboot on pldm and want
+        * to load boot fit, but we can't wait for preboot because it runs
+        * very slowly
+        */
+       if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU) || hdev->pldm)
                return 0;
 
        /* Need to check two possible scenarios:
@@ -677,16 +871,16 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
        if (security_status & CPU_BOOT_DEV_STS0_ENABLED) {
                prop->fw_security_status_valid = 1;
 
+               /* FW security should be derived from PCI ID, we keep this
+                * check for backward compatibility
+                */
                if (security_status & CPU_BOOT_DEV_STS0_SECURITY_EN)
                        prop->fw_security_disabled = false;
-               else
-                       prop->fw_security_disabled = true;
 
                if (security_status & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
                        prop->hard_reset_done_by_fw = true;
        } else {
                prop->fw_security_status_valid = 0;
-               prop->fw_security_disabled = true;
        }
 
        dev_dbg(hdev->dev, "Firmware preboot security status %#x\n",
@@ -710,7 +904,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
        u32 status;
        int rc;
 
-       if (!(hdev->fw_loading & FW_TYPE_BOOT_CPU))
+       if (!(hdev->fw_components & FW_TYPE_BOOT_CPU))
                return 0;
 
        dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
@@ -801,7 +995,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
                goto out;
        }
 
-       if (!(hdev->fw_loading & FW_TYPE_LINUX)) {
+       if (!(hdev->fw_components & FW_TYPE_LINUX)) {
                dev_info(hdev->dev, "Skip loading Linux F/W\n");
                goto out;
        }
index 4b321e4f8059f6735a913534662929c0d92c1982..44e89da30b4a70040488c2d838755b62275aa44f 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
 #include <linux/hashtable.h>
+#include <linux/debugfs.h>
 #include <linux/bitfield.h>
 #include <linux/genalloc.h>
 #include <linux/sched/signal.h>
@@ -61,7 +62,7 @@
 
 #define HL_SIM_MAX_TIMEOUT_US          10000000 /* 10s */
 
-#define HL_IDLE_BUSY_TS_ARR_SIZE       4096
+#define HL_COMMON_USER_INTERRUPT_ID    0xFFF
 
 /* Memory */
 #define MEM_HASH_TABLE_BITS            7 /* 1 << 7 buckets */
@@ -102,6 +103,23 @@ enum hl_mmu_page_table_location {
 
 #define HL_MAX_DCORES                  4
 
+/*
+ * Reset Flags
+ *
+ * - HL_RESET_HARD
+ *       If set do hard reset to all engines. If not set reset just
+ *       compute/DMA engines.
+ *
+ * - HL_RESET_FROM_RESET_THREAD
+ *       Set if the caller is the hard-reset thread
+ *
+ * - HL_RESET_HEARTBEAT
+ *       Set if reset is due to heartbeat
+ */
+#define HL_RESET_HARD                  (1 << 0)
+#define HL_RESET_FROM_RESET_THREAD     (1 << 1)
+#define HL_RESET_HEARTBEAT             (1 << 2)
+
 #define HL_MAX_SOBS_PER_MONITOR        8
 
 /**
@@ -169,15 +187,19 @@ enum hl_fw_component {
 };
 
 /**
- * enum hl_fw_types - F/W types to load
+ * enum hl_fw_types - F/W types present in the system
  * @FW_TYPE_LINUX: Linux image for device CPU
  * @FW_TYPE_BOOT_CPU: Boot image for device CPU
+ * @FW_TYPE_PREBOOT_CPU: Indicates pre-loaded CPUs are present in the system
+ *                       (preboot, ppboot etc...)
  * @FW_TYPE_ALL_TYPES: Mask for all types
  */
 enum hl_fw_types {
        FW_TYPE_LINUX = 0x1,
        FW_TYPE_BOOT_CPU = 0x2,
-       FW_TYPE_ALL_TYPES = (FW_TYPE_LINUX | FW_TYPE_BOOT_CPU)
+       FW_TYPE_PREBOOT_CPU = 0x4,
+       FW_TYPE_ALL_TYPES =
+               (FW_TYPE_LINUX | FW_TYPE_BOOT_CPU | FW_TYPE_PREBOOT_CPU)
 };
 
 /**
@@ -368,6 +390,7 @@ struct hl_mmu_properties {
  * @dram_size: DRAM total size.
  * @dram_pci_bar_size: size of PCI bar towards DRAM.
  * @max_power_default: max power of the device after reset
+ * @dc_power_default: power consumed by the device in mode idle.
  * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
  *                                      fault.
  * @pcie_dbi_base_address: Base address of the PCIE_DBI block.
@@ -412,6 +435,7 @@ struct hl_mmu_properties {
  * @first_available_user_msix_interrupt: first available msix interrupt
  *                                       reserved for the user
  * @first_available_cq: first available CQ for the user.
+ * @user_interrupt_count: number of user interrupts.
  * @tpc_enabled_mask: which TPCs are enabled.
  * @completion_queues_count: number of completion queues.
  * @fw_security_disabled: true if security measures are disabled in firmware,
@@ -421,6 +445,7 @@ struct hl_mmu_properties {
  * @dram_supports_virtual_memory: is there an MMU towards the DRAM
  * @hard_reset_done_by_fw: true if firmware is handling hard reset flow
  * @num_functional_hbms: number of functional HBMs in each DCORE.
+ * @iatu_done_by_fw: true if iATU configuration is being done by FW.
  */
 struct asic_fixed_properties {
        struct hw_queue_properties      *hw_queues_props;
@@ -439,6 +464,7 @@ struct asic_fixed_properties {
        u64                             dram_size;
        u64                             dram_pci_bar_size;
        u64                             max_power_default;
+       u64                             dc_power_default;
        u64                             dram_size_for_default_page_mapping;
        u64                             pcie_dbi_base_address;
        u64                             pcie_aux_dbi_reg_addr;
@@ -475,6 +501,7 @@ struct asic_fixed_properties {
        u16                             first_available_user_mon[HL_MAX_DCORES];
        u16                             first_available_user_msix_interrupt;
        u16                             first_available_cq[HL_MAX_DCORES];
+       u16                             user_interrupt_count;
        u8                              tpc_enabled_mask;
        u8                              completion_queues_count;
        u8                              fw_security_disabled;
@@ -482,6 +509,7 @@ struct asic_fixed_properties {
        u8                              dram_supports_virtual_memory;
        u8                              hard_reset_done_by_fw;
        u8                              num_functional_hbms;
+       u8                              iatu_done_by_fw;
 };
 
 /**
@@ -503,6 +531,7 @@ struct hl_fence {
 
 /**
  * struct hl_cs_compl - command submission completion object.
+ * @sob_reset_work: workqueue object to run SOB reset flow.
  * @base_fence: hl fence object.
  * @lock: spinlock to protect fence.
  * @hdev: habanalabs device structure.
@@ -513,6 +542,7 @@ struct hl_fence {
  * @sob_group: the SOB group that is used in this collective wait CS.
  */
 struct hl_cs_compl {
+       struct work_struct      sob_reset_work;
        struct hl_fence         base_fence;
        spinlock_t              lock;
        struct hl_device        *hdev;
@@ -689,6 +719,31 @@ struct hl_cq {
        atomic_t                free_slots_cnt;
 };
 
+/**
+ * struct hl_user_interrupt - holds user interrupt information
+ * @hdev: pointer to the device structure
+ * @wait_list_head: head to the list of user threads pending on this interrupt
+ * @wait_list_lock: protects wait_list_head
+ * @interrupt_id: msix interrupt id
+ */
+struct hl_user_interrupt {
+       struct hl_device        *hdev;
+       struct list_head        wait_list_head;
+       spinlock_t              wait_list_lock;
+       u32                     interrupt_id;
+};
+
+/**
+ * struct hl_user_pending_interrupt - holds a context to a user thread
+ *                                    pending on an interrupt
+ * @wait_list_node: node in the list of user threads pending on an interrupt
+ * @fence: hl fence object for interrupt completion
+ */
+struct hl_user_pending_interrupt {
+       struct list_head        wait_list_node;
+       struct hl_fence         fence;
+};
+
 /**
  * struct hl_eq - describes the event queue (single one per device)
  * @hdev: pointer to the device structure
@@ -713,11 +768,13 @@ struct hl_eq {
  * @ASIC_INVALID: Invalid ASIC type.
  * @ASIC_GOYA: Goya device.
  * @ASIC_GAUDI: Gaudi device.
+ * @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000).
  */
 enum hl_asic_type {
        ASIC_INVALID,
        ASIC_GOYA,
-       ASIC_GAUDI
+       ASIC_GAUDI,
+       ASIC_GAUDI_SEC
 };
 
 struct hl_cs_parser;
@@ -802,8 +859,12 @@ enum div_select_defs {
  * @update_eq_ci: update event queue CI.
  * @context_switch: called upon ASID context switch.
  * @restore_phase_topology: clear all SOBs amd MONs.
- * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM.
- * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM.
+ * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM/Host memory.
+ * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM/Host memory.
+ * @debugfs_read64: debug interface for reading u64 from DRAM/SRAM/Host memory.
+ * @debugfs_write64: debug interface for writing u64 to DRAM/SRAM/Host memory.
+ * @debugfs_read_dma: debug interface for reading up to 2MB from the device's
+ *                    internal memory via DMA engine.
  * @add_device_attr: add ASIC specific device attributes.
  * @handle_eqe: handle event queue entry (IRQ) from CPU-CP.
  * @set_pll_profile: change PLL profile (manual/automatic).
@@ -919,10 +980,16 @@ struct hl_asic_funcs {
        void (*update_eq_ci)(struct hl_device *hdev, u32 val);
        int (*context_switch)(struct hl_device *hdev, u32 asid);
        void (*restore_phase_topology)(struct hl_device *hdev);
-       int (*debugfs_read32)(struct hl_device *hdev, u64 addr, u32 *val);
-       int (*debugfs_write32)(struct hl_device *hdev, u64 addr, u32 val);
-       int (*debugfs_read64)(struct hl_device *hdev, u64 addr, u64 *val);
-       int (*debugfs_write64)(struct hl_device *hdev, u64 addr, u64 val);
+       int (*debugfs_read32)(struct hl_device *hdev, u64 addr,
+                               bool user_address, u32 *val);
+       int (*debugfs_write32)(struct hl_device *hdev, u64 addr,
+                               bool user_address, u32 val);
+       int (*debugfs_read64)(struct hl_device *hdev, u64 addr,
+                               bool user_address, u64 *val);
+       int (*debugfs_write64)(struct hl_device *hdev, u64 addr,
+                               bool user_address, u64 val);
+       int (*debugfs_read_dma)(struct hl_device *hdev, u64 addr, u32 size,
+                               void *blob_addr);
        void (*add_device_attr)(struct hl_device *hdev,
                                struct attribute_group *dev_attr_grp);
        void (*handle_eqe)(struct hl_device *hdev,
@@ -986,6 +1053,7 @@ struct hl_asic_funcs {
        int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
                        u32 block_id, u32 block_size);
        void (*enable_events_from_fw)(struct hl_device *hdev);
+       void (*get_msi_info)(u32 *table);
 };
 
 
@@ -1070,9 +1138,11 @@ struct hl_pending_cb {
  * @mem_hash_lock: protects the mem_hash.
  * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
  *            MMU hash or walking the PGT requires talking this lock.
+ * @hw_block_list_lock: protects the HW block memory list.
  * @debugfs_list: node in debugfs list of contexts.
  * pending_cb_list: list of pending command buffers waiting to be sent upon
  *                  next user command submission context.
+ * @hw_block_mem_list: list of HW block virtual mapped addresses.
  * @cs_counters: context command submission counters.
  * @cb_va_pool: device VA pool for command buffers which are mapped to the
  *              device's MMU.
@@ -1109,8 +1179,10 @@ struct hl_ctx {
        struct hl_va_range              *va_range[HL_VA_RANGE_TYPE_MAX];
        struct mutex                    mem_hash_lock;
        struct mutex                    mmu_lock;
+       struct mutex                    hw_block_list_lock;
        struct list_head                debugfs_list;
        struct list_head                pending_cb_list;
+       struct list_head                hw_block_mem_list;
        struct hl_cs_counters_atomic    cs_counters;
        struct gen_pool                 *cb_va_pool;
        u64                             cs_sequence;
@@ -1185,6 +1257,7 @@ struct hl_userptr {
  * @sequence: the sequence number of this CS.
  * @staged_sequence: the sequence of the staged submission this CS is part of,
  *                   relevant only if staged_cs is set.
+ * @timeout_jiffies: cs timeout in jiffies.
  * @type: CS_TYPE_*.
  * @submitted: true if CS was submitted to H/W.
  * @completed: true if CS was completed by device.
@@ -1213,6 +1286,7 @@ struct hl_cs {
        struct list_head        debugfs_list;
        u64                     sequence;
        u64                     staged_sequence;
+       u64                     timeout_jiffies;
        enum hl_cs_type         type;
        u8                      submitted;
        u8                      completed;
@@ -1329,6 +1403,23 @@ struct hl_vm_hash_node {
        void                    *ptr;
 };
 
+/**
+ * struct hl_vm_hw_block_list_node - list element from user virtual address to
+ *                             HW block id.
+ * @node: node to hang on the list in context object.
+ * @ctx: the context this node belongs to.
+ * @vaddr: virtual address of the HW block.
+ * @size: size of the block.
+ * @id: HW block id (handle).
+ */
+struct hl_vm_hw_block_list_node {
+       struct list_head        node;
+       struct hl_ctx           *ctx;
+       unsigned long           vaddr;
+       u32                     size;
+       u32                     id;
+};
+
 /**
  * struct hl_vm_phys_pg_pack - physical page pack.
  * @vm_type: describes the type of the virtual area descriptor.
@@ -1490,12 +1581,13 @@ struct hl_debugfs_entry {
  * @userptr_spinlock: protects userptr_list.
  * @ctx_mem_hash_list: list of available contexts with MMU mappings.
  * @ctx_mem_hash_spinlock: protects cb_list.
+ * @blob_desc: descriptor of blob
  * @addr: next address to read/write from/to in read/write32.
  * @mmu_addr: next virtual address to translate to physical address in mmu_show.
  * @mmu_asid: ASID to use while translating in mmu_show.
  * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
- * @i2c_bus: generic u8 debugfs file for address value to use in i2c_data_read.
- * @i2c_bus: generic u8 debugfs file for register value to use in i2c_data_read.
+ * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
+ * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
  */
 struct hl_dbg_device_entry {
        struct dentry                   *root;
@@ -1513,6 +1605,7 @@ struct hl_dbg_device_entry {
        spinlock_t                      userptr_spinlock;
        struct list_head                ctx_mem_hash_list;
        spinlock_t                      ctx_mem_hash_spinlock;
+       struct debugfs_blob_wrapper     blob_desc;
        u64                             addr;
        u64                             mmu_addr;
        u32                             mmu_asid;
@@ -1683,16 +1776,6 @@ struct hl_device_reset_work {
        struct hl_device                *hdev;
 };
 
-/**
- * struct hl_device_idle_busy_ts - used for calculating device utilization rate.
- * @idle_to_busy_ts: timestamp where device changed from idle to busy.
- * @busy_to_idle_ts: timestamp where device changed from busy to idle.
- */
-struct hl_device_idle_busy_ts {
-       ktime_t                         idle_to_busy_ts;
-       ktime_t                         busy_to_idle_ts;
-};
-
 /**
  * struct hr_mmu_hop_addrs - used for holding per-device host-resident mmu hop
  * information.
@@ -1821,9 +1904,16 @@ struct hl_mmu_funcs {
  * @asic_name: ASIC specific name.
  * @asic_type: ASIC specific type.
  * @completion_queue: array of hl_cq.
+ * @user_interrupt: array of hl_user_interrupt. upon the corresponding user
+ *                  interrupt, driver will monitor the list of fences
+ *                  registered to this interrupt.
+ * @common_user_interrupt: common user interrupt for all user interrupts.
+ *                         upon any user interrupt, driver will monitor the
+ *                         list of fences registered to this common structure.
  * @cq_wq: work queues of completion queues for executing work in process
  *         context.
  * @eq_wq: work queue of event queue for executing work in process context.
+ * @sob_reset_wq: work queue for sob reset executions.
  * @kernel_ctx: Kernel driver context structure.
  * @kernel_queues: array of hl_hw_queue.
  * @cs_mirror_list: CS mirror list for TDR.
@@ -1857,11 +1947,11 @@ struct hl_mmu_funcs {
  *              when a user opens the device
  * @fpriv_list_lock: protects the fpriv_list
  * @compute_ctx: current compute context executing.
- * @idle_busy_ts_arr: array to hold time stamps of transitions from idle to busy
- *                    and vice-versa
  * @aggregated_cs_counters: aggregated cs counters among all contexts
  * @mmu_priv: device-specific MMU data.
  * @mmu_func: device-related MMU functions.
+ * @legacy_pll_map: map holding map between dynamic (common) PLL indexes and
+ *                  static (asic specific) PLL indexes.
  * @dram_used_mem: current DRAM memory consumption.
  * @timeout_jiffies: device CS timeout value.
  * @max_power: the max power of the device, as configured by the sysadmin. This
@@ -1874,13 +1964,10 @@ struct hl_mmu_funcs {
  * @curr_pll_profile: current PLL profile.
  * @card_type: Various ASICs have several card types. This indicates the card
  *             type of the current device.
- * @cs_active_cnt: number of active command submissions on this device (active
- *                 means already in H/W queues)
  * @major: habanalabs kernel driver major.
  * @high_pll: high PLL profile frequency.
  * @soft_reset_cnt: number of soft reset since the driver was loaded.
  * @hard_reset_cnt: number of hard reset since the driver was loaded.
- * @idle_busy_ts_idx: index of current entry in idle_busy_ts_arr
  * @clk_throttling_reason: bitmask represents the current clk throttling reasons
  * @id: device minor.
  * @id_control: minor of the control device
@@ -1937,8 +2024,11 @@ struct hl_device {
        char                            status[HL_DEV_STS_MAX][HL_STR_MAX];
        enum hl_asic_type               asic_type;
        struct hl_cq                    *completion_queue;
+       struct hl_user_interrupt        *user_interrupt;
+       struct hl_user_interrupt        common_user_interrupt;
        struct workqueue_struct         **cq_wq;
        struct workqueue_struct         *eq_wq;
+       struct workqueue_struct         *sob_reset_wq;
        struct hl_ctx                   *kernel_ctx;
        struct hl_hw_queue              *kernel_queues;
        struct list_head                cs_mirror_list;
@@ -1976,13 +2066,13 @@ struct hl_device {
 
        struct hl_ctx                   *compute_ctx;
 
-       struct hl_device_idle_busy_ts   *idle_busy_ts_arr;
-
        struct hl_cs_counters_atomic    aggregated_cs_counters;
 
        struct hl_mmu_priv              mmu_priv;
        struct hl_mmu_funcs             mmu_func[MMU_NUM_PGT_LOCATIONS];
 
+       enum pll_index                  *legacy_pll_map;
+
        atomic64_t                      dram_used_mem;
        u64                             timeout_jiffies;
        u64                             max_power;
@@ -1990,12 +2080,10 @@ struct hl_device {
        atomic_t                        in_reset;
        enum hl_pll_frequency           curr_pll_profile;
        enum cpucp_card_types           card_type;
-       int                             cs_active_cnt;
        u32                             major;
        u32                             high_pll;
        u32                             soft_reset_cnt;
        u32                             hard_reset_cnt;
-       u32                             idle_busy_ts_idx;
        u32                             clk_throttling_reason;
        u16                             id;
        u16                             id_control;
@@ -2029,10 +2117,9 @@ struct hl_device {
 
        /* Parameters for bring-up */
        u64                             nic_ports_mask;
-       u64                             fw_loading;
+       u64                             fw_components;
        u8                              mmu_enable;
        u8                              mmu_huge_page_opt;
-       u8                              cpu_enable;
        u8                              reset_pcilink;
        u8                              cpu_queues_enable;
        u8                              pldm;
@@ -2043,6 +2130,7 @@ struct hl_device {
        u8                              bmc_enable;
        u8                              rl_enable;
        u8                              reset_on_preboot_fail;
+       u8                              reset_upon_device_release;
 };
 
 
@@ -2157,6 +2245,8 @@ void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
 void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
 irqreturn_t hl_irq_handler_cq(int irq, void *arg);
 irqreturn_t hl_irq_handler_eq(int irq, void *arg);
+irqreturn_t hl_irq_handler_user_cq(int irq, void *arg);
+irqreturn_t hl_irq_handler_default(int irq, void *arg);
 u32 hl_cq_inc_ptr(u32 ptr);
 
 int hl_asid_init(struct hl_device *hdev);
@@ -2178,12 +2268,11 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass);
 void hl_device_fini(struct hl_device *hdev);
 int hl_device_suspend(struct hl_device *hdev);
 int hl_device_resume(struct hl_device *hdev);
-int hl_device_reset(struct hl_device *hdev, bool hard_reset,
-                       bool from_hard_reset_thread);
+int hl_device_reset(struct hl_device *hdev, u32 flags);
 void hl_hpriv_get(struct hl_fpriv *hpriv);
-void hl_hpriv_put(struct hl_fpriv *hpriv);
+int hl_hpriv_put(struct hl_fpriv *hpriv);
 int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
-uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms);
+int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
 
 int hl_build_hwmon_channel_info(struct hl_device *hdev,
                struct cpucp_sensor *sensors_arr);
@@ -2235,6 +2324,9 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx);
 int hl_vm_init(struct hl_device *hdev);
 void hl_vm_fini(struct hl_device *hdev);
 
+void hl_hw_block_mem_init(struct hl_ctx *ctx);
+void hl_hw_block_mem_fini(struct hl_ctx *ctx);
+
 u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
                enum hl_va_range_type type, u32 size, u32 alignment);
 int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
@@ -2287,13 +2379,19 @@ int hl_fw_send_heartbeat(struct hl_device *hdev);
 int hl_fw_cpucp_info_get(struct hl_device *hdev,
                        u32 cpu_security_boot_status_reg,
                        u32 boot_err0_reg);
+int hl_fw_cpucp_handshake(struct hl_device *hdev,
+                       u32 cpu_security_boot_status_reg,
+                       u32 boot_err0_reg);
 int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
 int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
                struct hl_info_pci_counters *counters);
 int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
                        u64 *total_energy);
-int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index,
+int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
+                                               enum pll_index *pll_index);
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, enum pll_index pll_index,
                u16 *pll_freq_arr);
+int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
 int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
                        u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
                        u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
@@ -2304,6 +2402,7 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
 
 int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
                        bool is_wc[3]);
+int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data);
 int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
 int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
                struct hl_inbound_pci_region *pci_region);
@@ -2312,8 +2411,10 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
 int hl_pci_init(struct hl_device *hdev);
 void hl_pci_fini(struct hl_device *hdev);
 
-long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
-void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
+long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
+                                                               bool curr);
+void hl_set_frequency(struct hl_device *hdev, enum pll_index pll_index,
+                                                               u64 freq);
 int hl_get_temperature(struct hl_device *hdev,
                       int sensor_index, u32 attr, long *value);
 int hl_set_temperature(struct hl_device *hdev,
@@ -2334,6 +2435,7 @@ int hl_set_voltage(struct hl_device *hdev,
                        int sensor_index, u32 attr, long value);
 int hl_set_current(struct hl_device *hdev,
                        int sensor_index, u32 attr, long value);
+void hl_release_pending_user_interrupts(struct hl_device *hdev);
 
 #ifdef CONFIG_DEBUG_FS
 
@@ -2434,7 +2536,7 @@ long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
 int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
-int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data);
+int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data);
 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);
 
 #endif /* HABANALABSP_H_ */
index 032d114f01ea54b705f2f0df0cb93309d07e2947..7135f1e03864107b6bcdf8b6040f1f8ff15dfe3e 100644 (file)
@@ -27,13 +27,13 @@ static struct class *hl_class;
 static DEFINE_IDR(hl_devs_idr);
 static DEFINE_MUTEX(hl_devs_idr_lock);
 
-static int timeout_locked = 5;
+static int timeout_locked = 30;
 static int reset_on_lockup = 1;
 static int memory_scrub = 1;
 
 module_param(timeout_locked, int, 0444);
 MODULE_PARM_DESC(timeout_locked,
-       "Device lockup timeout in seconds (0 = disabled, default 5s)");
+       "Device lockup timeout in seconds (0 = disabled, default 30s)");
 
 module_param(reset_on_lockup, int, 0444);
 MODULE_PARM_DESC(reset_on_lockup,
@@ -47,10 +47,12 @@ MODULE_PARM_DESC(memory_scrub,
 
 #define PCI_IDS_GOYA                   0x0001
 #define PCI_IDS_GAUDI                  0x1000
+#define PCI_IDS_GAUDI_SEC              0x1010
 
 static const struct pci_device_id ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), },
        { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI), },
+       { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI_SEC), },
        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, ids);
@@ -74,6 +76,9 @@ static enum hl_asic_type get_asic_type(u16 device)
        case PCI_IDS_GAUDI:
                asic_type = ASIC_GAUDI;
                break;
+       case PCI_IDS_GAUDI_SEC:
+               asic_type = ASIC_GAUDI_SEC;
+               break;
        default:
                asic_type = ASIC_INVALID;
                break;
@@ -82,6 +87,16 @@ static enum hl_asic_type get_asic_type(u16 device)
        return asic_type;
 }
 
+static bool is_asic_secured(enum hl_asic_type asic_type)
+{
+       switch (asic_type) {
+       case ASIC_GAUDI_SEC:
+               return true;
+       default:
+               return false;
+       }
+}
+
 /*
  * hl_device_open - open function for habanalabs device
  *
@@ -234,8 +249,7 @@ out_err:
 
 static void set_driver_behavior_per_device(struct hl_device *hdev)
 {
-       hdev->cpu_enable = 1;
-       hdev->fw_loading = FW_TYPE_ALL_TYPES;
+       hdev->fw_components = FW_TYPE_ALL_TYPES;
        hdev->cpu_queues_enable = 1;
        hdev->heartbeat = 1;
        hdev->mmu_enable = 1;
@@ -288,6 +302,12 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
                hdev->asic_type = asic_type;
        }
 
+       if (pdev)
+               hdev->asic_prop.fw_security_disabled =
+                               !is_asic_secured(pdev->device);
+       else
+               hdev->asic_prop.fw_security_disabled = true;
+
        /* Assign status description string */
        strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
                                        "disabled", HL_STR_MAX);
index 083a30969c5f3698acc0151bee28808d1b5b90ef..33841c272eb66b37ff0f8fffb47bc7992ecd6d5d 100644 (file)
@@ -226,19 +226,14 @@ static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
        struct hl_info_device_utilization device_util = {0};
        u32 max_size = args->return_size;
        void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+       int rc;
 
        if ((!max_size) || (!out))
                return -EINVAL;
 
-       if ((args->period_ms < 100) || (args->period_ms > 1000) ||
-               (args->period_ms % 100)) {
-               dev_err(hdev->dev,
-                       "period %u must be between 100 - 1000 and must be divisible by 100\n",
-                       args->period_ms);
+       rc = hl_device_utilization(hdev, &device_util.utilization);
+       if (rc)
                return -EINVAL;
-       }
-
-       device_util.utilization = hl_device_utilization(hdev, args->period_ms);
 
        return copy_to_user(out, &device_util,
                min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
@@ -446,6 +441,25 @@ static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
                min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
 }
 
+static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+       struct hl_device *hdev = hpriv->hdev;
+       u32 max_size = args->return_size;
+       struct hl_power_info power_info = {0};
+       void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+       int rc;
+
+       if ((!max_size) || (!out))
+               return -EINVAL;
+
+       rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
+       if (rc)
+               return rc;
+
+       return copy_to_user(out, &power_info,
+               min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
+}
+
 static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
                                struct device *dev)
 {
@@ -526,6 +540,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
        case HL_INFO_PLL_FREQUENCY:
                return pll_frequency_info(hpriv, args);
 
+       case HL_INFO_POWER:
+               return power_info(hpriv, args);
+
        default:
                dev_err(dev, "Invalid request %d\n", args->op);
                rc = -ENOTTY;
@@ -596,7 +613,7 @@ static const struct hl_ioctl_desc hl_ioctls[] = {
        HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
        HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
        HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
-       HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl),
+       HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_wait_ioctl),
        HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
        HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
 };
index 0f335182267fd19d28982b2c6f518f6ef1b6aae9..1734384618351596db379f42566b17f90ac8748d 100644 (file)
@@ -629,20 +629,12 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
        if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
                                first_entry && cs_needs_timeout(cs)) {
                cs->tdr_active = true;
-               schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
+               schedule_delayed_work(&cs->work_tdr, cs->timeout_jiffies);
 
        }
 
        spin_unlock(&hdev->cs_mirror_lock);
 
-       if (!hdev->cs_active_cnt++) {
-               struct hl_device_idle_busy_ts *ts;
-
-               ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx];
-               ts->busy_to_idle_ts = ktime_set(0, 0);
-               ts->idle_to_busy_ts = ktime_get();
-       }
-
        list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
                switch (job->queue_type) {
                case QUEUE_TYPE_EXT:
index 44a0522b59b9507e3b4144719a8a9d07bf26c4d2..27129868c71109528c8b7bbf9d0e7601eeb9e726 100644 (file)
@@ -137,6 +137,62 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
+static void handle_user_cq(struct hl_device *hdev,
+                       struct hl_user_interrupt *user_cq)
+{
+       struct hl_user_pending_interrupt *pend;
+
+       spin_lock(&user_cq->wait_list_lock);
+       list_for_each_entry(pend, &user_cq->wait_list_head, wait_list_node)
+               complete_all(&pend->fence.completion);
+       spin_unlock(&user_cq->wait_list_lock);
+}
+
+/**
+ * hl_irq_handler_user_cq - irq handler for user completion queues
+ *
+ * @irq: irq number
+ * @arg: pointer to user interrupt structure
+ *
+ */
+irqreturn_t hl_irq_handler_user_cq(int irq, void *arg)
+{
+       struct hl_user_interrupt *user_cq = arg;
+       struct hl_device *hdev = user_cq->hdev;
+
+       dev_dbg(hdev->dev,
+               "got user completion interrupt id %u",
+               user_cq->interrupt_id);
+
+       /* Handle user cq interrupts registered on all interrupts */
+       handle_user_cq(hdev, &hdev->common_user_interrupt);
+
+       /* Handle user cq interrupts registered on this specific interrupt */
+       handle_user_cq(hdev, user_cq);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * hl_irq_handler_default - default irq handler
+ *
+ * @irq: irq number
+ * @arg: pointer to user interrupt structure
+ *
+ */
+irqreturn_t hl_irq_handler_default(int irq, void *arg)
+{
+       struct hl_user_interrupt *user_interrupt = arg;
+       struct hl_device *hdev = user_interrupt->hdev;
+       u32 interrupt_id = user_interrupt->interrupt_id;
+
+       dev_err(hdev->dev,
+               "got invalid user interrupt %u",
+               interrupt_id);
+
+       return IRQ_HANDLED;
+}
+
 /**
  * hl_irq_handler_eq - irq handler for event queue
  *
index 1f5910517b0e3ad565d6179fc8d481598fca57ba..2938cbbafbbcfe4eec510243269c32bd76f41697 100644 (file)
@@ -81,16 +81,6 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                                num_pgs, total_size);
                        return -ENOMEM;
                }
-
-               if (hdev->memory_scrub) {
-                       rc = hdev->asic_funcs->scrub_device_mem(hdev, paddr,
-                                       total_size);
-                       if (rc) {
-                               dev_err(hdev->dev,
-                                       "Failed to scrub contiguous device memory\n");
-                               goto pages_pack_err;
-                       }
-               }
        }
 
        phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
@@ -128,24 +118,13 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                                goto page_err;
                        }
 
-                       if (hdev->memory_scrub) {
-                               rc = hdev->asic_funcs->scrub_device_mem(hdev,
-                                               phys_pg_pack->pages[i],
-                                               page_size);
-                               if (rc) {
-                                       dev_err(hdev->dev,
-                                               "Failed to scrub device memory\n");
-                                       goto page_err;
-                               }
-                       }
-
                        num_curr_pgs++;
                }
        }
 
        spin_lock(&vm->idr_lock);
        handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
-                               GFP_ATOMIC);
+                               GFP_KERNEL);
        spin_unlock(&vm->idr_lock);
 
        if (handle < 0) {
@@ -280,37 +259,67 @@ static void dram_pg_pool_do_release(struct kref *ref)
  * @phys_pg_pack: physical page pack to free.
  *
  * This function does the following:
- * - For DRAM memory only, iterate over the pack and free each physical block
- *   structure by returning it to the general pool.
+ * - For DRAM memory only
+ *   - iterate over the pack, scrub and free each physical block structure by
+ *     returning it to the general pool.
+ *     In case of error during scrubbing, initiate hard reset.
+ *     Once hard reset is triggered, scrubbing is bypassed while freeing the
+ *     memory continues.
  * - Free the hl_vm_phys_pg_pack structure.
  */
-static void free_phys_pg_pack(struct hl_device *hdev,
+static int free_phys_pg_pack(struct hl_device *hdev,
                                struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
        struct hl_vm *vm = &hdev->vm;
        u64 i;
+       int rc = 0;
 
-       if (!phys_pg_pack->created_from_userptr) {
-               if (phys_pg_pack->contiguous) {
-                       gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
+       if (phys_pg_pack->created_from_userptr)
+               goto end;
+
+       if (phys_pg_pack->contiguous) {
+               if (hdev->memory_scrub && !hdev->disabled) {
+                       rc = hdev->asic_funcs->scrub_device_mem(hdev,
+                                       phys_pg_pack->pages[0],
                                        phys_pg_pack->total_size);
+                       if (rc)
+                               dev_err(hdev->dev,
+                                       "Failed to scrub contiguous device memory\n");
+               }
 
-                       for (i = 0; i < phys_pg_pack->npages ; i++)
-                               kref_put(&vm->dram_pg_pool_refcount,
-                                       dram_pg_pool_do_release);
-               } else {
-                       for (i = 0 ; i < phys_pg_pack->npages ; i++) {
-                               gen_pool_free(vm->dram_pg_pool,
+               gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
+                       phys_pg_pack->total_size);
+
+               for (i = 0; i < phys_pg_pack->npages ; i++)
+                       kref_put(&vm->dram_pg_pool_refcount,
+                               dram_pg_pool_do_release);
+       } else {
+               for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+                       if (hdev->memory_scrub && !hdev->disabled && rc == 0) {
+                               rc = hdev->asic_funcs->scrub_device_mem(
+                                               hdev,
                                                phys_pg_pack->pages[i],
                                                phys_pg_pack->page_size);
-                               kref_put(&vm->dram_pg_pool_refcount,
-                                       dram_pg_pool_do_release);
+                               if (rc)
+                                       dev_err(hdev->dev,
+                                               "Failed to scrub device memory\n");
                        }
+                       gen_pool_free(vm->dram_pg_pool,
+                               phys_pg_pack->pages[i],
+                               phys_pg_pack->page_size);
+                       kref_put(&vm->dram_pg_pool_refcount,
+                               dram_pg_pool_do_release);
                }
        }
 
+       if (rc && !hdev->disabled)
+               hl_device_reset(hdev, HL_RESET_HARD);
+
+end:
        kvfree(phys_pg_pack->pages);
        kfree(phys_pg_pack);
+
+       return rc;
 }
 
 /**
@@ -349,7 +358,7 @@ static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
                atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
                atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
 
-               free_phys_pg_pack(hdev, phys_pg_pack);
+               return free_phys_pg_pack(hdev, phys_pg_pack);
        } else {
                spin_unlock(&vm->idr_lock);
                dev_err(hdev->dev,
@@ -857,6 +866,7 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
        u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
        u32 page_size = phys_pg_pack->page_size;
        int rc = 0;
+       bool is_host_addr;
 
        for (i = 0 ; i < phys_pg_pack->npages ; i++) {
                paddr = phys_pg_pack->pages[i];
@@ -878,6 +888,8 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
        return 0;
 
 err:
+       is_host_addr = !hl_is_dram_va(hdev, vaddr);
+
        next_vaddr = vaddr;
        for (i = 0 ; i < mapped_pg_cnt ; i++) {
                if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
@@ -888,6 +900,17 @@ err:
                                        phys_pg_pack->pages[i], page_size);
 
                next_vaddr += page_size;
+
+               /*
+                * unmapping on Palladium can be really long, so avoid a CPU
+                * soft lockup bug by sleeping a little between unmapping pages
+                *
+                * In addition, on host num of pages could be huge,
+                * because page size could be 4KB, so when unmapping host
+                * pages sleep every 32K pages to avoid soft lockup
+                */
+               if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
+                       usleep_range(50, 200);
        }
 
        return rc;
@@ -921,9 +944,9 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
                 * unmapping on Palladium can be really long, so avoid a CPU
                 * soft lockup bug by sleeping a little between unmapping pages
                 *
-                * In addition, when unmapping host memory we pass through
-                * the Linux kernel to unpin the pages and that takes a long
-                * time. Therefore, sleep every 32K pages to avoid soft lockup
+                * In addition, on host num of pages could be huge,
+                * because page size could be 4KB, so when unmapping host
+                * pages sleep every 32K pages to avoid soft lockup
                 */
                if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
                        usleep_range(50, 200);
@@ -1117,9 +1140,9 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
        *device_addr = ret_vaddr;
 
        if (is_userptr)
-               free_phys_pg_pack(hdev, phys_pg_pack);
+               rc = free_phys_pg_pack(hdev, phys_pg_pack);
 
-       return 0;
+       return rc;
 
 map_err:
        if (add_va_block(hdev, va_range, ret_vaddr,
@@ -1272,7 +1295,7 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
        kfree(hnode);
 
        if (is_userptr) {
-               free_phys_pg_pack(hdev, phys_pg_pack);
+               rc = free_phys_pg_pack(hdev, phys_pg_pack);
                dma_unmap_host_va(hdev, userptr);
        }
 
@@ -1305,9 +1328,15 @@ static int map_block(struct hl_device *hdev, u64 address, u64 *handle,
 
 static void hw_block_vm_close(struct vm_area_struct *vma)
 {
-       struct hl_ctx *ctx = (struct hl_ctx *) vma->vm_private_data;
+       struct hl_vm_hw_block_list_node *lnode =
+               (struct hl_vm_hw_block_list_node *) vma->vm_private_data;
+       struct hl_ctx *ctx = lnode->ctx;
 
+       mutex_lock(&ctx->hw_block_list_lock);
+       list_del(&lnode->node);
+       mutex_unlock(&ctx->hw_block_list_lock);
        hl_ctx_put(ctx);
+       kfree(lnode);
        vma->vm_private_data = NULL;
 }
 
@@ -1325,7 +1354,9 @@ static const struct vm_operations_struct hw_block_vm_ops = {
  */
 int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
 {
+       struct hl_vm_hw_block_list_node *lnode;
        struct hl_device *hdev = hpriv->hdev;
+       struct hl_ctx *ctx = hpriv->ctx;
        u32 block_id, block_size;
        int rc;
 
@@ -1351,17 +1382,31 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
                return -EINVAL;
        }
 
+       lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);
+       if (!lnode)
+               return -ENOMEM;
+
        vma->vm_ops = &hw_block_vm_ops;
-       vma->vm_private_data = hpriv->ctx;
+       vma->vm_private_data = lnode;
 
-       hl_ctx_get(hdev, hpriv->ctx);
+       hl_ctx_get(hdev, ctx);
 
        rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
        if (rc) {
-               hl_ctx_put(hpriv->ctx);
+               hl_ctx_put(ctx);
+               kfree(lnode);
                return rc;
        }
 
+       lnode->ctx = ctx;
+       lnode->vaddr = vma->vm_start;
+       lnode->size = block_size;
+       lnode->id = block_id;
+
+       mutex_lock(&ctx->hw_block_list_lock);
+       list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
+       mutex_unlock(&ctx->hw_block_list_lock);
+
        vma->vm_pgoff = block_id;
 
        return 0;
@@ -1574,7 +1619,7 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
 
        rc = sg_alloc_table_from_pages(userptr->sgt,
                                       userptr->pages,
-                                      npages, offset, size, GFP_ATOMIC);
+                                      npages, offset, size, GFP_KERNEL);
        if (rc < 0) {
                dev_err(hdev->dev, "failed to create SG table from pages\n");
                goto put_pages;
@@ -1624,11 +1669,7 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
                return -EINVAL;
        }
 
-       /*
-        * This function can be called also from data path, hence use atomic
-        * always as it is not a big allocation.
-        */
-       userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
+       userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);
        if (!userptr->sgt)
                return -ENOMEM;
 
@@ -2122,3 +2163,38 @@ void hl_vm_fini(struct hl_device *hdev)
 
        vm->init_done = false;
 }
+
+/**
+ * hl_hw_block_mem_init() - HW block memory initialization.
+ * @ctx: pointer to the habanalabs context structure.
+ *
+ * This function initializes the HW block virtual mapped addresses list and
+ * its lock.
+ */
+void hl_hw_block_mem_init(struct hl_ctx *ctx)
+{
+       mutex_init(&ctx->hw_block_list_lock);
+       INIT_LIST_HEAD(&ctx->hw_block_mem_list);
+}
+
+/**
+ * hl_hw_block_mem_fini() - HW block memory teardown.
+ * @ctx: pointer to the habanalabs context structure.
+ *
+ * This function clears the HW block virtual mapped addresses list and destroys
+ * its lock.
+ */
+void hl_hw_block_mem_fini(struct hl_ctx *ctx)
+{
+       struct hl_vm_hw_block_list_node *lnode, *tmp;
+
+       if (!list_empty(&ctx->hw_block_mem_list))
+               dev_crit(ctx->hdev->dev, "HW block mem list isn't empty\n");
+
+       list_for_each_entry_safe(lnode, tmp, &ctx->hw_block_mem_list, node) {
+               list_del(&lnode->node);
+               kfree(lnode);
+       }
+
+       mutex_destroy(&ctx->hw_block_list_lock);
+}
index 93c9e5f587e1a476bba17318e532da2e3ecc9bc8..b37189956b146604f5562426a78fb808f8dec281 100644 (file)
@@ -532,6 +532,8 @@ int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr)
        struct hl_mmu_hop_info hops;
        int rc;
 
+       memset(&hops, 0, sizeof(hops));
+
        rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops);
        if (rc)
                return rc;
@@ -589,6 +591,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
        switch (hdev->asic_type) {
        case ASIC_GOYA:
        case ASIC_GAUDI:
+       case ASIC_GAUDI_SEC:
                hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
                break;
        default:
index b799f9258fb014e5a46f36539c8ded2e9aa40825..e941b7eef346d4d6620db0e510fc45a649adfeb6 100644 (file)
@@ -85,6 +85,58 @@ static void hl_pci_bars_unmap(struct hl_device *hdev)
        pci_release_regions(pdev);
 }
 
+int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data)
+{
+       struct pci_dev *pdev = hdev->pdev;
+       ktime_t timeout;
+       u64 msec;
+       u32 val;
+
+       if (hdev->pldm)
+               msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC;
+       else
+               msec = HL_PCI_ELBI_TIMEOUT_MSEC;
+
+       /* Clear previous status */
+       pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
+
+       pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
+       pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL, 0);
+
+       timeout = ktime_add_ms(ktime_get(), msec);
+       for (;;) {
+               pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
+               if (val & PCI_CONFIG_ELBI_STS_MASK)
+                       break;
+               if (ktime_compare(ktime_get(), timeout) > 0) {
+                       pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
+                                               &val);
+                       break;
+               }
+
+               usleep_range(300, 500);
+       }
+
+       if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
+               pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
+
+               return 0;
+       }
+
+       if (val & PCI_CONFIG_ELBI_STS_ERR) {
+               dev_err(hdev->dev, "Error reading from ELBI\n");
+               return -EIO;
+       }
+
+       if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
+               dev_err(hdev->dev, "ELBI read didn't finish in time\n");
+               return -EIO;
+       }
+
+       dev_err(hdev->dev, "ELBI read has undefined bits in status\n");
+       return -EIO;
+}
+
 /**
  * hl_pci_elbi_write() - Write through the ELBI interface.
  * @hdev: Pointer to hl_device structure.
index 4366d8f9384290437e05fe23d06044f0bda9e50c..9fa61573a89de89e15d4f80e5a228bb66a890536 100644 (file)
@@ -9,12 +9,18 @@
 
 #include <linux/pci.h>
 
-long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
+long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
+                                                               bool curr)
 {
        struct cpucp_packet pkt;
+       u32 used_pll_idx;
        u64 result;
        int rc;
 
+       rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
+       if (rc)
+               return rc;
+
        memset(&pkt, 0, sizeof(pkt));
 
        if (curr)
@@ -23,7 +29,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
        else
                pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_GET <<
                                                CPUCP_PKT_CTL_OPCODE_SHIFT);
-       pkt.pll_index = cpu_to_le32(pll_index);
+       pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
 
        rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
                                                0, &result);
@@ -31,23 +37,29 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to get frequency of PLL %d, error %d\n",
-                       pll_index, rc);
+                       used_pll_idx, rc);
                return rc;
        }
 
        return (long) result;
 }
 
-void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
+void hl_set_frequency(struct hl_device *hdev, enum pll_index pll_index,
+                                                               u64 freq)
 {
        struct cpucp_packet pkt;
+       u32 used_pll_idx;
        int rc;
 
+       rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
+       if (rc)
+               return;
+
        memset(&pkt, 0, sizeof(pkt));
 
        pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_SET <<
                                        CPUCP_PKT_CTL_OPCODE_SHIFT);
-       pkt.pll_index = cpu_to_le32(pll_index);
+       pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
        pkt.value = cpu_to_le64(freq);
 
        rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -56,7 +68,7 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
        if (rc)
                dev_err(hdev->dev,
                        "Failed to set frequency to PLL %d, error %d\n",
-                       pll_index, rc);
+                       used_pll_idx, rc);
 }
 
 u64 hl_get_max_power(struct hl_device *hdev)
@@ -203,7 +215,7 @@ static ssize_t soft_reset_store(struct device *dev,
 
        dev_warn(hdev->dev, "Soft-Reset requested through sysfs\n");
 
-       hl_device_reset(hdev, false, false);
+       hl_device_reset(hdev, 0);
 
 out:
        return count;
@@ -226,7 +238,7 @@ static ssize_t hard_reset_store(struct device *dev,
 
        dev_warn(hdev->dev, "Hard-Reset requested through sysfs\n");
 
-       hl_device_reset(hdev, true, false);
+       hl_device_reset(hdev, HL_RESET_HARD);
 
 out:
        return count;
@@ -245,6 +257,9 @@ static ssize_t device_type_show(struct device *dev,
        case ASIC_GAUDI:
                str = "GAUDI";
                break;
+       case ASIC_GAUDI_SEC:
+               str = "GAUDI SEC";
+               break;
        default:
                dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
                                hdev->asic_type);
@@ -344,7 +359,7 @@ static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj,
                        struct bin_attribute *attr, char *buf, loff_t offset,
                        size_t max_size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct hl_device *hdev = dev_get_drvdata(dev);
        char *data;
        int rc;
index 9152242778f5e7495bb63688e26916612e393612..b751652f80a8ce043359bae5be7b18c98878ecda 100644 (file)
 
 #define GAUDI_PLL_MAX 10
 
+/*
+ * this enum kept here for compatibility with old FW (in which each asic has
+ * unique PLL numbering
+ */
+enum gaudi_pll_index {
+       GAUDI_CPU_PLL = 0,
+       GAUDI_PCI_PLL,
+       GAUDI_SRAM_PLL,
+       GAUDI_HBM_PLL,
+       GAUDI_NIC_PLL,
+       GAUDI_DMA_PLL,
+       GAUDI_MESH_PLL,
+       GAUDI_MME_PLL,
+       GAUDI_TPC_PLL,
+       GAUDI_IF_PLL,
+};
+
+static enum pll_index gaudi_pll_map[PLL_MAX] = {
+       [CPU_PLL] = GAUDI_CPU_PLL,
+       [PCI_PLL] = GAUDI_PCI_PLL,
+       [SRAM_PLL] = GAUDI_SRAM_PLL,
+       [HBM_PLL] = GAUDI_HBM_PLL,
+       [NIC_PLL] = GAUDI_NIC_PLL,
+       [DMA_PLL] = GAUDI_DMA_PLL,
+       [MESH_PLL] = GAUDI_MESH_PLL,
+       [MME_PLL] = GAUDI_MME_PLL,
+       [TPC_PLL] = GAUDI_TPC_PLL,
+       [IF_PLL] = GAUDI_IF_PLL,
+};
+
 static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
                "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
                "gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@@ -396,6 +426,19 @@ get_collective_mode(struct hl_device *hdev, u32 queue_id)
        return HL_COLLECTIVE_NOT_SUPPORTED;
 }
 
+static inline void set_default_power_values(struct hl_device *hdev)
+{
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+       if (hdev->card_type == cpucp_card_type_pmc) {
+               prop->max_power_default = MAX_POWER_DEFAULT_PMC;
+               prop->dc_power_default = DC_POWER_DEFAULT_PMC;
+       } else {
+               prop->max_power_default = MAX_POWER_DEFAULT_PCI;
+               prop->dc_power_default = DC_POWER_DEFAULT_PCI;
+       }
+}
+
 static int gaudi_get_fixed_properties(struct hl_device *hdev)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -507,7 +550,7 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
        prop->num_of_events = GAUDI_EVENT_SIZE;
        prop->tpc_enabled_mask = TPC_ENABLED_MASK;
 
-       prop->max_power_default = MAX_POWER_DEFAULT_PCI;
+       set_default_power_values(hdev);
 
        prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT;
        prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE;
@@ -532,8 +575,6 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
        for (i = 0 ; i < HL_MAX_DCORES ; i++)
                prop->first_available_cq[i] = USHRT_MAX;
 
-       /* disable fw security for now, set it in a later stage */
-       prop->fw_security_disabled = true;
        prop->fw_security_status_valid = false;
        prop->hard_reset_done_by_fw = false;
 
@@ -588,6 +629,11 @@ static int gaudi_init_iatu(struct hl_device *hdev)
        struct hl_outbound_pci_region outbound_region;
        int rc;
 
+       if (hdev->asic_prop.iatu_done_by_fw) {
+               hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+               return 0;
+       }
+
        /* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */
        inbound_region.mode = PCI_BAR_MATCH_MODE;
        inbound_region.bar = SRAM_BAR_ID;
@@ -632,6 +678,7 @@ static int gaudi_early_init(struct hl_device *hdev)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct pci_dev *pdev = hdev->pdev;
+       u32 fw_boot_status;
        int rc;
 
        rc = gaudi_get_fixed_properties(hdev);
@@ -665,6 +712,23 @@ static int gaudi_early_init(struct hl_device *hdev)
 
        prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
 
+       /* If FW security is enabled at this point it means no access to ELBI */
+       if (!hdev->asic_prop.fw_security_disabled) {
+               hdev->asic_prop.iatu_done_by_fw = true;
+               goto pci_init;
+       }
+
+       rc = hl_pci_elbi_read(hdev, CFG_BASE + mmCPU_BOOT_DEV_STS0,
+                               &fw_boot_status);
+       if (rc)
+               goto free_queue_props;
+
+       /* Check whether FW is configuring iATU */
+       if ((fw_boot_status & CPU_BOOT_DEV_STS0_ENABLED) &&
+                       (fw_boot_status & CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN))
+               hdev->asic_prop.iatu_done_by_fw = true;
+
+pci_init:
        rc = hl_pci_init(hdev);
        if (rc)
                goto free_queue_props;
@@ -1588,6 +1652,9 @@ static int gaudi_sw_init(struct hl_device *hdev)
 
        hdev->asic_specific = gaudi;
 
+       /* store legacy PLL map */
+       hdev->legacy_pll_map = gaudi_pll_map;
+
        /* Create DMA pool for small allocations */
        hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
                        &hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0);
@@ -1766,8 +1833,7 @@ static int gaudi_enable_msi(struct hl_device *hdev)
        if (gaudi->hw_cap_initialized & HW_CAP_MSI)
                return 0;
 
-       rc = pci_alloc_irq_vectors(hdev->pdev, 1, GAUDI_MSI_ENTRIES,
-                                       PCI_IRQ_MSI);
+       rc = pci_alloc_irq_vectors(hdev->pdev, 1, 1, PCI_IRQ_MSI);
        if (rc < 0) {
                dev_err(hdev->dev, "MSI: Failed to enable support %d\n", rc);
                return rc;
@@ -3701,7 +3767,7 @@ static int gaudi_init_cpu(struct hl_device *hdev)
        struct gaudi_device *gaudi = hdev->asic_specific;
        int rc;
 
-       if (!hdev->cpu_enable)
+       if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
                return 0;
 
        if (gaudi->hw_cap_initialized & HW_CAP_CPU)
@@ -4873,7 +4939,7 @@ static int gaudi_pin_memory_before_cs(struct hl_device *hdev,
                        parser->job_userptr_list, &userptr))
                goto already_pinned;
 
-       userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
+       userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
        if (!userptr)
                return -ENOMEM;
 
@@ -5684,18 +5750,26 @@ release_cb:
 static int gaudi_schedule_register_memset(struct hl_device *hdev,
                u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val)
 {
-       struct hl_ctx *ctx = hdev->compute_ctx;
+       struct hl_ctx *ctx;
        struct hl_pending_cb *pending_cb;
        struct packet_msg_long *pkt;
        u32 cb_size, ctl;
        struct hl_cb *cb;
-       int i;
+       int i, rc;
+
+       mutex_lock(&hdev->fpriv_list_lock);
+       ctx = hdev->compute_ctx;
 
        /* If no compute context available or context is going down
         * memset registers directly
         */
-       if (!ctx || kref_read(&ctx->refcount) == 0)
-               return gaudi_memset_registers(hdev, reg_base, num_regs, val);
+       if (!ctx || kref_read(&ctx->refcount) == 0) {
+               rc = gaudi_memset_registers(hdev, reg_base, num_regs, val);
+               mutex_unlock(&hdev->fpriv_list_lock);
+               return rc;
+       }
+
+       mutex_unlock(&hdev->fpriv_list_lock);
 
        cb_size = (sizeof(*pkt) * num_regs) +
                        sizeof(struct packet_msg_prot) * 2;
@@ -5911,13 +5985,16 @@ static void gaudi_restore_phase_topology(struct hl_device *hdev)
 
 }
 
-static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
+static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr,
+                       bool user_address, u32 *val)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct gaudi_device *gaudi = hdev->asic_specific;
-       u64 hbm_bar_addr;
+       u64 hbm_bar_addr, host_phys_end;
        int rc = 0;
 
+       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
        if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
 
                if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
@@ -5949,6 +6026,9 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
                }
                if (hbm_bar_addr == U64_MAX)
                        rc = -EIO;
+       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+                       user_address && !iommu_present(&pci_bus_type)) {
+               *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
        } else {
                rc = -EFAULT;
        }
@@ -5956,13 +6036,16 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
        return rc;
 }
 
-static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
+static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr,
+                       bool user_address, u32 val)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct gaudi_device *gaudi = hdev->asic_specific;
-       u64 hbm_bar_addr;
+       u64 hbm_bar_addr, host_phys_end;
        int rc = 0;
 
+       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
        if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
 
                if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
@@ -5994,6 +6077,9 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
                }
                if (hbm_bar_addr == U64_MAX)
                        rc = -EIO;
+       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+                       user_address && !iommu_present(&pci_bus_type)) {
+               *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
        } else {
                rc = -EFAULT;
        }
@@ -6001,13 +6087,16 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
        return rc;
 }
 
-static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
+static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr,
+                               bool user_address, u64 *val)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct gaudi_device *gaudi = hdev->asic_specific;
-       u64 hbm_bar_addr;
+       u64 hbm_bar_addr, host_phys_end;
        int rc = 0;
 
+       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
        if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
 
                if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
@@ -6043,6 +6132,9 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
                }
                if (hbm_bar_addr == U64_MAX)
                        rc = -EIO;
+       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+                       user_address && !iommu_present(&pci_bus_type)) {
+               *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
        } else {
                rc = -EFAULT;
        }
@@ -6050,13 +6142,16 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
        return rc;
 }
 
-static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
+static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr,
+                               bool user_address, u64 val)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct gaudi_device *gaudi = hdev->asic_specific;
-       u64 hbm_bar_addr;
+       u64 hbm_bar_addr, host_phys_end;
        int rc = 0;
 
+       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
        if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
 
                if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
@@ -6091,6 +6186,9 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
                }
                if (hbm_bar_addr == U64_MAX)
                        rc = -EIO;
+       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+                       user_address && !iommu_present(&pci_bus_type)) {
+               *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
        } else {
                rc = -EFAULT;
        }
@@ -6098,6 +6196,164 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
        return rc;
 }
 
+static int gaudi_dma_core_transfer(struct hl_device *hdev, int dma_id, u64 addr,
+                                       u32 size_to_dma, dma_addr_t dma_addr)
+{
+       u32 err_cause, val;
+       u64 dma_offset;
+       int rc;
+
+       dma_offset = dma_id * DMA_CORE_OFFSET;
+
+       WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, lower_32_bits(addr));
+       WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, upper_32_bits(addr));
+       WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset, lower_32_bits(dma_addr));
+       WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset, upper_32_bits(dma_addr));
+       WREG32(mmDMA0_CORE_DST_TSIZE_0 + dma_offset, size_to_dma);
+       WREG32(mmDMA0_CORE_COMMIT + dma_offset,
+                       (1 << DMA0_CORE_COMMIT_LIN_SHIFT));
+
+       rc = hl_poll_timeout(
+               hdev,
+               mmDMA0_CORE_STS0 + dma_offset,
+               val,
+               ((val & DMA0_CORE_STS0_BUSY_MASK) == 0),
+               0,
+               1000000);
+
+       if (rc) {
+               dev_err(hdev->dev,
+                       "DMA %d timed-out during reading of 0x%llx\n",
+                       dma_id, addr);
+               return -EIO;
+       }
+
+       /* Verify DMA is OK */
+       err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset);
+       if (err_cause) {
+               dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause);
+               dev_dbg(hdev->dev,
+                       "Clearing DMA0 engine from errors (cause 0x%x)\n",
+                       err_cause);
+               WREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset, err_cause);
+
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int gaudi_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
+                               void *blob_addr)
+{
+       u32 dma_core_sts0, err_cause, cfg1, size_left, pos, size_to_dma;
+       struct gaudi_device *gaudi = hdev->asic_specific;
+       u64 dma_offset, qm_offset;
+       dma_addr_t dma_addr;
+       void *kernel_addr;
+       bool is_eng_idle;
+       int rc = 0, dma_id;
+
+       kernel_addr = hdev->asic_funcs->asic_dma_alloc_coherent(
+                                               hdev, SZ_2M,
+                                               &dma_addr,
+                                               GFP_KERNEL | __GFP_ZERO);
+
+       if (!kernel_addr)
+               return -ENOMEM;
+
+       mutex_lock(&gaudi->clk_gate_mutex);
+
+       hdev->asic_funcs->disable_clock_gating(hdev);
+
+       hdev->asic_funcs->hw_queues_lock(hdev);
+
+       dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_1];
+       dma_offset = dma_id * DMA_CORE_OFFSET;
+       qm_offset = dma_id * DMA_QMAN_OFFSET;
+       dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset);
+       is_eng_idle = IS_DMA_IDLE(dma_core_sts0);
+
+       if (!is_eng_idle) {
+               dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_2];
+               dma_offset = dma_id * DMA_CORE_OFFSET;
+               qm_offset = dma_id * DMA_QMAN_OFFSET;
+               dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset);
+               is_eng_idle = IS_DMA_IDLE(dma_core_sts0);
+
+               if (!is_eng_idle) {
+                       dev_err_ratelimited(hdev->dev,
+                               "Can't read via DMA because it is BUSY\n");
+                       rc = -EAGAIN;
+                       goto out;
+               }
+       }
+
+       cfg1 = RREG32(mmDMA0_QM_GLBL_CFG1 + qm_offset);
+       WREG32(mmDMA0_QM_GLBL_CFG1 + qm_offset,
+                       0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+
+       /* TODO: remove this by mapping the DMA temporary buffer to the MMU
+        * using the compute ctx ASID, if exists. If not, use the kernel ctx
+        * ASID
+        */
+       WREG32_OR(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_VAL_SHIFT));
+
+       /* Verify DMA is OK */
+       err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset);
+       if (err_cause) {
+               dev_dbg(hdev->dev,
+                       "Clearing DMA0 engine from errors (cause 0x%x)\n",
+                       err_cause);
+               WREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset, err_cause);
+       }
+
+       pos = 0;
+       size_left = size;
+       size_to_dma = SZ_2M;
+
+       while (size_left > 0) {
+
+               if (size_left < SZ_2M)
+                       size_to_dma = size_left;
+
+               rc = gaudi_dma_core_transfer(hdev, dma_id, addr, size_to_dma,
+                                               dma_addr);
+               if (rc)
+                       break;
+
+               memcpy(blob_addr + pos, kernel_addr, size_to_dma);
+
+               if (size_left <= SZ_2M)
+                       break;
+
+               pos += SZ_2M;
+               addr += SZ_2M;
+               size_left -= SZ_2M;
+       }
+
+       /* TODO: remove this by mapping the DMA temporary buffer to the MMU
+        * using the compute ctx ASID, if exists. If not, use the kernel ctx
+        * ASID
+        */
+       WREG32_AND(mmDMA0_CORE_PROT + dma_offset,
+                       ~BIT(DMA0_CORE_PROT_VAL_SHIFT));
+
+       WREG32(mmDMA0_QM_GLBL_CFG1 + qm_offset, cfg1);
+
+out:
+       hdev->asic_funcs->hw_queues_unlock(hdev);
+
+       hdev->asic_funcs->set_clock_gating(hdev);
+
+       mutex_unlock(&gaudi->clk_gate_mutex);
+
+       hdev->asic_funcs->asic_dma_free_coherent(hdev, SZ_2M, kernel_addr,
+                                               dma_addr);
+
+       return rc;
+}
+
 static u64 gaudi_read_pte(struct hl_device *hdev, u64 addr)
 {
        struct gaudi_device *gaudi = hdev->asic_specific;
@@ -6851,7 +7107,8 @@ static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
                }
 
                /* Write 1 clear errors */
-               WREG32(glbl_sts_addr + 4 * i, glbl_sts_clr_val);
+               if (!hdev->stop_on_err)
+                       WREG32(glbl_sts_addr + 4 * i, glbl_sts_clr_val);
        }
 
        arb_err_val = RREG32(arb_err_addr);
@@ -7097,6 +7354,15 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
        }
 }
 
+static void gaudi_print_out_of_sync_info(struct hl_device *hdev,
+                                       struct cpucp_pkt_sync_err *sync_err)
+{
+       struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
+
+       dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n",
+                       sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci));
+}
+
 static int gaudi_soft_reset_late_init(struct hl_device *hdev)
 {
        struct gaudi_device *gaudi = hdev->asic_specific;
@@ -7371,18 +7637,14 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
        case GAUDI_EVENT_MMU_DERR:
                gaudi_print_irq_info(hdev, event_type, true);
                gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
-               if (hdev->hard_reset_on_fw_events)
-                       hl_device_reset(hdev, true, false);
-               break;
+               goto reset_device;
 
        case GAUDI_EVENT_GIC500:
        case GAUDI_EVENT_AXI_ECC:
        case GAUDI_EVENT_L2_RAM_ECC:
        case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
                gaudi_print_irq_info(hdev, event_type, false);
-               if (hdev->hard_reset_on_fw_events)
-                       hl_device_reset(hdev, true, false);
-               break;
+               goto reset_device;
 
        case GAUDI_EVENT_HBM0_SPI_0:
        case GAUDI_EVENT_HBM1_SPI_0:
@@ -7392,9 +7654,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
                gaudi_hbm_read_interrupts(hdev,
                                gaudi_hbm_event_to_dev(event_type),
                                &eq_entry->hbm_ecc_data);
-               if (hdev->hard_reset_on_fw_events)
-                       hl_device_reset(hdev, true, false);
-               break;
+               goto reset_device;
 
        case GAUDI_EVENT_HBM0_SPI_1:
        case GAUDI_EVENT_HBM1_SPI_1:
@@ -7423,8 +7683,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
                        dev_err(hdev->dev, "hard reset required due to %s\n",
                                gaudi_irq_map_table[event_type].name);
 
-                       if (hdev->hard_reset_on_fw_events)
-                               hl_device_reset(hdev, true, false);
+                       goto reset_device;
                } else {
                        hl_fw_unmask_irq(hdev, event_type);
                }
@@ -7446,8 +7705,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
                        dev_err(hdev->dev, "hard reset required due to %s\n",
                                gaudi_irq_map_table[event_type].name);
 
-                       if (hdev->hard_reset_on_fw_events)
-                               hl_device_reset(hdev, true, false);
+                       goto reset_device;
                } else {
                        hl_fw_unmask_irq(hdev, event_type);
                }
@@ -7516,9 +7774,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
 
        case GAUDI_EVENT_RAZWI_OR_ADC_SW:
                gaudi_print_irq_info(hdev, event_type, true);
-               if (hdev->hard_reset_on_fw_events)
-                       hl_device_reset(hdev, true, false);
-               break;
+               goto reset_device;
 
        case GAUDI_EVENT_TPC0_BMON_SPMU:
        case GAUDI_EVENT_TPC1_BMON_SPMU:
@@ -7552,11 +7808,28 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
                        event_type, cause);
                break;
 
+       case GAUDI_EVENT_DEV_RESET_REQ:
+               gaudi_print_irq_info(hdev, event_type, false);
+               goto reset_device;
+
+       case GAUDI_EVENT_PKT_QUEUE_OUT_SYNC:
+               gaudi_print_irq_info(hdev, event_type, false);
+               gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
+               goto reset_device;
+
        default:
                dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
                                event_type);
                break;
        }
+
+       return;
+
+reset_device:
+       if (hdev->hard_reset_on_fw_events)
+               hl_device_reset(hdev, HL_RESET_HARD);
+       else
+               hl_fw_unmask_irq(hdev, event_type);
 }
 
 static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate,
@@ -7607,7 +7880,7 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
        if (rc) {
                dev_err_ratelimited(hdev->dev,
                                        "MMU cache invalidation timeout\n");
-               hl_device_reset(hdev, true, false);
+               hl_device_reset(hdev, HL_RESET_HARD);
        }
 
        return rc;
@@ -7656,7 +7929,7 @@ static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
        if (rc) {
                dev_err_ratelimited(hdev->dev,
                                        "MMU cache invalidation timeout\n");
-               hl_device_reset(hdev, true, false);
+               hl_device_reset(hdev, HL_RESET_HARD);
        }
 
        return rc;
@@ -7714,7 +7987,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
        if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
                return 0;
 
-       rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
+       rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
        if (rc)
                return rc;
 
@@ -7724,10 +7997,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
 
        hdev->card_type = le32_to_cpu(hdev->asic_prop.cpucp_info.card_type);
 
-       if (hdev->card_type == cpucp_card_type_pci)
-               prop->max_power_default = MAX_POWER_DEFAULT_PCI;
-       else if (hdev->card_type == cpucp_card_type_pmc)
-               prop->max_power_default = MAX_POWER_DEFAULT_PMC;
+       set_default_power_values(hdev);
 
        hdev->max_power = prop->max_power_default;
 
@@ -8549,6 +8819,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
        .debugfs_write32 = gaudi_debugfs_write32,
        .debugfs_read64 = gaudi_debugfs_read64,
        .debugfs_write64 = gaudi_debugfs_write64,
+       .debugfs_read_dma = gaudi_debugfs_read_dma,
        .add_device_attr = gaudi_add_device_attr,
        .handle_eqe = gaudi_handle_eqe,
        .set_pll_profile = gaudi_set_pll_profile,
index 50bb4ad570fdad90e79ab400ca0a84f06c14d49f..5929be81ec230d82feb5b1bdd0506c722c07fb28 100644 (file)
@@ -47,6 +47,9 @@
 #define MAX_POWER_DEFAULT_PCI          200000          /* 200W */
 #define MAX_POWER_DEFAULT_PMC          350000          /* 350W */
 
+#define DC_POWER_DEFAULT_PCI           60000           /* 60W */
+#define DC_POWER_DEFAULT_PMC           60000           /* 60W */
+
 #define GAUDI_CPU_TIMEOUT_USEC         30000000        /* 30s */
 
 #define TPC_ENABLED_MASK               0xFF
index 7085f45814ae4d9564b81e206b9dc5e09f7c09c2..9a706c5980ef147a6d7819bcee134742ef61e318 100644 (file)
@@ -9556,7 +9556,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
        mask = 1U << ((mmTPC0_CFG_PROT & 0x7F) >> 2);
        mask |= 1U << ((mmTPC0_CFG_VFLAGS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC0_CFG_SFLAGS & 0x7F) >> 2);
-       mask |= 1U << ((mmTPC0_CFG_STATUS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
        mask |= 1U << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
        mask |= 1U << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
@@ -10011,7 +10010,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
        mask = 1U << ((mmTPC1_CFG_PROT & 0x7F) >> 2);
        mask |= 1U << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2);
-       mask |= 1U << ((mmTPC1_CFG_STATUS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
        mask |= 1U << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
        mask |= 1U << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2);
@@ -10465,7 +10463,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
        mask = 1U << ((mmTPC2_CFG_PROT & 0x7F) >> 2);
        mask |= 1U << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2);
-       mask |= 1U << ((mmTPC2_CFG_STATUS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
        mask |= 1U << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
        mask |= 1U << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2);
@@ -10919,7 +10916,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
        mask = 1U << ((mmTPC3_CFG_PROT & 0x7F) >> 2);
        mask |= 1U << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2);
-       mask |= 1U << ((mmTPC3_CFG_STATUS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
        mask |= 1U << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
        mask |= 1U << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
@@ -11373,7 +11369,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
        mask = 1U << ((mmTPC4_CFG_PROT & 0x7F) >> 2);
        mask |= 1U << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2);
-       mask |= 1U << ((mmTPC4_CFG_STATUS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
        mask |= 1U << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
        mask |= 1U << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
@@ -11827,7 +11822,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
        mask = 1U << ((mmTPC5_CFG_PROT & 0x7F) >> 2);
        mask |= 1U << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2);
-       mask |= 1U << ((mmTPC5_CFG_STATUS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
        mask |= 1U << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
        mask |= 1U << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2);
@@ -12283,7 +12277,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
        mask = 1U << ((mmTPC6_CFG_PROT & 0x7F) >> 2);
        mask |= 1U << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2);
-       mask |= 1U << ((mmTPC6_CFG_STATUS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
        mask |= 1U << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
        mask |= 1U << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2);
@@ -12739,7 +12732,6 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
        mask = 1U << ((mmTPC7_CFG_PROT & 0x7F) >> 2);
        mask |= 1U << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2);
-       mask |= 1U << ((mmTPC7_CFG_STATUS & 0x7F) >> 2);
        mask |= 1U << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
        mask |= 1U << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
        mask |= 1U << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2);
index ed566c52ccaa09313f17ab72b2070d42ca11bc18..e27338f4aad2f68afebc1d736ea2014d7e5231e7 100644 (file)
 #define IS_MME_IDLE(mme_arch_sts) \
        (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
 
+/*
+ * this enum kept here for compatibility with old FW (in which each asic has
+ * unique PLL numbering
+ */
+enum goya_pll_index {
+       GOYA_CPU_PLL = 0,
+       GOYA_IC_PLL,
+       GOYA_MC_PLL,
+       GOYA_MME_PLL,
+       GOYA_PCI_PLL,
+       GOYA_EMMC_PLL,
+       GOYA_TPC_PLL,
+};
+
+static enum pll_index goya_pll_map[PLL_MAX] = {
+       [CPU_PLL] = GOYA_CPU_PLL,
+       [IC_PLL] = GOYA_IC_PLL,
+       [MC_PLL] = GOYA_MC_PLL,
+       [MME_PLL] = GOYA_MME_PLL,
+       [PCI_PLL] = GOYA_PCI_PLL,
+       [EMMC_PLL] = GOYA_EMMC_PLL,
+       [TPC_PLL] = GOYA_TPC_PLL,
+};
 
 static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
                "goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
@@ -446,6 +469,7 @@ int goya_get_fixed_properties(struct hl_device *hdev)
        prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
        prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
        prop->max_power_default = MAX_POWER_DEFAULT;
+       prop->dc_power_default = DC_POWER_DEFAULT;
        prop->tpc_enabled_mask = TPC_ENABLED_MASK;
        prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
        prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
@@ -460,8 +484,6 @@ int goya_get_fixed_properties(struct hl_device *hdev)
        for (i = 0 ; i < HL_MAX_DCORES ; i++)
                prop->first_available_cq[i] = USHRT_MAX;
 
-       /* disable fw security for now, set it in a later stage */
-       prop->fw_security_disabled = true;
        prop->fw_security_status_valid = false;
        prop->hard_reset_done_by_fw = false;
 
@@ -533,6 +555,11 @@ static int goya_init_iatu(struct hl_device *hdev)
        struct hl_outbound_pci_region outbound_region;
        int rc;
 
+       if (hdev->asic_prop.iatu_done_by_fw) {
+               hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+               return 0;
+       }
+
        /* Inbound Region 0 - Bar 0 - Point to SRAM and CFG */
        inbound_region.mode = PCI_BAR_MATCH_MODE;
        inbound_region.bar = SRAM_CFG_BAR_ID;
@@ -580,7 +607,7 @@ static int goya_early_init(struct hl_device *hdev)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct pci_dev *pdev = hdev->pdev;
-       u32 val;
+       u32 fw_boot_status, val;
        int rc;
 
        rc = goya_get_fixed_properties(hdev);
@@ -614,6 +641,23 @@ static int goya_early_init(struct hl_device *hdev)
 
        prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
 
+       /* If FW security is enabled at this point it means no access to ELBI */
+       if (!hdev->asic_prop.fw_security_disabled) {
+               hdev->asic_prop.iatu_done_by_fw = true;
+               goto pci_init;
+       }
+
+       rc = hl_pci_elbi_read(hdev, CFG_BASE + mmCPU_BOOT_DEV_STS0,
+                               &fw_boot_status);
+       if (rc)
+               goto free_queue_props;
+
+       /* Check whether FW is configuring iATU */
+       if ((fw_boot_status & CPU_BOOT_DEV_STS0_ENABLED) &&
+                       (fw_boot_status & CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN))
+               hdev->asic_prop.iatu_done_by_fw = true;
+
+pci_init:
        rc = hl_pci_init(hdev);
        if (rc)
                goto free_queue_props;
@@ -853,6 +897,9 @@ static int goya_sw_init(struct hl_device *hdev)
 
        hdev->asic_specific = goya;
 
+       /* store legacy PLL map */
+       hdev->legacy_pll_map = goya_pll_map;
+
        /* Create DMA pool for small allocations */
        hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
                        &hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
@@ -2429,7 +2476,7 @@ static int goya_init_cpu(struct hl_device *hdev)
        struct goya_device *goya = hdev->asic_specific;
        int rc;
 
-       if (!hdev->cpu_enable)
+       if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
                return 0;
 
        if (goya->hw_cap_initialized & HW_CAP_CPU)
@@ -3221,7 +3268,7 @@ static int goya_pin_memory_before_cs(struct hl_device *hdev,
                        parser->job_userptr_list, &userptr))
                goto already_pinned;
 
-       userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
+       userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
        if (!userptr)
                return -ENOMEM;
 
@@ -4101,12 +4148,15 @@ static void goya_clear_sm_regs(struct hl_device *hdev)
  * lead to undefined behavior and therefore, should be done with extreme care
  *
  */
-static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
+static int goya_debugfs_read32(struct hl_device *hdev, u64 addr,
+                       bool user_address, u32 *val)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u64 ddr_bar_addr;
+       u64 ddr_bar_addr, host_phys_end;
        int rc = 0;
 
+       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
        if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
                *val = RREG32(addr - CFG_BASE);
 
@@ -4132,6 +4182,10 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
                if (ddr_bar_addr == U64_MAX)
                        rc = -EIO;
 
+       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+                       user_address && !iommu_present(&pci_bus_type)) {
+               *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
+
        } else {
                rc = -EFAULT;
        }
@@ -4154,12 +4208,15 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
  * lead to undefined behavior and therefore, should be done with extreme care
  *
  */
-static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
+static int goya_debugfs_write32(struct hl_device *hdev, u64 addr,
+                       bool user_address, u32 val)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u64 ddr_bar_addr;
+       u64 ddr_bar_addr, host_phys_end;
        int rc = 0;
 
+       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
        if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
                WREG32(addr - CFG_BASE, val);
 
@@ -4185,6 +4242,10 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
                if (ddr_bar_addr == U64_MAX)
                        rc = -EIO;
 
+       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+                       user_address && !iommu_present(&pci_bus_type)) {
+               *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
+
        } else {
                rc = -EFAULT;
        }
@@ -4192,12 +4253,15 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
        return rc;
 }
 
-static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
+static int goya_debugfs_read64(struct hl_device *hdev, u64 addr,
+                       bool user_address, u64 *val)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u64 ddr_bar_addr;
+       u64 ddr_bar_addr, host_phys_end;
        int rc = 0;
 
+       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
        if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
                u32 val_l = RREG32(addr - CFG_BASE);
                u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
@@ -4227,6 +4291,10 @@ static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
                if (ddr_bar_addr == U64_MAX)
                        rc = -EIO;
 
+       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+                       user_address && !iommu_present(&pci_bus_type)) {
+               *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
+
        } else {
                rc = -EFAULT;
        }
@@ -4234,12 +4302,15 @@ static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
        return rc;
 }
 
-static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
+static int goya_debugfs_write64(struct hl_device *hdev, u64 addr,
+                               bool user_address, u64 val)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u64 ddr_bar_addr;
+       u64 ddr_bar_addr, host_phys_end;
        int rc = 0;
 
+       host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
+
        if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
                WREG32(addr - CFG_BASE, lower_32_bits(val));
                WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));
@@ -4267,6 +4338,10 @@ static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
                if (ddr_bar_addr == U64_MAX)
                        rc = -EIO;
 
+       } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
+                       user_address && !iommu_present(&pci_bus_type)) {
+               *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
+
        } else {
                rc = -EFAULT;
        }
@@ -4274,6 +4349,13 @@ static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
        return rc;
 }
 
+static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
+                               void *blob_addr)
+{
+       dev_err(hdev->dev, "Reading via DMA is unimplemented yet\n");
+       return -EPERM;
+}
+
 static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
 {
        struct goya_device *goya = hdev->asic_specific;
@@ -4401,6 +4483,8 @@ static const char *_goya_get_event_desc(u16 event_type)
                return "THERMAL_ENV_S";
        case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
                return "THERMAL_ENV_E";
+       case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
+               return "QUEUE_OUT_OF_SYNC";
        default:
                return "N/A";
        }
@@ -4483,6 +4567,9 @@ static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
                index = event_type - GOYA_ASYNC_EVENT_ID_DMA_BM_CH0;
                snprintf(desc, size, _goya_get_event_desc(event_type), index);
                break;
+       case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
+               snprintf(desc, size, _goya_get_event_desc(event_type));
+               break;
        default:
                snprintf(desc, size, _goya_get_event_desc(event_type));
                break;
@@ -4534,6 +4621,15 @@ static void goya_print_mmu_error_info(struct hl_device *hdev)
        }
 }
 
+static void goya_print_out_of_sync_info(struct hl_device *hdev,
+                                       struct cpucp_pkt_sync_err *sync_err)
+{
+       struct hl_hw_queue *q = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
+
+       dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n",
+                       sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci));
+}
+
 static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
                                bool razwi)
 {
@@ -4698,7 +4794,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
        case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
                goya_print_irq_info(hdev, event_type, false);
                if (hdev->hard_reset_on_fw_events)
-                       hl_device_reset(hdev, true, false);
+                       hl_device_reset(hdev, HL_RESET_HARD);
                break;
 
        case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
@@ -4754,6 +4850,15 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
                goya_unmask_irq(hdev, event_type);
                break;
 
+       case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
+               goya_print_irq_info(hdev, event_type, false);
+               goya_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
+               if (hdev->hard_reset_on_fw_events)
+                       hl_device_reset(hdev, HL_RESET_HARD);
+               else
+                       hl_fw_unmask_irq(hdev, event_type);
+               break;
+
        default:
                dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
                                event_type);
@@ -5083,7 +5188,7 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
        if (rc) {
                dev_err_ratelimited(hdev->dev,
                                        "MMU cache invalidation timeout\n");
-               hl_device_reset(hdev, true, false);
+               hl_device_reset(hdev, HL_RESET_HARD);
        }
 
        return rc;
@@ -5134,7 +5239,7 @@ static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
        if (rc) {
                dev_err_ratelimited(hdev->dev,
                                        "MMU cache invalidation timeout\n");
-               hl_device_reset(hdev, true, false);
+               hl_device_reset(hdev, HL_RESET_HARD);
        }
 
        return rc;
@@ -5160,7 +5265,7 @@ int goya_cpucp_info_get(struct hl_device *hdev)
        if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
                return 0;
 
-       rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
+       rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
        if (rc)
                return rc;
 
@@ -5443,6 +5548,7 @@ static const struct hl_asic_funcs goya_funcs = {
        .debugfs_write32 = goya_debugfs_write32,
        .debugfs_read64 = goya_debugfs_read64,
        .debugfs_write64 = goya_debugfs_write64,
+       .debugfs_read_dma = goya_debugfs_read_dma,
        .add_device_attr = goya_add_device_attr,
        .handle_eqe = goya_handle_eqe,
        .set_pll_profile = goya_set_pll_profile,
index 23fe099ed218c73439b9ff665aa8e96e1a00dd5a..ef8c6c8b5c8dc780077850fd460a76ab359de395 100644 (file)
@@ -49,6 +49,8 @@
 
 #define MAX_POWER_DEFAULT              200000          /* 200W */
 
+#define DC_POWER_DEFAULT               20000           /* 20W */
+
 #define DRAM_PHYS_DEFAULT_SIZE         0x100000000ull  /* 4GB */
 
 #define GOYA_DEFAULT_CARD_NAME         "HL1000"
index b77c1c16c32cf49acbc0450ef948bc88a49274a3..27cd0ba99aa3381e4ee9c07906cf425658eb7a1c 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/types.h>
 #include <linux/if_ether.h>
 
+#include "hl_boot_if.h"
+
 #define NUM_HBM_PSEUDO_CH                              2
 #define NUM_HBM_CH_PER_DEV                             8
 #define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_SHIFT            0
 #define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT            6
 #define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK             0x000007C0
 
+#define PLL_MAP_MAX_BITS       128
+#define PLL_MAP_LEN            (PLL_MAP_MAX_BITS / 8)
+
+/*
+ * info of the pkt queue pointers in the first async occurrence
+ */
+struct cpucp_pkt_sync_err {
+       __le32 pi;
+       __le32 ci;
+};
+
 struct hl_eq_hbm_ecc_data {
        /* SERR counter */
        __le32 sec_cnt;
@@ -77,6 +90,7 @@ struct hl_eq_entry {
                struct hl_eq_ecc_data ecc_data;
                struct hl_eq_hbm_ecc_data hbm_ecc_data;
                struct hl_eq_sm_sei_data sm_sei_data;
+               struct cpucp_pkt_sync_err pkt_sync_err;
                __le64 data[7];
        };
 };
@@ -287,6 +301,30 @@ enum pq_init_status {
  *       The result is composed of 4 outputs, each is 16-bit
  *       frequency in MHz.
  *
+ * CPUCP_PACKET_POWER_GET
+ *       Fetch the present power consumption of the device (Current * Voltage).
+ *
+ * CPUCP_PACKET_NIC_PFC_SET -
+ *       Enable/Disable the NIC PFC feature. The packet's arguments specify the
+ *       NIC port, relevant lanes to configure and one bit indication for
+ *       enable/disable.
+ *
+ * CPUCP_PACKET_NIC_FAULT_GET -
+ *       Fetch the current indication for local/remote faults from the NIC MAC.
+ *       The result is 32-bit value of the relevant register.
+ *
+ * CPUCP_PACKET_NIC_LPBK_SET -
+ *       Enable/Disable the MAC loopback feature. The packet's arguments specify
+ *       the NIC port, relevant lanes to configure and one bit indication for
+ *       enable/disable.
+ *
+ * CPUCP_PACKET_NIC_MAC_INIT -
+ *       Configure the NIC MAC channels. The packet's arguments specify the
+ *       NIC port and the speed.
+ *
+ * CPUCP_PACKET_MSI_INFO_SET -
+ *       set the index number for each supported msi type going from
+ *       host to device
  */
 
 enum cpucp_packet_id {
@@ -320,6 +358,13 @@ enum cpucp_packet_id {
        CPUCP_PACKET_PCIE_REPLAY_CNT_GET,       /* internal */
        CPUCP_PACKET_TOTAL_ENERGY_GET,          /* internal */
        CPUCP_PACKET_PLL_INFO_GET,              /* internal */
+       CPUCP_PACKET_NIC_STATUS,                /* internal */
+       CPUCP_PACKET_POWER_GET,                 /* internal */
+       CPUCP_PACKET_NIC_PFC_SET,               /* internal */
+       CPUCP_PACKET_NIC_FAULT_GET,             /* internal */
+       CPUCP_PACKET_NIC_LPBK_SET,              /* internal */
+       CPUCP_PACKET_NIC_MAC_CFG,               /* internal */
+       CPUCP_PACKET_MSI_INFO_SET,              /* internal */
 };
 
 #define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -391,6 +436,12 @@ struct cpucp_unmask_irq_arr_packet {
        __le32 irqs[0];
 };
 
+struct cpucp_array_data_packet {
+       struct cpucp_packet cpucp_pkt;
+       __le32 length;
+       __le32 data[0];
+};
+
 enum cpucp_packet_rc {
        cpucp_packet_success,
        cpucp_packet_invalid,
@@ -459,6 +510,51 @@ enum cpucp_pll_type_attributes {
        cpucp_pll_pci,
 };
 
+/*
+ * MSI type enumeration table for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations.
+ * at the end of the table (before CPUCP_NUM_OF_MSI_TYPES).
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum cpucp_msi_type {
+       CPUCP_EVENT_QUEUE_MSI_TYPE,
+       CPUCP_NIC_PORT1_MSI_TYPE,
+       CPUCP_NIC_PORT3_MSI_TYPE,
+       CPUCP_NIC_PORT5_MSI_TYPE,
+       CPUCP_NIC_PORT7_MSI_TYPE,
+       CPUCP_NIC_PORT9_MSI_TYPE,
+       CPUCP_NUM_OF_MSI_TYPES
+};
+
+/*
+ * PLL enumeration table used for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations.
+ * at the end of the table.
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum pll_index {
+       CPU_PLL = 0,
+       PCI_PLL = 1,
+       NIC_PLL = 2,
+       DMA_PLL = 3,
+       MESH_PLL = 4,
+       MME_PLL = 5,
+       TPC_PLL = 6,
+       IF_PLL = 7,
+       SRAM_PLL = 8,
+       NS_PLL = 9,
+       HBM_PLL = 10,
+       MSS_PLL = 11,
+       DDR_PLL = 12,
+       VID_PLL = 13,
+       BANK_PLL = 14,
+       MMU_PLL = 15,
+       IC_PLL = 16,
+       MC_PLL = 17,
+       EMMC_PLL = 18,
+       PLL_MAX
+};
+
 /* Event Queue Packets */
 
 struct eq_generic_event {
@@ -470,7 +566,6 @@ struct eq_generic_event {
  */
 
 #define CARD_NAME_MAX_LEN              16
-#define VERSION_MAX_LEN                        128
 #define CPUCP_MAX_SENSORS              128
 #define CPUCP_MAX_NICS                 128
 #define CPUCP_LANES_PER_NIC            4
@@ -533,6 +628,7 @@ struct cpucp_security_info {
  * @dram_size: available DRAM size.
  * @card_name: card name that will be displayed in HWMON subsystem on the host
  * @sec_info: security information
+ * @pll_map: Bit map of supported PLLs for current ASIC version.
  */
 struct cpucp_info {
        struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
@@ -554,6 +650,7 @@ struct cpucp_info {
        __u8 pad[7];
        struct cpucp_security_info sec_info;
        __le32 reserved6;
+       __u8 pll_map[PLL_MAP_LEN];
 };
 
 struct cpucp_mac_addr {
index e87f5a98e19391570977c5d7bcba18eb249551fe..e0a259e0495cfad294bfe6a0ad87f1b6fc7795d2 100644 (file)
@@ -13,6 +13,8 @@
 
 #define BOOT_FIT_SRAM_OFFSET           0x200000
 
+#define VERSION_MAX_LEN                        128
+
 /*
  * CPU error bits in BOOT_ERROR registers
  *
@@ -73,6 +75,9 @@
  * CPU_BOOT_ERR0_PLL_FAIL              PLL settings failed, meaning that one
  *                                     of the PLLs remains in REF_CLK
  *
+ * CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL  Device is unusable and customer support
+ *                                     should be contacted.
+ *
  * CPU_BOOT_ERR0_ENABLED               Error registers enabled.
  *                                     This is a main indication that the
  *                                     running FW populates the error
@@ -92,6 +97,7 @@
 #define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL         (1 << 10)
 #define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL         (1 << 11)
 #define CPU_BOOT_ERR0_PLL_FAIL                 (1 << 12)
+#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL     (1 << 13)
 #define CPU_BOOT_ERR0_ENABLED                  (1 << 31)
 
 /*
  *                                     is set to the PI counter.
  *                                     Initialized in: linux
  *
+ * CPU_BOOT_DEV_STS0_FW_LD_COM_EN      Flexible FW loading communication
+ *                                     protocol is enabled.
+ *                                     Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN   FW iATU configuration is enabled.
+ *                                     This bit if set, means the iATU has been
+ *                                     configured and is ready for use.
+ *                                     Initialized in: ppboot
+ *
+ * CPU_BOOT_DEV_STS0_DYN_PLL_EN                Dynamic PLL configuration is enabled.
+ *                                     FW sends to host a bitmap of supported
+ *                                     PLLs.
+ *                                     Initialized in: linux
+ *
  * CPU_BOOT_DEV_STS0_ENABLED           Device status register enabled.
  *                                     This is a main indication that the
  *                                     running FW populates the device status
 #define CPU_BOOT_DEV_STS0_CLK_GATE_EN                  (1 << 13)
 #define CPU_BOOT_DEV_STS0_HBM_ECC_EN                   (1 << 14)
 #define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN                        (1 << 15)
+#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN                 (1 << 16)
+#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN              (1 << 17)
+#define CPU_BOOT_DEV_STS0_DYN_PLL_EN                   (1 << 19)
 #define CPU_BOOT_DEV_STS0_ENABLED                      (1 << 31)
 
 enum cpu_boot_status {
@@ -230,6 +253,7 @@ enum kmd_msg {
        KMD_MSG_SKIP_BMC,
        RESERVED,
        KMD_MSG_RST_DEV,
+       KMD_MSG_LAST
 };
 
 enum cpu_msg_status {
@@ -238,4 +262,199 @@ enum cpu_msg_status {
        CPU_MSG_ERR,
 };
 
+/* communication registers mapping - consider ABI when changing */
+struct cpu_dyn_regs {
+       uint32_t cpu_pq_base_addr_low;
+       uint32_t cpu_pq_base_addr_high;
+       uint32_t cpu_pq_length;
+       uint32_t cpu_pq_init_status;
+       uint32_t cpu_eq_base_addr_low;
+       uint32_t cpu_eq_base_addr_high;
+       uint32_t cpu_eq_length;
+       uint32_t cpu_eq_ci;
+       uint32_t cpu_cq_base_addr_low;
+       uint32_t cpu_cq_base_addr_high;
+       uint32_t cpu_cq_length;
+       uint32_t cpu_pf_pq_pi;
+       uint32_t cpu_boot_dev_sts0;
+       uint32_t cpu_boot_dev_sts1;
+       uint32_t cpu_boot_err0;
+       uint32_t cpu_boot_err1;
+       uint32_t cpu_boot_status;
+       uint32_t fw_upd_sts;
+       uint32_t fw_upd_cmd;
+       uint32_t fw_upd_pending_sts;
+       uint32_t fuse_ver_offset;
+       uint32_t preboot_ver_offset;
+       uint32_t uboot_ver_offset;
+       uint32_t hw_state;
+       uint32_t kmd_msg_to_cpu;
+       uint32_t cpu_cmd_status_to_host;
+       uint32_t reserved1[32];         /* reserve for future use */
+};
+
+/* HCDM - Habana Communications Descriptor Magic */
+#define HL_COMMS_DESC_MAGIC    0x4843444D
+#define HL_COMMS_DESC_VER      1
+
+/* this is the comms descriptor header - meta data */
+struct comms_desc_header {
+       uint32_t magic;         /* magic for validation */
+       uint32_t crc32;         /* CRC32 of the descriptor w/o header */
+       uint16_t size;          /* size of the descriptor w/o header */
+       uint8_t version;        /* descriptor version */
+       uint8_t reserved[5];    /* pad to 64 bit */
+};
+
+/* this is the main FW descriptor - consider ABI when changing */
+struct lkd_fw_comms_desc {
+       struct comms_desc_header header;
+       struct cpu_dyn_regs cpu_dyn_regs;
+       char fuse_ver[VERSION_MAX_LEN];
+       char cur_fw_ver[VERSION_MAX_LEN];
+       /* can be used for 1 more version w/o ABI change */
+       char reserved0[VERSION_MAX_LEN];
+       uint64_t img_addr;      /* address for next FW component load */
+};
+
+/*
+ * LKD commands:
+ *
+ * COMMS_NOOP                  Used to clear the command register and no actual
+ *                             command is send.
+ *
+ * COMMS_CLR_STS               Clear status command - FW should clear the
+ *                             status register. Used for synchronization
+ *                             between the commands as part of the race free
+ *                             protocol.
+ *
+ * COMMS_RST_STATE             Reset the current communication state which is
+ *                             kept by FW for proper responses.
+ *                             Should be used in the beginning of the
+ *                             communication cycle to clean any leftovers from
+ *                             previous communication attempts.
+ *
+ * COMMS_PREP_DESC             Prepare descriptor for setting up the
+ *                             communication and other dynamic data:
+ *                             struct lkd_fw_comms_desc.
+ *                             This command has a parameter stating the next FW
+ *                             component size, so the FW can actually prepare a
+ *                             space for it and in the status response provide
+ *                             the descriptor offset. The Offset of the next FW
+ *                             data component is a part of the descriptor
+ *                             structure.
+ *
+ * COMMS_DATA_RDY              The FW data has been uploaded and is ready for
+ *                             validation.
+ *
+ * COMMS_EXEC                  Execute the next FW component.
+ *
+ * COMMS_RST_DEV               Reset the device.
+ *
+ * COMMS_GOTO_WFE              Execute WFE command. Allowed only on non-secure
+ *                             devices.
+ *
+ * COMMS_SKIP_BMC              Perform actions required for BMC-less servers.
+ *                             Do not wait for BMC response.
+ *
+ * COMMS_LOW_PLL_OPP           Initialize PLLs for low OPP.
+ */
+enum comms_cmd {
+       COMMS_NOOP = 0,
+       COMMS_CLR_STS = 1,
+       COMMS_RST_STATE = 2,
+       COMMS_PREP_DESC = 3,
+       COMMS_DATA_RDY = 4,
+       COMMS_EXEC = 5,
+       COMMS_RST_DEV = 6,
+       COMMS_GOTO_WFE = 7,
+       COMMS_SKIP_BMC = 8,
+       COMMS_LOW_PLL_OPP = 9,
+       COMMS_INVLD_LAST
+};
+
+#define COMMS_COMMAND_SIZE_SHIFT       0
+#define COMMS_COMMAND_SIZE_MASK                0x1FFFFFF
+#define COMMS_COMMAND_CMD_SHIFT                27
+#define COMMS_COMMAND_CMD_MASK         0xF8000000
+
+/*
+ * LKD command to FW register structure
+ * @size       - FW component size
+ * @cmd                - command from enum comms_cmd
+ */
+struct comms_command {
+       union {         /* bit fields are only for FW use */
+               struct {
+                       unsigned int size :25;          /* 32MB max. */
+                       unsigned int reserved :2;
+                       enum comms_cmd cmd :5;          /* 32 commands */
+               };
+               unsigned int val;
+       };
+};
+
+/*
+ * FW status
+ *
+ * COMMS_STS_NOOP              Used to clear the status register and no actual
+ *                             status is provided.
+ *
+ * COMMS_STS_ACK               Command has been received and recognized.
+ *
+ * COMMS_STS_OK                        Command execution has finished successfully.
+ *
+ * COMMS_STS_ERR               Command execution was unsuccessful and resulted
+ *                             in error.
+ *
+ * COMMS_STS_VALID_ERR         FW validation has failed.
+ *
+ * COMMS_STS_TIMEOUT_ERR       Command execution has timed out.
+ */
+enum comms_sts {
+       COMMS_STS_NOOP = 0,
+       COMMS_STS_ACK = 1,
+       COMMS_STS_OK = 2,
+       COMMS_STS_ERR = 3,
+       COMMS_STS_VALID_ERR = 4,
+       COMMS_STS_TIMEOUT_ERR = 5,
+       COMMS_STS_INVLD_LAST
+};
+
+/* RAM types for FW components loading - defines the base address */
+enum comms_ram_types {
+       COMMS_SRAM = 0,
+       COMMS_DRAM = 1,
+};
+
+#define COMMS_STATUS_OFFSET_SHIFT      0
+#define COMMS_STATUS_OFFSET_MASK       0x03FFFFFF
+#define COMMS_STATUS_OFFSET_ALIGN_SHIFT        2
+#define COMMS_STATUS_RAM_TYPE_SHIFT    26
+#define COMMS_STATUS_RAM_TYPE_MASK     0x0C000000
+#define COMMS_STATUS_STATUS_SHIFT      28
+#define COMMS_STATUS_STATUS_MASK       0xF0000000
+
+/*
+ * FW status to LKD register structure
+ * @offset     - an offset from the base of the ram_type shifted right by
+ *               2 bits (always aligned to 32 bits).
+ *               Allows a maximum addressable offset of 256MB from RAM base.
+ *               Example: for real offset in RAM of 0x800000 (8MB), the value
+ *               in offset field is (0x800000 >> 2) = 0x200000.
+ * @ram_type   - the RAM type that should be used for offset from
+ *               enum comms_ram_types
+ * @status     - status from enum comms_sts
+ */
+struct comms_status {
+       union {         /* bit fields are only for FW use */
+               struct {
+                       unsigned int offset :26;
+                       unsigned int ram_type :2;
+                       enum comms_sts status :4;       /* 16 statuses */
+               };
+               unsigned int val;
+       };
+};
+
 #endif /* HL_BOOT_IF_H */
index f9ea897ae42c38ec96f281263d74cf52b3060c8b..ffae107b1693502f69f27169e055f075ff246e5d 100644 (file)
@@ -38,7 +38,7 @@
 
 #define QMAN_PQ_ENTRY_SIZE     16                      /* Bytes */
 
-#define MAX_ASID               1024
+#define MAX_ASID               2
 
 #define PROT_BITS_OFFS         0xF80
 
index 49335e8334b424f3fcf87fe86cb24aeaa7a0c7ba..e8651abf84f22be2439251ca712f5fc5f1e05b71 100644 (file)
@@ -303,6 +303,8 @@ enum gaudi_async_event_id {
        GAUDI_EVENT_NIC3_QP1 = 619,
        GAUDI_EVENT_NIC4_QP0 = 620,
        GAUDI_EVENT_NIC4_QP1 = 621,
+       GAUDI_EVENT_DEV_RESET_REQ = 646,
+       GAUDI_EVENT_PKT_QUEUE_OUT_SYNC = 647,
        GAUDI_EVENT_FIX_POWER_ENV_S = 658,
        GAUDI_EVENT_FIX_POWER_ENV_E = 659,
        GAUDI_EVENT_FIX_THERMAL_ENV_S = 660,
index 737176ba06fb61c4f3ceb00e8c3663361e927b7c..3dc79c1318059601a59e3f2ab2ea41cf5f0ad744 100644 (file)
@@ -301,10 +301,10 @@ static struct gaudi_async_events_ids_map gaudi_irq_map_table[] = {
        { .fc_id = 274, .cpu_id = 128, .valid = 0, .name = "" },
        { .fc_id = 275, .cpu_id = 128, .valid = 0, .name = "" },
        { .fc_id = 276, .cpu_id = 128, .valid = 0, .name = "" },
-       { .fc_id = 277, .cpu_id = 129, .valid = 0, .name = "" },
-       { .fc_id = 278, .cpu_id = 129, .valid = 0, .name = "" },
-       { .fc_id = 279, .cpu_id = 129, .valid = 0, .name = "" },
-       { .fc_id = 280, .cpu_id = 129, .valid = 0, .name = "" },
+       { .fc_id = 277, .cpu_id = 129, .valid = 1, .name = "DMA_IF_SEI_0" },
+       { .fc_id = 278, .cpu_id = 129, .valid = 1, .name = "DMA_IF_SEI_1" },
+       { .fc_id = 279, .cpu_id = 129, .valid = 1, .name = "DMA_IF_SEI_2" },
+       { .fc_id = 280, .cpu_id = 129, .valid = 1, .name = "DMA_IF_SEI_3" },
        { .fc_id = 281, .cpu_id = 130, .valid = 0, .name = "" },
        { .fc_id = 282, .cpu_id = 131, .valid = 0, .name = "" },
        { .fc_id = 283, .cpu_id = 132, .valid = 0, .name = "" },
@@ -670,18 +670,29 @@ static struct gaudi_async_events_ids_map gaudi_irq_map_table[] = {
        { .fc_id = 643, .cpu_id = 492, .valid = 0, .name = "" },
        { .fc_id = 644, .cpu_id = 493, .valid = 0, .name = "" },
        { .fc_id = 645, .cpu_id = 494, .valid = 0, .name = "" },
-       { .fc_id = 646, .cpu_id = 495, .valid = 0, .name = "" },
-       { .fc_id = 647, .cpu_id = 496, .valid = 0, .name = "" },
-       { .fc_id = 648, .cpu_id = 497, .valid = 0, .name = "" },
-       { .fc_id = 649, .cpu_id = 498, .valid = 0, .name = "" },
-       { .fc_id = 650, .cpu_id = 499, .valid = 0, .name = "" },
-       { .fc_id = 651, .cpu_id = 500, .valid = 0, .name = "" },
-       { .fc_id = 652, .cpu_id = 501, .valid = 0, .name = "" },
-       { .fc_id = 653, .cpu_id = 502, .valid = 0, .name = "" },
-       { .fc_id = 654, .cpu_id = 503, .valid = 0, .name = "" },
-       { .fc_id = 655, .cpu_id = 504, .valid = 0, .name = "" },
-       { .fc_id = 656, .cpu_id = 505, .valid = 0, .name = "" },
-       { .fc_id = 657, .cpu_id = 506, .valid = 0, .name = "" },
+       { .fc_id = 646, .cpu_id = 495, .valid = 1, .name = "DEV_RESET_REQ" },
+       { .fc_id = 647, .cpu_id = 496, .valid = 1,
+               .name = "PKT_QUEUE_OUT_SYNC" },
+       { .fc_id = 648, .cpu_id = 497, .valid = 1,
+               .name = "STATUS_NIC0_ENG0" },
+       { .fc_id = 649, .cpu_id = 498, .valid = 1,
+               .name = "STATUS_NIC0_ENG1" },
+       { .fc_id = 650, .cpu_id = 499, .valid = 1,
+               .name = "STATUS_NIC1_ENG0" },
+       { .fc_id = 651, .cpu_id = 500, .valid = 1,
+               .name = "STATUS_NIC1_ENG1" },
+       { .fc_id = 652, .cpu_id = 501, .valid = 1,
+               .name = "STATUS_NIC2_ENG0" },
+       { .fc_id = 653, .cpu_id = 502, .valid = 1,
+               .name = "STATUS_NIC2_ENG1" },
+       { .fc_id = 654, .cpu_id = 503, .valid = 1,
+               .name = "STATUS_NIC3_ENG0" },
+       { .fc_id = 655, .cpu_id = 504, .valid = 1,
+               .name = "STATUS_NIC3_ENG1" },
+       { .fc_id = 656, .cpu_id = 505, .valid = 1,
+               .name = "STATUS_NIC4_ENG0" },
+       { .fc_id = 657, .cpu_id = 506, .valid = 1,
+               .name = "STATUS_NIC4_ENG1" },
        { .fc_id = 658, .cpu_id = 507, .valid = 1, .name = "FIX_POWER_ENV_S" },
        { .fc_id = 659, .cpu_id = 508, .valid = 1, .name = "FIX_POWER_ENV_E" },
        { .fc_id = 660, .cpu_id = 509, .valid = 1,
index 25acd9e87e209c4bb2826aab80890a1c1a1d7e0f..a9f51f9f9e92bc22298d11472c9ef6a607ecbb77 100644 (file)
 #define UBOOT_FW_OFFSET                        0x100000        /* 1MB in SRAM */
 #define LINUX_FW_OFFSET                        0x800000        /* 8MB in HBM */
 
-enum gaudi_pll_index {
-       CPU_PLL = 0,
-       PCI_PLL,
-       SRAM_PLL,
-       HBM_PLL,
-       NIC_PLL,
-       DMA_PLL,
-       MESH_PLL,
-       MME_PLL,
-       TPC_PLL,
-       IF_PLL,
-       PLL_MAX
-};
-
 enum gaudi_nic_axi_error {
        RXB,
        RXE,
index 43d241891e4560f1cb27ef1db8e89760cf4ea9d8..1b4ca435021d7a39c3390add86800526c666f6f6 100644 (file)
@@ -30,7 +30,7 @@
 
 #define QMAN_PQ_ENTRY_SIZE     16                      /* Bytes */
 
-#define MAX_ASID               1024
+#define MAX_ASID               2
 
 #define PROT_BITS_OFFS         0xF80
 
index 5fb92362fc5feae9db36cab37fb5b38e18b09ce8..09081401cb1dd9542c0e2da5121e2165bef45119 100644 (file)
@@ -188,6 +188,7 @@ enum goya_async_event_id {
        GOYA_ASYNC_EVENT_ID_HALT_MACHINE = 485,
        GOYA_ASYNC_EVENT_ID_INTS_REGISTER = 486,
        GOYA_ASYNC_EVENT_ID_SOFT_RESET = 487,
+       GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC = 506,
        GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S = 507,
        GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E = 508,
        GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S = 509,
index daf8d8cd14be7e2d8642a8a2b69e301ffd389878..bc05f86c73ac0980ea745408a2596e995e6fcc2d 100644 (file)
 #define UBOOT_FW_OFFSET                0x100000                /* 1MB in SRAM */
 #define LINUX_FW_OFFSET                0x800000                /* 8MB in DDR */
 
-enum goya_pll_index {
-       CPU_PLL = 0,
-       IC_PLL,
-       MC_PLL,
-       MME_PLL,
-       PCI_PLL,
-       EMMC_PLL,
-       TPC_PLL,
-       PLL_MAX
-};
-
 #define GOYA_PLL_FREQ_LOW              50000000 /* 50 MHz */
 
 #endif /* GOYA_FW_IF_H */
index 945701bce553692ad660710ac996be427b5deaf6..64d33e3685091352aa6cab2b8ffd4a70c109bb5c 100644 (file)
 #include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/sched/task.h>
+#include <linux/kallsyms.h>
 
 #include <asm/sections.h>
 
-#define v1printk(a...) do { \
-       if (verbose) \
-               printk(KERN_INFO a); \
-       } while (0)
-#define v2printk(a...) do { \
-       if (verbose > 1) \
-               printk(KERN_INFO a); \
-               touch_nmi_watchdog();   \
-       } while (0)
-#define eprintk(a...) do { \
-               printk(KERN_ERR a); \
-               WARN_ON(1); \
-       } while (0)
+#define v1printk(a...) do {            \
+       if (verbose)                    \
+               printk(KERN_INFO a);    \
+} while (0)
+#define v2printk(a...) do {            \
+       if (verbose > 1)                \
+               printk(KERN_INFO a);    \
+       touch_nmi_watchdog();           \
+} while (0)
+#define eprintk(a...) do {             \
+       printk(KERN_ERR a);             \
+       WARN_ON(1);                     \
+} while (0)
 #define MAX_CONFIG_LEN         40
 
 static struct kgdb_io kgdbts_io_ops;
@@ -200,21 +201,30 @@ static noinline void kgdbts_break_test(void)
        v2printk("kgdbts: breakpoint complete\n");
 }
 
-/* Lookup symbol info in the kernel */
+/*
+ * This is a cached wrapper for kallsyms_lookup_name().
+ *
+ * The cache is a big win for several tests. For example, it more than doubles
+ * the cycles per second during the sys_open test. This is not theoretical;
+ * the performance improvement shows up at human scale, especially when
+ * testing using emulators.
+ *
+ * Obviously neither re-entrant nor thread-safe but that is OK since it
+ * can only be called from the debug trap (and therefore all other CPUs
+ * are halted).
+ */
 static unsigned long lookup_addr(char *arg)
 {
-       unsigned long addr = 0;
-
-       if (!strcmp(arg, "kgdbts_break_test"))
-               addr = (unsigned long)kgdbts_break_test;
-       else if (!strcmp(arg, "sys_open"))
-               addr = (unsigned long)do_sys_open;
-       else if (!strcmp(arg, "kernel_clone"))
-               addr = (unsigned long)kernel_clone;
-       else if (!strcmp(arg, "hw_break_val"))
-               addr = (unsigned long)&hw_break_val;
-       addr = (unsigned long) dereference_function_descriptor((void *)addr);
-       return addr;
+       static char cached_arg[KSYM_NAME_LEN];
+       static unsigned long cached_addr;
+
+       if (strcmp(arg, cached_arg)) {
+               strscpy(cached_arg, arg, KSYM_NAME_LEN);
+               cached_addr = kallsyms_lookup_name(arg);
+       }
+
+       return (unsigned long)dereference_function_descriptor(
+                       (void *)cached_addr);
 }
 
 static void break_helper(char *bp_type, char *arg, unsigned long vaddr)
@@ -310,7 +320,7 @@ static int check_and_rewind_pc(char *put_str, char *arg)
 
        if (arch_needs_sstep_emulation && sstep_addr &&
            ip + offset == sstep_addr &&
-           ((!strcmp(arg, "sys_open") || !strcmp(arg, "kernel_clone")))) {
+           ((!strcmp(arg, "do_sys_openat2") || !strcmp(arg, "kernel_clone")))) {
                /* This is special case for emulated single step */
                v2printk("Emul: rewind hit single step bp\n");
                restart_from_top_after_write = 1;
@@ -619,14 +629,14 @@ static struct test_struct do_kernel_clone_test[] = {
  */
 static struct test_struct sys_open_test[] = {
        { "?", "S0*" }, /* Clear break points */
-       { "sys_open", "OK", sw_break, }, /* set sw breakpoint */
+       { "do_sys_openat2", "OK", sw_break, }, /* set sw breakpoint */
        { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
-       { "sys_open", "OK", sw_rem_break }, /*remove breakpoint */
-       { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */
+       { "do_sys_openat2", "OK", sw_rem_break }, /*remove breakpoint */
+       { "g", "do_sys_openat2", NULL, check_and_rewind_pc }, /* check location */
        { "write", "OK", write_regs, emul_reset }, /* Write registers */
        { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
-       { "g", "sys_open", NULL, check_single_step },
-       { "sys_open", "OK", sw_break, }, /* set sw breakpoint */
+       { "g", "do_sys_openat2", NULL, check_single_step },
+       { "do_sys_openat2", "OK", sw_break, }, /* set sw breakpoint */
        { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
        { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
        { "", "", get_cont_catch, put_cont_catch },
index dd65cedf3b125370ed16ebcf8cc5f76349875340..70c5bb1e6f4966058be0dd133dcf6b5047185516 100644 (file)
@@ -208,7 +208,7 @@ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
 static int lis3_3dlh_rates[4] = {50, 100, 400, 1000};
 
 /* ODR is Output Data Rate */
-static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
+static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3)
 {
        u8 ctrl;
        int shift;
@@ -216,15 +216,23 @@ static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
        lis3->read(lis3, CTRL_REG1, &ctrl);
        ctrl &= lis3->odr_mask;
        shift = ffs(lis3->odr_mask) - 1;
-       return lis3->odrs[(ctrl >> shift)];
+       return (ctrl >> shift);
 }
 
 static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
 {
-       int div = lis3lv02d_get_odr(lis3);
+       int odr_idx = lis3lv02d_get_odr_index(lis3);
+       int div = lis3->odrs[odr_idx];
 
-       if (WARN_ONCE(div == 0, "device returned spurious data"))
+       if (div == 0) {
+               if (odr_idx == 0) {
+                       /* Power-down mode, not sampling, no need to sleep */
+                       return 0;
+               }
+
+               dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx);
                return -ENXIO;
+       }
 
        /* LIS3 power on delay is quite long */
        msleep(lis3->pwron_delay / div);
@@ -816,9 +824,12 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
        struct lis3lv02d *lis3 = dev_get_drvdata(dev);
+       int odr_idx;
 
        lis3lv02d_sysfs_poweron(lis3);
-       return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3));
+
+       odr_idx = lis3lv02d_get_odr_index(lis3);
+       return sprintf(buf, "%d\n", lis3->odrs[odr_idx]);
 }
 
 static ssize_t lis3lv02d_rate_set(struct device *dev,
@@ -1162,16 +1173,14 @@ int lis3lv02d_init_device(struct lis3lv02d *lis3)
                break;
        default:
                pr_err("unknown sensor type 0x%X\n", lis3->whoami);
-               return -EINVAL;
+               return -ENODEV;
        }
 
        lis3->reg_cache = kzalloc(max(sizeof(lis3_wai8_regs),
                                     sizeof(lis3_wai12_regs)), GFP_KERNEL);
 
-       if (lis3->reg_cache == NULL) {
-               printk(KERN_ERR DRIVER_NAME "out of memory\n");
+       if (lis3->reg_cache == NULL)
                return -ENOMEM;
-       }
 
        mutex_init(&lis3->mutex);
        atomic_set(&lis3->wake_thread, 0);
index 110f5a8538e9662b39f5bf5003d5a00647c76ebb..0e8254d0cf0ba83b6e0a40d515e972aed30498d9 100644 (file)
@@ -134,6 +134,23 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void)
        __lkdtm_CORRUPT_STACK((void *)&data);
 }
 
+static pid_t stack_pid;
+static unsigned long stack_addr;
+
+void lkdtm_REPORT_STACK(void)
+{
+       volatile uintptr_t magic;
+       pid_t pid = task_pid_nr(current);
+
+       if (pid != stack_pid) {
+               pr_info("Starting stack offset tracking for pid %d\n", pid);
+               stack_pid = pid;
+               stack_addr = (uintptr_t)&magic;
+       }
+
+       pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
+}
+
 void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
 {
        static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
index b2aff4d87c0143f71c1cbcff0bdf355239c8d31f..8024b6a5cc7fc458c3ab04e33a2d88a6fd56450e 100644 (file)
@@ -110,6 +110,7 @@ static const struct crashtype crashtypes[] = {
        CRASHTYPE(EXHAUST_STACK),
        CRASHTYPE(CORRUPT_STACK),
        CRASHTYPE(CORRUPT_STACK_STRONG),
+       CRASHTYPE(REPORT_STACK),
        CRASHTYPE(CORRUPT_LIST_ADD),
        CRASHTYPE(CORRUPT_LIST_DEL),
        CRASHTYPE(STACK_GUARD_PAGE_LEADING),
index 5ae48c64df24dffe794a0c7cb9f40b14582c9d85..99f90d3e5e9cb44be2f41f5aba307cc15a4b24d6 100644 (file)
@@ -17,6 +17,7 @@ void lkdtm_LOOP(void);
 void lkdtm_EXHAUST_STACK(void);
 void lkdtm_CORRUPT_STACK(void);
 void lkdtm_CORRUPT_STACK_STRONG(void);
+void lkdtm_REPORT_STACK(void);
 void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
 void lkdtm_SOFTLOCKUP(void);
 void lkdtm_HARDLOCKUP(void);
index 14be76d4c2e618c817f3f5f89fefc1961627bfa8..cb34925e10f15d6cd65b79b48c4870257b9e6189 100644 (file)
 
 #define MEI_DEV_ID_ADP_S      0x7AE8  /* Alder Lake Point S */
 #define MEI_DEV_ID_ADP_LP     0x7A60  /* Alder Lake Point LP */
+#define MEI_DEV_ID_ADP_P      0x51E0  /* Alder Lake Point P */
 
 /*
  * MEI HW Section
index a7e179626b6359f156d2db76146c5208113a3a11..c3393b383e5989bf6ed3be487ba219a8b7fcaeb2 100644 (file)
@@ -111,6 +111,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
 
        /* required last entry */
        {0, }
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
deleted file mode 100644 (file)
index f1655f5..0000000
+++ /dev/null
@@ -1,161 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- *  Pvpanic Device Support
- *
- *  Copyright (C) 2013 Fujitsu.
- *  Copyright (C) 2018 ZTE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/kexec.h>
-#include <linux/mod_devicetable.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-
-#include <uapi/misc/pvpanic.h>
-
-static void __iomem *base;
-static unsigned int capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
-static unsigned int events;
-
-static ssize_t capability_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-       return sysfs_emit(buf, "%x\n", capability);
-}
-static DEVICE_ATTR_RO(capability);
-
-static ssize_t events_show(struct device *dev,  struct device_attribute *attr, char *buf)
-{
-       return sysfs_emit(buf, "%x\n", events);
-}
-
-static ssize_t events_store(struct device *dev,  struct device_attribute *attr,
-                           const char *buf, size_t count)
-{
-       unsigned int tmp;
-       int err;
-
-       err = kstrtouint(buf, 16, &tmp);
-       if (err)
-               return err;
-
-       if ((tmp & capability) != tmp)
-               return -EINVAL;
-
-       events = tmp;
-
-       return count;
-
-}
-static DEVICE_ATTR_RW(events);
-
-static struct attribute *pvpanic_dev_attrs[] = {
-       &dev_attr_capability.attr,
-       &dev_attr_events.attr,
-       NULL
-};
-ATTRIBUTE_GROUPS(pvpanic_dev);
-
-MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
-MODULE_DESCRIPTION("pvpanic device driver");
-MODULE_LICENSE("GPL");
-
-static void
-pvpanic_send_event(unsigned int event)
-{
-       if (event & capability & events)
-               iowrite8(event, base);
-}
-
-static int
-pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
-                    void *unused)
-{
-       unsigned int event = PVPANIC_PANICKED;
-
-       if (kexec_crash_loaded())
-               event = PVPANIC_CRASH_LOADED;
-
-       pvpanic_send_event(event);
-
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block pvpanic_panic_nb = {
-       .notifier_call = pvpanic_panic_notify,
-       .priority = 1, /* let this called before broken drm_fb_helper */
-};
-
-static int pvpanic_mmio_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct resource *res;
-
-       res = platform_get_mem_or_io(pdev, 0);
-       if (!res)
-               return -EINVAL;
-
-       switch (resource_type(res)) {
-       case IORESOURCE_IO:
-               base = devm_ioport_map(dev, res->start, resource_size(res));
-               if (!base)
-                       return -ENOMEM;
-               break;
-       case IORESOURCE_MEM:
-               base = devm_ioremap_resource(dev, res);
-               if (IS_ERR(base))
-                       return PTR_ERR(base);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       /* initlize capability by RDPT */
-       capability &= ioread8(base);
-       events = capability;
-
-       if (capability)
-               atomic_notifier_chain_register(&panic_notifier_list,
-                                              &pvpanic_panic_nb);
-
-       return 0;
-}
-
-static int pvpanic_mmio_remove(struct platform_device *pdev)
-{
-
-       if (capability)
-               atomic_notifier_chain_unregister(&panic_notifier_list,
-                                                &pvpanic_panic_nb);
-
-       return 0;
-}
-
-static const struct of_device_id pvpanic_mmio_match[] = {
-       { .compatible = "qemu,pvpanic-mmio", },
-       {}
-};
-MODULE_DEVICE_TABLE(of, pvpanic_mmio_match);
-
-static const struct acpi_device_id pvpanic_device_ids[] = {
-       { "QEMU0001", 0 },
-       { "", 0 }
-};
-MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids);
-
-static struct platform_driver pvpanic_mmio_driver = {
-       .driver = {
-               .name = "pvpanic-mmio",
-               .of_match_table = pvpanic_mmio_match,
-               .acpi_match_table = pvpanic_device_ids,
-               .dev_groups = pvpanic_dev_groups,
-       },
-       .probe = pvpanic_mmio_probe,
-       .remove = pvpanic_mmio_remove,
-};
-module_platform_driver(pvpanic_mmio_driver);
diff --git a/drivers/misc/pvpanic/Kconfig b/drivers/misc/pvpanic/Kconfig
new file mode 100644 (file)
index 0000000..12d40a2
--- /dev/null
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Pvpanic Kconfig
+#
+# Copyright (C) 2021 Oracle.
+#
+
+config PVPANIC
+       bool "pvpanic device support"
+       help
+         This option allows you to select a specific pvpanic device driver.
+         pvpanic is a paravirtualized device provided by QEMU; it lets
+         a virtual machine (guest) communicate panic events to the host.
+
+config PVPANIC_MMIO
+       tristate "pvpanic MMIO device support"
+       depends on HAS_IOMEM && (ACPI || OF) && PVPANIC
+       help
+         This driver provides support for the MMIO pvpanic device.
+
+config PVPANIC_PCI
+       tristate "pvpanic PCI device support"
+       depends on PCI && PVPANIC
+       help
+         This driver provides support for the PCI pvpanic device.
+         pvpanic is a paravirtualized device provided by QEMU which
+         forwards the panic events from the guest to the host.
diff --git a/drivers/misc/pvpanic/Makefile b/drivers/misc/pvpanic/Makefile
new file mode 100644 (file)
index 0000000..9471df7
--- /dev/null
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Pvpanic Makefile
+#
+# Copyright (C) 2021 Oracle.
+#
+obj-$(CONFIG_PVPANIC_MMIO)     += pvpanic.o pvpanic-mmio.o
+obj-$(CONFIG_PVPANIC_PCI)      += pvpanic.o pvpanic-pci.o
diff --git a/drivers/misc/pvpanic/pvpanic-mmio.c b/drivers/misc/pvpanic/pvpanic-mmio.c
new file mode 100644 (file)
index 0000000..4c08417
--- /dev/null
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  Pvpanic MMIO Device Support
+ *
+ *  Copyright (C) 2013 Fujitsu.
+ *  Copyright (C) 2018 ZTE.
+ *  Copyright (C) 2021 Oracle.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include <uapi/misc/pvpanic.h>
+
+#include "pvpanic.h"
+
+MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
+MODULE_DESCRIPTION("pvpanic-mmio device driver");
+MODULE_LICENSE("GPL");
+
+static ssize_t capability_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct pvpanic_instance *pi = dev_get_drvdata(dev);
+
+       return sysfs_emit(buf, "%x\n", pi->capability);
+}
+static DEVICE_ATTR_RO(capability);
+
+static ssize_t events_show(struct device *dev,  struct device_attribute *attr, char *buf)
+{
+       struct pvpanic_instance *pi = dev_get_drvdata(dev);
+
+       return sysfs_emit(buf, "%x\n", pi->events);
+}
+
+static ssize_t events_store(struct device *dev,  struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       struct pvpanic_instance *pi = dev_get_drvdata(dev);
+       unsigned int tmp;
+       int err;
+
+       err = kstrtouint(buf, 16, &tmp);
+       if (err)
+               return err;
+
+       if ((tmp & pi->capability) != tmp)
+               return -EINVAL;
+
+       pi->events = tmp;
+
+       return count;
+}
+static DEVICE_ATTR_RW(events);
+
+static struct attribute *pvpanic_mmio_dev_attrs[] = {
+       &dev_attr_capability.attr,
+       &dev_attr_events.attr,
+       NULL
+};
+ATTRIBUTE_GROUPS(pvpanic_mmio_dev);
+
+static int pvpanic_mmio_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct pvpanic_instance *pi;
+       struct resource *res;
+       void __iomem *base;
+
+       res = platform_get_mem_or_io(pdev, 0);
+       if (!res)
+               return -EINVAL;
+
+       switch (resource_type(res)) {
+       case IORESOURCE_IO:
+               base = devm_ioport_map(dev, res->start, resource_size(res));
+               if (!base)
+                       return -ENOMEM;
+               break;
+       case IORESOURCE_MEM:
+               base = devm_ioremap_resource(dev, res);
+               if (IS_ERR(base))
+                       return PTR_ERR(base);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       pi = kmalloc(sizeof(*pi), GFP_ATOMIC);
+       if (!pi)
+               return -ENOMEM;
+
+       pi->base = base;
+       pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
+
+       /* initialize capability by RDPT */
+       pi->capability &= ioread8(base);
+       pi->events = pi->capability;
+
+       dev_set_drvdata(dev, pi);
+
+       return pvpanic_probe(pi);
+}
+
+static int pvpanic_mmio_remove(struct platform_device *pdev)
+{
+       struct pvpanic_instance *pi = dev_get_drvdata(&pdev->dev);
+
+       pvpanic_remove(pi);
+       kfree(pi);
+
+       return 0;
+}
+
+static const struct of_device_id pvpanic_mmio_match[] = {
+       { .compatible = "qemu,pvpanic-mmio", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, pvpanic_mmio_match);
+
+static const struct acpi_device_id pvpanic_device_ids[] = {
+       { "QEMU0001", 0 },
+       { "", 0 }
+};
+MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids);
+
+static struct platform_driver pvpanic_mmio_driver = {
+       .driver = {
+               .name = "pvpanic-mmio",
+               .of_match_table = pvpanic_mmio_match,
+               .acpi_match_table = pvpanic_device_ids,
+               .dev_groups = pvpanic_mmio_dev_groups,
+       },
+       .probe = pvpanic_mmio_probe,
+       .remove = pvpanic_mmio_remove,
+};
+module_platform_driver(pvpanic_mmio_driver);
diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
new file mode 100644 (file)
index 0000000..9ecc4e8
--- /dev/null
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  Pvpanic PCI Device Support
+ *
+ *  Copyright (C) 2021 Oracle.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include <uapi/misc/pvpanic.h>
+
+#include "pvpanic.h"
+
+#define PCI_VENDOR_ID_REDHAT             0x1b36
+#define PCI_DEVICE_ID_REDHAT_PVPANIC     0x0011
+
+MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
+MODULE_DESCRIPTION("pvpanic device driver ");
+MODULE_LICENSE("GPL");
+
+static const struct pci_device_id pvpanic_pci_id_tbl[]  = {
+       { PCI_DEVICE(PCI_VENDOR_ID_REDHAT, PCI_DEVICE_ID_REDHAT_PVPANIC)},
+       {}
+};
+
+static ssize_t capability_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct pvpanic_instance *pi = dev_get_drvdata(dev);
+
+       return sysfs_emit(buf, "%x\n", pi->capability);
+}
+static DEVICE_ATTR_RO(capability);
+
+static ssize_t events_show(struct device *dev,  struct device_attribute *attr, char *buf)
+{
+       struct pvpanic_instance *pi = dev_get_drvdata(dev);
+
+       return sysfs_emit(buf, "%x\n", pi->events);
+}
+
+static ssize_t events_store(struct device *dev,  struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       struct pvpanic_instance *pi = dev_get_drvdata(dev);
+       unsigned int tmp;
+       int err;
+
+       err = kstrtouint(buf, 16, &tmp);
+       if (err)
+               return err;
+
+       if ((tmp & pi->capability) != tmp)
+               return -EINVAL;
+
+       pi->events = tmp;
+
+       return count;
+}
+static DEVICE_ATTR_RW(events);
+
+static struct attribute *pvpanic_pci_dev_attrs[] = {
+       &dev_attr_capability.attr,
+       &dev_attr_events.attr,
+       NULL
+};
+ATTRIBUTE_GROUPS(pvpanic_pci_dev);
+
+static int pvpanic_pci_probe(struct pci_dev *pdev,
+                            const struct pci_device_id *ent)
+{
+       struct device *dev = &pdev->dev;
+       struct pvpanic_instance *pi;
+       void __iomem *base;
+       int ret;
+
+       ret = pci_enable_device(pdev);
+       if (ret < 0)
+               return ret;
+
+       base = pci_iomap(pdev, 0, 0);
+       if (!base)
+               return -ENOMEM;
+
+       pi = kmalloc(sizeof(*pi), GFP_ATOMIC);
+       if (!pi)
+               return -ENOMEM;
+
+       pi->base = base;
+       pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
+
+       /* initialize capability by RDPT */
+       pi->capability &= ioread8(base);
+       pi->events = pi->capability;
+
+       dev_set_drvdata(dev, pi);
+
+       return pvpanic_probe(pi);
+}
+
+static void pvpanic_pci_remove(struct pci_dev *pdev)
+{
+       struct pvpanic_instance *pi = dev_get_drvdata(&pdev->dev);
+
+       pvpanic_remove(pi);
+       iounmap(pi->base);
+       kfree(pi);
+       pci_disable_device(pdev);
+}
+
+static struct pci_driver pvpanic_pci_driver = {
+       .name =         "pvpanic-pci",
+       .id_table =     pvpanic_pci_id_tbl,
+       .probe =        pvpanic_pci_probe,
+       .remove =       pvpanic_pci_remove,
+       .driver = {
+               .dev_groups = pvpanic_pci_dev_groups,
+       },
+};
+
+module_pci_driver(pvpanic_pci_driver);
diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
new file mode 100644 (file)
index 0000000..65f70a4
--- /dev/null
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  Pvpanic Device Support
+ *
+ *  Copyright (C) 2013 Fujitsu.
+ *  Copyright (C) 2018 ZTE.
+ *  Copyright (C) 2021 Oracle.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+
+#include <uapi/misc/pvpanic.h>
+
+#include "pvpanic.h"
+
+MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
+MODULE_DESCRIPTION("pvpanic device driver ");
+MODULE_LICENSE("GPL");
+
+static struct list_head pvpanic_list;
+static spinlock_t pvpanic_lock;
+
+static void
+pvpanic_send_event(unsigned int event)
+{
+       struct pvpanic_instance *pi_cur;
+
+       spin_lock(&pvpanic_lock);
+       list_for_each_entry(pi_cur, &pvpanic_list, list) {
+               if (event & pi_cur->capability & pi_cur->events)
+                       iowrite8(event, pi_cur->base);
+       }
+       spin_unlock(&pvpanic_lock);
+}
+
+static int
+pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
+                    void *unused)
+{
+       unsigned int event = PVPANIC_PANICKED;
+
+       if (kexec_crash_loaded())
+               event = PVPANIC_CRASH_LOADED;
+
+       pvpanic_send_event(event);
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block pvpanic_panic_nb = {
+       .notifier_call = pvpanic_panic_notify,
+       .priority = 1, /* let this be called before the broken drm_fb_helper */
+};
+
+int pvpanic_probe(struct pvpanic_instance *pi)
+{
+       if (!pi || !pi->base)
+               return -EINVAL;
+
+       spin_lock(&pvpanic_lock);
+       list_add(&pi->list, &pvpanic_list);
+       spin_unlock(&pvpanic_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(pvpanic_probe);
+
+void pvpanic_remove(struct pvpanic_instance *pi)
+{
+       struct pvpanic_instance *pi_cur, *pi_next;
+
+       if (!pi)
+               return;
+
+       spin_lock(&pvpanic_lock);
+       list_for_each_entry_safe(pi_cur, pi_next, &pvpanic_list, list) {
+               if (pi_cur == pi) {
+                       list_del(&pi_cur->list);
+                       break;
+               }
+       }
+       spin_unlock(&pvpanic_lock);
+}
+EXPORT_SYMBOL_GPL(pvpanic_remove);
+
+static int pvpanic_init(void)
+{
+       INIT_LIST_HEAD(&pvpanic_list);
+       spin_lock_init(&pvpanic_lock);
+
+       atomic_notifier_chain_register(&panic_notifier_list,
+                                      &pvpanic_panic_nb);
+
+       return 0;
+}
+
+static void pvpanic_exit(void)
+{
+       atomic_notifier_chain_unregister(&panic_notifier_list,
+                                        &pvpanic_panic_nb);
+
+}
+
+module_init(pvpanic_init);
+module_exit(pvpanic_exit);
diff --git a/drivers/misc/pvpanic/pvpanic.h b/drivers/misc/pvpanic/pvpanic.h
new file mode 100644 (file)
index 0000000..1afccc2
--- /dev/null
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *  Pvpanic Device Support
+ *
+ *  Copyright (C) 2021 Oracle.
+ */
+
+#ifndef PVPANIC_H_
+#define PVPANIC_H_
+
+struct pvpanic_instance {
+       void __iomem *base;
+       unsigned int capability;
+       unsigned int events;
+       struct list_head list;
+};
+
+int pvpanic_probe(struct pvpanic_instance *pi);
+void pvpanic_remove(struct pvpanic_instance *pi);
+
+#endif /* PVPANIC_H_ */
index cf2965aa5c0556748a3d8b245377bf7209f29f82..87d156c15f35bcd790e111bc3eb01f9758716f79 100644 (file)
 
 /* define the XP debug device structures to be used with dev_dbg() et al */
 
-struct device_driver xp_dbg_name = {
+static struct device_driver xp_dbg_name = {
        .name = "xp"
 };
 
-struct device xp_dbg_subname = {
+static struct device xp_dbg_subname = {
        .init_name = "",                /* set to "" */
        .driver = &xp_dbg_name
 };
index 84610bbcc131434c0f0088ce095b82c8377187ca..b2c3c22fc13c1361cd923d7154baa7abf57e6b42 100644 (file)
@@ -207,7 +207,7 @@ xpc_start_hb_beater(void)
 {
        xpc_arch_ops.heartbeat_init();
        timer_setup(&xpc_hb_timer, xpc_hb_beater, 0);
-       xpc_hb_beater(0);
+       xpc_hb_beater(NULL);
 }
 
 static void
index d07af4edfcacf4251d51594ed12fefee27f3de50..94843e0e51c66b018d29e07543877cda207ca002 100644 (file)
@@ -126,7 +126,7 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
 {
        struct uacce_device *uacce;
        struct uacce_queue *q;
-       int ret = 0;
+       int ret;
 
        uacce = xa_load(&uacce_xa, iminor(inode));
        if (!uacce)
index b837e7eba5f7dcec2ae1f15bb2052c18b9a0f306..f1d8ba6d485741336d48382dc00dd2d59f4fe630 100644 (file)
@@ -346,11 +346,6 @@ struct vmballoon {
        /* statistics */
        struct vmballoon_stats *stats;
 
-#ifdef CONFIG_DEBUG_FS
-       /* debugfs file exporting statistics */
-       struct dentry *dbg_entry;
-#endif
-
        /**
         * @b_dev_info: balloon device information descriptor.
         */
@@ -1709,14 +1704,14 @@ DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
 
 static void __init vmballoon_debugfs_init(struct vmballoon *b)
 {
-       b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
-                                          &vmballoon_debug_fops);
+       debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
+                           &vmballoon_debug_fops);
 }
 
 static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
 {
        static_key_disable(&balloon_stat_enabled.key);
-       debugfs_remove(b->dbg_entry);
+       debugfs_remove(debugfs_lookup("vmmemctl", NULL));
        kfree(b->stats);
        b->stats = NULL;
 }
index 345addd9306defffd6e43d667672df6f22966bc1..fa8a7fce4481bd84e9ef56c3f4ea9cfb0074497e 100644 (file)
@@ -326,7 +326,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
 bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
 {
        int result;
-       struct vmci_notify_bm_set_msg bitmap_set_msg;
+       struct vmci_notify_bm_set_msg bitmap_set_msg = { };
 
        bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                                  VMCI_SET_NOTIFY_BITMAP);
index cc8eeb361fcdb580d5e2aa28e3283bfae2ddca18..1018dc77269d47b3079b0c9be6426295b5489fc5 100644 (file)
@@ -168,7 +168,7 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
                                VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
        struct vmci_datagram *check_msg;
 
-       check_msg = kmalloc(msg_size, GFP_KERNEL);
+       check_msg = kzalloc(msg_size, GFP_KERNEL);
        if (!check_msg) {
                dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
                return -ENOMEM;
index 2d8328d928d53fe78832be7ea6cb793a123b1931..da1e2a773823e06ac6e34e39a6878be5cf5b0adc 100644 (file)
@@ -908,7 +908,7 @@ static long vmci_host_unlocked_ioctl(struct file *filp,
                                     unsigned int iocmd, unsigned long ioarg)
 {
 #define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {                       \
-               char *name = __stringify(IOCTL_VMCI_ ## ioctl_name);    \
+               char *name = "IOCTL_VMCI_" # ioctl_name;                \
                return vmci_host_do_ ## ioctl_fn(                       \
                        vmci_host_dev, name, uptr);                     \
        } while (0)
index eb6c02bc4a02d94a9a1073be8391c4ca81535f89..b8b771b643cc8bfc9fb685d53936cd614104846c 100644 (file)
@@ -247,8 +247,9 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
                 */
                for_each_sg(data->sg, sg, data->sg_len, i) {
                        if (sg->length % data->blksz) {
-                               WARN_ONCE(1, "unaligned sg len %u blksize %u\n",
-                                         sg->length, data->blksz);
+                               dev_warn_once(mmc_dev(mmc),
+                                             "unaligned sg len %u blksize %u, disabling descriptor DMA for transfer\n",
+                                             sg->length, data->blksz);
                                return;
                        }
                }
index 044880760b584382fa247cde56d9cabd11cc6a8c..8908b9363a96900aa1d37acb966b1b6b7d13ae5c 100644 (file)
@@ -44,8 +44,8 @@ struct comp_channel {
 };
 
 #define to_channel(d) container_of(d, struct comp_channel, cdev)
-static struct list_head channel_list;
-static spinlock_t ch_list_lock;
+static LIST_HEAD(channel_list);
+static DEFINE_SPINLOCK(ch_list_lock);
 
 static inline bool ch_has_mbo(struct comp_channel *c)
 {
@@ -494,8 +494,6 @@ static int __init mod_init(void)
        if (IS_ERR(comp.class))
                return PTR_ERR(comp.class);
 
-       INIT_LIST_HEAD(&channel_list);
-       spin_lock_init(&ch_list_lock);
        ida_init(&comp.minor_id);
 
        err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
index 57f1f170899465461735a170128c876d6dee1ca4..5c5c92132287da07b43d380ced61c0f88f5008e4 100644 (file)
@@ -488,8 +488,8 @@ static int mtk_nfc_exec_instr(struct nand_chip *chip,
                return 0;
        case NAND_OP_WAITRDY_INSTR:
                return readl_poll_timeout(nfc->regs + NFI_STA, status,
-                                         status & STA_BUSY, 20,
-                                         instr->ctx.waitrdy.timeout_ms);
+                                         !(status & STA_BUSY), 20,
+                                         instr->ctx.waitrdy.timeout_ms * 1000);
        default:
                break;
        }
index 02c1f2c014e8bf9116df68b26fc08cca612669c1..cc5f2c1861d4a22d984bcd37efb98dd3561ee765 100644 (file)
@@ -7,11 +7,12 @@
  * Author: Peter Rosin <peda@axentia.se>
  */
 
+#include <linux/bitmap.h>
 #include <linux/err.h>
 #include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/mux/driver.h>
-#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/property.h>
 
@@ -23,8 +24,9 @@ static int mux_gpio_set(struct mux_control *mux, int state)
 {
        struct mux_gpio *mux_gpio = mux_chip_priv(mux->chip);
        DECLARE_BITMAP(values, BITS_PER_TYPE(state));
+       u32 value = state;
 
-       values[0] = state;
+       bitmap_from_arr32(values, &value, BITS_PER_TYPE(value));
 
        gpiod_set_array_value_cansleep(mux_gpio->gpios->ndescs,
                                       mux_gpio->gpios->desc,
@@ -64,14 +66,11 @@ static int mux_gpio_probe(struct platform_device *pdev)
        mux_chip->ops = &mux_gpio_ops;
 
        mux_gpio->gpios = devm_gpiod_get_array(dev, "mux", GPIOD_OUT_LOW);
-       if (IS_ERR(mux_gpio->gpios)) {
-               ret = PTR_ERR(mux_gpio->gpios);
-               if (ret != -EPROBE_DEFER)
-                       dev_err(dev, "failed to get gpios\n");
-               return ret;
-       }
+       if (IS_ERR(mux_gpio->gpios))
+               return dev_err_probe(dev, PTR_ERR(mux_gpio->gpios),
+                                    "failed to get gpios\n");
        WARN_ON(pins != mux_gpio->gpios->ndescs);
-       mux_chip->mux->states = 1 << pins;
+       mux_chip->mux->states = BIT(pins);
 
        ret = device_property_read_u32(dev, "idle-state", (u32 *)&idle_state);
        if (ret >= 0 && idle_state != MUX_IDLE_AS_IS) {
@@ -96,7 +95,7 @@ static int mux_gpio_probe(struct platform_device *pdev)
 static struct platform_driver mux_gpio_driver = {
        .driver = {
                .name = "gpio-mux",
-               .of_match_table = of_match_ptr(mux_gpio_dt_ids),
+               .of_match_table = mux_gpio_dt_ids,
        },
        .probe = mux_gpio_probe,
 };
index f69fb4238a654cb31c70ed2bcda663dc072abced..a57da43680d8f295ca343b5e0cf8f3bf5cba4dcf 100644 (file)
@@ -314,6 +314,18 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len)
        return ret;
 }
 
+static int mcp251x_spi_write(struct spi_device *spi, int len)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       int ret;
+
+       ret = spi_write(spi, priv->spi_tx_buf, len);
+       if (ret)
+               dev_err(&spi->dev, "spi write failed: ret = %d\n", ret);
+
+       return ret;
+}
+
 static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg)
 {
        struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -361,7 +373,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
        priv->spi_tx_buf[1] = reg;
        priv->spi_tx_buf[2] = val;
 
-       mcp251x_spi_trans(spi, 3);
+       mcp251x_spi_write(spi, 3);
 }
 
 static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
@@ -373,7 +385,7 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
        priv->spi_tx_buf[2] = v1;
        priv->spi_tx_buf[3] = v2;
 
-       mcp251x_spi_trans(spi, 4);
+       mcp251x_spi_write(spi, 4);
 }
 
 static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
@@ -386,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
        priv->spi_tx_buf[2] = mask;
        priv->spi_tx_buf[3] = val;
 
-       mcp251x_spi_trans(spi, 4);
+       mcp251x_spi_write(spi, 4);
 }
 
 static u8 mcp251x_read_stat(struct spi_device *spi)
@@ -618,7 +630,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
                                          buf[i]);
        } else {
                memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
-               mcp251x_spi_trans(spi, TXBDAT_OFF + len);
+               mcp251x_spi_write(spi, TXBDAT_OFF + len);
        }
 }
 
@@ -650,7 +662,7 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
 
        /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
        priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
-       mcp251x_spi_trans(priv->spi, 1);
+       mcp251x_spi_write(priv->spi, 1);
 }
 
 static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
@@ -888,7 +900,7 @@ static int mcp251x_hw_reset(struct spi_device *spi)
        mdelay(MCP251X_OST_DELAY_MS);
 
        priv->spi_tx_buf[0] = INSTRUCTION_RESET;
-       ret = mcp251x_spi_trans(spi, 1);
+       ret = mcp251x_spi_write(spi, 1);
        if (ret)
                return ret;
 
index 573b11559d733fc5328130fced5f910b8b81334e..28e916a04047d51548e5b3d1a22a86c1924ec379 100644 (file)
@@ -857,7 +857,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
        if (dev->adapter->dev_set_bus) {
                err = dev->adapter->dev_set_bus(dev, 0);
                if (err)
-                       goto lbl_unregister_candev;
+                       goto adap_dev_free;
        }
 
        /* get device number early */
@@ -869,6 +869,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
 
        return 0;
 
+adap_dev_free:
+       if (dev->adapter->dev_free)
+               dev->adapter->dev_free(dev);
+
 lbl_unregister_candev:
        unregister_candev(netdev);
 
index 52e865a3912cfc651edc9b5a87f2a8430121bf2d..bf5c62e5c0b0effc077b1e6c5e3fd94738051f56 100644 (file)
 
 /* GSWIP MII Registers */
 #define GSWIP_MII_CFGp(p)              (0x2 * (p))
+#define  GSWIP_MII_CFG_RESET           BIT(15)
 #define  GSWIP_MII_CFG_EN              BIT(14)
+#define  GSWIP_MII_CFG_ISOLATE         BIT(13)
 #define  GSWIP_MII_CFG_LDCLKDIS                BIT(12)
+#define  GSWIP_MII_CFG_RGMII_IBS       BIT(8)
+#define  GSWIP_MII_CFG_RMII_CLK                BIT(7)
 #define  GSWIP_MII_CFG_MODE_MIIP       0x0
 #define  GSWIP_MII_CFG_MODE_MIIM       0x1
 #define  GSWIP_MII_CFG_MODE_RMIIP      0x2
 #define GSWIP_PCE_DEFPVID(p)           (0x486 + ((p) * 0xA))
 
 #define GSWIP_MAC_FLEN                 0x8C5
+#define GSWIP_MAC_CTRL_0p(p)           (0x903 + ((p) * 0xC))
+#define  GSWIP_MAC_CTRL_0_PADEN                BIT(8)
+#define  GSWIP_MAC_CTRL_0_FCS_EN       BIT(7)
+#define  GSWIP_MAC_CTRL_0_FCON_MASK    0x0070
+#define  GSWIP_MAC_CTRL_0_FCON_AUTO    0x0000
+#define  GSWIP_MAC_CTRL_0_FCON_RX      0x0010
+#define  GSWIP_MAC_CTRL_0_FCON_TX      0x0020
+#define  GSWIP_MAC_CTRL_0_FCON_RXTX    0x0030
+#define  GSWIP_MAC_CTRL_0_FCON_NONE    0x0040
+#define  GSWIP_MAC_CTRL_0_FDUP_MASK    0x000C
+#define  GSWIP_MAC_CTRL_0_FDUP_AUTO    0x0000
+#define  GSWIP_MAC_CTRL_0_FDUP_EN      0x0004
+#define  GSWIP_MAC_CTRL_0_FDUP_DIS     0x000C
+#define  GSWIP_MAC_CTRL_0_GMII_MASK    0x0003
+#define  GSWIP_MAC_CTRL_0_GMII_AUTO    0x0000
+#define  GSWIP_MAC_CTRL_0_GMII_MII     0x0001
+#define  GSWIP_MAC_CTRL_0_GMII_RGMII   0x0002
 #define GSWIP_MAC_CTRL_2p(p)           (0x905 + ((p) * 0xC))
 #define GSWIP_MAC_CTRL_2_MLEN          BIT(3) /* Maximum Untagged Frame Lnegth */
 
@@ -653,16 +674,13 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
                          GSWIP_SDMA_PCTRLp(port));
 
        if (!dsa_is_cpu_port(ds, port)) {
-               u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
-                             GSWIP_MDIO_PHY_SPEED_AUTO |
-                             GSWIP_MDIO_PHY_FDUP_AUTO |
-                             GSWIP_MDIO_PHY_FCONTX_AUTO |
-                             GSWIP_MDIO_PHY_FCONRX_AUTO |
-                             (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK);
-
-               gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port));
-               /* Activate MDIO auto polling */
-               gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0);
+               u32 mdio_phy = 0;
+
+               if (phydev)
+                       mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
+
+               gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
+                               GSWIP_MDIO_PHYp(port));
        }
 
        return 0;
@@ -675,14 +693,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port)
        if (!dsa_is_user_port(ds, port))
                return;
 
-       if (!dsa_is_cpu_port(ds, port)) {
-               gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
-                               GSWIP_MDIO_PHY_LINK_MASK,
-                               GSWIP_MDIO_PHYp(port));
-               /* Deactivate MDIO auto polling */
-               gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0);
-       }
-
        gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
                          GSWIP_FDMA_PCTRLp(port));
        gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
@@ -794,14 +804,32 @@ static int gswip_setup(struct dsa_switch *ds)
        gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
        gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
 
-       /* disable PHY auto polling */
+       /* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an
+        * interoperability problem with this auto polling mechanism because
+        * their status registers think that the link is in a different state
+        * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set
+        * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
+        * auto polling state machine consider the link being negotiated with
+        * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
+        * to the switch port being completely dead (RX and TX are both not
+        * working).
+        * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
+        * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
+        * it would work fine for a few minutes to hours and then stop, on
+        * other device it would no traffic could be sent or received at all.
+        * Testing shows that when PHY auto polling is disabled these problems
+        * go away.
+        */
        gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
+
        /* Configure the MDIO Clock 2.5 MHz */
        gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
 
-       /* Disable the xMII link */
+       /* Disable the xMII interface and clear it's isolation bit */
        for (i = 0; i < priv->hw_info->max_ports; i++)
-               gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
+               gswip_mii_mask_cfg(priv,
+                                  GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
+                                  0, i);
 
        /* enable special tag insertion on cpu port */
        gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -1450,6 +1478,112 @@ unsupported:
        return;
 }
 
+static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
+{
+       u32 mdio_phy;
+
+       if (link)
+               mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
+       else
+               mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
+
+       gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
+                       GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
+                                phy_interface_t interface)
+{
+       u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
+
+       switch (speed) {
+       case SPEED_10:
+               mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
+
+               if (interface == PHY_INTERFACE_MODE_RMII)
+                       mii_cfg = GSWIP_MII_CFG_RATE_M50;
+               else
+                       mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
+
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+               break;
+
+       case SPEED_100:
+               mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
+
+               if (interface == PHY_INTERFACE_MODE_RMII)
+                       mii_cfg = GSWIP_MII_CFG_RATE_M50;
+               else
+                       mii_cfg = GSWIP_MII_CFG_RATE_M25;
+
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+               break;
+
+       case SPEED_1000:
+               mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
+
+               mii_cfg = GSWIP_MII_CFG_RATE_M125;
+
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
+               break;
+       }
+
+       gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
+                       GSWIP_MDIO_PHYp(port));
+       gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
+       gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
+                         GSWIP_MAC_CTRL_0p(port));
+}
+
+static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
+{
+       u32 mac_ctrl_0, mdio_phy;
+
+       if (duplex == DUPLEX_FULL) {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
+               mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
+       } else {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
+               mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
+       }
+
+       gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
+                         GSWIP_MAC_CTRL_0p(port));
+       gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
+                       GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_pause(struct gswip_priv *priv, int port,
+                                bool tx_pause, bool rx_pause)
+{
+       u32 mac_ctrl_0, mdio_phy;
+
+       if (tx_pause && rx_pause) {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
+               mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+                          GSWIP_MDIO_PHY_FCONRX_EN;
+       } else if (tx_pause) {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
+               mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+                          GSWIP_MDIO_PHY_FCONRX_DIS;
+       } else if (rx_pause) {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
+               mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+                          GSWIP_MDIO_PHY_FCONRX_EN;
+       } else {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
+               mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+                          GSWIP_MDIO_PHY_FCONRX_DIS;
+       }
+
+       gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
+                         mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
+       gswip_mdio_mask(priv,
+                       GSWIP_MDIO_PHY_FCONTX_MASK |
+                       GSWIP_MDIO_PHY_FCONRX_MASK,
+                       mdio_phy, GSWIP_MDIO_PHYp(port));
+}
+
 static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
                                     unsigned int mode,
                                     const struct phylink_link_state *state)
@@ -1469,6 +1603,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
                break;
        case PHY_INTERFACE_MODE_RMII:
                miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
+
+               /* Configure the RMII clock as output: */
+               miicfg |= GSWIP_MII_CFG_RMII_CLK;
                break;
        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1481,7 +1618,11 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
                        "Unsupported interface: %d\n", state->interface);
                return;
        }
-       gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port);
+
+       gswip_mii_mask_cfg(priv,
+                          GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
+                          GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
+                          miicfg, port);
 
        switch (state->interface) {
        case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1506,6 +1647,9 @@ static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
        struct gswip_priv *priv = ds->priv;
 
        gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
+
+       if (!dsa_is_cpu_port(ds, port))
+               gswip_port_set_link(priv, port, false);
 }
 
 static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -1517,6 +1661,13 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
 {
        struct gswip_priv *priv = ds->priv;
 
+       if (!dsa_is_cpu_port(ds, port)) {
+               gswip_port_set_link(priv, port, true);
+               gswip_port_set_speed(priv, port, speed, interface);
+               gswip_port_set_duplex(priv, port, duplex);
+               gswip_port_set_pause(priv, port, tx_pause, rx_pause);
+       }
+
        gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
 }
 
index 903d619e08ed1e6e6ca5ae363f8750e86a24b846..e08bf937714009ea75163064e88039bc6c296be4 100644 (file)
@@ -3026,10 +3026,17 @@ out_resources:
        return err;
 }
 
+/* prod_id for switch families which do not have a PHY model number */
+static const u16 family_prod_id_table[] = {
+       [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+       [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
+};
+
 static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
 {
        struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
        struct mv88e6xxx_chip *chip = mdio_bus->chip;
+       u16 prod_id;
        u16 val;
        int err;
 
@@ -3040,23 +3047,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
        err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
        mv88e6xxx_reg_unlock(chip);
 
-       if (reg == MII_PHYSID2) {
-               /* Some internal PHYs don't have a model number. */
-               if (chip->info->family != MV88E6XXX_FAMILY_6165)
-                       /* Then there is the 6165 family. It gets is
-                        * PHYs correct. But it can also have two
-                        * SERDES interfaces in the PHY address
-                        * space. And these don't have a model
-                        * number. But they are not PHYs, so we don't
-                        * want to give them something a PHY driver
-                        * will recognise.
-                        *
-                        * Use the mv88e6390 family model number
-                        * instead, for anything which really could be
-                        * a PHY,
-                        */
-                       if (!(val & 0x3f0))
-                               val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
+       /* Some internal PHYs don't have a model number. */
+       if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
+           chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
+               prod_id = family_prod_id_table[chip->info->family];
+               if (prod_id)
+                       val |= prod_id >> 4;
        }
 
        return err ? err : val;
index 187b0b9a6e1df6ab3ea7ecfe05b96fa731e225ad..f78daba60b35c07156405555b71a2e0d97965b38 100644 (file)
@@ -1534,8 +1534,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        pci_set_master(pdev);
 
-       ioaddr = pci_resource_start(pdev, 0);
-       if (!ioaddr) {
+       if (!pci_resource_len(pdev, 0)) {
                if (pcnet32_debug & NETIF_MSG_PROBE)
                        pr_err("card has no PCI IO resources, aborting\n");
                err = -ENODEV;
@@ -1548,6 +1547,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
                        pr_err("architecture does not support 32bit PCI busmaster DMA\n");
                goto err_disable_dev;
        }
+
+       ioaddr = pci_resource_start(pdev, 0);
        if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
                if (pcnet32_debug & NETIF_MSG_PROBE)
                        pr_err("io address range already allocated\n");
index ba8321ec1ee73eb70ee3d41f2552a263989fed0a..3305979a9f7c1fc43dba9efd9cec54d3c1286d07 100644 (file)
 #define XGBE_DMA_SYS_AWCR      0x30303030
 
 /* DMA cache settings - PCI device */
-#define XGBE_DMA_PCI_ARCR      0x00000003
-#define XGBE_DMA_PCI_AWCR      0x13131313
-#define XGBE_DMA_PCI_AWARCR    0x00000313
+#define XGBE_DMA_PCI_ARCR      0x000f0f0f
+#define XGBE_DMA_PCI_AWCR      0x0f0f0f0f
+#define XGBE_DMA_PCI_AWARCR    0x00000f0f
 
 /* DMA channel interrupt modes */
 #define XGBE_IRQ_MODE_EDGE     0
index 98cf82dea3e4a1cf67d6639d7a098f6a2f3ba16e..65981931a79899fb54754aecee86392ed544a51f 100644 (file)
@@ -172,6 +172,7 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
 
 err_free_buf_descs:
        dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
+       ring->cpu_addr = NULL;
        return -ENOMEM;
 }
 
index 15362d016a87e1862b756b7f10e8506795029462..0f6a6cb7e98d7714188bd6880b1d8d47b0d46a7d 100644 (file)
@@ -3239,6 +3239,9 @@ static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
        bool cmp_b = false;
        bool cmp_c = false;
 
+       if (!macb_is_gem(bp))
+               return;
+
        tp4sp_v = &(fs->h_u.tcp_ip4_spec);
        tp4sp_m = &(fs->m_u.tcp_ip4_spec);
 
@@ -3607,6 +3610,7 @@ static void macb_restore_features(struct macb *bp)
 {
        struct net_device *netdev = bp->dev;
        netdev_features_t features = netdev->features;
+       struct ethtool_rx_fs_item *item;
 
        /* TX checksum offload */
        macb_set_txcsum_feature(bp, features);
@@ -3615,6 +3619,9 @@ static void macb_restore_features(struct macb *bp)
        macb_set_rxcsum_feature(bp, features);
 
        /* RX Flow Filters */
+       list_for_each_entry(item, &bp->rx_fs_list.list, list)
+               gem_prog_cmp_regs(bp, &item->fs);
+
        macb_set_rxflow_feature(bp, features);
 }
 
@@ -3911,6 +3918,7 @@ static int macb_init(struct platform_device *pdev)
        reg = gem_readl(bp, DCFG8);
        bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
                        GEM_BFEXT(T2SCR, reg));
+       INIT_LIST_HEAD(&bp->rx_fs_list.list);
        if (bp->max_tuples > 0) {
                /* also needs one ethtype match to check IPv4 */
                if (GEM_BFEXT(SCR2ETH, reg) > 0) {
@@ -3921,7 +3929,6 @@ static int macb_init(struct platform_device *pdev)
                        /* Filtering is supported in hw but don't enable it in kernel now */
                        dev->hw_features |= NETIF_F_NTUPLE;
                        /* init Rx flow definitions */
-                       INIT_LIST_HEAD(&bp->rx_fs_list.list);
                        bp->rx_fs_list.count = 0;
                        spin_lock_init(&bp->rx_fs_lock);
                } else
index b248966837b4c2b2e8ae25cf05431d52c94c5832..7aad40b2aa736079a4260fac3cc8e58eb62c8a01 100644 (file)
           | CN6XXX_INTR_M0UNWI_ERR             \
           | CN6XXX_INTR_M1UPB0_ERR             \
           | CN6XXX_INTR_M1UPWI_ERR             \
-          | CN6XXX_INTR_M1UPB0_ERR             \
+          | CN6XXX_INTR_M1UNB0_ERR             \
           | CN6XXX_INTR_M1UNWI_ERR             \
           | CN6XXX_INTR_INSTR_DB_OF_ERR        \
           | CN6XXX_INTR_SLIST_DB_OF_ERR        \
index 6c85a10f465cd44a6c4a76af0be0389f92518a01..23a2ebdfd503b2e34ec0ecd798fc095db1b14b51 100644 (file)
@@ -1794,11 +1794,25 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
        struct cudbg_buffer temp_buff = { 0 };
        struct sge_qbase_reg_field *sge_qbase;
        struct ireg_buf *ch_sge_dbg;
+       u8 padap_running = 0;
        int i, rc;
+       u32 size;
 
-       rc = cudbg_get_buff(pdbg_init, dbg_buff,
-                           sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
-                           &temp_buff);
+       /* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can
+        * lead to SGE missing doorbells under heavy traffic. So, only
+        * collect them when adapter is idle.
+        */
+       for_each_port(padap, i) {
+               padap_running = netif_running(padap->port[i]);
+               if (padap_running)
+                       break;
+       }
+
+       size = sizeof(*ch_sge_dbg) * 2;
+       if (!padap_running)
+               size += sizeof(*sge_qbase);
+
+       rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
        if (rc)
                return rc;
 
@@ -1820,7 +1834,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
                ch_sge_dbg++;
        }
 
-       if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
+       if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 &&
+           !padap_running) {
                sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
                /* 1 addr reg SGE_QBASE_INDEX and 4 data reg
                 * SGE_QBASE_MAP[0-3]
index 98829e482bfa95f15b464122906a1a2201c32357..80882cfc370f5f00307874e97a06a91ee00cd264 100644 (file)
@@ -2090,7 +2090,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x1190, 0x1194,
                0x11a0, 0x11a4,
                0x11b0, 0x11b4,
-               0x11fc, 0x1274,
+               0x11fc, 0x123c,
+               0x1254, 0x1274,
                0x1280, 0x133c,
                0x1800, 0x18fc,
                0x3000, 0x302c,
index 1115b8f9ea4e393180f6379b108917030e70dce2..a3f5b80888e5a6c1923c32164086a76c32ef19fe 100644 (file)
@@ -349,18 +349,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
        return cxgb4_ofld_send(tx_info->netdev, skb);
 }
 
-/*
- * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
- * @tx_info - driver specific tls info.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
-{
-       return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
-                                 TCB_T_STATE_V(TCB_T_STATE_M),
-                                 CHCR_TCB_STATE_CLOSED, 1);
-}
-
 /*
  * chcr_ktls_dev_del:  call back for tls_dev_del.
  * Remove the tid and l2t entry and close the connection.
@@ -395,8 +383,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 
        /* clear tid */
        if (tx_info->tid != -1) {
-               /* clear tcb state and then release tid */
-               chcr_ktls_mark_tcb_close(tx_info);
                cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
                                 tx_info->tid, tx_info->ip_family);
        }
@@ -574,7 +560,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
        return 0;
 
 free_tid:
-       chcr_ktls_mark_tcb_close(tx_info);
 #if IS_ENABLED(CONFIG_IPV6)
        /* clear clip entry */
        if (tx_info->ip_family == AF_INET6)
@@ -672,10 +657,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
        if (tx_info->pending_close) {
                spin_unlock(&tx_info->lock);
                if (!status) {
-                       /* it's a late success, tcb status is established,
-                        * mark it close.
-                        */
-                       chcr_ktls_mark_tcb_close(tx_info);
                        cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
                                         tid, tx_info->ip_family);
                }
@@ -1663,54 +1644,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
        refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
 }
 
-/*
- * chcr_ktls_update_snd_una:  Reset the SEND_UNA. It will be done to avoid
- * sending the same segment again. It will discard the segment which is before
- * the current tx max.
- * @tx_info - driver specific tls info.
- * @q - TX queue.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
-                                   struct sge_eth_txq *q)
-{
-       struct fw_ulptx_wr *wr;
-       unsigned int ndesc;
-       int credits;
-       void *pos;
-       u32 len;
-
-       len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
-       ndesc = DIV_ROUND_UP(len, 64);
-
-       credits = chcr_txq_avail(&q->q) - ndesc;
-       if (unlikely(credits < 0)) {
-               chcr_eth_txq_stop(q);
-               return NETDEV_TX_BUSY;
-       }
-
-       pos = &q->q.desc[q->q.pidx];
-
-       wr = pos;
-       /* ULPTX wr */
-       wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
-       wr->cookie = 0;
-       /* fill len in wr field */
-       wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
-
-       pos += sizeof(*wr);
-
-       pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
-                                        TCB_SND_UNA_RAW_W,
-                                        TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
-                                        TCB_SND_UNA_RAW_V(0), 0);
-
-       chcr_txq_advance(&q->q, ndesc);
-       cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
-
-       return 0;
-}
-
 /*
  * chcr_end_part_handler: This handler will handle the record which
  * is complete or if record's end part is received. T6 adapter has a issue that
@@ -1735,7 +1668,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
                                 struct sge_eth_txq *q, u32 skb_offset,
                                 u32 tls_end_offset, bool last_wr)
 {
+       bool free_skb_if_tx_fails = false;
        struct sk_buff *nskb = NULL;
+
        /* check if it is a complete record */
        if (tls_end_offset == record->len) {
                nskb = skb;
@@ -1758,6 +1693,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
 
                if (last_wr)
                        dev_kfree_skb_any(skb);
+               else
+                       free_skb_if_tx_fails = true;
 
                last_wr = true;
 
@@ -1769,6 +1706,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
                                       record->num_frags,
                                       (last_wr && tcp_push_no_fin),
                                       mss)) {
+               if (free_skb_if_tx_fails)
+                       dev_kfree_skb_any(skb);
                goto out;
        }
        tx_info->prev_seq = record->end_seq;
@@ -1905,11 +1844,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
                        /* reset tcp_seq as per the prior_data_required len */
                        tcp_seq -= prior_data_len;
                }
-               /* reset snd una, so the middle record won't send the already
-                * sent part.
-                */
-               if (chcr_ktls_update_snd_una(tx_info, q))
-                       goto out;
                atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
        } else {
                atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
@@ -2010,12 +1944,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
         * we will send the complete record again.
         */
 
+       spin_lock_irqsave(&tx_ctx->base.lock, flags);
+
        do {
-               int i;
 
                cxgb4_reclaim_completed_tx(adap, &q->q, true);
-               /* lock taken */
-               spin_lock_irqsave(&tx_ctx->base.lock, flags);
                /* fetch the tls record */
                record = tls_get_record(&tx_ctx->base, tcp_seq,
                                        &tx_info->record_no);
@@ -2074,11 +2007,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
                                                    tls_end_offset, skb_offset,
                                                    0);
 
-                       spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
                        if (ret) {
                                /* free the refcount taken earlier */
                                if (tls_end_offset < data_len)
                                        dev_kfree_skb_any(skb);
+                               spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
                                goto out;
                        }
 
@@ -2088,16 +2021,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
                        continue;
                }
 
-               /* increase page reference count of the record, so that there
-                * won't be any chance of page free in middle if in case stack
-                * receives ACK and try to delete the record.
-                */
-               for (i = 0; i < record->num_frags; i++)
-                       __skb_frag_ref(&record->frags[i]);
-               /* lock cleared */
-               spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
-
-
                /* if a tls record is finishing in this SKB */
                if (tls_end_offset <= data_len) {
                        ret = chcr_end_part_handler(tx_info, skb, record,
@@ -2122,13 +2045,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
                        data_len = 0;
                }
 
-               /* clear the frag ref count which increased locally before */
-               for (i = 0; i < record->num_frags; i++) {
-                       /* clear the frag ref count */
-                       __skb_frag_unref(&record->frags[i]);
-               }
                /* if any failure, come out from the loop. */
                if (ret) {
+                       spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
                        if (th->fin)
                                dev_kfree_skb_any(skb);
 
@@ -2143,6 +2062,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 
        } while (data_len > 0);
 
+       spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
        atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
        atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
 
index 252adfa5d837ba1002498f3c6a83efd5f62187d1..8a9096aa85cdfe6c59048a0b02663c7d768bd9d2 100644 (file)
@@ -1471,8 +1471,10 @@ dm9000_probe(struct platform_device *pdev)
 
        /* Init network device */
        ndev = alloc_etherdev(sizeof(struct board_info));
-       if (!ndev)
-               return -ENOMEM;
+       if (!ndev) {
+               ret = -ENOMEM;
+               goto out_regulator_disable;
+       }
 
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
index e3a8858915b37acda846518e43f0c6395a24a084..df0eab479d51223e1f1c19eeb4e061827a18f68a 100644 (file)
@@ -963,7 +963,7 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue)
        unsigned long flag;
 
        netif_stop_queue(dev);
-       tasklet_disable(&np->tx_tasklet);
+       tasklet_disable_in_atomic(&np->tx_tasklet);
        iowrite16(0, ioaddr + IntrEnable);
        printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
                   "TxFrameId %2.2x,"
index 1cf8ef717453dd46bbde2c98e1525618f789b8e8..3ec4d9fddd521bb3fb5f36e275f40c7e5a8bcb4c 100644 (file)
@@ -363,7 +363,11 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
 
 static int gfar_set_mac_addr(struct net_device *dev, void *p)
 {
-       eth_mac_addr(dev, p);
+       int ret;
+
+       ret = eth_mac_addr(dev, p);
+       if (ret)
+               return ret;
 
        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
 
index e3f81c7e0ce74fa185773d3fc5f1284abf88d0e4..b0dbe6dcaa7b59b62fd26b1da308b8935219fa31 100644 (file)
@@ -3966,7 +3966,6 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
         *    normalcy is to reset.
         * 2. A new reset request from the stack due to timeout
         *
-        * For the first case,error event might not have ae handle available.
         * check if this is a new reset request and we are not here just because
         * last reset attempt did not succeed and watchdog hit us again. We will
         * know this if last reset request did not occur very recently (watchdog
@@ -3976,14 +3975,14 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
         * want to make sure we throttle the reset request. Therefore, we will
         * not allow it again before 3*HZ times.
         */
-       if (!handle)
-               handle = &hdev->vport[0].nic;
 
        if (time_before(jiffies, (hdev->last_reset_time +
                                  HCLGE_RESET_INTERVAL))) {
                mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
                return;
-       } else if (hdev->default_reset_request) {
+       }
+
+       if (hdev->default_reset_request) {
                hdev->reset_level =
                        hclge_get_reset_level(ae_dev,
                                              &hdev->default_reset_request);
@@ -11211,7 +11210,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
        if (ret)
                return ret;
 
-       /* RSS indirection table has been configuared by user */
+       /* RSS indirection table has been configured by user */
        if (rxfh_configured)
                goto out;
 
index 700e068764c81aff402640ec1989f20a851598d5..e295d359e912cb9065c31ce8eea26bc591917503 100644 (file)
@@ -2193,7 +2193,7 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
 
        if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
                               &hdev->reset_state)) {
-               /* PF has initmated that it is about to reset the hardware.
+               /* PF has intimated that it is about to reset the hardware.
                 * We now have to poll & check if hardware has actually
                 * completed the reset sequence. On hardware reset completion,
                 * VF needs to reset the client and ae device.
@@ -2624,14 +2624,14 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
+       clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
        hclgevf_reset_tqp_stats(handle);
 
        hclgevf_request_link_info(hdev);
 
        hclgevf_update_link_mode(hdev);
 
-       clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
-
        return 0;
 }
 
@@ -3497,7 +3497,7 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
        if (ret)
                return ret;
 
-       /* RSS indirection table has been configuared by user */
+       /* RSS indirection table has been configured by user */
        if (rxfh_configured)
                goto out;
 
index 9c6438d3b3a5be52fe3cd02f97481964b6bcedf7..ffb2a91750c7e09a17a7148e1ded5cfdbe8542c0 100644 (file)
@@ -1149,19 +1149,13 @@ static int __ibmvnic_open(struct net_device *netdev)
 
        rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
        if (rc) {
-               for (i = 0; i < adapter->req_rx_queues; i++)
-                       napi_disable(&adapter->napi[i]);
+               ibmvnic_napi_disable(adapter);
                release_resources(adapter);
                return rc;
        }
 
        netif_tx_start_all_queues(netdev);
 
-       if (prev_state == VNIC_CLOSED) {
-               for (i = 0; i < adapter->req_rx_queues; i++)
-                       napi_schedule(&adapter->napi[i]);
-       }
-
        adapter->state = VNIC_OPEN;
        return rc;
 }
@@ -1922,7 +1916,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
        u64 old_num_rx_queues, old_num_tx_queues;
        u64 old_num_rx_slots, old_num_tx_slots;
        struct net_device *netdev = adapter->netdev;
-       int i, rc;
+       int rc;
 
        netdev_dbg(adapter->netdev,
                   "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
@@ -2111,10 +2105,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
        /* refresh device's multicast list */
        ibmvnic_set_multi(netdev);
 
-       /* kick napi */
-       for (i = 0; i < adapter->req_rx_queues; i++)
-               napi_schedule(&adapter->napi[i]);
-
        if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
            adapter->reset_reason == VNIC_RESET_MOBILITY)
                __netdev_notify_peers(netdev);
@@ -3204,9 +3194,6 @@ restart_loop:
 
                next = ibmvnic_next_scrq(adapter, scrq);
                for (i = 0; i < next->tx_comp.num_comps; i++) {
-                       if (next->tx_comp.rcs[i])
-                               dev_err(dev, "tx error %x\n",
-                                       next->tx_comp.rcs[i]);
                        index = be32_to_cpu(next->tx_comp.correlators[i]);
                        if (index & IBMVNIC_TSO_POOL_MASK) {
                                tx_pool = &adapter->tso_pool[pool];
@@ -3220,7 +3207,13 @@ restart_loop:
                        num_entries += txbuff->num_entries;
                        if (txbuff->skb) {
                                total_bytes += txbuff->skb->len;
-                               dev_consume_skb_irq(txbuff->skb);
+                               if (next->tx_comp.rcs[i]) {
+                                       dev_err(dev, "tx error %x\n",
+                                               next->tx_comp.rcs[i]);
+                                       dev_kfree_skb_irq(txbuff->skb);
+                               } else {
+                                       dev_consume_skb_irq(txbuff->skb);
+                               }
                                txbuff->skb = NULL;
                        } else {
                                netdev_warn(adapter->netdev,
index cd53981fa5e092469e3568ec4e6520ee14e23355..15f93b3550990b5e768b073a9f9171c64e61a2a7 100644 (file)
@@ -142,6 +142,7 @@ enum i40e_state_t {
        __I40E_VIRTCHNL_OP_PENDING,
        __I40E_RECOVERY_MODE,
        __I40E_VF_RESETS_DISABLED,      /* disable resets during i40e_remove */
+       __I40E_VFS_RELEASING,
        /* This must be last as it determines the size of the BITMAP */
        __I40E_STATE_SIZE__,
 };
index d7c13ca9be7dd8f258415b240c6af86db7db4c59..d627b59ad446523983f3646662ab52921221880d 100644 (file)
@@ -578,6 +578,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
        case RING_TYPE_XDP:
                ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
                break;
+       default:
+               ring = NULL;
+               break;
        }
        if (!ring)
                return;
index c70dec65a57264fd101cfd7419cc903469ea7ca0..0e92668012e36879a3391419699dd8e860d6cd44 100644 (file)
@@ -232,6 +232,8 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
        I40E_STAT(struct i40e_vsi, _name, _stat)
 #define I40E_VEB_STAT(_name, _stat) \
        I40E_STAT(struct i40e_veb, _name, _stat)
+#define I40E_VEB_TC_STAT(_name, _stat) \
+       I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
 #define I40E_PFC_STAT(_name, _stat) \
        I40E_STAT(struct i40e_pfc_stats, _name, _stat)
 #define I40E_QUEUE_STAT(_name, _stat) \
@@ -266,11 +268,18 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = {
        I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
 };
 
+struct i40e_cp_veb_tc_stats {
+       u64 tc_rx_packets;
+       u64 tc_rx_bytes;
+       u64 tc_tx_packets;
+       u64 tc_tx_bytes;
+};
+
 static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
-       I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
-       I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
-       I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
-       I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
+       I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets),
+       I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes),
+       I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets),
+       I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes),
 };
 
 static const struct i40e_stats i40e_gstrings_misc_stats[] = {
@@ -1101,6 +1110,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
 
        /* Set flow control settings */
        ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
+       ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
 
        switch (hw->fc.requested_mode) {
        case I40E_FC_FULL:
@@ -2216,6 +2226,29 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
        }
 }
 
+/**
+ * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
+ * @tc: the TC statistics in VEB structure (veb->tc_stats)
+ * @i: the index of traffic class in (veb->tc_stats) structure to copy
+ *
+ * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to
+ * one dimensional structure i40e_cp_veb_tc_stats.
+ * Produce formatted i40e_cp_veb_tc_stats structure of the VEB TC
+ * statistics for the given TC.
+ **/
+static struct i40e_cp_veb_tc_stats
+i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i)
+{
+       struct i40e_cp_veb_tc_stats veb_tc = {
+               .tc_rx_packets = tc->tc_rx_packets[i],
+               .tc_rx_bytes = tc->tc_rx_bytes[i],
+               .tc_tx_packets = tc->tc_tx_packets[i],
+               .tc_tx_bytes = tc->tc_tx_bytes[i],
+       };
+
+       return veb_tc;
+}
+
 /**
  * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
  * @pf: the PF device structure
@@ -2300,8 +2333,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                               i40e_gstrings_veb_stats);
 
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
-               i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
-                                      i40e_gstrings_veb_tc_stats);
+               if (veb_stats) {
+                       struct i40e_cp_veb_tc_stats veb_tc =
+                               i40e_get_veb_tc_stats(&veb->tc_stats, i);
+
+                       i40e_add_ethtool_stats(&data, &veb_tc,
+                                              i40e_gstrings_veb_tc_stats);
+               } else {
+                       i40e_add_ethtool_stats(&data, NULL,
+                                              i40e_gstrings_veb_tc_stats);
+               }
 
        i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
 
@@ -5439,7 +5480,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
 
                status = i40e_aq_get_phy_register(hw,
                                I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
-                               true, addr, offset, &value, NULL);
+                               addr, true, offset, &value, NULL);
                if (status)
                        return -EIO;
                data[i] = value;
index 17f3b800640e0d3024de1d02468849dc14c8a1bc..527023ee4c076c7381fd4d57043221ccfd861506 100644 (file)
@@ -2560,8 +2560,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                                 i40e_stat_str(hw, aq_ret),
                                 i40e_aq_str(hw, hw->aq.asq_last_status));
                } else {
-                       dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
-                                vsi->netdev->name,
+                       dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
                                 cur_multipromisc ? "entering" : "leaving");
                }
        }
@@ -6738,9 +6737,9 @@ out:
                        set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
                        set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
                }
-       /* registers are set, lets apply */
-       if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
-               ret = i40e_hw_set_dcb_config(pf, new_cfg);
+               /* registers are set, lets apply */
+               if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
+                       ret = i40e_hw_set_dcb_config(pf, new_cfg);
        }
 
 err:
@@ -10573,12 +10572,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
                goto end_core_reset;
        }
 
-       if (!lock_acquired)
-               rtnl_lock();
-       ret = i40e_setup_pf_switch(pf, reinit);
-       if (ret)
-               goto end_unlock;
-
 #ifdef CONFIG_I40E_DCB
        /* Enable FW to write a default DCB config on link-up
         * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
@@ -10593,7 +10586,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
                        i40e_aq_set_dcb_parameters(hw, false, NULL);
                        dev_warn(&pf->pdev->dev,
                                 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
-                                pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+                       pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
                } else {
                        i40e_aq_set_dcb_parameters(hw, true, NULL);
                        ret = i40e_init_pf_dcb(pf);
@@ -10607,6 +10600,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
        }
 
 #endif /* CONFIG_I40E_DCB */
+       if (!lock_acquired)
+               rtnl_lock();
+       ret = i40e_setup_pf_switch(pf, reinit);
+       if (ret)
+               goto end_unlock;
 
        /* The driver only wants link up/down and module qualification
         * reports from firmware.  Note the negative logic.
@@ -12359,6 +12357,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
 {
        int err = 0;
        int size;
+       u16 pow;
 
        /* Set default capability flags */
        pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
@@ -12377,6 +12376,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
        pf->rss_table_size = pf->hw.func_caps.rss_table_size;
        pf->rss_size_max = min_t(int, pf->rss_size_max,
                                 pf->hw.func_caps.num_tx_qp);
+
+       /* find the next higher power-of-2 of num cpus */
+       pow = roundup_pow_of_two(num_online_cpus());
+       pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
+
        if (pf->hw.func_caps.rss) {
                pf->flags |= I40E_FLAG_RSS_ENABLED;
                pf->alloc_rss_size = min_t(int, pf->rss_size_max,
@@ -15140,12 +15144,16 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
         * in order to register the netdev
         */
        v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
-       if (v_idx < 0)
+       if (v_idx < 0) {
+               err = v_idx;
                goto err_switch_setup;
+       }
        pf->lan_vsi = v_idx;
        vsi = pf->vsi[v_idx];
-       if (!vsi)
+       if (!vsi) {
+               err = -EFAULT;
                goto err_switch_setup;
+       }
        vsi->alloc_queue_pairs = 1;
        err = i40e_config_netdev(vsi);
        if (err)
index 5747a99122fb4b49863efaeac49fcbc2d9cec021..06b4271219b1435b93b5d22e6793a4f739b584ce 100644 (file)
@@ -2295,8 +2295,7 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
  * @rx_ring: Rx ring being processed
  * @xdp: XDP buffer containing the frame
  **/
-static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
-                                   struct xdp_buff *xdp)
+static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 {
        int err, result = I40E_XDP_PASS;
        struct i40e_ring *xdp_ring;
@@ -2335,7 +2334,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
        }
 xdp_out:
        rcu_read_unlock();
-       return ERR_PTR(-result);
+       return result;
 }
 
 /**
@@ -2448,6 +2447,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        unsigned int xdp_xmit = 0;
        bool failure = false;
        struct xdp_buff xdp;
+       int xdp_res = 0;
 
 #if (PAGE_SIZE < 8192)
        frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
@@ -2513,12 +2513,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                        /* At larger PAGE_SIZE, frame_sz depend on len size */
                        xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
 #endif
-                       skb = i40e_run_xdp(rx_ring, &xdp);
+                       xdp_res = i40e_run_xdp(rx_ring, &xdp);
                }
 
-               if (IS_ERR(skb)) {
-                       unsigned int xdp_res = -PTR_ERR(skb);
-
+               if (xdp_res) {
                        if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
                                xdp_xmit |= xdp_res;
                                i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
index 1b6ec9be155a6352eb1a330172c50b70b1bad98a..5d301a466f5c516cfd5ed745ee7981d12c4aab07 100644 (file)
@@ -137,6 +137,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
  **/
 static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
 {
+       struct i40e_pf *pf = vf->pf;
        int i;
 
        i40e_vc_notify_vf_reset(vf);
@@ -147,6 +148,11 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
         * ensure a reset.
         */
        for (i = 0; i < 20; i++) {
+               /* If PF is in VFs releasing state reset VF is impossible,
+                * so leave it.
+                */
+               if (test_bit(__I40E_VFS_RELEASING, pf->state))
+                       return;
                if (i40e_reset_vf(vf, false))
                        return;
                usleep_range(10000, 20000);
@@ -1574,6 +1580,8 @@ void i40e_free_vfs(struct i40e_pf *pf)
 
        if (!pf->vf)
                return;
+
+       set_bit(__I40E_VFS_RELEASING, pf->state);
        while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
                usleep_range(1000, 2000);
 
@@ -1631,6 +1639,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
                }
        }
        clear_bit(__I40E_VF_DISABLE, pf->state);
+       clear_bit(__I40E_VFS_RELEASING, pf->state);
 }
 
 #ifdef CONFIG_PCI_IOV
index fc32c5019b0f847fe18fa5c61493044c9a9e6e0c..12ca84113587d077b2633de91230cd8844217431 100644 (file)
@@ -471,7 +471,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 
        nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
        if (!nb_pkts)
-               return false;
+               return true;
 
        if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
                nb_processed = xdp_ring->count - xdp_ring->next_to_use;
@@ -488,7 +488,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 
        i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);
 
-       return true;
+       return nb_pkts < budget;
 }
 
 /**
index 357706444dd506aed7b5d86f80c1084f737a3602..17101c45cbcd847350c634191a1eb3ad22963d45 100644 (file)
@@ -196,7 +196,6 @@ enum ice_state {
        __ICE_NEEDS_RESTART,
        __ICE_PREPARED_FOR_RESET,       /* set by driver when prepared */
        __ICE_RESET_OICR_RECV,          /* set by driver after rcv reset OICR */
-       __ICE_DCBNL_DEVRESET,           /* set by dcbnl devreset */
        __ICE_PFR_REQ,                  /* set by driver and peers */
        __ICE_CORER_REQ,                /* set by driver and peers */
        __ICE_GLOBR_REQ,                /* set by driver and peers */
@@ -624,7 +623,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
 const char *ice_stat_str(enum ice_status stat_err);
 const char *ice_aq_str(enum ice_aq_err aq_err);
-bool ice_is_wol_supported(struct ice_pf *pf);
+bool ice_is_wol_supported(struct ice_hw *hw);
 int
 ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
                    bool is_tun);
@@ -642,6 +641,7 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf);
 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
                          struct ice_rq_event_info *event);
 int ice_open(struct net_device *netdev);
+int ice_open_internal(struct net_device *netdev);
 int ice_stop(struct net_device *netdev);
 void ice_service_task_schedule(struct ice_pf *pf);
 
index 3d9475e222cda6fc8c44505feacb38aa837d7e71..a20edf1538a0003c7c2b4c5f3bc83a236b6f6947 100644 (file)
@@ -717,8 +717,8 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
 
                        if (!data) {
                                data = devm_kcalloc(ice_hw_to_dev(hw),
-                                                   sizeof(*data),
                                                    ICE_AQC_FW_LOG_ID_MAX,
+                                                   sizeof(*data),
                                                    GFP_KERNEL);
                                if (!data)
                                        return ICE_ERR_NO_MEMORY;
index faaa08e8171b58fe1d6f26835d9deeaecc7e0e6f..68866f4f0eb09ec5e4dc9fb364b3a334abd20a1b 100644 (file)
@@ -31,8 +31,8 @@ enum ice_ctl_q {
        ICE_CTL_Q_MAILBOX,
 };
 
-/* Control Queue timeout settings - max delay 250ms */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT       2500  /* Count 2500 times */
+/* Control Queue timeout settings - max delay 1s */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT       10000 /* Count 10000 times */
 #define ICE_CTL_Q_SQ_CMD_USEC          100   /* Check every 100usec */
 #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT   10    /* Count 10 times */
 #define ICE_CTL_Q_ADMIN_INIT_MSEC      100   /* Check every 100msec */
index e42727941ef539ea1115f03008b495e8d9c6b776..28e834a128c07be428b17a5d46d0a784394e6227 100644 (file)
@@ -738,22 +738,27 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
 /**
  * ice_cee_to_dcb_cfg
  * @cee_cfg: pointer to CEE configuration struct
- * @dcbcfg: DCB configuration struct
+ * @pi: port information structure
  *
  * Convert CEE configuration from firmware to DCB configuration
  */
 static void
 ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
-                  struct ice_dcbx_cfg *dcbcfg)
+                  struct ice_port_info *pi)
 {
        u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
-       u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
-       u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+       u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j;
        u8 i, err, sync, oper, app_index, ice_app_sel_type;
+       u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
        u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+       struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
        u16 ice_app_prot_id_type;
 
-       /* CEE PG data to ETS config */
+       dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
+       dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE;
+       dcbcfg->tlv_status = tlv_status;
+
+       /* CEE PG data */
        dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
 
        /* Note that the FW creates the oper_prio_tc nibbles reversed
@@ -780,10 +785,16 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
                }
        }
 
-       /* CEE PFC data to ETS config */
+       /* CEE PFC data */
        dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
        dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
 
+       /* CEE APP TLV data */
+       if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
+               cmp_dcbcfg = &pi->qos_cfg.desired_dcbx_cfg;
+       else
+               cmp_dcbcfg = &pi->qos_cfg.remote_dcbx_cfg;
+
        app_index = 0;
        for (i = 0; i < 3; i++) {
                if (i == 0) {
@@ -802,6 +813,18 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
                        ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
                        ice_app_sel_type = ICE_APP_SEL_TCPIP;
                        ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
+
+                       for (j = 0; j < cmp_dcbcfg->numapps; j++) {
+                               u16 prot_id = cmp_dcbcfg->app[j].prot_id;
+                               u8 sel = cmp_dcbcfg->app[j].selector;
+
+                               if  (sel == ICE_APP_SEL_TCPIP &&
+                                    (prot_id == ICE_APP_PROT_ID_ISCSI ||
+                                     prot_id == ICE_APP_PROT_ID_ISCSI_860)) {
+                                       ice_app_prot_id_type = prot_id;
+                                       break;
+                               }
+                       }
                } else {
                        /* FIP APP */
                        ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
@@ -892,11 +915,8 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
        ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
        if (!ret) {
                /* CEE mode */
-               dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
-               dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
-               dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status);
-               ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
                ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
+               ice_cee_to_dcb_cfg(&cee_cfg, pi);
        } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
                /* CEE mode not enabled try querying IEEE data */
                dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
index 468a63f7eff929de38743deb13c119c1a0a13678..4180f1f35fb89caccb986278495ba091a6822e1f 100644 (file)
@@ -18,12 +18,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev)
        while (ice_is_reset_in_progress(pf->state))
                usleep_range(1000, 2000);
 
-       set_bit(__ICE_DCBNL_DEVRESET, pf->state);
        dev_close(netdev);
        netdev_state_change(netdev);
        dev_open(netdev, NULL);
        netdev_state_change(netdev);
-       clear_bit(__ICE_DCBNL_DEVRESET, pf->state);
 }
 
 /**
index 2dcfa685b76393aba429d791fd224f58633329c3..32ba71a1616520d97592eef9e69a1db816ae5b1a 100644 (file)
@@ -3472,7 +3472,7 @@ static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
                netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
 
        /* Get WoL settings based on the HW capability */
-       if (ice_is_wol_supported(pf)) {
+       if (ice_is_wol_supported(&pf->hw)) {
                wol->supported = WAKE_MAGIC;
                wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
        } else {
@@ -3492,7 +3492,7 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        struct ice_vsi *vsi = np->vsi;
        struct ice_pf *pf = vsi->back;
 
-       if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf))
+       if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw))
                return -EOPNOTSUPP;
 
        /* only magic packet is supported */
index 8d4e2ad4328d1abba195e7a441281b199391b63f..d13c7fc8fb0a24cbf4921bb3dd5f7534e77c1dc0 100644 (file)
@@ -2620,7 +2620,7 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
                        if (!locked)
                                rtnl_lock();
 
-                       err = ice_open(vsi->netdev);
+                       err = ice_open_internal(vsi->netdev);
 
                        if (!locked)
                                rtnl_unlock();
@@ -2649,7 +2649,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
                        if (!locked)
                                rtnl_lock();
 
-                       ice_stop(vsi->netdev);
+                       ice_vsi_close(vsi);
 
                        if (!locked)
                                rtnl_unlock();
@@ -3078,7 +3078,6 @@ err_vsi:
 bool ice_is_reset_in_progress(unsigned long *state)
 {
        return test_bit(__ICE_RESET_OICR_RECV, state) ||
-              test_bit(__ICE_DCBNL_DEVRESET, state) ||
               test_bit(__ICE_PFR_REQ, state) ||
               test_bit(__ICE_CORER_REQ, state) ||
               test_bit(__ICE_GLOBR_REQ, state);
index 2c23c8f468a5494994f615782897ae3a95701472..d821c687f239c5c82bbb3ffa4fb30145a1f5a23d 100644 (file)
@@ -3537,15 +3537,14 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
 }
 
 /**
- * ice_is_wol_supported - get NVM state of WoL
- * @pf: board private structure
+ * ice_is_wol_supported - check if WoL is supported
+ * @hw: pointer to hardware info
  *
  * Check if WoL is supported based on the HW configuration.
  * Returns true if NVM supports and enables WoL for this port, false otherwise
  */
-bool ice_is_wol_supported(struct ice_pf *pf)
+bool ice_is_wol_supported(struct ice_hw *hw)
 {
-       struct ice_hw *hw = &pf->hw;
        u16 wol_ctrl;
 
        /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
@@ -3554,7 +3553,7 @@ bool ice_is_wol_supported(struct ice_pf *pf)
        if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
                return false;
 
-       return !(BIT(hw->pf_id) & wol_ctrl);
+       return !(BIT(hw->port_info->lport) & wol_ctrl);
 }
 
 /**
@@ -4192,28 +4191,25 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
                goto err_send_version_unroll;
        }
 
+       /* not a fatal error if this fails */
        err = ice_init_nvm_phy_type(pf->hw.port_info);
-       if (err) {
+       if (err)
                dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
-               goto err_send_version_unroll;
-       }
 
+       /* not a fatal error if this fails */
        err = ice_update_link_info(pf->hw.port_info);
-       if (err) {
+       if (err)
                dev_err(dev, "ice_update_link_info failed: %d\n", err);
-               goto err_send_version_unroll;
-       }
 
        ice_init_link_dflt_override(pf->hw.port_info);
 
        /* if media available, initialize PHY settings */
        if (pf->hw.port_info->phy.link_info.link_info &
            ICE_AQ_MEDIA_AVAILABLE) {
+               /* not a fatal error if this fails */
                err = ice_init_phy_user_cfg(pf->hw.port_info);
-               if (err) {
+               if (err)
                        dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
-                       goto err_send_version_unroll;
-               }
 
                if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
                        struct ice_vsi *vsi = ice_get_main_vsi(pf);
@@ -4568,6 +4564,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
                        continue;
                ice_vsi_free_q_vectors(pf->vsi[v]);
        }
+       ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
        ice_clear_interrupt_scheme(pf);
 
        pci_save_state(pdev);
@@ -6635,6 +6632,28 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
  * Returns 0 on success, negative value on failure
  */
 int ice_open(struct net_device *netdev)
+{
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_pf *pf = np->vsi->back;
+
+       if (ice_is_reset_in_progress(pf->state)) {
+               netdev_err(netdev, "can't open net device while reset is in progress");
+               return -EBUSY;
+       }
+
+       return ice_open_internal(netdev);
+}
+
+/**
+ * ice_open_internal - Called when a network interface becomes active
+ * @netdev: network interface device structure
+ *
+ * Internal ice_open implementation. Should not be used directly except for ice_open and reset
+ * handling routine
+ *
+ * Returns 0 on success, negative value on failure
+ */
+int ice_open_internal(struct net_device *netdev)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
@@ -6715,6 +6734,12 @@ int ice_stop(struct net_device *netdev)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
+       struct ice_pf *pf = vsi->back;
+
+       if (ice_is_reset_in_progress(pf->state)) {
+               netdev_err(netdev, "can't stop net device while reset is in progress");
+               return -EBUSY;
+       }
 
        ice_vsi_close(vsi);
 
index 67c965a3f5d28a5da9ddf4e2a67e2d696b9711b3..834cbd3f7b31945b2a2c59971bebbdf31ee0bfde 100644 (file)
@@ -1238,6 +1238,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
                        ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
                                                vsi_list_id);
 
+               if (!m_entry->vsi_list_info)
+                       return ICE_ERR_NO_MEMORY;
+
                /* If this entry was large action then the large action needs
                 * to be updated to point to FWD to VSI list
                 */
@@ -2220,6 +2223,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
        return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
                 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
                (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+                fm_entry->vsi_list_info &&
                 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
 }
 
@@ -2292,14 +2296,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
                return ICE_ERR_PARAM;
 
        list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
-               struct ice_fltr_info *fi;
-
-               fi = &fm_entry->fltr_info;
-               if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
+               if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
                        continue;
 
                status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
-                                                       vsi_list_head, fi);
+                                                       vsi_list_head,
+                                                       &fm_entry->fltr_info);
                if (status)
                        return status;
        }
@@ -2622,7 +2624,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
                                          &remove_list_head);
        mutex_unlock(rule_lock);
        if (status)
-               return;
+               goto free_fltr_list;
 
        switch (lkup) {
        case ICE_SW_LKUP_MAC:
@@ -2645,6 +2647,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
                break;
        }
 
+free_fltr_list:
        list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
                list_del(&fm_entry->list_entry);
                devm_kfree(ice_hw_to_dev(hw), fm_entry);
index a6cb0c35748c5fe9fec80d93341da4ed63cacf18..266036b7a49ab2fd1ccaa15081cc4de82fc990e1 100644 (file)
@@ -535,6 +535,7 @@ struct ice_dcb_app_priority_table {
 #define ICE_TLV_STATUS_ERR     0x4
 #define ICE_APP_PROT_ID_FCOE   0x8906
 #define ICE_APP_PROT_ID_ISCSI  0x0cbc
+#define ICE_APP_PROT_ID_ISCSI_860 0x035c
 #define ICE_APP_PROT_ID_FIP    0x8914
 #define ICE_APP_SEL_ETHTYPE    0x1
 #define ICE_APP_SEL_TCPIP      0x2
index 03d9aad516d4e3c3a8005b40c521f3882d7694b5..cffb95f8f63266bd63814a90d8703b5e9fa81ea9 100644 (file)
@@ -6536,6 +6536,13 @@ err_setup_tx:
        return err;
 }
 
+static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring)
+{
+       struct ixgbe_q_vector *q_vector = rx_ring->q_vector;
+
+       return q_vector ? q_vector->napi.napi_id : 0;
+}
+
 /**
  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
  * @adapter: pointer to ixgbe_adapter
@@ -6583,7 +6590,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 
        /* XDP RX-queue info */
        if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
-                            rx_ring->queue_index, rx_ring->q_vector->napi.napi_id) < 0)
+                            rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
                goto err;
 
        rx_ring->xdp_prog = adapter->xdp_prog;
@@ -6892,6 +6899,11 @@ static int __maybe_unused ixgbe_resume(struct device *dev_d)
 
        adapter->hw.hw_addr = adapter->io_addr;
 
+       err = pci_enable_device_mem(pdev);
+       if (err) {
+               e_dev_err("Cannot enable PCI device from suspend\n");
+               return err;
+       }
        smp_mb__before_atomic();
        clear_bit(__IXGBE_DISABLED, &adapter->state);
        pci_set_master(pdev);
index e9efe074edc113d01d1416cc8bf5bbadab7ea7fc..f1b9284e0bea8c5ef3b3923ccde33d3d4cacdb11 100644 (file)
@@ -1265,9 +1265,9 @@ jme_stop_shutdown_timer(struct jme_adapter *jme)
        jwrite32f(jme, JME_APMC, apmc);
 }
 
-static void jme_link_change_tasklet(struct tasklet_struct *t)
+static void jme_link_change_work(struct work_struct *work)
 {
-       struct jme_adapter *jme = from_tasklet(jme, t, linkch_task);
+       struct jme_adapter *jme = container_of(work, struct jme_adapter, linkch_task);
        struct net_device *netdev = jme->dev;
        int rc;
 
@@ -1510,7 +1510,7 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
                 * all other events are ignored
                 */
                jwrite32(jme, JME_IEVE, intrstat);
-               tasklet_schedule(&jme->linkch_task);
+               schedule_work(&jme->linkch_task);
                goto out_reenable;
        }
 
@@ -1832,7 +1832,6 @@ jme_open(struct net_device *netdev)
        jme_clear_pm_disable_wol(jme);
        JME_NAPI_ENABLE(jme);
 
-       tasklet_setup(&jme->linkch_task, jme_link_change_tasklet);
        tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
        tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
        tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
@@ -1920,7 +1919,7 @@ jme_close(struct net_device *netdev)
 
        JME_NAPI_DISABLE(jme);
 
-       tasklet_kill(&jme->linkch_task);
+       cancel_work_sync(&jme->linkch_task);
        tasklet_kill(&jme->txclean_task);
        tasklet_kill(&jme->rxclean_task);
        tasklet_kill(&jme->rxempty_task);
@@ -3035,6 +3034,7 @@ jme_init_one(struct pci_dev *pdev,
        atomic_set(&jme->rx_empty, 1);
 
        tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
+       INIT_WORK(&jme->linkch_task, jme_link_change_work);
        jme->dpi.cur = PCC_P1;
 
        jme->reg_ghc = 0;
index a2c3b00d939d049dd6fb3e7e3627888e1c4aa675..2af76329b4a27d3bb83bce30774ca31d0fecf542 100644 (file)
@@ -411,7 +411,7 @@ struct jme_adapter {
        struct tasklet_struct   rxempty_task;
        struct tasklet_struct   rxclean_task;
        struct tasklet_struct   txclean_task;
-       struct tasklet_struct   linkch_task;
+       struct work_struct      linkch_task;
        struct tasklet_struct   pcc_task;
        unsigned long           flags;
        u32                     reg_txcs;
index b051417ede67bdff2fd6bb732f01ba51a5206b4f..9153c9bda96fa5bf9a0abfd3dba47c0d9994cd47 100644 (file)
@@ -191,12 +191,12 @@ static bool is_ib_supported(struct mlx5_core_dev *dev)
 }
 
 enum {
-       MLX5_INTERFACE_PROTOCOL_ETH_REP,
        MLX5_INTERFACE_PROTOCOL_ETH,
+       MLX5_INTERFACE_PROTOCOL_ETH_REP,
 
+       MLX5_INTERFACE_PROTOCOL_IB,
        MLX5_INTERFACE_PROTOCOL_IB_REP,
        MLX5_INTERFACE_PROTOCOL_MPIB,
-       MLX5_INTERFACE_PROTOCOL_IB,
 
        MLX5_INTERFACE_PROTOCOL_VNET,
 };
index d7d8a68ef23d7e1abf5f50d70dcc6b218e1aa031..d0f9d3cee97d596b335d66a10686920e14fb0e98 100644 (file)
@@ -246,6 +246,11 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink,
        struct mlx5_devlink_trap *dl_trap;
        int err = 0;
 
+       if (is_mdev_switchdev_mode(dev)) {
+               NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode");
+               return -EOPNOTSUPP;
+       }
+
        dl_trap = mlx5_find_trap_by_id(dev, trap->id);
        if (!dl_trap) {
                mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
index 304b296fe8b989210824b62606bd934b0972c9c4..bc6f77ea0a31f1f2a945e7a87d221cf331300970 100644 (file)
@@ -516,6 +516,7 @@ struct mlx5e_icosq {
        struct mlx5_wq_cyc         wq;
        void __iomem              *uar_map;
        u32                        sqn;
+       u16                        reserved_room;
        unsigned long              state;
 
        /* control path */
index 308fd279669ece9861cf7cf0c0f1fe4cb4310fd4..89510cac46c22ca4410425a9735feb16b8a60865 100644 (file)
@@ -387,21 +387,6 @@ enum mlx5e_fec_supported_link_mode {
                        *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
        } while (0)
 
-#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link)                  \
-       do {                                                                            \
-               unsigned long policy_long;                                              \
-               u16 *__policy = &(policy);                                              \
-               bool _write = (write);                                                  \
-                                                                                       \
-               policy_long = *__policy;                                                \
-               if (_write && *__policy)                                                \
-                       *__policy = find_first_bit(&policy_long,                        \
-                                                  sizeof(policy_long) * BITS_PER_BYTE);\
-               MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link);          \
-               if (!_write && *__policy)                                               \
-                       *__policy = 1 << *__policy;                                     \
-       } while (0)
-
 /* get/set FEC admin field for a given speed */
 static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
                                 enum mlx5e_fec_supported_link_mode link_mode)
@@ -423,16 +408,16 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
                MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g);
                break;
        case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
-               MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x);
+               MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g_1x);
                break;
        case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
-               MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x);
+               MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_2x);
                break;
        case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
-               MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x);
+               MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_4x);
                break;
        case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
-               MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x);
+               MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x);
                break;
        default:
                return -EINVAL;
index b2cd29847a371ee3dfb4b3b74a2d1154c88ab535..68e54cc1cd1664261d4b596d2da3b596a0171f1c 100644 (file)
@@ -185,6 +185,28 @@ mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
        return !!(entry->tuple_nat_node.next);
 }
 
+static int
+mlx5_get_label_mapping(struct mlx5_tc_ct_priv *ct_priv,
+                      u32 *labels, u32 *id)
+{
+       if (!memchr_inv(labels, 0, sizeof(u32) * 4)) {
+               *id = 0;
+               return 0;
+       }
+
+       if (mapping_add(ct_priv->labels_mapping, labels, id))
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+
+static void
+mlx5_put_label_mapping(struct mlx5_tc_ct_priv *ct_priv, u32 id)
+{
+       if (id)
+               mapping_remove(ct_priv->labels_mapping, id);
+}
+
 static int
 mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
 {
@@ -436,7 +458,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
        mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
        mlx5e_mod_hdr_detach(ct_priv->dev,
                             ct_priv->mod_hdr_tbl, zone_rule->mh);
-       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
        kfree(attr);
 }
 
@@ -639,8 +661,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
        if (!meta)
                return -EOPNOTSUPP;
 
-       err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels,
-                         &attr->ct_attr.ct_labels_id);
+       err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
+                                    &attr->ct_attr.ct_labels_id);
        if (err)
                return -EOPNOTSUPP;
        if (nat) {
@@ -677,7 +699,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 
 err_mapping:
        dealloc_mod_hdr_actions(&mod_acts);
-       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
        return err;
 }
 
@@ -745,7 +767,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 err_rule:
        mlx5e_mod_hdr_detach(ct_priv->dev,
                             ct_priv->mod_hdr_tbl, zone_rule->mh);
-       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 err_mod_hdr:
        kfree(attr);
 err_attr:
@@ -1197,7 +1219,7 @@ void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_
        if (!priv || !ct_attr->ct_labels_id)
                return;
 
-       mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id);
+       mlx5_put_label_mapping(priv, ct_attr->ct_labels_id);
 }
 
 int
@@ -1280,7 +1302,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
                ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
                ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
                ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
-               if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+               if (mlx5_get_label_mapping(priv, ct_labels, &ct_attr->ct_labels_id))
                        return -EOPNOTSUPP;
                mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
                                            MLX5_CT_LABELS_MASK);
index 67de2bf36861d2c006c2c7335536cf3e53138103..e1271998b937917133e1dc12ea403f110dae66ce 100644 (file)
@@ -21,6 +21,11 @@ enum {
        MLX5E_TC_TUNNEL_TYPE_MPLSOUDP,
 };
 
+struct mlx5e_encap_key {
+       const struct ip_tunnel_key *ip_tun_key;
+       struct mlx5e_tc_tunnel     *tc_tunnel;
+};
+
 struct mlx5e_tc_tunnel {
        int tunnel_type;
        enum mlx5_flow_match_level match_level;
@@ -44,6 +49,8 @@ struct mlx5e_tc_tunnel {
                            struct flow_cls_offload *f,
                            void *headers_c,
                            void *headers_v);
+       bool (*encap_info_equal)(struct mlx5e_encap_key *a,
+                                struct mlx5e_encap_key *b);
 };
 
 extern struct mlx5e_tc_tunnel vxlan_tunnel;
@@ -101,6 +108,9 @@ int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
                                 void *headers_c,
                                 void *headers_v);
 
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+                                          struct mlx5e_encap_key *b);
+
 #endif /* CONFIG_MLX5_ESWITCH */
 
 #endif //__MLX5_EN_TC_TUNNEL_H__
index 7f7b0f6dcdf954f87d5681454f9082c063fac387..9f16ad2c0710bff4e4056dd5c769445dedd2225c 100644 (file)
@@ -476,16 +476,11 @@ void mlx5e_detach_decap(struct mlx5e_priv *priv,
        mlx5e_decap_dealloc(priv, d);
 }
 
-struct encap_key {
-       const struct ip_tunnel_key *ip_tun_key;
-       struct mlx5e_tc_tunnel *tc_tunnel;
-};
-
-static int cmp_encap_info(struct encap_key *a,
-                         struct encap_key *b)
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+                                          struct mlx5e_encap_key *b)
 {
-       return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
-               a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
+       return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) == 0 &&
+               a->tc_tunnel->tunnel_type == b->tc_tunnel->tunnel_type;
 }
 
 static int cmp_decap_info(struct mlx5e_decap_key *a,
@@ -494,7 +489,7 @@ static int cmp_decap_info(struct mlx5e_decap_key *a,
        return memcmp(&a->key, &b->key, sizeof(b->key));
 }
 
-static int hash_encap_info(struct encap_key *key)
+static int hash_encap_info(struct mlx5e_encap_key *key)
 {
        return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
                     key->tc_tunnel->tunnel_type);
@@ -516,18 +511,18 @@ static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
 }
 
 static struct mlx5e_encap_entry *
-mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
+mlx5e_encap_get(struct mlx5e_priv *priv, struct mlx5e_encap_key *key,
                uintptr_t hash_key)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5e_encap_key e_key;
        struct mlx5e_encap_entry *e;
-       struct encap_key e_key;
 
        hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
                                   encap_hlist, hash_key) {
                e_key.ip_tun_key = &e->tun_info->key;
                e_key.tc_tunnel = e->tunnel;
-               if (!cmp_encap_info(&e_key, key) &&
+               if (e->tunnel->encap_info_equal(&e_key, key) &&
                    mlx5e_encap_take(e))
                        return e;
        }
@@ -694,8 +689,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
        struct mlx5_flow_attr *attr = flow->attr;
        const struct ip_tunnel_info *tun_info;
        unsigned long tbl_time_before = 0;
-       struct encap_key key;
        struct mlx5e_encap_entry *e;
+       struct mlx5e_encap_key key;
        bool entry_created = false;
        unsigned short family;
        uintptr_t hash_key;
index 7ed3f9f79f11ac95ce86f9b1c8630b43c689d2e7..f5b26f5a7de46054ae57d4605c3513210bee5336 100644 (file)
@@ -329,6 +329,34 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
        return mlx5e_tc_tun_parse_geneve_options(priv, spec, f);
 }
 
+static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a,
+                                                struct mlx5e_encap_key *b)
+{
+       struct ip_tunnel_info *a_info;
+       struct ip_tunnel_info *b_info;
+       bool a_has_opts, b_has_opts;
+
+       if (!mlx5e_tc_tun_encap_info_equal_generic(a, b))
+               return false;
+
+       a_has_opts = !!(a->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+       b_has_opts = !!(b->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+
+       /* keys are equal when both don't have any options attached */
+       if (!a_has_opts && !b_has_opts)
+               return true;
+
+       if (a_has_opts != b_has_opts)
+               return false;
+
+       /* geneve options stored in memory next to ip_tunnel_info struct */
+       a_info = container_of(a->ip_tun_key, struct ip_tunnel_info, key);
+       b_info = container_of(b->ip_tun_key, struct ip_tunnel_info, key);
+
+       return a_info->options_len == b_info->options_len &&
+               memcmp(a_info + 1, b_info + 1, a_info->options_len) == 0;
+}
+
 struct mlx5e_tc_tunnel geneve_tunnel = {
        .tunnel_type          = MLX5E_TC_TUNNEL_TYPE_GENEVE,
        .match_level          = MLX5_MATCH_L4,
@@ -338,4 +366,5 @@ struct mlx5e_tc_tunnel geneve_tunnel = {
        .generate_ip_tun_hdr  = mlx5e_gen_ip_tunnel_header_geneve,
        .parse_udp_ports      = mlx5e_tc_tun_parse_udp_ports_geneve,
        .parse_tunnel         = mlx5e_tc_tun_parse_geneve,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_geneve,
 };
index 2805416c32a3cbfa373c0b08706d88f182ff4669..ada14f0574dc6cb1d7d940a69e8dbddc7a75c337 100644 (file)
@@ -94,4 +94,5 @@ struct mlx5e_tc_tunnel gre_tunnel = {
        .generate_ip_tun_hdr  = mlx5e_gen_ip_tunnel_header_gretap,
        .parse_udp_ports      = NULL,
        .parse_tunnel         = mlx5e_tc_tun_parse_gretap,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_generic,
 };
index 3479672e84cf4659cd0157a61ecfdec7b7d709af..60952b33b5688835ddd5bfdddc2a0e30fa56fd5f 100644 (file)
@@ -131,4 +131,5 @@ struct mlx5e_tc_tunnel mplsoudp_tunnel = {
        .generate_ip_tun_hdr  = generate_ip_tun_hdr,
        .parse_udp_ports      = parse_udp_ports,
        .parse_tunnel         = parse_tunnel,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_generic,
 };
index 038a0f1cecec63eb3199306d75fd888bde24dd12..4267f3a1059e7f4933e2da58d33df186dc3797ee 100644 (file)
@@ -150,4 +150,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
        .generate_ip_tun_hdr  = mlx5e_gen_ip_tunnel_header_vxlan,
        .parse_udp_ports      = mlx5e_tc_tun_parse_udp_ports_vxlan,
        .parse_tunnel         = mlx5e_tc_tun_parse_vxlan,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_generic,
 };
index 2371b83dad9ca86e344795535ae31572135325e2..055c3bc2373393dd656ef8a03372f02c81fd6aaf 100644 (file)
@@ -441,4 +441,10 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
        return wqe_size * 2 - 1;
 }
 
+static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
+{
+       u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size);
+
+       return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
+}
 #endif
index d06532d0baa430e7970190a58969507a98ef3fa4..19d22a63313f38e7cabae20a5e31898a5f426f7b 100644 (file)
@@ -46,7 +46,8 @@ struct mlx5e_ktls_offload_context_rx {
        struct tls12_crypto_info_aes_gcm_128 crypto_info;
        struct accel_rule rule;
        struct sock *sk;
-       struct mlx5e_rq_stats *stats;
+       struct mlx5e_rq_stats *rq_stats;
+       struct mlx5e_tls_sw_stats *sw_stats;
        struct completion add_ctx;
        u32 tirn;
        u32 key_id;
@@ -137,11 +138,10 @@ post_static_params(struct mlx5e_icosq *sq,
 {
        struct mlx5e_set_tls_static_params_wqe *wqe;
        struct mlx5e_icosq_wqe_info wi;
-       u16 pi, num_wqebbs, room;
+       u16 pi, num_wqebbs;
 
        num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
-       room = mlx5e_stop_room_for_wqe(num_wqebbs);
-       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+       if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
                return ERR_PTR(-ENOSPC);
 
        pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -168,11 +168,10 @@ post_progress_params(struct mlx5e_icosq *sq,
 {
        struct mlx5e_set_tls_progress_params_wqe *wqe;
        struct mlx5e_icosq_wqe_info wi;
-       u16 pi, num_wqebbs, room;
+       u16 pi, num_wqebbs;
 
        num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
-       room = mlx5e_stop_room_for_wqe(num_wqebbs);
-       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+       if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
                return ERR_PTR(-ENOSPC);
 
        pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -218,7 +217,7 @@ unlock:
        return err;
 
 err_out:
-       priv_rx->stats->tls_resync_req_skip++;
+       priv_rx->rq_stats->tls_resync_req_skip++;
        err = PTR_ERR(cseg);
        complete(&priv_rx->add_ctx);
        goto unlock;
@@ -277,17 +276,15 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 
        buf->priv_rx = priv_rx;
 
-       BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
-
        spin_lock_bh(&sq->channel->async_icosq_lock);
 
-       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+       if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
                spin_unlock_bh(&sq->channel->async_icosq_lock);
                err = -ENOSPC;
                goto err_dma_unmap;
        }
 
-       pi = mlx5e_icosq_get_next_pi(sq, 1);
+       pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
        wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);
 
 #define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))
@@ -307,7 +304,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 
        wi = (struct mlx5e_icosq_wqe_info) {
                .wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
-               .num_wqebbs = 1,
+               .num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
                .tls_get_params.buf = buf,
        };
        icosq_fill_wi(sq, pi, &wi);
@@ -322,7 +319,7 @@ err_dma_unmap:
 err_free:
        kfree(buf);
 err_out:
-       priv_rx->stats->tls_resync_req_skip++;
+       priv_rx->rq_stats->tls_resync_req_skip++;
        return err;
 }
 
@@ -378,13 +375,13 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
 
        cseg = post_static_params(sq, priv_rx);
        if (IS_ERR(cseg)) {
-               priv_rx->stats->tls_resync_res_skip++;
+               priv_rx->rq_stats->tls_resync_res_skip++;
                err = PTR_ERR(cseg);
                goto unlock;
        }
        /* Do not increment priv_rx refcnt, CQE handling is empty */
        mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
-       priv_rx->stats->tls_resync_res_ok++;
+       priv_rx->rq_stats->tls_resync_res_ok++;
 unlock:
        spin_unlock_bh(&c->async_icosq_lock);
 
@@ -420,13 +417,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
        auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
        if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
            auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
-               priv_rx->stats->tls_resync_req_skip++;
+               priv_rx->rq_stats->tls_resync_req_skip++;
                goto out;
        }
 
        hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
        tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
-       priv_rx->stats->tls_resync_req_end++;
+       priv_rx->rq_stats->tls_resync_req_end++;
 out:
        mlx5e_ktls_priv_rx_put(priv_rx);
        dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
@@ -609,7 +606,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        priv_rx->rxq = rxq;
        priv_rx->sk = sk;
 
-       priv_rx->stats = &priv->channel_stats[rxq].rq;
+       priv_rx->rq_stats = &priv->channel_stats[rxq].rq;
+       priv_rx->sw_stats = &priv->tls->sw_stats;
        mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
 
        rqtn = priv->direct_tir[rxq].rqt.rqtn;
@@ -630,7 +628,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        if (err)
                goto err_post_wqes;
 
-       priv_rx->stats->tls_ctx++;
+       atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx);
 
        return 0;
 
@@ -666,7 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
        if (cancel_work_sync(&resync->work))
                mlx5e_ktls_priv_rx_put(priv_rx);
 
-       priv_rx->stats->tls_del++;
+       atomic64_inc(&priv_rx->sw_stats->rx_tls_del);
        if (priv_rx->rule.rule)
                mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
 
index d16def68ecff7214c02985ae934c45d059a9e13a..51bdf71073f31f691515e39b392f352a87c49044 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 // Copyright (c) 2019 Mellanox Technologies.
 
+#include "en_accel/tls.h"
 #include "en_accel/ktls_txrx.h"
 #include "en_accel/ktls_utils.h"
 
@@ -50,6 +51,7 @@ static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
 struct mlx5e_ktls_offload_context_tx {
        struct tls_offload_context_tx *tx_ctx;
        struct tls12_crypto_info_aes_gcm_128 crypto_info;
+       struct mlx5e_tls_sw_stats *sw_stats;
        u32 expected_seq;
        u32 tisn;
        u32 key_id;
@@ -99,6 +101,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
        if (err)
                goto err_create_key;
 
+       priv_tx->sw_stats = &priv->tls->sw_stats;
        priv_tx->expected_seq = start_offload_tcp_sn;
        priv_tx->crypto_info  =
                *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -111,6 +114,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
                goto err_create_tis;
 
        priv_tx->ctx_post_pending = true;
+       atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
 
        return 0;
 
@@ -452,7 +456,6 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s
 
        if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
                mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
-               stats->tls_ctx++;
        }
 
        seq = ntohl(tcp_hdr(skb)->seq);
index bd270a85c8044074b6efcb1c1b17c7bf3bbbeb76..4c9274d390da1d9345474a2a5efc06319b3d3ab3 100644 (file)
 #include "en.h"
 
 struct mlx5e_tls_sw_stats {
+       atomic64_t tx_tls_ctx;
        atomic64_t tx_tls_drop_metadata;
        atomic64_t tx_tls_drop_resync_alloc;
        atomic64_t tx_tls_drop_no_sync_data;
        atomic64_t tx_tls_drop_bypass_required;
+       atomic64_t rx_tls_ctx;
+       atomic64_t rx_tls_del;
        atomic64_t rx_tls_drop_resync_request;
        atomic64_t rx_tls_resync_request;
        atomic64_t rx_tls_resync_reply;
index b949b9a7538b0d1ee8b5e996703dec76334ab6b3..29463bdb77159c1a5b8813f31dd95a60838a448e 100644 (file)
@@ -45,49 +45,60 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
 };
 
+static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
+};
+
 #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
        atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
 
-#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
-
-static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
+static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
 {
-       return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
+       if (!priv->tls)
+               return NULL;
+       if (mlx5_accel_is_ktls_device(priv->mdev))
+               return mlx5e_ktls_sw_stats_desc;
+       return mlx5e_tls_sw_stats_desc;
 }
 
 int mlx5e_tls_get_count(struct mlx5e_priv *priv)
 {
-       if (!is_tls_atomic_stats(priv))
+       if (!priv->tls)
                return 0;
-
-       return NUM_TLS_SW_COUNTERS;
+       if (mlx5_accel_is_ktls_device(priv->mdev))
+               return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
+       return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
 }
 
 int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
 {
-       unsigned int i, idx = 0;
+       const struct counter_desc *stats_desc;
+       unsigned int i, n, idx = 0;
 
-       if (!is_tls_atomic_stats(priv))
-               return 0;
+       stats_desc = get_tls_atomic_stats(priv);
+       n = mlx5e_tls_get_count(priv);
 
-       for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+       for (i = 0; i < n; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      mlx5e_tls_sw_stats_desc[i].format);
+                      stats_desc[i].format);
 
-       return NUM_TLS_SW_COUNTERS;
+       return n;
 }
 
 int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
 {
-       int i, idx = 0;
+       const struct counter_desc *stats_desc;
+       unsigned int i, n, idx = 0;
 
-       if (!is_tls_atomic_stats(priv))
-               return 0;
+       stats_desc = get_tls_atomic_stats(priv);
+       n = mlx5e_tls_get_count(priv);
 
-       for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+       for (i = 0; i < n; i++)
                data[idx++] =
                    MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
-                                           mlx5e_tls_sw_stats_desc, i);
+                                           stats_desc, i);
 
-       return NUM_TLS_SW_COUNTERS;
+       return n;
 }
index f5f2a8fd004695def84fde768e7268ec297fb4bd..53802e18af900cfc4456cea8cc4211a92f62ed7e 100644 (file)
@@ -758,11 +758,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
        return 0;
 }
 
-static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
-                                                  u32 eth_proto_cap,
-                                                  u8 connector_type, bool ext)
+static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
+                                                  struct ethtool_link_ksettings *link_ksettings,
+                                                  u32 eth_proto_cap, u8 connector_type)
 {
-       if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+       if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) {
                if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
                                   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
                                   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
@@ -898,9 +898,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
                [MLX5E_PORT_OTHER]              = PORT_OTHER,
        };
 
-static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
+static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type)
 {
-       if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+       if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
                return ptys2connector_type[connector_type];
 
        if (eth_proto &
@@ -1001,11 +1001,11 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
                         data_rate_oper, link_ksettings);
 
        eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
-
-       link_ksettings->base.port = get_connector_port(eth_proto_oper,
-                                                      connector_type, ext);
-       ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
-                                              connector_type, ext);
+       connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
+                        connector_type : MLX5E_PORT_UNKNOWN;
+       link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type);
+       ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin,
+                                              connector_type);
        get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
 
        if (an_status == MLX5_AN_COMPLETE)
index 158f947a85031ea0147e5d50f8c259813f87733a..5db63b9f3b70d99a36d2f6a6e6043ee6f085b74f 100644 (file)
@@ -1091,6 +1091,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
+       sq->reserved_room = param->stop_room;
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -2350,6 +2351,24 @@ void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
        mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
 }
 
+static void mlx5e_build_async_icosq_param(struct mlx5e_priv *priv,
+                                         struct mlx5e_params *params,
+                                         u8 log_wq_size,
+                                         struct mlx5e_sq_param *param)
+{
+       void *sqc = param->sqc;
+       void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+       mlx5e_build_sq_param_common(priv, param);
+
+       /* async_icosq is used by XSK only if xdp_prog is active */
+       if (params->xdp_prog)
+               param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
+       MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+       MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+       mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
+}
+
 void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
                             struct mlx5e_params *params,
                             struct mlx5e_sq_param *param)
@@ -2398,7 +2417,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
        mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
        mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
        mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
-       mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq);
+       mlx5e_build_async_icosq_param(priv, params, async_icosq_log_wq_sz, &cparam->async_icosq);
 }
 
 int mlx5e_open_channels(struct mlx5e_priv *priv,
index a132fff7a980fb4ecb3649befdc18d8971afbf95..8d39bfee84a936479fde5e07176d618f21ff3008 100644 (file)
@@ -1107,8 +1107,9 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
 
        mlx5e_rep_tc_enable(priv);
 
-       mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
-                                     0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
+       if (MLX5_CAP_GEN(mdev, uplink_follow))
+               mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
+                                             0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
        mlx5_lag_add(mdev, netdev);
        priv->events_nb.notifier_call = uplink_rep_async_event;
        mlx5_notifier_register(mdev, &priv->events_nb);
index 92c5b81427b971f81817f4149c662fb9e92e71e4..88a01c59ce612083fff1031d57545729e30287b2 100644 (file)
@@ -116,7 +116,6 @@ static const struct counter_desc sw_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
@@ -180,8 +179,6 @@ static const struct counter_desc sw_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
@@ -342,8 +339,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
 #ifdef CONFIG_MLX5_EN_TLS
        s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
        s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
-       s->rx_tls_ctx                 += rq_stats->tls_ctx;
-       s->rx_tls_del                 += rq_stats->tls_del;
        s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
        s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
        s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
@@ -390,7 +385,6 @@ static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
 #ifdef CONFIG_MLX5_EN_TLS
        s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
        s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
-       s->tx_tls_ctx               += sq_stats->tls_ctx;
        s->tx_tls_ooo               += sq_stats->tls_ooo;
        s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
        s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
@@ -1622,8 +1616,6 @@ static const struct counter_desc rq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
-       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
-       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
@@ -1650,7 +1642,6 @@ static const struct counter_desc sq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
-       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
@@ -1776,7 +1767,6 @@ static const struct counter_desc qos_sq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
-       { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
index 93c41312fb037ac36b00e229fadeafbe708bcb99..adf9b7b8b71201d5bb63da405aaa5c76bada12f5 100644 (file)
@@ -191,7 +191,6 @@ struct mlx5e_sw_stats {
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tx_tls_encrypted_packets;
        u64 tx_tls_encrypted_bytes;
-       u64 tx_tls_ctx;
        u64 tx_tls_ooo;
        u64 tx_tls_dump_packets;
        u64 tx_tls_dump_bytes;
@@ -202,8 +201,6 @@ struct mlx5e_sw_stats {
 
        u64 rx_tls_decrypted_packets;
        u64 rx_tls_decrypted_bytes;
-       u64 rx_tls_ctx;
-       u64 rx_tls_del;
        u64 rx_tls_resync_req_pkt;
        u64 rx_tls_resync_req_start;
        u64 rx_tls_resync_req_end;
@@ -334,8 +331,6 @@ struct mlx5e_rq_stats {
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tls_decrypted_packets;
        u64 tls_decrypted_bytes;
-       u64 tls_ctx;
-       u64 tls_del;
        u64 tls_resync_req_pkt;
        u64 tls_resync_req_start;
        u64 tls_resync_req_end;
@@ -364,7 +359,6 @@ struct mlx5e_sq_stats {
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tls_encrypted_packets;
        u64 tls_encrypted_bytes;
-       u64 tls_ctx;
        u64 tls_ooo;
        u64 tls_dump_packets;
        u64 tls_dump_bytes;
index df2a0af854bbf6c7789585d090589c64d31bc6f6..d675107d9ecabec3a44b8b5124c50432251e459b 100644 (file)
@@ -1895,6 +1895,9 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
                return 0;
 
        flow_rule_match_meta(rule, &match);
+       if (!match.mask->ingress_ifindex)
+               return 0;
+
        if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
                return -EOPNOTSUPP;
index 174dfbc996c6164de252648c6f140232a63fada4..1fa9c18563da911085c4ab2de44c0e08bced092d 100644 (file)
@@ -931,13 +931,24 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
        mutex_unlock(&table->lock);
 }
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+#define MLX5_MAX_ASYNC_EQS 4
+#else
+#define MLX5_MAX_ASYNC_EQS 3
+#endif
+
 int mlx5_eq_table_create(struct mlx5_core_dev *dev)
 {
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+       int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+                     MLX5_CAP_GEN(dev, max_num_eqs) :
+                     1 << MLX5_CAP_GEN(dev, log_max_eq);
        int err;
 
        eq_table->num_comp_eqs =
-               mlx5_irq_get_num_comp(eq_table->irq_table);
+               min_t(int,
+                     mlx5_irq_get_num_comp(eq_table->irq_table),
+                     num_eqs - MLX5_MAX_ASYNC_EQS);
 
        err = create_async_eqs(dev);
        if (err) {
index 6f6772bf61a263afb632a09a7e65401c435aaaef..3da7becc1069f74c4320f9387638f2a39cd7ed56 100644 (file)
@@ -248,7 +248,7 @@ err_mod_hdr_regc0:
 err_ethertype:
        kfree(rule);
 out:
-       kfree(rule_spec);
+       kvfree(rule_spec);
        return err;
 }
 
@@ -328,7 +328,7 @@ static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
        e->recirc_cnt = 0;
 
 out:
-       kfree(in);
+       kvfree(in);
        return err;
 }
 
@@ -347,7 +347,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
 
        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
-               kfree(in);
+               kvfree(in);
                return -ENOMEM;
        }
 
@@ -371,8 +371,8 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
        }
 
 err_out:
-       kfree(spec);
-       kfree(in);
+       kvfree(spec);
+       kvfree(in);
        return err;
 }
 
index 8694b83968b4c4fae3a681b8c39200407a8b1242..d4a2f8d1ee9f154d0ce71cb4ff5ca983b6635d88 100644 (file)
@@ -537,6 +537,14 @@ esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *
        return i;
 }
 
+static bool
+esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
+{
+       return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
+              mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+              MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
+}
+
 static int
 esw_setup_dests(struct mlx5_flow_destination *dest,
                struct mlx5_flow_act *flow_act,
@@ -550,9 +558,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
        int err = 0;
 
        if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
-           MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
-           mlx5_eswitch_vport_match_metadata_enabled(esw) &&
-           MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+           esw_src_port_rewrite_supported(esw))
                attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
 
        if (attr->dest_ft) {
@@ -1716,36 +1722,40 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
        }
        esw->fdb_table.offloads.send_to_vport_grp = g;
 
-       /* meta send to vport */
-       memset(flow_group_in, 0, inlen);
-       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
-                MLX5_MATCH_MISC_PARAMETERS_2);
-
-       match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+       if (esw_src_port_rewrite_supported(esw)) {
+               /* meta send to vport */
+               memset(flow_group_in, 0, inlen);
+               MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+                        MLX5_MATCH_MISC_PARAMETERS_2);
 
-       MLX5_SET(fte_match_param, match_criteria,
-                misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
-       MLX5_SET(fte_match_param, match_criteria,
-                misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+               match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
 
-       num_vfs = esw->esw_funcs.num_vfs;
-       if (num_vfs) {
-               MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
-               MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1);
-               ix += num_vfs;
+               MLX5_SET(fte_match_param, match_criteria,
+                        misc_parameters_2.metadata_reg_c_0,
+                        mlx5_eswitch_get_vport_metadata_mask());
+               MLX5_SET(fte_match_param, match_criteria,
+                        misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
 
-               g = mlx5_create_flow_group(fdb, flow_group_in);
-               if (IS_ERR(g)) {
-                       err = PTR_ERR(g);
-                       esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
-                                err);
-                       goto send_vport_meta_err;
+               num_vfs = esw->esw_funcs.num_vfs;
+               if (num_vfs) {
+                       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+                       MLX5_SET(create_flow_group_in, flow_group_in,
+                                end_flow_index, ix + num_vfs - 1);
+                       ix += num_vfs;
+
+                       g = mlx5_create_flow_group(fdb, flow_group_in);
+                       if (IS_ERR(g)) {
+                               err = PTR_ERR(g);
+                               esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
+                                        err);
+                               goto send_vport_meta_err;
+                       }
+                       esw->fdb_table.offloads.send_to_vport_meta_grp = g;
+
+                       err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
+                       if (err)
+                               goto meta_rule_err;
                }
-               esw->fdb_table.offloads.send_to_vport_meta_grp = g;
-
-               err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
-               if (err)
-                       goto meta_rule_err;
        }
 
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
index d9d9e1f488f94e7ba2498b8e87ad9ea263badf55..ba28ac7e79bcd5cbde98df6d77412452dcf0ac24 100644 (file)
@@ -21,6 +21,7 @@
 #include <net/red.h>
 #include <net/vxlan.h>
 #include <net/flow_offload.h>
+#include <net/inet_ecn.h>
 
 #include "port.h"
 #include "core.h"
@@ -347,6 +348,20 @@ struct mlxsw_sp_port_type_speed_ops {
        u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
 };
 
+static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
+                                          bool *trap_en)
+{
+       bool set_ce = false;
+
+       *trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+       if (set_ce)
+               return INET_ECN_CE;
+       else if (outer_ecn == INET_ECN_ECT_1 && inner_ecn == INET_ECN_ECT_0)
+               return INET_ECN_ECT_1;
+       else
+               return inner_ecn;
+}
+
 static inline struct net_device *
 mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
 {
index 0bd64169bf8121c6c39f19be9ba8add48bf3374f..078601d31cded3e5d2c1ac69760cb285ba11981a 100644 (file)
@@ -1230,16 +1230,22 @@ mlxsw_sp1_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
                              u32 ptys_eth_proto,
                              struct ethtool_link_ksettings *cmd)
 {
+       struct mlxsw_sp1_port_link_mode link;
        int i;
 
-       cmd->link_mode = -1;
+       cmd->base.speed = SPEED_UNKNOWN;
+       cmd->base.duplex = DUPLEX_UNKNOWN;
+       cmd->lanes = 0;
 
        if (!carrier_ok)
                return;
 
        for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
-               if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
-                       cmd->link_mode = mlxsw_sp1_port_link_mode[i].mask_ethtool;
+               if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) {
+                       link = mlxsw_sp1_port_link_mode[i];
+                       ethtool_params_from_link_mode(cmd,
+                                                     link.mask_ethtool);
+               }
        }
 }
 
@@ -1672,7 +1678,9 @@ mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
        struct mlxsw_sp2_port_link_mode link;
        int i;
 
-       cmd->link_mode = -1;
+       cmd->base.speed = SPEED_UNKNOWN;
+       cmd->base.duplex = DUPLEX_UNKNOWN;
+       cmd->lanes = 0;
 
        if (!carrier_ok)
                return;
@@ -1680,7 +1688,8 @@ mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
        for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
                if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
                        link = mlxsw_sp2_port_link_mode[i];
-                       cmd->link_mode = link.mask_ethtool[1];
+                       ethtool_params_from_link_mode(cmd,
+                                                     link.mask_ethtool[1]);
                }
        }
 }
index 6ccca39bae84529e8167c1e99a633d01a69c84aa..64a8f838eb53238cdba0925eb6f4b715730c7a9f 100644 (file)
@@ -335,12 +335,11 @@ static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp,
                                            u8 inner_ecn, u8 outer_ecn)
 {
        char tidem_pl[MLXSW_REG_TIDEM_LEN];
-       bool trap_en, set_ce = false;
        u8 new_inner_ecn;
+       bool trap_en;
 
-       trap_en = __INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
-       new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
-
+       new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+                                                 &trap_en);
        mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn,
                             trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl);
index e5ec595593f45e6c12238d6468e43c899d220560..9eba8fa684aee1fa0fcf8d7384052a800848d4f5 100644 (file)
@@ -909,12 +909,11 @@ static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
                                         u8 inner_ecn, u8 outer_ecn)
 {
        char tndem_pl[MLXSW_REG_TNDEM_LEN];
-       bool trap_en, set_ce = false;
        u8 new_inner_ecn;
+       bool trap_en;
 
-       trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
-       new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
-
+       new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+                                                 &trap_en);
        mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
                             trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
index 1c3e204d727cf8b242e6befc3a8f79791b96306f..7b6794aa8ea96d3b62b50bec27eccf4fde3d8a79 100644 (file)
@@ -885,8 +885,8 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
        }
 
        mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
-       mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
-                 MAC_RX_MAX_SIZE_MASK_);
+       mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN)
+                 << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
        lan743x_csr_write(adapter, MAC_RX, mac_rx);
 
        if (enabled) {
@@ -1944,7 +1944,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
        struct sk_buff *skb;
        dma_addr_t dma_ptr;
 
-       buffer_length = netdev->mtu + ETH_HLEN + 4 + RX_HEAD_PADDING;
+       buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;
 
        descriptor = &rx->ring_cpu_ptr[index];
        buffer_info = &rx->buffer_info[index];
@@ -2040,7 +2040,7 @@ lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
                dev_kfree_skb_irq(skb);
                return NULL;
        }
-       frame_length = max_t(int, 0, frame_length - RX_HEAD_PADDING - 4);
+       frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN);
        if (skb->len > frame_length) {
                skb->tail -= skb->len - frame_length;
                skb->len = frame_length;
index 1634ca6d4a8f02ec9e86c287d5eb0d4807648dc5..c84c8bf2bc20eaefb6711b5836652502f9196da8 100644 (file)
@@ -2897,7 +2897,7 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
                        dev_kfree_skb_any(curr);
                        if (segs != NULL) {
                                curr = segs;
-                               segs = segs->next;
+                               segs = next;
                                curr->next = NULL;
                                dev_kfree_skb_any(segs);
                        }
index 0e2db6ea79e96f7e9daca0fdcd84ce7b4483b0ed..2ec62c8d86e1c1858d9cce0efcfab055f2ef9add 100644 (file)
@@ -454,6 +454,7 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
                        dev_consume_skb_any(skb);
                else
                        dev_kfree_skb_any(skb);
+               return;
        }
 
        nfp_ccm_rx(&bpf->ccm, skb);
index caf12eec99459ab01c03d23df38eb6fbf4578051..56833a41f3d27b41de4b21a8685bb7a17dcfc4b5 100644 (file)
@@ -190,6 +190,7 @@ struct nfp_fl_internal_ports {
  * @qos_rate_limiters: Current active qos rate limiters
  * @qos_stats_lock:    Lock on qos stats updates
  * @pre_tun_rule_cnt:  Number of pre-tunnel rules offloaded
+ * @merge_table:       Hash table to store merged flows
  */
 struct nfp_flower_priv {
        struct nfp_app *app;
@@ -223,6 +224,7 @@ struct nfp_flower_priv {
        unsigned int qos_rate_limiters;
        spinlock_t qos_stats_lock; /* Protect the qos stats */
        int pre_tun_rule_cnt;
+       struct rhashtable merge_table;
 };
 
 /**
@@ -350,6 +352,12 @@ struct nfp_fl_payload_link {
 };
 
 extern const struct rhashtable_params nfp_flower_table_params;
+extern const struct rhashtable_params merge_table_params;
+
+struct nfp_merge_info {
+       u64 parent_ctx;
+       struct rhash_head ht_node;
+};
 
 struct nfp_fl_stats_frame {
        __be32 stats_con_id;
index aa06fcb38f8b993a1f6c684d2f9c66eb98e3e429..327bb56b3ef5696ff894776985f2c73f0726238a 100644 (file)
@@ -490,6 +490,12 @@ const struct rhashtable_params nfp_flower_table_params = {
        .automatic_shrinking    = true,
 };
 
+const struct rhashtable_params merge_table_params = {
+       .key_offset     = offsetof(struct nfp_merge_info, parent_ctx),
+       .head_offset    = offsetof(struct nfp_merge_info, ht_node),
+       .key_len        = sizeof(u64),
+};
+
 int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
                             unsigned int host_num_mems)
 {
@@ -506,6 +512,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
        if (err)
                goto err_free_flow_table;
 
+       err = rhashtable_init(&priv->merge_table, &merge_table_params);
+       if (err)
+               goto err_free_stats_ctx_table;
+
        get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
 
        /* Init ring buffer and unallocated mask_ids. */
@@ -513,7 +523,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
                kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
                              NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
        if (!priv->mask_ids.mask_id_free_list.buf)
-               goto err_free_stats_ctx_table;
+               goto err_free_merge_table;
 
        priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
 
@@ -550,6 +560,8 @@ err_free_last_used:
        kfree(priv->mask_ids.last_used);
 err_free_mask_id:
        kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_merge_table:
+       rhashtable_destroy(&priv->merge_table);
 err_free_stats_ctx_table:
        rhashtable_destroy(&priv->stats_ctx_table);
 err_free_flow_table:
@@ -568,6 +580,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
                                    nfp_check_rhashtable_empty, NULL);
        rhashtable_free_and_destroy(&priv->stats_ctx_table,
                                    nfp_check_rhashtable_empty, NULL);
+       rhashtable_free_and_destroy(&priv->merge_table,
+                                   nfp_check_rhashtable_empty, NULL);
        kvfree(priv->stats);
        kfree(priv->mask_ids.mask_id_free_list.buf);
        kfree(priv->mask_ids.last_used);
index d72225d64a75da13a123a3fcf7d362b5310a3a4f..e95969c462e46de7403272452fcde5633b203fe4 100644 (file)
@@ -1009,6 +1009,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *merge_flow;
        struct nfp_fl_key_ls merge_key_ls;
+       struct nfp_merge_info *merge_info;
+       u64 parent_ctx = 0;
        int err;
 
        ASSERT_RTNL();
@@ -1019,6 +1021,15 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
            nfp_flower_is_merge_flow(sub_flow2))
                return -EINVAL;
 
+       /* check if the two flows are already merged */
+       parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
+       parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
+       if (rhashtable_lookup_fast(&priv->merge_table,
+                                  &parent_ctx, merge_table_params)) {
+               nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
+               return 0;
+       }
+
        err = nfp_flower_can_merge(sub_flow1, sub_flow2);
        if (err)
                return err;
@@ -1060,16 +1071,33 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
        if (err)
                goto err_release_metadata;
 
+       merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
+       if (!merge_info) {
+               err = -ENOMEM;
+               goto err_remove_rhash;
+       }
+       merge_info->parent_ctx = parent_ctx;
+       err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
+                                    merge_table_params);
+       if (err)
+               goto err_destroy_merge_info;
+
        err = nfp_flower_xmit_flow(app, merge_flow,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
        if (err)
-               goto err_remove_rhash;
+               goto err_remove_merge_info;
 
        merge_flow->in_hw = true;
        sub_flow1->in_hw = false;
 
        return 0;
 
+err_remove_merge_info:
+       WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+                                           &merge_info->ht_node,
+                                           merge_table_params));
+err_destroy_merge_info:
+       kfree(merge_info);
 err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &merge_flow->fl_node,
@@ -1359,7 +1387,9 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
 {
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload_link *link, *temp;
+       struct nfp_merge_info *merge_info;
        struct nfp_fl_payload *origin;
+       u64 parent_ctx = 0;
        bool mod = false;
        int err;
 
@@ -1396,8 +1426,22 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
 err_free_links:
        /* Clean any links connected with the merged flow. */
        list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
-                                merge_flow.list)
+                                merge_flow.list) {
+               u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);
+
+               parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
                nfp_flower_unlink_flow(link);
+       }
+
+       merge_info = rhashtable_lookup_fast(&priv->merge_table,
+                                           &parent_ctx,
+                                           merge_table_params);
+       if (merge_info) {
+               WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+                                                   &merge_info->ht_node,
+                                                   merge_table_params));
+               kfree(merge_info);
+       }
 
        kfree(merge_flow->action_data);
        kfree(merge_flow->mask_data);
index 581a92fc32924f1cc7953142d85a7bfaf7733e56..1df2c002c9f642a1e047cbbb122d383ddb01dc4f 100644 (file)
@@ -2350,6 +2350,13 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
 
        if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
                pcie_set_readrq(tp->pci_dev, readrq);
+
+       /* Chip doesn't support pause in jumbo mode */
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+                        tp->phydev->advertising, !jumbo);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+                        tp->phydev->advertising, !jumbo);
+       phy_start_aneg(tp->phydev);
 }
 
 DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -4630,8 +4637,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
        if (!tp->supports_gmii)
                phy_set_max_speed(phydev, SPEED_100);
 
-       phy_support_asym_pause(phydev);
-
        phy_attached_info(phydev);
 
        return 0;
index 208cae344ffa26432885dea59be676fa1b6dca9e..4749bd0af160734c1433b71b952bfabd9b60a411 100644 (file)
@@ -1379,88 +1379,6 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
        }
 }
 
-/**
- * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
- * @priv: driver private structure
- * Description: this function is called to re-allocate a receive buffer, perform
- * the DMA mapping and init the descriptor.
- */
-static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
-{
-       u32 rx_count = priv->plat->rx_queues_to_use;
-       u32 queue;
-       int i;
-
-       for (queue = 0; queue < rx_count; queue++) {
-               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
-               for (i = 0; i < priv->dma_rx_size; i++) {
-                       struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-
-                       if (buf->page) {
-                               page_pool_recycle_direct(rx_q->page_pool, buf->page);
-                               buf->page = NULL;
-                       }
-
-                       if (priv->sph && buf->sec_page) {
-                               page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
-                               buf->sec_page = NULL;
-                       }
-               }
-       }
-
-       for (queue = 0; queue < rx_count; queue++) {
-               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
-               for (i = 0; i < priv->dma_rx_size; i++) {
-                       struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-                       struct dma_desc *p;
-
-                       if (priv->extend_desc)
-                               p = &((rx_q->dma_erx + i)->basic);
-                       else
-                               p = rx_q->dma_rx + i;
-
-                       if (!buf->page) {
-                               buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
-                               if (!buf->page)
-                                       goto err_reinit_rx_buffers;
-
-                               buf->addr = page_pool_get_dma_addr(buf->page);
-                       }
-
-                       if (priv->sph && !buf->sec_page) {
-                               buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
-                               if (!buf->sec_page)
-                                       goto err_reinit_rx_buffers;
-
-                               buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
-                       }
-
-                       stmmac_set_desc_addr(priv, p, buf->addr);
-                       if (priv->sph)
-                               stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
-                       else
-                               stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
-                       if (priv->dma_buf_sz == BUF_SIZE_16KiB)
-                               stmmac_init_desc3(priv, p);
-               }
-       }
-
-       return;
-
-err_reinit_rx_buffers:
-       do {
-               while (--i >= 0)
-                       stmmac_free_rx_buffer(priv, queue, i);
-
-               if (queue == 0)
-                       break;
-
-               i = priv->dma_rx_size;
-       } while (queue-- > 0);
-}
-
 /**
  * init_dma_rx_desc_rings - init the RX descriptor rings
  * @dev: net device structure
@@ -5428,7 +5346,7 @@ int stmmac_resume(struct device *dev)
        mutex_lock(&priv->lock);
 
        stmmac_reset_queues_param(priv);
-       stmmac_reinit_rx_buffers(priv);
+
        stmmac_free_tx_skbufs(priv);
        stmmac_clear_descriptors(priv);
 
index 1e966a39967e5f8eb885c109d857be722158c343..aca7f82f6791b6f7551df1720b349e69af41e7cc 100644 (file)
@@ -504,6 +504,18 @@ static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
        return axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
 }
 
+static inline void axienet_lock_mii(struct axienet_local *lp)
+{
+       if (lp->mii_bus)
+               mutex_lock(&lp->mii_bus->mdio_lock);
+}
+
+static inline void axienet_unlock_mii(struct axienet_local *lp)
+{
+       if (lp->mii_bus)
+               mutex_unlock(&lp->mii_bus->mdio_lock);
+}
+
 /**
  * axienet_iow - Memory mapped Axi Ethernet register write
  * @lp:         Pointer to axienet local structure
index 5d677db0aee5dd0c071a387a9a914d63e7c3cac1..f8f8654ea728c24dcefca36cdabfd0256ff6078b 100644 (file)
@@ -1053,9 +1053,9 @@ static int axienet_open(struct net_device *ndev)
         * including the MDIO. MDIO must be disabled before resetting.
         * Hold MDIO bus lock to avoid MDIO accesses during the reset.
         */
-       mutex_lock(&lp->mii_bus->mdio_lock);
+       axienet_lock_mii(lp);
        ret = axienet_device_reset(ndev);
-       mutex_unlock(&lp->mii_bus->mdio_lock);
+       axienet_unlock_mii(lp);
 
        ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
        if (ret) {
@@ -1148,9 +1148,9 @@ static int axienet_stop(struct net_device *ndev)
        }
 
        /* Do a reset to ensure DMA is really stopped */
-       mutex_lock(&lp->mii_bus->mdio_lock);
+       axienet_lock_mii(lp);
        __axienet_device_reset(lp);
-       mutex_unlock(&lp->mii_bus->mdio_lock);
+       axienet_unlock_mii(lp);
 
        cancel_work_sync(&lp->dma_err_task);
 
@@ -1709,9 +1709,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
         * including the MDIO. MDIO must be disabled before resetting.
         * Hold MDIO bus lock to avoid MDIO accesses during the reset.
         */
-       mutex_lock(&lp->mii_bus->mdio_lock);
+       axienet_lock_mii(lp);
        __axienet_device_reset(lp);
-       mutex_unlock(&lp->mii_bus->mdio_lock);
+       axienet_unlock_mii(lp);
 
        for (i = 0; i < lp->tx_bd_num; i++) {
                cur_p = &lp->tx_bd_v[i];
index 4ac0373326efd995171b1179bd7a5e7a202b89a2..42f31c6818462e633db49bcdc7f965f3f1185959 100644 (file)
@@ -891,6 +891,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        __be16 sport;
        int err;
 
+       if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+               return -EINVAL;
+
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
                              geneve->cfg.info.key.tp_dst, sport);
@@ -908,8 +911,16 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 
                info = skb_tunnel_info(skb);
                if (info) {
-                       info->key.u.ipv4.dst = fl4.saddr;
-                       info->key.u.ipv4.src = fl4.daddr;
+                       struct ip_tunnel_info *unclone;
+
+                       unclone = skb_tunnel_info_unclone(skb);
+                       if (unlikely(!unclone)) {
+                               dst_release(&rt->dst);
+                               return -ENOMEM;
+                       }
+
+                       unclone->key.u.ipv4.dst = fl4.saddr;
+                       unclone->key.u.ipv4.src = fl4.daddr;
                }
 
                if (!pskb_may_pull(skb, ETH_HLEN)) {
@@ -977,6 +988,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        __be16 sport;
        int err;
 
+       if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+               return -EINVAL;
+
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
                                geneve->cfg.info.key.tp_dst, sport);
@@ -993,8 +1007,16 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                struct ip_tunnel_info *info = skb_tunnel_info(skb);
 
                if (info) {
-                       info->key.u.ipv6.dst = fl6.saddr;
-                       info->key.u.ipv6.src = fl6.daddr;
+                       struct ip_tunnel_info *unclone;
+
+                       unclone = skb_tunnel_info_unclone(skb);
+                       if (unlikely(!unclone)) {
+                               dst_release(dst);
+                               return -ENOMEM;
+                       }
+
+                       unclone->key.u.ipv6.dst = fl6.saddr;
+                       unclone->key.u.ipv6.src = fl6.daddr;
                }
 
                if (!pskb_may_pull(skb, ETH_HLEN)) {
index 0dd0ba915ab970cf7a142a57279c9271c22c84a9..23ee0b14cbfa1f39f5d3a828d2cad130b456bffd 100644 (file)
@@ -365,6 +365,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n)
                        return -ENOMEM;
                }
                usb_anchor_urb(urb, &atusb->idle_urbs);
+               usb_free_urb(urb);
                n--;
        }
        return 0;
index 53282a6d5928f1f426f5a1e592f72a2e8d1e536f..287cccf8f7f4e5417d2750609e40ec9e393cee2a 100644 (file)
@@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
 
 int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
 {
-       int val;
+       int val, mask = 0;
 
        /* Enable EEE at PHY level */
        val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
@@ -388,10 +388,17 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
        if (val < 0)
                return val;
 
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+                             phydev->supported))
+               mask |= MDIO_EEE_1000T;
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+                             phydev->supported))
+               mask |= MDIO_EEE_100TX;
+
        if (enable)
-               val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
+               val |= mask;
        else
-               val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
+               val &= ~mask;
 
        phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);
 
index e26a5d663f8a7603eeeaee099e3c273b565c11c6..8018ddf7f31622378010942c0f89d241c1b8a20b 100644 (file)
@@ -3021,9 +3021,34 @@ static struct phy_driver marvell_drivers[] = {
                .get_stats = marvell_get_stats,
        },
        {
-               .phy_id = MARVELL_PHY_ID_88E6390,
+               .phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
-               .name = "Marvell 88E6390",
+               .name = "Marvell 88E6341 Family",
+               /* PHY_GBIT_FEATURES */
+               .flags = PHY_POLL_CABLE_TEST,
+               .probe = m88e1510_probe,
+               .config_init = marvell_config_init,
+               .config_aneg = m88e6390_config_aneg,
+               .read_status = marvell_read_status,
+               .config_intr = marvell_config_intr,
+               .handle_interrupt = marvell_handle_interrupt,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
+               .get_sset_count = marvell_get_sset_count,
+               .get_strings = marvell_get_strings,
+               .get_stats = marvell_get_stats,
+               .get_tunable = m88e1540_get_tunable,
+               .set_tunable = m88e1540_set_tunable,
+               .cable_test_start = marvell_vct7_cable_test_start,
+               .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
+               .cable_test_get_status = marvell_vct7_cable_test_get_status,
+       },
+       {
+               .phy_id = MARVELL_PHY_ID_88E6390_FAMILY,
+               .phy_id_mask = MARVELL_PHY_ID_MASK,
+               .name = "Marvell 88E6390 Family",
                /* PHY_GBIT_FEATURES */
                .flags = PHY_POLL_CABLE_TEST,
                .probe = m88e6390_probe,
@@ -3107,7 +3132,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
        { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
-       { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK },
        { }
index fc86da7f1628f728778d8401b59eef638e60e738..4cf38be26dc99e455349f22a59eee58002e54424 100644 (file)
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/mutex.h>
+#include <linux/ieee802154.h>
+#include <linux/if_ltalk.h>
+#include <uapi/linux/if_fddi.h>
+#include <uapi/linux/if_hippi.h>
+#include <uapi/linux/if_fc.h>
+#include <net/ax25.h>
+#include <net/rose.h>
+#include <net/6lowpan.h>
 
 #include <linux/uaccess.h>
 #include <linux/proc_fs.h>
@@ -2919,6 +2927,45 @@ static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
        return __tun_set_ebpf(tun, prog_p, prog);
 }
 
+/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
+static unsigned char tun_get_addr_len(unsigned short type)
+{
+       switch (type) {
+       case ARPHRD_IP6GRE:
+       case ARPHRD_TUNNEL6:
+               return sizeof(struct in6_addr);
+       case ARPHRD_IPGRE:
+       case ARPHRD_TUNNEL:
+       case ARPHRD_SIT:
+               return 4;
+       case ARPHRD_ETHER:
+               return ETH_ALEN;
+       case ARPHRD_IEEE802154:
+       case ARPHRD_IEEE802154_MONITOR:
+               return IEEE802154_EXTENDED_ADDR_LEN;
+       case ARPHRD_PHONET_PIPE:
+       case ARPHRD_PPP:
+       case ARPHRD_NONE:
+               return 0;
+       case ARPHRD_6LOWPAN:
+               return EUI64_ADDR_LEN;
+       case ARPHRD_FDDI:
+               return FDDI_K_ALEN;
+       case ARPHRD_HIPPI:
+               return HIPPI_ALEN;
+       case ARPHRD_IEEE802:
+               return FC_ALEN;
+       case ARPHRD_ROSE:
+               return ROSE_ADDR_LEN;
+       case ARPHRD_NETROM:
+               return AX25_ADDR_LEN;
+       case ARPHRD_LOCALTLK:
+               return LTALK_ALEN;
+       default:
+               return 0;
+       }
+}
+
 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                            unsigned long arg, int ifreq_len)
 {
@@ -3082,6 +3129,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                                break;
                        }
                        tun->dev->type = (int) arg;
+                       tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
                        netif_info(tun, drv, tun->dev, "linktype set to %d\n",
                                   tun->dev->type);
                        call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
index 31d51346786abe75ea5480a2f7e1b6b1acb4cf8b..9bc58e64b5b7acc22d13a6861a72441ce2ccb13b 100644 (file)
@@ -611,7 +611,7 @@ static struct hso_serial *get_serial_by_index(unsigned index)
        return serial;
 }
 
-static int get_free_serial_index(void)
+static int obtain_minor(struct hso_serial *serial)
 {
        int index;
        unsigned long flags;
@@ -619,8 +619,10 @@ static int get_free_serial_index(void)
        spin_lock_irqsave(&serial_table_lock, flags);
        for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) {
                if (serial_table[index] == NULL) {
+                       serial_table[index] = serial->parent;
+                       serial->minor = index;
                        spin_unlock_irqrestore(&serial_table_lock, flags);
-                       return index;
+                       return 0;
                }
        }
        spin_unlock_irqrestore(&serial_table_lock, flags);
@@ -629,15 +631,12 @@ static int get_free_serial_index(void)
        return -1;
 }
 
-static void set_serial_by_index(unsigned index, struct hso_serial *serial)
+static void release_minor(struct hso_serial *serial)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&serial_table_lock, flags);
-       if (serial)
-               serial_table[index] = serial->parent;
-       else
-               serial_table[index] = NULL;
+       serial_table[serial->minor] = NULL;
        spin_unlock_irqrestore(&serial_table_lock, flags);
 }
 
@@ -2230,6 +2229,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
 static void hso_serial_tty_unregister(struct hso_serial *serial)
 {
        tty_unregister_device(tty_drv, serial->minor);
+       release_minor(serial);
 }
 
 static void hso_serial_common_free(struct hso_serial *serial)
@@ -2253,24 +2253,22 @@ static void hso_serial_common_free(struct hso_serial *serial)
 static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
                                    int rx_size, int tx_size)
 {
-       int minor;
        int i;
 
        tty_port_init(&serial->port);
 
-       minor = get_free_serial_index();
-       if (minor < 0)
+       if (obtain_minor(serial))
                goto exit2;
 
        /* register our minor number */
        serial->parent->dev = tty_port_register_device_attr(&serial->port,
-                       tty_drv, minor, &serial->parent->interface->dev,
+                       tty_drv, serial->minor, &serial->parent->interface->dev,
                        serial->parent, hso_serial_dev_groups);
-       if (IS_ERR(serial->parent->dev))
+       if (IS_ERR(serial->parent->dev)) {
+               release_minor(serial);
                goto exit2;
+       }
 
-       /* fill in specific data for later use */
-       serial->minor = minor;
        serial->magic = HSO_SERIAL_MAGIC;
        spin_lock_init(&serial->serial_lock);
        serial->num_rx_urbs = num_urbs;
@@ -2667,9 +2665,6 @@ static struct hso_device *hso_create_bulk_serial_device(
 
        serial->write_data = hso_std_serial_write_data;
 
-       /* and record this serial */
-       set_serial_by_index(serial->minor, serial);
-
        /* setup the proc dirs and files if needed */
        hso_log_port(hso_dev);
 
@@ -2726,9 +2721,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
        serial->shared_int->ref_count++;
        mutex_unlock(&serial->shared_int->shared_int_lock);
 
-       /* and record this serial */
-       set_serial_by_index(serial->minor, serial);
-
        /* setup the proc dirs and files if needed */
        hso_log_port(hso_dev);
 
@@ -3113,7 +3105,6 @@ static void hso_free_interface(struct usb_interface *interface)
                        cancel_work_sync(&serial_table[i]->async_get_intf);
                        hso_serial_tty_unregister(serial);
                        kref_put(&serial_table[i]->ref, hso_serial_ref_free);
-                       set_serial_by_index(i, NULL);
                }
        }
 
index 82e520d2cb1229a0c7b9fd0def3e4a7135536478..0824e6999e49957f7aaf7c990f6259792d42f32b 100644 (file)
@@ -406,9 +406,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
        offset += hdr_padded_len;
        p += hdr_padded_len;
 
-       copy = len;
-       if (copy > skb_tailroom(skb))
-               copy = skb_tailroom(skb);
+       /* Copy all frame if it fits skb->head, otherwise
+        * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
+        */
+       if (len <= skb_tailroom(skb))
+               copy = len;
+       else
+               copy = ETH_HLEN + metasize;
        skb_put_data(skb, p, copy);
 
        if (metasize) {
index 6d9130859c55aaf2b5d7aa2d66b26f1f7ef8c1f1..503e2fd7ce518765d0f15ae6aa202aa9612148c9 100644 (file)
@@ -471,9 +471,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
 
        skb_dst_drop(skb);
 
-       /* if dst.dev is loopback or the VRF device again this is locally
-        * originated traffic destined to a local address. Short circuit
-        * to Rx path
+       /* if dst.dev is the VRF device again this is locally originated traffic
+        * destined to a local address. Short circuit to Rx path.
         */
        if (dst->dev == dev)
                return vrf_local_xmit(skb, dev, dst);
@@ -547,9 +546,8 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
 
        skb_dst_drop(skb);
 
-       /* if dst.dev is loopback or the VRF device again this is locally
-        * originated traffic destined to a local address. Short circuit
-        * to Rx path
+       /* if dst.dev is the VRF device again this is locally originated traffic
+        * destined to a local address. Short circuit to Rx path.
         */
        if (rt->dst.dev == vrf_dev)
                return vrf_local_xmit(skb, vrf_dev, &rt->dst);
index 666dd201c3d5fac335d9251456bf408675928376..53dbc67e8a34f2535cead882faf4f4e19b9d3810 100644 (file)
@@ -2725,12 +2725,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        goto tx_error;
                } else if (err) {
                        if (info) {
+                               struct ip_tunnel_info *unclone;
                                struct in_addr src, dst;
 
+                               unclone = skb_tunnel_info_unclone(skb);
+                               if (unlikely(!unclone))
+                                       goto tx_error;
+
                                src = remote_ip.sin.sin_addr;
                                dst = local_ip.sin.sin_addr;
-                               info->key.u.ipv4.src = src.s_addr;
-                               info->key.u.ipv4.dst = dst.s_addr;
+                               unclone->key.u.ipv4.src = src.s_addr;
+                               unclone->key.u.ipv4.dst = dst.s_addr;
                        }
                        vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
                        dst_release(ndst);
@@ -2781,12 +2786,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        goto tx_error;
                } else if (err) {
                        if (info) {
+                               struct ip_tunnel_info *unclone;
                                struct in6_addr src, dst;
 
+                               unclone = skb_tunnel_info_unclone(skb);
+                               if (unlikely(!unclone))
+                                       goto tx_error;
+
                                src = remote_ip.sin6.sin6_addr;
                                dst = local_ip.sin6.sin6_addr;
-                               info->key.u.ipv6.src = src;
-                               info->key.u.ipv6.dst = dst;
+                               unclone->key.u.ipv6.src = src;
+                               unclone->key.u.ipv6.dst = dst;
                        }
 
                        vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
index 0720f5f92caa7b0bfcb80ffb432340356ea7034c..4d9dc7d159089c2ee12e87c8cffb167ef31af753 100644 (file)
@@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
 
                if (pad > 0) { /* Pad the frame with zeros */
                        if (__skb_pad(skb, pad, false))
-                               goto drop;
+                               goto out;
                        skb_put(skb, pad);
                }
        }
@@ -448,8 +448,9 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 
 drop:
-       dev->stats.tx_dropped++;
        kfree_skb(skb);
+out:
+       dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
 }
 
index 71e2ada86793f5b63ab839faf67aaffcfaee4726..72e2e71aac0e637192ddc37daa94fa1a19123494 100644 (file)
@@ -251,7 +251,7 @@ void ath9k_beacon_ensure_primary_slot(struct ath_softc *sc)
        int first_slot = ATH_BCBUF;
        int slot;
 
-       tasklet_disable(&sc->bcon_tasklet);
+       tasklet_disable_in_atomic(&sc->bcon_tasklet);
 
        /* Find first taken slot. */
        for (slot = 0; slot < ATH_BCBUF; slot++) {
index 6d30a0fceceae1a822180564db60093696101da3..34cd8a7401fe6f5fd9209e78d919fd309ed0888a 100644 (file)
@@ -2439,7 +2439,7 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool locked)
        vif = ifp->vif;
        cfg = wdev_to_cfg(&vif->wdev);
        cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
-       if (locked) {
+       if (!locked) {
                rtnl_lock();
                wiphy_lock(cfg->wiphy);
                cfg80211_unregister_wdev(&vif->wdev);
index 3dbc6f3f92cc9389014435901a9c8f74b08a45d5..231d2517f398ce76ae6bbeb29f364c8cc7d25b72 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2005-2014 Intel Corporation
+ * Copyright (C) 2005-2014, 2021 Intel Corporation
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
 #include <linux/sched.h>
@@ -26,7 +26,7 @@ bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
        if (!list_empty(&notif_wait->notif_waits)) {
                struct iwl_notification_wait *w;
 
-               spin_lock(&notif_wait->notif_wait_lock);
+               spin_lock_bh(&notif_wait->notif_wait_lock);
                list_for_each_entry(w, &notif_wait->notif_waits, list) {
                        int i;
                        bool found = false;
@@ -59,7 +59,7 @@ bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
                                triggered = true;
                        }
                }
-               spin_unlock(&notif_wait->notif_wait_lock);
+               spin_unlock_bh(&notif_wait->notif_wait_lock);
        }
 
        return triggered;
@@ -70,10 +70,10 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
 {
        struct iwl_notification_wait *wait_entry;
 
-       spin_lock(&notif_wait->notif_wait_lock);
+       spin_lock_bh(&notif_wait->notif_wait_lock);
        list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
                wait_entry->aborted = true;
-       spin_unlock(&notif_wait->notif_wait_lock);
+       spin_unlock_bh(&notif_wait->notif_wait_lock);
 
        wake_up_all(&notif_wait->notif_waitq);
 }
index 75f99ff7f90847ab19bdb8c65dadf6eda3f4956d..c4f5da76f1c0e0237ca13f5e31297ead9ef69320 100644 (file)
@@ -414,6 +414,7 @@ struct iwl_cfg {
 #define IWL_CFG_MAC_TYPE_QNJ           0x36
 #define IWL_CFG_MAC_TYPE_SO            0x37
 #define IWL_CFG_MAC_TYPE_SNJ           0x42
+#define IWL_CFG_MAC_TYPE_SOF           0x43
 #define IWL_CFG_MAC_TYPE_MA            0x44
 
 #define IWL_CFG_RF_TYPE_TH             0x105
index af684f80b0cc57f6350a64fb16f3b5f1b6b5a5a1..c5a1e84dc1abcf8ff012e9b544a0d23bf353f121 100644 (file)
@@ -232,7 +232,7 @@ enum iwl_reg_capa_flags_v2 {
        REG_CAPA_V2_MCS_9_ALLOWED       = BIT(6),
        REG_CAPA_V2_WEATHER_DISABLED    = BIT(7),
        REG_CAPA_V2_40MHZ_ALLOWED       = BIT(8),
-       REG_CAPA_V2_11AX_DISABLED       = BIT(13),
+       REG_CAPA_V2_11AX_DISABLED       = BIT(10),
 };
 
 /*
index 130760572262b934174414673bad226a7446e8b8..34ddef97b0990bc32709c30fd05b026f3cba841c 100644 (file)
@@ -1786,10 +1786,13 @@ static ssize_t iwl_dbgfs_rfi_freq_table_write(struct iwl_mvm *mvm, char *buf,
                return -EINVAL;
 
        /* value zero triggers re-sending the default table to the device */
-       if (!op_id)
+       if (!op_id) {
+               mutex_lock(&mvm->mutex);
                ret = iwl_rfi_send_config_cmd(mvm, NULL);
-       else
+               mutex_unlock(&mvm->mutex);
+       } else {
                ret = -EOPNOTSUPP; /* in the future a new table will be added */
+       }
 
        return ret ?: count;
 }
index 87391904814315f7762b342ea0277799b5f7bbc7..0b818067067ce616446f9e44851a1e11ed5c7213 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020 - 2021 Intel Corporation
  */
 
 #include "mvm.h"
@@ -66,6 +66,8 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
        if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
                return -EOPNOTSUPP;
 
+       lockdep_assert_held(&mvm->mutex);
+
        /* in case no table is passed, use the default one */
        if (!rfi_table) {
                memcpy(cmd.table, iwl_rfi_table, sizeof(cmd.table));
@@ -75,9 +77,7 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
                cmd.oem = 1;
        }
 
-       mutex_lock(&mvm->mutex);
        ret = iwl_mvm_send_cmd(mvm, &hcmd);
-       mutex_unlock(&mvm->mutex);
 
        if (ret)
                IWL_ERR(mvm, "Failed to send RFI config cmd %d\n", ret);
index c21736f80c298ddde64ce034bddef09c67e39799..af5a6dd81c41368c7deab2b32eb8a992185e0eb4 100644 (file)
@@ -272,10 +272,10 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
        rx_status->chain_signal[2] = S8_MIN;
 }
 
-static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta,
-                                 struct ieee80211_hdr *hdr,
-                                 struct iwl_rx_mpdu_desc *desc,
-                                 u32 status)
+static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+                               struct ieee80211_hdr *hdr,
+                               struct iwl_rx_mpdu_desc *desc,
+                               u32 status)
 {
        struct iwl_mvm_sta *mvmsta;
        struct iwl_mvm_vif *mvmvif;
@@ -285,6 +285,9 @@ static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta,
        u32 len = le16_to_cpu(desc->mpdu_len);
        const u8 *frame = (void *)hdr;
 
+       if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE)
+               return 0;
+
        /*
         * For non-beacon, we don't really care. But beacons may
         * be filtered out, and we thus need the firmware's replay
@@ -356,6 +359,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
            IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on)
                return -1;
 
+       if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
+                    !ieee80211_has_protected(hdr->frame_control)))
+               return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status);
+
        if (!ieee80211_has_protected(hdr->frame_control) ||
            (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
            IWL_RX_MPDU_STATUS_SEC_NONE)
@@ -411,7 +418,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                stats->flag |= RX_FLAG_DECRYPTED;
                return 0;
        case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC:
-               return iwl_mvm_rx_mgmt_crypto(sta, hdr, desc, status);
+               break;
        default:
                /*
                 * Sometimes we can get frames that were not decrypted
index 8fba190e84cf3098a798769e39237fea1d901bc1..cecc32e7dbe8a3c3603169e1b1a683f4c397bd34 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include "iwl-trans.h"
 #include "iwl-fh.h"
@@ -75,15 +75,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
                                 const struct fw_img *fw)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
-                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
-                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
-                     u32_encode_bits(250,
-                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
-                     CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
-                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
-                                     CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
-                     u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
        struct iwl_context_info_gen3 *ctxt_info_gen3;
        struct iwl_prph_scratch *prph_scratch;
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
@@ -217,26 +208,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
                    CSR_AUTO_FUNC_BOOT_ENA);
 
-       /*
-        * To workaround hardware latency issues during the boot process,
-        * initialize the LTR to ~250 usec (see ltr_val above).
-        * The firmware initializes this again later (to a smaller value).
-        */
-       if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
-            trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
-           !trans->trans_cfg->integrated) {
-               iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
-       } else if (trans->trans_cfg->integrated &&
-                  trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
-               iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
-               iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
-       }
-
-       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-               iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
-       else
-               iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
-
        return 0;
 
 err_free_ctxt_info:
index d1bb273d6b6d9bec3c92290e7f2ebaf82409d7e9..74ce31fdf45e974bdd6c5c8ce5cc1f959647f97b 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include "iwl-trans.h"
 #include "iwl-fh.h"
@@ -240,7 +240,6 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
 
        /* kick FW self load */
        iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
-       iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
 
        /* Context info will be released upon alive or failure to get one */
 
index ffaf973dae948bfc5616d4a025ea150bc0dfddb1..558a0b2ef0fc8b6790ab400c610400a7ca52af81 100644 (file)
@@ -592,6 +592,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
        IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
        IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
        IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+       IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
 
        /* So with HR */
        IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
@@ -1040,7 +1041,31 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
                      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
                      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-                     iwl_cfg_so_a0_hr_a0, iwl_ax201_name)
+                     iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
+
+/* So-F with Hr */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+                     IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+                     IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+                     IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
+
+/* So-F with Gf */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+                     IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
 
 #endif /* CONFIG_IWLMVM */
 };
index 497ef3405da3a29bc064a5fad0f24fadd37eeb55..94ffc1ae484dc1a47cb0410ab2f8f8a8bbf9bfc4 100644 (file)
@@ -266,6 +266,34 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
        mutex_unlock(&trans_pcie->mutex);
 }
 
+static void iwl_pcie_set_ltr(struct iwl_trans *trans)
+{
+       u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+                     u32_encode_bits(250,
+                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+                     CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+                                     CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+                     u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
+
+       /*
+        * To workaround hardware latency issues during the boot process,
+        * initialize the LTR to ~250 usec (see ltr_val above).
+        * The firmware initializes this again later (to a smaller value).
+        */
+       if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+            trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+           !trans->trans_cfg->integrated) {
+               iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+       } else if (trans->trans_cfg->integrated &&
+                  trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+               iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+               iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
+       }
+}
+
 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
                                 const struct fw_img *fw, bool run_in_rfkill)
 {
@@ -332,6 +360,13 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
        if (ret)
                goto out;
 
+       iwl_pcie_set_ltr(trans);
+
+       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+               iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
+       else
+               iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+
        /* re-check RF-Kill state since we may have missed the interrupt */
        hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
        if (hw_rfkill && !run_in_rfkill)
index 381e8f90b6f25fddeecd01ef48137218cd1ba09b..7ae32491b5dae275df23ada603167c1ee3b4b68a 100644 (file)
@@ -928,6 +928,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        u32 cmd_pos;
        const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
        u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+       unsigned long flags;
 
        if (WARN(!trans->wide_cmd_header &&
                 group_id > IWL_ALWAYS_LONG_GROUP,
@@ -1011,10 +1012,10 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                goto free_dup_buf;
        }
 
-       spin_lock_bh(&txq->lock);
+       spin_lock_irqsave(&txq->lock, flags);
 
        if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-               spin_unlock_bh(&txq->lock);
+               spin_unlock_irqrestore(&txq->lock, flags);
 
                IWL_ERR(trans, "No space in command queue\n");
                iwl_op_mode_cmd_queue_full(trans->op_mode);
@@ -1174,7 +1175,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
  unlock_reg:
        spin_unlock(&trans_pcie->reg_lock);
  out:
-       spin_unlock_bh(&txq->lock);
+       spin_unlock_irqrestore(&txq->lock, flags);
  free_dup_buf:
        if (idx < 0)
                kfree(dup_buf);
index 18980bb32dee2080bd688a6cbac87af2f8e1d4d6..6dad7f6ab09dfe1e6d3eda89d612423f4003a031 100644 (file)
 
 #define MT_WTBLON_TOP_BASE             0x34000
 #define MT_WTBLON_TOP(ofs)             (MT_WTBLON_TOP_BASE + (ofs))
-#define MT_WTBLON_TOP_WDUCR            MT_WTBLON_TOP(0x0)
+#define MT_WTBLON_TOP_WDUCR            MT_WTBLON_TOP(0x200)
 #define MT_WTBLON_TOP_WDUCR_GROUP      GENMASK(2, 0)
 
-#define MT_WTBL_UPDATE                 MT_WTBLON_TOP(0x030)
+#define MT_WTBL_UPDATE                 MT_WTBLON_TOP(0x230)
 #define MT_WTBL_UPDATE_WLAN_IDX                GENMASK(9, 0)
 #define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
 #define MT_WTBL_UPDATE_BUSY            BIT(31)
index c878097f0ddaf664716fd0570644fa45808f1ca1..1df959532c7d345c38714dec45d20534ffb3ec6f 100644 (file)
@@ -12,6 +12,7 @@
 #include <net/cfg80211.h>
 #include <net/rtnetlink.h>
 #include <linux/etherdevice.h>
+#include <linux/math64.h>
 #include <linux/module.h>
 
 static struct wiphy *common_wiphy;
@@ -168,11 +169,11 @@ static void virt_wifi_scan_result(struct work_struct *work)
                             scan_result.work);
        struct wiphy *wiphy = priv_to_wiphy(priv);
        struct cfg80211_scan_info scan_info = { .aborted = false };
+       u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
 
        informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
                                           CFG80211_BSS_FTYPE_PRESP,
-                                          fake_router_bssid,
-                                          ktime_get_boottime_ns(),
+                                          fake_router_bssid, tsf,
                                           WLAN_CAPABILITY_ESS, 0,
                                           (void *)&ssid, sizeof(ssid),
                                           DBM_TO_MBM(-50), GFP_KERNEL);
index a5439c130130f89152d35fd774fbedf16d8c175c..d24b7a7993aa0545b721675c2432061b718a3d45 100644 (file)
@@ -824,11 +824,15 @@ static void connect(struct backend_info *be)
        xenvif_carrier_on(be->vif);
 
        unregister_hotplug_status_watch(be);
-       err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
-                                  hotplug_status_changed,
-                                  "%s/%s", dev->nodename, "hotplug-status");
-       if (!err)
+       if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
+               err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
+                                          NULL, hotplug_status_changed,
+                                          "%s/%s", dev->nodename,
+                                          "hotplug-status");
+               if (err)
+                       goto err;
                be->have_hotplug_status_watch = 1;
+       }
 
        netif_tx_wake_all_queues(be->vif->dev);
 
index 48f0985ca8a023bc99599e38d9578d724cc6f9c9..3a777d0073b7203743059a17fedea61dc6ade3ca 100644 (file)
@@ -631,16 +631,14 @@ void nvdimm_check_and_set_ro(struct gendisk *disk)
        struct nd_region *nd_region = to_nd_region(dev->parent);
        int disk_ro = get_disk_ro(disk);
 
-       /*
-        * Upgrade to read-only if the region is read-only preserve as
-        * read-only if the disk is already read-only.
-        */
-       if (disk_ro || nd_region->ro == disk_ro)
+       /* catch the disk up with the region ro state */
+       if (disk_ro == nd_region->ro)
                return;
 
-       dev_info(dev, "%s read-only, marking %s read-only\n",
-                       dev_name(&nd_region->dev), disk->disk_name);
-       set_disk_ro(disk, 1);
+       dev_info(dev, "%s read-%s, marking %s read-%s\n",
+                dev_name(&nd_region->dev), nd_region->ro ? "only" : "write",
+                disk->disk_name, nd_region->ro ? "only" : "write");
+       set_disk_ro(disk, nd_region->ro);
 }
 EXPORT_SYMBOL(nvdimm_check_and_set_ro);
 
index b8a85bfb2e95b0bc6ed1cb8956be376700231fbf..7daac795db393cb4669144c2ed9bcb7faab6b39e 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/mm.h>
 #include <asm/cacheflush.h>
 #include "pmem.h"
+#include "btt.h"
 #include "pfn.h"
 #include "nd.h"
 
@@ -585,7 +586,7 @@ static void nd_pmem_shutdown(struct device *dev)
        nvdimm_flush(to_nd_region(dev->parent), NULL);
 }
 
-static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+static void pmem_revalidate_poison(struct device *dev)
 {
        struct nd_region *nd_region;
        resource_size_t offset = 0, end_trunc = 0;
@@ -595,9 +596,6 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
        struct range range;
        struct kernfs_node *bb_state;
 
-       if (event != NVDIMM_REVALIDATE_POISON)
-               return;
-
        if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);
 
@@ -635,6 +633,37 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
                sysfs_notify_dirent(bb_state);
 }
 
+static void pmem_revalidate_region(struct device *dev)
+{
+       struct pmem_device *pmem;
+
+       if (is_nd_btt(dev)) {
+               struct nd_btt *nd_btt = to_nd_btt(dev);
+               struct btt *btt = nd_btt->btt;
+
+               nvdimm_check_and_set_ro(btt->btt_disk);
+               return;
+       }
+
+       pmem = dev_get_drvdata(dev);
+       nvdimm_check_and_set_ro(pmem->disk);
+}
+
+static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+{
+       switch (event) {
+       case NVDIMM_REVALIDATE_POISON:
+               pmem_revalidate_poison(dev);
+               break;
+       case NVDIMM_REVALIDATE_REGION:
+               pmem_revalidate_region(dev);
+               break;
+       default:
+               dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
+               break;
+       }
+}
+
 MODULE_ALIAS("pmem");
 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
index ef23119db574663822c64c4cad32be0fa377fb8c..9ccf3d608799311820cc4a58abd7d56777767cdd 100644 (file)
@@ -518,6 +518,12 @@ static ssize_t read_only_show(struct device *dev,
        return sprintf(buf, "%d\n", nd_region->ro);
 }
 
+static int revalidate_read_only(struct device *dev, void *data)
+{
+       nd_device_notify(dev, NVDIMM_REVALIDATE_REGION);
+       return 0;
+}
+
 static ssize_t read_only_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
 {
@@ -529,6 +535,7 @@ static ssize_t read_only_store(struct device *dev,
                return rc;
 
        nd_region->ro = ro;
+       device_for_each_child(dev, NULL, revalidate_read_only);
        return len;
 }
 static DEVICE_ATTR_RW(read_only);
@@ -1239,6 +1246,11 @@ int nvdimm_has_flush(struct nd_region *nd_region)
                        || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
                return -ENXIO;
 
+       /* Test if an explicit flush function is defined */
+       if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
+               return 1;
+
+       /* Test if any flush hints for the region are available */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -1249,8 +1261,8 @@ int nvdimm_has_flush(struct nd_region *nd_region)
        }
 
        /*
-        * The platform defines dimm devices without hints, assume
-        * platform persistence mechanism like ADR
+        * The platform defines dimm devices without hints nor explicit flush,
+        * assume platform persistence mechanism like ADR
         */
        return 0;
 }
index 75d2594c16e19aa64f6bc7119ae8beab87e31064..dd20190068387c82b3e8fe9954e34c9f3ae3eb1c 100644 (file)
@@ -272,10 +272,20 @@ config SPRD_EFUSE
 
 config NVMEM_RMEM
        tristate "Reserved Memory Based Driver Support"
+       depends on HAS_IOMEM
        help
          This driver maps reserved memory into an nvmem device. It might be
          useful to expose information left by firmware in memory.
 
          This driver can also be built as a module. If so, the module
          will be called nvmem-rmem.
+
+config NVMEM_BRCM_NVRAM
+       tristate "Broadcom's NVRAM support"
+       depends on ARCH_BCM_5301X || COMPILE_TEST
+       depends on HAS_IOMEM
+       help
+         This driver provides support for Broadcom's NVRAM that can be accessed
+         using I/O mapping.
+
 endif
index 5376b8e0dae5a9da24cf957407a37c5bfe0ba011..bbea1410240a887d42b8d959fa2606fd569c2b30 100644 (file)
@@ -57,3 +57,5 @@ obj-$(CONFIG_SPRD_EFUSE)      += nvmem_sprd_efuse.o
 nvmem_sprd_efuse-y             := sprd-efuse.o
 obj-$(CONFIG_NVMEM_RMEM)       += nvmem-rmem.o
 nvmem-rmem-y                   := rmem.o
+obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_brcm_nvram.o
+nvmem_brcm_nvram-y             := brcm_nvram.o
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
new file mode 100644 (file)
index 0000000..bd2ecaa
--- /dev/null
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct brcm_nvram {
+       struct device *dev;
+       void __iomem *base;
+};
+
+static int brcm_nvram_read(void *context, unsigned int offset, void *val,
+                          size_t bytes)
+{
+       struct brcm_nvram *priv = context;
+       u8 *dst = val;
+
+       while (bytes--)
+               *dst++ = readb(priv->base + offset++);
+
+       return 0;
+}
+
+static int brcm_nvram_probe(struct platform_device *pdev)
+{
+       struct nvmem_config config = {
+               .name = "brcm-nvram",
+               .reg_read = brcm_nvram_read,
+       };
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       struct brcm_nvram *priv;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+       priv->dev = dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
+
+       config.dev = dev;
+       config.priv = priv;
+       config.size = resource_size(res);
+
+       return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+}
+
+static const struct of_device_id brcm_nvram_of_match_table[] = {
+       { .compatible = "brcm,nvram", },
+       {},
+};
+
+static struct platform_driver brcm_nvram_driver = {
+       .probe = brcm_nvram_probe,
+       .driver = {
+               .name = "brcm_nvram",
+               .of_match_table = brcm_nvram_of_match_table,
+       },
+};
+
+static int __init brcm_nvram_init(void)
+{
+       return platform_driver_register(&brcm_nvram_driver);
+}
+
+subsys_initcall_sync(brcm_nvram_init);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, brcm_nvram_of_match_table);
index a5ab1e0c74cf644fe978d978f0cc286ae39c80b4..bca671ff4e54607ffd4e53e699b664da7d336f11 100644 (file)
@@ -1606,6 +1606,101 @@ int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
 }
 EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
 
+static void *nvmem_cell_read_variable_common(struct device *dev,
+                                            const char *cell_id,
+                                            size_t max_len, size_t *len)
+{
+       struct nvmem_cell *cell;
+       int nbits;
+       void *buf;
+
+       cell = nvmem_cell_get(dev, cell_id);
+       if (IS_ERR(cell))
+               return cell;
+
+       nbits = cell->nbits;
+       buf = nvmem_cell_read(cell, len);
+       nvmem_cell_put(cell);
+       if (IS_ERR(buf))
+               return buf;
+
+       /*
+        * If nbits is set then nvmem_cell_read() can significantly exaggerate
+        * the length of the real data. Throw away the extra junk.
+        */
+       if (nbits)
+               *len = DIV_ROUND_UP(nbits, 8);
+
+       if (*len > max_len) {
+               kfree(buf);
+               return ERR_PTR(-ERANGE);
+       }
+
+       return buf;
+}
+
+/**
+ * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
+                                   u32 *val)
+{
+       size_t len;
+       u8 *buf;
+       int i;
+
+       buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
+       if (IS_ERR(buf))
+               return PTR_ERR(buf);
+
+       /* Copy w/ implicit endian conversion */
+       *val = 0;
+       for (i = 0; i < len; i++)
+               *val |= buf[i] << (8 * i);
+
+       kfree(buf);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);
+
+/**
+ * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
+                                   u64 *val)
+{
+       size_t len;
+       u8 *buf;
+       int i;
+
+       buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
+       if (IS_ERR(buf))
+               return PTR_ERR(buf);
+
+       /* Copy w/ implicit endian conversion */
+       *val = 0;
+       for (i = 0; i < len; i++)
+               *val |= (uint64_t)buf[i] << (8 * i);
+
+       kfree(buf);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
+
 /**
  * nvmem_device_cell_read() - Read a given nvmem device and cell
  *
index f6e9f96933ca291d65b2d5f8b8118bd4bfd6c3d4..4fcb63507ecd151e95cd8089a5000036a1fa2367 100644 (file)
@@ -141,7 +141,7 @@ static int sdam_probe(struct platform_device *pdev)
        sdam->sdam_config.dev = &pdev->dev;
        sdam->sdam_config.name = "spmi_sdam";
        sdam->sdam_config.id = NVMEM_DEVID_AUTO;
-       sdam->sdam_config.owner = THIS_MODULE,
+       sdam->sdam_config.owner = THIS_MODULE;
        sdam->sdam_config.stride = 1;
        sdam->sdam_config.word_size = 1;
        sdam->sdam_config.reg_read = sdam_read;
index 6cace24dfbf734d0917fe06e01eed07d59fdb5cb..d6d3f24685a85e1c65c69b60a8ba56eeaeb76314 100644 (file)
@@ -45,11 +45,13 @@ MODULE_PARM_DESC(read_raw_data, "Read raw instead of corrected data");
  * @qfprom_blow_timer_value: The timer value of qfprom when doing efuse blow.
  * @qfprom_blow_set_freq:    The frequency required to set when we start the
  *                           fuse blowing.
+ * @qfprom_blow_uV:          LDO voltage to be set when doing efuse blow
  */
 struct qfprom_soc_data {
        u32 accel_value;
        u32 qfprom_blow_timer_value;
        u32 qfprom_blow_set_freq;
+       int qfprom_blow_uV;
 };
 
 /**
@@ -111,6 +113,15 @@ static const struct qfprom_soc_compatible_data sc7180_qfprom = {
        .nkeepout = ARRAY_SIZE(sc7180_qfprom_keepout)
 };
 
+static const struct nvmem_keepout sc7280_qfprom_keepout[] = {
+       {.start = 0x128, .end = 0x148},
+       {.start = 0x238, .end = 0x248}
+};
+
+static const struct qfprom_soc_compatible_data sc7280_qfprom = {
+       .keepout = sc7280_qfprom_keepout,
+       .nkeepout = ARRAY_SIZE(sc7280_qfprom_keepout)
+};
 /**
  * qfprom_disable_fuse_blowing() - Undo enabling of fuse blowing.
  * @priv: Our driver data.
@@ -127,6 +138,16 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
 {
        int ret;
 
+       /*
+        * This may be a shared rail and may be able to run at a lower rate
+        * when we're not blowing fuses.  At the moment, the regulator framework
+        * applies voltage constraints even on disabled rails, so remove our
+        * constraints and allow the rail to be adjusted by other users.
+        */
+       ret = regulator_set_voltage(priv->vcc, 0, INT_MAX);
+       if (ret)
+               dev_warn(priv->dev, "Failed to set 0 voltage (ignoring)\n");
+
        ret = regulator_disable(priv->vcc);
        if (ret)
                dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
@@ -158,6 +179,7 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
                                      struct qfprom_touched_values *old)
 {
        int ret;
+       int qfprom_blow_uV = priv->soc_data->qfprom_blow_uV;
 
        ret = clk_prepare_enable(priv->secclk);
        if (ret) {
@@ -172,6 +194,17 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
                goto err_clk_prepared;
        }
 
+       /*
+        * Hardware requires 1.8V min for fuse blowing; this may be a
+        * shared rail, so don't specify a max -- the regulator
+        * constraints will handle it.
+        */
+       ret = regulator_set_voltage(priv->vcc, qfprom_blow_uV, INT_MAX);
+       if (ret) {
+               dev_err(priv->dev, "Failed to set %duV\n", qfprom_blow_uV);
+               goto err_clk_rate_set;
+       }
+
        ret = regulator_enable(priv->vcc);
        if (ret) {
                dev_err(priv->dev, "Failed to enable regulator\n");
@@ -290,6 +323,14 @@ static const struct qfprom_soc_data qfprom_7_8_data = {
        .accel_value = 0xD10,
        .qfprom_blow_timer_value = 25,
        .qfprom_blow_set_freq = 4800000,
+       .qfprom_blow_uV = 1800000,
+};
+
+static const struct qfprom_soc_data qfprom_7_15_data = {
+       .accel_value = 0xD08,
+       .qfprom_blow_timer_value = 24,
+       .qfprom_blow_set_freq = 4800000,
+       .qfprom_blow_uV = 1900000,
 };
 
 static int qfprom_probe(struct platform_device *pdev)
@@ -358,6 +399,8 @@ static int qfprom_probe(struct platform_device *pdev)
 
                if (major_version == 7 && minor_version == 8)
                        priv->soc_data = &qfprom_7_8_data;
+               if (major_version == 7 && minor_version == 15)
+                       priv->soc_data = &qfprom_7_15_data;
 
                priv->vcc = devm_regulator_get(&pdev->dev, "vcc");
                if (IS_ERR(priv->vcc))
@@ -384,6 +427,7 @@ static int qfprom_probe(struct platform_device *pdev)
 static const struct of_device_id qfprom_of_match[] = {
        { .compatible = "qcom,qfprom",},
        { .compatible = "qcom,sc7180-qfprom", .data = &sc7180_qfprom},
+       { .compatible = "qcom,sc7280-qfprom", .data = &sc7280_qfprom},
        {/* sentinel */},
 };
 MODULE_DEVICE_TABLE(of, qfprom_of_match);
index c527d26ca6ac12c801fb85c4f215d33e6df0e881..4692aa985bd6ffd847cc644e7669ca5afd2d3422 100644 (file)
@@ -123,7 +123,7 @@ static int snvs_lpgpr_probe(struct platform_device *pdev)
        cfg->dev = dev;
        cfg->stride = 4;
        cfg->word_size = 4;
-       cfg->size = dcfg->size,
+       cfg->size = dcfg->size;
        cfg->owner = THIS_MODULE;
        cfg->reg_read  = snvs_lpgpr_read;
        cfg->reg_write = snvs_lpgpr_write;
index dcc1dd96911a9e93b3cc22c711de15c93ebf43f7..adb26aff481d5584c55db4621e915d678b44cdd6 100644 (file)
@@ -205,7 +205,7 @@ static void populate_properties(const void *blob,
                *pprev = NULL;
 }
 
-static bool populate_node(const void *blob,
+static int populate_node(const void *blob,
                          int offset,
                          void **mem,
                          struct device_node *dad,
@@ -214,24 +214,24 @@ static bool populate_node(const void *blob,
 {
        struct device_node *np;
        const char *pathp;
-       unsigned int l, allocl;
+       int len;
 
-       pathp = fdt_get_name(blob, offset, &l);
+       pathp = fdt_get_name(blob, offset, &len);
        if (!pathp) {
                *pnp = NULL;
-               return false;
+               return len;
        }
 
-       allocl = ++l;
+       len++;
 
-       np = unflatten_dt_alloc(mem, sizeof(struct device_node) + allocl,
+       np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
                                __alignof__(struct device_node));
        if (!dryrun) {
                char *fn;
                of_node_init(np);
                np->full_name = fn = ((char *)np) + sizeof(*np);
 
-               memcpy(fn, pathp, l);
+               memcpy(fn, pathp, len);
 
                if (dad != NULL) {
                        np->parent = dad;
@@ -295,6 +295,7 @@ static int unflatten_dt_nodes(const void *blob,
        struct device_node *nps[FDT_MAX_DEPTH];
        void *base = mem;
        bool dryrun = !base;
+       int ret;
 
        if (nodepp)
                *nodepp = NULL;
@@ -322,9 +323,10 @@ static int unflatten_dt_nodes(const void *blob,
                    !of_fdt_device_is_available(blob, offset))
                        continue;
 
-               if (!populate_node(blob, offset, &mem, nps[depth],
-                                  &nps[depth+1], dryrun))
-                       return mem - base;
+               ret = populate_node(blob, offset, &mem, nps[depth],
+                                  &nps[depth+1], dryrun);
+               if (ret < 0)
+                       return ret;
 
                if (!dryrun && nodepp && !*nodepp)
                        *nodepp = nps[depth+1];
@@ -372,6 +374,10 @@ void *__unflatten_device_tree(const void *blob,
 {
        int size;
        void *mem;
+       int ret;
+
+       if (mynodes)
+               *mynodes = NULL;
 
        pr_debug(" -> unflatten_device_tree()\n");
 
@@ -392,7 +398,7 @@ void *__unflatten_device_tree(const void *blob,
 
        /* First pass, scan for size */
        size = unflatten_dt_nodes(blob, NULL, dad, NULL);
-       if (size < 0)
+       if (size <= 0)
                return NULL;
 
        size = ALIGN(size, 4);
@@ -410,12 +416,16 @@ void *__unflatten_device_tree(const void *blob,
        pr_debug("  unflattening %p...\n", mem);
 
        /* Second pass, do actual unflattening */
-       unflatten_dt_nodes(blob, mem, dad, mynodes);
+       ret = unflatten_dt_nodes(blob, mem, dad, mynodes);
+
        if (be32_to_cpup(mem + size) != 0xdeadbeef)
                pr_warn("End of tree marker overwritten: %08x\n",
                        be32_to_cpup(mem + size));
 
-       if (detached && mynodes) {
+       if (ret <= 0)
+               return NULL;
+
+       if (detached && mynodes && *mynodes) {
                of_node_set_flag(*mynodes, OF_DETACHED);
                pr_debug("unflattened tree is detached\n");
        }
index d9e6a324de0a776989e3c25fce20b04e5edc8eb9..d717efbd637dae7423e3e98583ce7d0947712856 100644 (file)
@@ -8,6 +8,8 @@
  * Copyright (C) 1996-2005 Paul Mackerras.
  */
 
+#define FDT_ALIGN_SIZE 8
+
 /**
  * struct alias_prop - Alias property in 'aliases' node
  * @link:      List node to link the structure in aliases_lookup list
index 50bbe0edf538022c2f9f2f13546499fe4addf643..23effe5e50ece15469f8ff6fe8d20f4820bf23d5 100644 (file)
@@ -57,7 +57,7 @@ struct fragment {
  * struct overlay_changeset
  * @id:                        changeset identifier
  * @ovcs_list:         list on which we are located
- * @fdt:               FDT that was unflattened to create @overlay_tree
+ * @fdt:               base of memory allocated to hold aligned FDT that was unflattened to create @overlay_tree
  * @overlay_tree:      expanded device tree that contains the fragment nodes
  * @count:             count of fragment structures
  * @fragments:         fragment nodes in the overlay expanded device tree
@@ -719,8 +719,8 @@ static struct device_node *find_target(struct device_node *info_node)
 /**
  * init_overlay_changeset() - initialize overlay changeset from overlay tree
  * @ovcs:      Overlay changeset to build
- * @fdt:       the FDT that was unflattened to create @tree
- * @tree:      Contains all the overlay fragments and overlay fixup nodes
+ * @fdt:       base of memory allocated to hold aligned FDT that was unflattened to create @tree
+ * @tree:      Contains the overlay fragments and overlay fixup nodes
  *
  * Initialize @ovcs.  Populate @ovcs->fragments with node information from
  * the top level of @tree.  The relevant top level nodes are the fragment
@@ -873,7 +873,7 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
  * internal documentation
  *
  * of_overlay_apply() - Create and apply an overlay changeset
- * @fdt:       the FDT that was unflattened to create @tree
+ * @fdt:       base of memory allocated to hold the aligned FDT
  * @tree:      Expanded overlay device tree
  * @ovcs_id:   Pointer to overlay changeset id
  *
@@ -953,7 +953,9 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
        /*
         * after overlay_notify(), ovcs->overlay_tree related pointers may have
         * leaked to drivers, so can not kfree() tree, aka ovcs->overlay_tree;
-        * and can not free fdt, aka ovcs->fdt
+        * and can not free memory containing aligned fdt.  The aligned fdt
+        * is contained within the memory at ovcs->fdt, possibly at an offset
+        * from ovcs->fdt.
         */
        ret = overlay_notify(ovcs, OF_OVERLAY_PRE_APPLY);
        if (ret) {
@@ -1014,10 +1016,11 @@ out:
 int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
                         int *ovcs_id)
 {
-       const void *new_fdt;
+       void *new_fdt;
+       void *new_fdt_align;
        int ret;
        u32 size;
-       struct device_node *overlay_root;
+       struct device_node *overlay_root = NULL;
 
        *ovcs_id = 0;
        ret = 0;
@@ -1036,11 +1039,14 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
         * Must create permanent copy of FDT because of_fdt_unflatten_tree()
         * will create pointers to the passed in FDT in the unflattened tree.
         */
-       new_fdt = kmemdup(overlay_fdt, size, GFP_KERNEL);
+       new_fdt = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
        if (!new_fdt)
                return -ENOMEM;
 
-       of_fdt_unflatten_tree(new_fdt, NULL, &overlay_root);
+       new_fdt_align = PTR_ALIGN(new_fdt, FDT_ALIGN_SIZE);
+       memcpy(new_fdt_align, overlay_fdt, size);
+
+       of_fdt_unflatten_tree(new_fdt_align, NULL, &overlay_root);
        if (!overlay_root) {
                pr_err("unable to unflatten overlay_fdt\n");
                ret = -EINVAL;
index 5036a362f52e711ff0a3381b280a2d37e4262022..aab6383f02198928f47319f9e70ef8b076dd2c00 100644 (file)
@@ -1038,6 +1038,25 @@ static bool of_is_ancestor_of(struct device_node *test_ancestor,
        return false;
 }
 
+/*
+ * of_get_compat_node() - Walk up from @np to the closest node carrying a
+ * "compatible" property, i.e. the node that will actually become a device.
+ *
+ * Returns the matching node with its refcount elevated (caller must
+ * of_node_put() it), or NULL if @np or an ancestor is disabled before a
+ * compatible node is found. The NULL case is safe mid-loop: both
+ * of_find_property() and of_get_next_parent() tolerate a NULL node, so
+ * the while condition terminates the walk on the next iteration.
+ */
+static struct device_node *of_get_compat_node(struct device_node *np)
+{
+       of_node_get(np);
+
+       while (np) {
+               /* Don't allow resolving to a disabled node */
+               if (!of_device_is_available(np)) {
+                       of_node_put(np);
+                       np = NULL;
+               }
+
+               if (of_find_property(np, "compatible", NULL))
+                       break;
+
+               /* Drops the ref on np, takes a ref on its parent */
+               np = of_get_next_parent(np);
+       }
+
+       return np;
+}
+
 /**
  * of_link_to_phandle - Add fwnode link to supplier from supplier phandle
  * @con_np: consumer device tree node
@@ -1061,25 +1080,11 @@ static int of_link_to_phandle(struct device_node *con_np,
        struct device *sup_dev;
        struct device_node *tmp_np = sup_np;
 
-       of_node_get(sup_np);
        /*
         * Find the device node that contains the supplier phandle.  It may be
         * @sup_np or it may be an ancestor of @sup_np.
         */
-       while (sup_np) {
-
-               /* Don't allow linking to a disabled supplier */
-               if (!of_device_is_available(sup_np)) {
-                       of_node_put(sup_np);
-                       sup_np = NULL;
-               }
-
-               if (of_find_property(sup_np, "compatible", NULL))
-                       break;
-
-               sup_np = of_get_next_parent(sup_np);
-       }
-
+       sup_np = of_get_compat_node(sup_np);
        if (!sup_np) {
                pr_debug("Not linking %pOFP to %pOFP - No device\n",
                         con_np, tmp_np);
@@ -1225,6 +1230,8 @@ static struct device_node *parse_##fname(struct device_node *np,       \
  * @parse_prop.prop_name: Name of property holding a phandle value
  * @parse_prop.index: For properties holding a list of phandles, this is the
  *                   index into the list
+ * @optional: The property can be an optional dependency.
+ * @node_not_dev: The consumer node containing the property is never a device.
  *
  * Returns:
  * parse_prop() return values are
@@ -1236,6 +1243,7 @@ struct supplier_bindings {
        struct device_node *(*parse_prop)(struct device_node *np,
                                          const char *prop_name, int index);
        bool optional;
+       bool node_not_dev;
 };
 
 DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
@@ -1260,9 +1268,19 @@ DEFINE_SIMPLE_PROP(pinctrl5, "pinctrl-5", NULL)
 DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL)
 DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
 DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
+DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL)
 DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
 DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
-DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
+
+/*
+ * parse_gpios() - Resolve a "*-gpios" property to its supplier node.
+ *
+ * Properties ending in ",nr-gpios" (e.g. "snps,nr-gpios") encode a GPIO
+ * count, not a phandle list, so they must not be treated as supplier
+ * references; return NULL for those instead of parsing garbage.
+ */
+static struct device_node *parse_gpios(struct device_node *np,
+                                      const char *prop_name, int index)
+{
+       if (!strcmp_suffix(prop_name, ",nr-gpios"))
+               return NULL;
+
+       return parse_suffix_prop_cells(np, prop_name, index, "-gpios",
+                                      "#gpio-cells");
+}
 
 static struct device_node *parse_iommu_maps(struct device_node *np,
                                            const char *prop_name, int index)
@@ -1334,6 +1352,7 @@ static const struct supplier_bindings of_supplier_bindings[] = {
        { .parse_prop = parse_pinctrl6, },
        { .parse_prop = parse_pinctrl7, },
        { .parse_prop = parse_pinctrl8, },
+       { .parse_prop = parse_remote_endpoint, .node_not_dev = true, },
        { .parse_prop = parse_gpio_compat, },
        { .parse_prop = parse_interrupts, },
        { .parse_prop = parse_regulators, },
@@ -1378,10 +1397,16 @@ static int of_link_property(struct device_node *con_np, const char *prop_name)
                }
 
                while ((phandle = s->parse_prop(con_np, prop_name, i))) {
+                       struct device_node *con_dev_np;
+
+                       con_dev_np = s->node_not_dev
+                                       ? of_get_compat_node(con_np)
+                                       : of_node_get(con_np);
                        matched = true;
                        i++;
-                       of_link_to_phandle(con_np, phandle);
+                       of_link_to_phandle(con_dev_np, phandle);
                        of_node_put(phandle);
+                       of_node_put(con_dev_np);
                }
                s++;
        }
index eb100627c186adab7138549fc76b81f6e8e5908e..819a20acaa939861b0420519e251715e86942f86 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
+#include <linux/kernel.h>
 
 #include <linux/i2c.h>
 #include <linux/i2c-mux.h>
@@ -1408,7 +1409,8 @@ static void attach_node_and_children(struct device_node *np)
 static int __init unittest_data_add(void)
 {
        void *unittest_data;
-       struct device_node *unittest_data_node, *np;
+       void *unittest_data_align;
+       struct device_node *unittest_data_node = NULL, *np;
        /*
         * __dtb_testcases_begin[] and __dtb_testcases_end[] are magically
         * created by cmd_dt_S_dtb in scripts/Makefile.lib
@@ -1417,21 +1419,29 @@ static int __init unittest_data_add(void)
        extern uint8_t __dtb_testcases_end[];
        const int size = __dtb_testcases_end - __dtb_testcases_begin;
        int rc;
+       void *ret;
 
        if (!size) {
-               pr_warn("%s: No testcase data to attach; not running tests\n",
-                       __func__);
+               pr_warn("%s: testcases is empty\n", __func__);
                return -ENODATA;
        }
 
        /* creating copy */
-       unittest_data = kmemdup(__dtb_testcases_begin, size, GFP_KERNEL);
+       unittest_data = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
        if (!unittest_data)
                return -ENOMEM;
 
-       of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
+       unittest_data_align = PTR_ALIGN(unittest_data, FDT_ALIGN_SIZE);
+       memcpy(unittest_data_align, __dtb_testcases_begin, size);
+
+       ret = of_fdt_unflatten_tree(unittest_data_align, NULL, &unittest_data_node);
+       if (!ret) {
+               pr_warn("%s: unflatten testcases tree failed\n", __func__);
+               kfree(unittest_data);
+               return -ENODATA;
+       }
        if (!unittest_data_node) {
-               pr_warn("%s: No tree to attach; not running tests\n", __func__);
+               pr_warn("%s: testcases tree is empty\n", __func__);
                kfree(unittest_data);
                return -ENODATA;
        }
index 27a17a1e4a7c3205a60ee2232d10e1caeb4b1846..1ff4ce24f4b300e05c0276f64456cfa6f99caeb0 100644 (file)
@@ -1292,7 +1292,7 @@ exit_unlock:
         * resumes, hv_pci_restore_msi_state() is able to correctly restore
         * the interrupt with the correct affinity.
         */
-       if (res && hbus->state != hv_pcibus_removing)
+       if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
                dev_err(&hbus->hdev->device,
                        "%s() failed: %#llx", __func__, res);
 
@@ -1458,7 +1458,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
         * Prevents hv_pci_onchannelcallback() from running concurrently
         * in the tasklet.
         */
-       tasklet_disable(&channel->callback_event);
+       tasklet_disable_in_atomic(&channel->callback_event);
 
        /*
         * Since this function is called with IRQ locks held, can't
index f81e2ec9000507f04742a4bf22b6a09201c428e3..666d8a9b557fced7e4a51b501a2a76d66c867bcd 100644 (file)
@@ -306,7 +306,7 @@ static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
 {
        struct dev_ext_attribute *eattr = container_of(attr,
                                struct dev_ext_attribute, attr);
-       return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var);
+       return sysfs_emit(buf, "config=0x%lx\n", (unsigned long)eattr->var);
 }
 
 static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
@@ -525,8 +525,8 @@ static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
        struct dev_ext_attribute *eattr = container_of(attr,
                                        struct dev_ext_attribute, attr);
        /* Global events have single fixed source code */
-       return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n",
-                               (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
+       return sysfs_emit(buf, "event=0x%lx,source=0x%x\n",
+                         (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
 }
 
 /*
@@ -696,7 +696,7 @@ static ssize_t cci_pmu_format_show(struct device *dev,
 {
        struct dev_ext_attribute *eattr = container_of(attr,
                                struct dev_ext_attribute, attr);
-       return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
+       return sysfs_emit(buf, "%s\n", (char *)eattr->var);
 }
 
 static ssize_t cci_pmu_event_show(struct device *dev,
@@ -705,8 +705,8 @@ static ssize_t cci_pmu_event_show(struct device *dev,
        struct dev_ext_attribute *eattr = container_of(attr,
                                struct dev_ext_attribute, attr);
        /* source parameter is mandatory for normal PMU events */
-       return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n",
-                                        (unsigned long)eattr->var);
+       return sysfs_emit(buf, "source=?,event=0x%lx\n",
+                         (unsigned long)eattr->var);
 }
 
 static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
index a0a71c1df042abf25ff8935aa95ca80f7a1565bf..96d47cb302dd1001d907ee9773116a07243d4ae9 100644 (file)
@@ -221,7 +221,7 @@ static ssize_t arm_ccn_pmu_format_show(struct device *dev,
        struct dev_ext_attribute *ea = container_of(attr,
                        struct dev_ext_attribute, attr);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var);
+       return sysfs_emit(buf, "%s\n", (char *)ea->var);
 }
 
 #define CCN_FORMAT_ATTR(_name, _config) \
@@ -326,43 +326,38 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
        struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
        struct arm_ccn_pmu_event *event = container_of(attr,
                        struct arm_ccn_pmu_event, attr);
-       ssize_t res;
+       int res;
 
-       res = scnprintf(buf, PAGE_SIZE, "type=0x%x", event->type);
+       res = sysfs_emit(buf, "type=0x%x", event->type);
        if (event->event)
-               res += scnprintf(buf + res, PAGE_SIZE - res, ",event=0x%x",
-                               event->event);
+               res += sysfs_emit_at(buf, res, ",event=0x%x", event->event);
        if (event->def)
-               res += scnprintf(buf + res, PAGE_SIZE - res, ",%s",
-                               event->def);
+               res += sysfs_emit_at(buf, res, ",%s", event->def);
        if (event->mask)
-               res += scnprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
-                               event->mask);
+               res += sysfs_emit_at(buf, res, ",mask=0x%x", event->mask);
 
        /* Arguments required by an event */
        switch (event->type) {
        case CCN_TYPE_CYCLES:
                break;
        case CCN_TYPE_XP:
-               res += scnprintf(buf + res, PAGE_SIZE - res,
-                               ",xp=?,vc=?");
+               res += sysfs_emit_at(buf, res, ",xp=?,vc=?");
                if (event->event == CCN_EVENT_WATCHPOINT)
-                       res += scnprintf(buf + res, PAGE_SIZE - res,
+                       res += sysfs_emit_at(buf, res,
                                        ",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
                else
-                       res += scnprintf(buf + res, PAGE_SIZE - res,
-                                       ",bus=?");
+                       res += sysfs_emit_at(buf, res, ",bus=?");
 
                break;
        case CCN_TYPE_MN:
-               res += scnprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
+               res += sysfs_emit_at(buf, res, ",node=%d", ccn->mn_id);
                break;
        default:
-               res += scnprintf(buf + res, PAGE_SIZE - res, ",node=?");
+               res += sysfs_emit_at(buf, res, ",node=?");
                break;
        }
 
-       res += scnprintf(buf + res, PAGE_SIZE - res, "\n");
+       res += sysfs_emit_at(buf, res, "\n");
 
        return res;
 }
@@ -476,7 +471,7 @@ static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
        struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
        u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
 
-       return mask ? snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL;
+       return mask ? sysfs_emit(buf, "0x%016llx\n", *mask) : -EINVAL;
 }
 
 static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
index 1328159fe564d6187177ce1d73bc9ccff18d6835..56a5c355701d0192a5d3429c080032f904b45254 100644 (file)
@@ -348,19 +348,19 @@ static ssize_t arm_cmn_event_show(struct device *dev,
        eattr = container_of(attr, typeof(*eattr), attr);
 
        if (eattr->type == CMN_TYPE_DTC)
-               return snprintf(buf, PAGE_SIZE, "type=0x%x\n", eattr->type);
+               return sysfs_emit(buf, "type=0x%x\n", eattr->type);
 
        if (eattr->type == CMN_TYPE_WP)
-               return snprintf(buf, PAGE_SIZE,
-                               "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
-                               eattr->type, eattr->eventid);
+               return sysfs_emit(buf,
+                                 "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
+                                 eattr->type, eattr->eventid);
 
        if (arm_cmn_is_occup_event(eattr->type, eattr->eventid))
-               return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
-                               eattr->type, eattr->eventid, eattr->occupid);
+               return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
+                                 eattr->type, eattr->eventid, eattr->occupid);
 
-       return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x\n",
-                       eattr->type, eattr->eventid);
+       return sysfs_emit(buf, "type=0x%x,eventid=0x%x\n", eattr->type,
+                         eattr->eventid);
 }
 
 static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
@@ -560,12 +560,12 @@ static ssize_t arm_cmn_format_show(struct device *dev,
        int lo = __ffs(fmt->field), hi = __fls(fmt->field);
 
        if (lo == hi)
-               return snprintf(buf, PAGE_SIZE, "config:%d\n", lo);
+               return sysfs_emit(buf, "config:%d\n", lo);
 
        if (!fmt->config)
-               return snprintf(buf, PAGE_SIZE, "config:%d-%d\n", lo, hi);
+               return sysfs_emit(buf, "config:%d-%d\n", lo, hi);
 
-       return snprintf(buf, PAGE_SIZE, "config%d:%d-%d\n", fmt->config, lo, hi);
+       return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi);
 }
 
 #define _CMN_FORMAT_ATTR(_name, _cfg, _fld)                            \
index f2a85500258d0bca8bfb57f01bd0d1b4f545d456..b6c2511d59af27862baffdb9028da48f96197bb2 100644 (file)
@@ -113,7 +113,7 @@ dmc620_pmu_event_show(struct device *dev,
 
        eattr = container_of(attr, typeof(*eattr), attr);
 
-       return sprintf(page, "event=0x%x,clkdiv2=0x%x\n", eattr->eventid, eattr->clkdiv2);
+       return sysfs_emit(page, "event=0x%x,clkdiv2=0x%x\n", eattr->eventid, eattr->clkdiv2);
 }
 
 #define DMC620_PMU_EVENT_ATTR(_name, _eventid, _clkdiv2)               \
index 0459a340346942f96b41e9afc573d16b4930f74f..196faea074d04be361ec9c0b659058426be82958 100644 (file)
@@ -136,8 +136,7 @@ static ssize_t dsu_pmu_sysfs_event_show(struct device *dev,
 {
        struct dev_ext_attribute *eattr = container_of(attr,
                                        struct dev_ext_attribute, attr);
-       return snprintf(buf, PAGE_SIZE, "event=0x%lx\n",
-                                        (unsigned long)eattr->var);
+       return sysfs_emit(buf, "event=0x%lx\n", (unsigned long)eattr->var);
 }
 
 static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
@@ -146,7 +145,7 @@ static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
 {
        struct dev_ext_attribute *eattr = container_of(attr,
                                        struct dev_ext_attribute, attr);
-       return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
+       return sysfs_emit(buf, "%s\n", (char *)eattr->var);
 }
 
 static ssize_t dsu_pmu_cpumask_show(struct device *dev,
index 933bd8410fc2afa94eb186c6be3c58148ea41837..513de1f54e2d7371ceca11b9427a265394aeadc9 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
  */
 #define pr_fmt(fmt) "hw perfevents: " fmt
+#define dev_fmt pr_fmt
 
 #include <linux/bug.h>
 #include <linux/cpumask.h>
@@ -62,7 +63,7 @@ static bool pmu_has_irq_affinity(struct device_node *node)
        return !!of_find_property(node, "interrupt-affinity", NULL);
 }
 
-static int pmu_parse_irq_affinity(struct device_node *node, int i)
+static int pmu_parse_irq_affinity(struct device *dev, int i)
 {
        struct device_node *dn;
        int cpu;
@@ -72,19 +73,18 @@ static int pmu_parse_irq_affinity(struct device_node *node, int i)
         * affinity matches our logical CPU order, as we used to assume.
         * This is fragile, so we'll warn in pmu_parse_irqs().
         */
-       if (!pmu_has_irq_affinity(node))
+       if (!pmu_has_irq_affinity(dev->of_node))
                return i;
 
-       dn = of_parse_phandle(node, "interrupt-affinity", i);
+       dn = of_parse_phandle(dev->of_node, "interrupt-affinity", i);
        if (!dn) {
-               pr_warn("failed to parse interrupt-affinity[%d] for %pOFn\n",
-                       i, node);
+               dev_warn(dev, "failed to parse interrupt-affinity[%d]\n", i);
                return -EINVAL;
        }
 
        cpu = of_cpu_node_to_id(dn);
        if (cpu < 0) {
-               pr_warn("failed to find logical CPU for %pOFn\n", dn);
+               dev_warn(dev, "failed to find logical CPU for %pOFn\n", dn);
                cpu = nr_cpu_ids;
        }
 
@@ -98,19 +98,18 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
        int i = 0, num_irqs;
        struct platform_device *pdev = pmu->plat_device;
        struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+       struct device *dev = &pdev->dev;
 
        num_irqs = platform_irq_count(pdev);
-       if (num_irqs < 0) {
-               pr_err("unable to count PMU IRQs\n");
-               return num_irqs;
-       }
+       if (num_irqs < 0)
+               return dev_err_probe(dev, num_irqs, "unable to count PMU IRQs\n");
 
        /*
         * In this case we have no idea which CPUs are covered by the PMU.
         * To match our prior behaviour, we assume all CPUs in this case.
         */
        if (num_irqs == 0) {
-               pr_warn("no irqs for PMU, sampling events not supported\n");
+               dev_warn(dev, "no irqs for PMU, sampling events not supported\n");
                pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
                cpumask_setall(&pmu->supported_cpus);
                return 0;
@@ -122,10 +121,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
                        return pmu_parse_percpu_irq(pmu, irq);
        }
 
-       if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(pdev->dev.of_node)) {
-               pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
-                       pdev->dev.of_node);
-       }
+       if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(dev->of_node))
+               dev_warn(dev, "no interrupt-affinity property, guessing.\n");
 
        for (i = 0; i < num_irqs; i++) {
                int cpu, irq;
@@ -135,18 +132,18 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
                        continue;
 
                if (irq_is_percpu_devid(irq)) {
-                       pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
+                       dev_warn(dev, "multiple PPIs or mismatched SPI/PPI detected\n");
                        return -EINVAL;
                }
 
-               cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
+               cpu = pmu_parse_irq_affinity(dev, i);
                if (cpu < 0)
                        return cpu;
                if (cpu >= nr_cpu_ids)
                        continue;
 
                if (per_cpu(hw_events->irq, cpu)) {
-                       pr_warn("multiple PMU IRQs for the same CPU detected\n");
+                       dev_warn(dev, "multiple PMU IRQs for the same CPU detected\n");
                        return -EINVAL;
                }
 
@@ -191,9 +188,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
                         const struct of_device_id *of_table,
                         const struct pmu_probe_info *probe_table)
 {
-       const struct of_device_id *of_id;
        armpmu_init_fn init_fn;
-       struct device_node *node = pdev->dev.of_node;
+       struct device *dev = &pdev->dev;
        struct arm_pmu *pmu;
        int ret = -ENODEV;
 
@@ -207,15 +203,14 @@ int arm_pmu_device_probe(struct platform_device *pdev,
        if (ret)
                goto out_free;
 
-       if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
-               init_fn = of_id->data;
-
-               pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
+       init_fn = of_device_get_match_data(dev);
+       if (init_fn) {
+               pmu->secure_access = of_property_read_bool(dev->of_node,
                                                           "secure-reg-access");
 
                /* arm64 systems boot only as non-secure */
                if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
-                       pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
+                       dev_warn(dev, "ignoring \"secure-reg-access\" property for arm64\n");
                        pmu->secure_access = false;
                }
 
@@ -226,7 +221,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
        }
 
        if (ret) {
-               pr_info("%pOF: failed to probe PMU!\n", node);
+               dev_err(dev, "failed to probe PMU!\n");
                goto out_free;
        }
 
@@ -235,15 +230,16 @@ int arm_pmu_device_probe(struct platform_device *pdev,
                goto out_free_irqs;
 
        ret = armpmu_register(pmu);
-       if (ret)
-               goto out_free;
+       if (ret) {
+               dev_err(dev, "failed to register PMU devices!\n");
+               goto out_free_irqs;
+       }
 
        return 0;
 
 out_free_irqs:
        armpmu_free_irqs(pmu);
 out_free:
-       pr_info("%pOF: failed to register PMU devices!\n", node);
        armpmu_free(pmu);
        return ret;
 }
index 8ff7a67f691cf93b9d9576af74bce580a06317f3..ff6fab4bae30dc1873c4d4299cffacb24a015b66 100644 (file)
@@ -506,30 +506,24 @@ static ssize_t smmu_pmu_event_show(struct device *dev,
 
        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
 
-       return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+       return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
 }
 
-#define SMMU_EVENT_ATTR(name, config) \
-       PMU_EVENT_ATTR(name, smmu_event_attr_##name, \
-                      config, smmu_pmu_event_show)
-SMMU_EVENT_ATTR(cycles, 0);
-SMMU_EVENT_ATTR(transaction, 1);
-SMMU_EVENT_ATTR(tlb_miss, 2);
-SMMU_EVENT_ATTR(config_cache_miss, 3);
-SMMU_EVENT_ATTR(trans_table_walk_access, 4);
-SMMU_EVENT_ATTR(config_struct_access, 5);
-SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6);
-SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7);
+#define SMMU_EVENT_ATTR(name, config)                                  \
+       (&((struct perf_pmu_events_attr) {                              \
+               .attr = __ATTR(name, 0444, smmu_pmu_event_show, NULL),  \
+               .id = config,                                           \
+       }).attr.attr)
 
 static struct attribute *smmu_pmu_events[] = {
-       &smmu_event_attr_cycles.attr.attr,
-       &smmu_event_attr_transaction.attr.attr,
-       &smmu_event_attr_tlb_miss.attr.attr,
-       &smmu_event_attr_config_cache_miss.attr.attr,
-       &smmu_event_attr_trans_table_walk_access.attr.attr,
-       &smmu_event_attr_config_struct_access.attr.attr,
-       &smmu_event_attr_pcie_ats_trans_rq.attr.attr,
-       &smmu_event_attr_pcie_ats_trans_passed.attr.attr,
+       SMMU_EVENT_ATTR(cycles, 0),
+       SMMU_EVENT_ATTR(transaction, 1),
+       SMMU_EVENT_ATTR(tlb_miss, 2),
+       SMMU_EVENT_ATTR(config_cache_miss, 3),
+       SMMU_EVENT_ATTR(trans_table_walk_access, 4),
+       SMMU_EVENT_ATTR(config_struct_access, 5),
+       SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
+       SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
        NULL
 };
 
@@ -560,7 +554,7 @@ static ssize_t smmu_pmu_identifier_attr_show(struct device *dev,
 {
        struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
 
-       return snprintf(page, PAGE_SIZE, "0x%08x\n", smmu_pmu->iidr);
+       return sysfs_emit(page, "0x%08x\n", smmu_pmu->iidr);
 }
 
 static umode_t smmu_pmu_identifier_attr_visible(struct kobject *kobj,
index d3929ccebfd2fa1dbea1d83dbb1c196d086d5781..8a1e86ab2d8e4d4e878ecb0861d73785ccf67557 100644 (file)
@@ -126,8 +126,7 @@ static ssize_t arm_spe_pmu_cap_show(struct device *dev,
                container_of(attr, struct dev_ext_attribute, attr);
        int cap = (long)ea->var;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n",
-               arm_spe_pmu_cap_get(spe_pmu, cap));
+       return sysfs_emit(buf, "%u\n", arm_spe_pmu_cap_get(spe_pmu, cap));
 }
 
 #define SPE_EXT_ATTR_ENTRY(_name, _func, _var)                         \
index be1f26b62ddb8a1540449286a789742e9b374c8e..2bbb9318806495687e2d9ef9dceb968a1cab1dbf 100644 (file)
@@ -110,7 +110,7 @@ static ssize_t ddr_perf_identifier_show(struct device *dev,
 {
        struct ddr_pmu *pmu = dev_get_drvdata(dev);
 
-       return sprintf(page, "%s\n", pmu->devtype_data->identifier);
+       return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);
 }
 
 static umode_t ddr_perf_identifier_attr_visible(struct kobject *kobj,
@@ -170,8 +170,7 @@ static ssize_t ddr_perf_filter_cap_show(struct device *dev,
                container_of(attr, struct dev_ext_attribute, attr);
        int cap = (long)ea->var;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n",
-                       ddr_perf_filter_cap_get(pmu, cap));
+       return sysfs_emit(buf, "%u\n", ddr_perf_filter_cap_get(pmu, cap));
 }
 
 #define PERF_EXT_ATTR_ENTRY(_name, _func, _var)                                \
@@ -220,7 +219,7 @@ ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
        struct perf_pmu_events_attr *pmu_attr;
 
        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
-       return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+       return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
 }
 
 #define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)                            \
index e8377061845ff77f325c2079ecbe3c9110fb37cd..7643c9f93e367a697b17df3efd7aeef9f5f5643f 100644 (file)
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
-                         hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o
+                         hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \
+                         hisi_uncore_pa_pmu.o
index ac1a8c120a008c124b7f1b044647ecc39662c42f..7c8a4bc21db4ca2fdc1fed3da1d2e2760a714360 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/list.h>
-#include <linux/platform_device.h>
 #include <linux/smp.h>
 
 #include "hisi_uncore_pmu.h"
 
-/* DDRC register definition */
+/* DDRC register definition in v1 */
 #define DDRC_PERF_CTRL         0x010
 #define DDRC_FLUX_WR           0x380
 #define DDRC_FLUX_RD           0x384
 #define DDRC_INT_CLEAR         0x6d0
 #define DDRC_VERSION           0x710
 
+/* DDRC register definition in v2 */
+#define DDRC_V2_INT_MASK       0x528
+#define DDRC_V2_INT_STATUS     0x52c
+#define DDRC_V2_INT_CLEAR      0x530
+#define DDRC_V2_EVENT_CNT      0xe00
+#define DDRC_V2_EVENT_CTRL     0xe70
+#define DDRC_V2_EVENT_TYPE     0xe74
+#define DDRC_V2_PERF_CTRL      0xeA0
+
 /* DDRC has 8-counters */
 #define DDRC_NR_COUNTERS       0x8
-#define DDRC_PERF_CTRL_EN      0x2
+#define DDRC_V1_PERF_CTRL_EN   0x2
+#define DDRC_V2_PERF_CTRL_EN   0x1
+#define DDRC_V1_NR_EVENTS      0x7
+#define DDRC_V2_NR_EVENTS      0x90
 
 /*
- * For DDRC PMU, there are eight-events and every event has been mapped
+ * For PMU v1, there are eight-events and every event has been mapped
  * to fixed-purpose counters which register offset is not consistent.
  * Therefore there is no write event type and we assume that event
  * code (0 to 7) is equal to counter index in PMU driver.
@@ -54,73 +65,85 @@ static const u32 ddrc_reg_off[] = {
 
 /*
  * Select the counter register offset using the counter index.
- * In DDRC there are no programmable counter, the count
- * is readed form the statistics counter register itself.
+ * In PMU v1, there are no programmable counter, the count
+ * is read form the statistics counter register itself.
  */
-static u32 hisi_ddrc_pmu_get_counter_offset(int cntr_idx)
+static u32 hisi_ddrc_pmu_v1_get_counter_offset(int cntr_idx)
 {
        return ddrc_reg_off[cntr_idx];
 }
 
-static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu,
-                                     struct hw_perf_event *hwc)
+static u32 hisi_ddrc_pmu_v2_get_counter_offset(int cntr_idx)
 {
-       /* Use event code as counter index */
-       u32 idx = GET_DDRC_EVENTID(hwc);
-
-       if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
-               dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
-               return 0;
-       }
+       return DDRC_V2_EVENT_CNT + cntr_idx * 8;
+}
 
-       return readl(ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
+static u64 hisi_ddrc_pmu_v1_read_counter(struct hisi_pmu *ddrc_pmu,
+                                     struct hw_perf_event *hwc)
+{
+       return readl(ddrc_pmu->base +
+                    hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx));
 }
 
-static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu,
+static void hisi_ddrc_pmu_v1_write_counter(struct hisi_pmu *ddrc_pmu,
                                        struct hw_perf_event *hwc, u64 val)
 {
-       u32 idx = GET_DDRC_EVENTID(hwc);
+       writel((u32)val,
+              ddrc_pmu->base + hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx));
+}
 
-       if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
-               dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
-               return;
-       }
+static u64 hisi_ddrc_pmu_v2_read_counter(struct hisi_pmu *ddrc_pmu,
+                                        struct hw_perf_event *hwc)
+{
+       return readq(ddrc_pmu->base +
+                    hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx));
+}
 
-       writel((u32)val,
-              ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
+static void hisi_ddrc_pmu_v2_write_counter(struct hisi_pmu *ddrc_pmu,
+                                          struct hw_perf_event *hwc, u64 val)
+{
+       writeq(val,
+              ddrc_pmu->base + hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx));
 }
 
 /*
- * For DDRC PMU, event has been mapped to fixed-purpose counter by hardware,
- * so there is no need to write event type.
+ * For DDRC PMU v1, event has been mapped to fixed-purpose counter by hardware,
+ * so there is no need to write event type, while it is programmable counter in
+ * PMU v2.
  */
 static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
                                       u32 type)
 {
+       u32 offset;
+
+       if (hha_pmu->identifier >= HISI_PMU_V2) {
+               offset = DDRC_V2_EVENT_TYPE + 4 * idx;
+               writel(type, hha_pmu->base + offset);
+       }
 }
 
-static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu)
+static void hisi_ddrc_pmu_v1_start_counters(struct hisi_pmu *ddrc_pmu)
 {
        u32 val;
 
        /* Set perf_enable in DDRC_PERF_CTRL to start event counting */
        val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
-       val |= DDRC_PERF_CTRL_EN;
+       val |= DDRC_V1_PERF_CTRL_EN;
        writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
 }
 
-static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu)
+static void hisi_ddrc_pmu_v1_stop_counters(struct hisi_pmu *ddrc_pmu)
 {
        u32 val;
 
        /* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
        val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
-       val &= ~DDRC_PERF_CTRL_EN;
+       val &= ~DDRC_V1_PERF_CTRL_EN;
        writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
 }
 
-static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
-                                        struct hw_perf_event *hwc)
+static void hisi_ddrc_pmu_v1_enable_counter(struct hisi_pmu *ddrc_pmu,
+                                           struct hw_perf_event *hwc)
 {
        u32 val;
 
@@ -130,8 +153,8 @@ static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
        writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
 }
 
-static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
-                                         struct hw_perf_event *hwc)
+static void hisi_ddrc_pmu_v1_disable_counter(struct hisi_pmu *ddrc_pmu,
+                                            struct hw_perf_event *hwc)
 {
        u32 val;
 
@@ -141,7 +164,7 @@ static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
        writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
 }
 
-static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
+static int hisi_ddrc_pmu_v1_get_event_idx(struct perf_event *event)
 {
        struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
        unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
@@ -157,87 +180,117 @@ static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
        return idx;
 }
 
-static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu,
+static int hisi_ddrc_pmu_v2_get_event_idx(struct perf_event *event)
+{
+       return hisi_uncore_pmu_get_event_idx(event);
+}
+
+static void hisi_ddrc_pmu_v2_start_counters(struct hisi_pmu *ddrc_pmu)
+{
+       u32 val;
+
+       val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+       val |= DDRC_V2_PERF_CTRL_EN;
+       writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+}
+
+static void hisi_ddrc_pmu_v2_stop_counters(struct hisi_pmu *ddrc_pmu)
+{
+       u32 val;
+
+       val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+       val &= ~DDRC_V2_PERF_CTRL_EN;
+       writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+}
+
+static void hisi_ddrc_pmu_v2_enable_counter(struct hisi_pmu *ddrc_pmu,
+                                           struct hw_perf_event *hwc)
+{
+       u32 val;
+
+       val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
+       val |= 1 << hwc->idx;
+       writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
+}
+
+static void hisi_ddrc_pmu_v2_disable_counter(struct hisi_pmu *ddrc_pmu,
                                             struct hw_perf_event *hwc)
 {
        u32 val;
 
+       val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
+       val &= ~(1 << hwc->idx);
+       writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
+}
+
+static void hisi_ddrc_pmu_v1_enable_counter_int(struct hisi_pmu *ddrc_pmu,
+                                               struct hw_perf_event *hwc)
+{
+       u32 val;
+
        /* Write 0 to enable interrupt */
        val = readl(ddrc_pmu->base + DDRC_INT_MASK);
-       val &= ~(1 << GET_DDRC_EVENTID(hwc));
+       val &= ~(1 << hwc->idx);
        writel(val, ddrc_pmu->base + DDRC_INT_MASK);
 }
 
-static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu,
-                                             struct hw_perf_event *hwc)
+static void hisi_ddrc_pmu_v1_disable_counter_int(struct hisi_pmu *ddrc_pmu,
+                                                struct hw_perf_event *hwc)
 {
        u32 val;
 
        /* Write 1 to mask interrupt */
        val = readl(ddrc_pmu->base + DDRC_INT_MASK);
-       val |= (1 << GET_DDRC_EVENTID(hwc));
+       val |= 1 << hwc->idx;
        writel(val, ddrc_pmu->base + DDRC_INT_MASK);
 }
 
-static irqreturn_t hisi_ddrc_pmu_isr(int irq, void *dev_id)
+static void hisi_ddrc_pmu_v2_enable_counter_int(struct hisi_pmu *ddrc_pmu,
+                                               struct hw_perf_event *hwc)
 {
-       struct hisi_pmu *ddrc_pmu = dev_id;
-       struct perf_event *event;
-       unsigned long overflown;
-       int idx;
-
-       /* Read the DDRC_INT_STATUS register */
-       overflown = readl(ddrc_pmu->base + DDRC_INT_STATUS);
-       if (!overflown)
-               return IRQ_NONE;
+       u32 val;
 
-       /*
-        * Find the counter index which overflowed if the bit was set
-        * and handle it
-        */
-       for_each_set_bit(idx, &overflown, DDRC_NR_COUNTERS) {
-               /* Write 1 to clear the IRQ status flag */
-               writel((1 << idx), ddrc_pmu->base + DDRC_INT_CLEAR);
+       val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
+       val &= ~(1 << hwc->idx);
+       writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
+}
 
-               /* Get the corresponding event struct */
-               event = ddrc_pmu->pmu_events.hw_events[idx];
-               if (!event)
-                       continue;
+static void hisi_ddrc_pmu_v2_disable_counter_int(struct hisi_pmu *ddrc_pmu,
+                                               struct hw_perf_event *hwc)
+{
+       u32 val;
 
-               hisi_uncore_pmu_event_update(event);
-               hisi_uncore_pmu_set_event_period(event);
-       }
+       val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
+       val |= 1 << hwc->idx;
+       writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
+}
 
-       return IRQ_HANDLED;
+static u32 hisi_ddrc_pmu_v1_get_int_status(struct hisi_pmu *ddrc_pmu)
+{
+       return readl(ddrc_pmu->base + DDRC_INT_STATUS);
 }
 
-static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
-                                 struct platform_device *pdev)
+static void hisi_ddrc_pmu_v1_clear_int_status(struct hisi_pmu *ddrc_pmu,
+                                             int idx)
 {
-       int irq, ret;
-
-       /* Read and init IRQ */
-       irq = platform_get_irq(pdev, 0);
-       if (irq < 0)
-               return irq;
-
-       ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
-                              IRQF_NOBALANCING | IRQF_NO_THREAD,
-                              dev_name(&pdev->dev), ddrc_pmu);
-       if (ret < 0) {
-               dev_err(&pdev->dev,
-                       "Fail to request IRQ:%d ret:%d\n", irq, ret);
-               return ret;
-       }
+       writel(1 << idx, ddrc_pmu->base + DDRC_INT_CLEAR);
+}
 
-       ddrc_pmu->irq = irq;
+static u32 hisi_ddrc_pmu_v2_get_int_status(struct hisi_pmu *ddrc_pmu)
+{
+       return readl(ddrc_pmu->base + DDRC_V2_INT_STATUS);
+}
 
-       return 0;
+static void hisi_ddrc_pmu_v2_clear_int_status(struct hisi_pmu *ddrc_pmu,
+                                             int idx)
+{
+       writel(1 << idx, ddrc_pmu->base + DDRC_V2_INT_CLEAR);
 }
 
 static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
        { "HISI0233", },
-       {},
+       { "HISI0234", },
+       {}
 };
 MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
 
@@ -269,21 +322,38 @@ static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
        }
 
        ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
+       if (ddrc_pmu->identifier >= HISI_PMU_V2) {
+               if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
+                                            &ddrc_pmu->sub_id)) {
+                       dev_err(&pdev->dev, "Can not read sub-id!\n");
+                       return -EINVAL;
+               }
+       }
 
        return 0;
 }
 
-static struct attribute *hisi_ddrc_pmu_format_attr[] = {
+static struct attribute *hisi_ddrc_pmu_v1_format_attr[] = {
        HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
        NULL,
 };
 
-static const struct attribute_group hisi_ddrc_pmu_format_group = {
+static const struct attribute_group hisi_ddrc_pmu_v1_format_group = {
+       .name = "format",
+       .attrs = hisi_ddrc_pmu_v1_format_attr,
+};
+
+static struct attribute *hisi_ddrc_pmu_v2_format_attr[] = {
+       HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+       NULL
+};
+
+static const struct attribute_group hisi_ddrc_pmu_v2_format_group = {
        .name = "format",
-       .attrs = hisi_ddrc_pmu_format_attr,
+       .attrs = hisi_ddrc_pmu_v2_format_attr,
 };
 
-static struct attribute *hisi_ddrc_pmu_events_attr[] = {
+static struct attribute *hisi_ddrc_pmu_v1_events_attr[] = {
        HISI_PMU_EVENT_ATTR(flux_wr,            0x00),
        HISI_PMU_EVENT_ATTR(flux_rd,            0x01),
        HISI_PMU_EVENT_ATTR(flux_wcmd,          0x02),
@@ -295,9 +365,21 @@ static struct attribute *hisi_ddrc_pmu_events_attr[] = {
        NULL,
 };
 
-static const struct attribute_group hisi_ddrc_pmu_events_group = {
+static const struct attribute_group hisi_ddrc_pmu_v1_events_group = {
        .name = "events",
-       .attrs = hisi_ddrc_pmu_events_attr,
+       .attrs = hisi_ddrc_pmu_v1_events_attr,
+};
+
+static struct attribute *hisi_ddrc_pmu_v2_events_attr[] = {
+       HISI_PMU_EVENT_ATTR(cycles,             0x00),
+       HISI_PMU_EVENT_ATTR(flux_wr,            0x83),
+       HISI_PMU_EVENT_ATTR(flux_rd,            0x84),
+       NULL
+};
+
+static const struct attribute_group hisi_ddrc_pmu_v2_events_group = {
+       .name = "events",
+       .attrs = hisi_ddrc_pmu_v2_events_attr,
 };
 
 static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
@@ -323,25 +405,50 @@ static const struct attribute_group hisi_ddrc_pmu_identifier_group = {
        .attrs = hisi_ddrc_pmu_identifier_attrs,
 };
 
-static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = {
-       &hisi_ddrc_pmu_format_group,
-       &hisi_ddrc_pmu_events_group,
+static const struct attribute_group *hisi_ddrc_pmu_v1_attr_groups[] = {
+       &hisi_ddrc_pmu_v1_format_group,
+       &hisi_ddrc_pmu_v1_events_group,
        &hisi_ddrc_pmu_cpumask_attr_group,
        &hisi_ddrc_pmu_identifier_group,
        NULL,
 };
 
-static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = {
+static const struct attribute_group *hisi_ddrc_pmu_v2_attr_groups[] = {
+       &hisi_ddrc_pmu_v2_format_group,
+       &hisi_ddrc_pmu_v2_events_group,
+       &hisi_ddrc_pmu_cpumask_attr_group,
+       &hisi_ddrc_pmu_identifier_group,
+       NULL
+};
+
+static const struct hisi_uncore_ops hisi_uncore_ddrc_v1_ops = {
+       .write_evtype           = hisi_ddrc_pmu_write_evtype,
+       .get_event_idx          = hisi_ddrc_pmu_v1_get_event_idx,
+       .start_counters         = hisi_ddrc_pmu_v1_start_counters,
+       .stop_counters          = hisi_ddrc_pmu_v1_stop_counters,
+       .enable_counter         = hisi_ddrc_pmu_v1_enable_counter,
+       .disable_counter        = hisi_ddrc_pmu_v1_disable_counter,
+       .enable_counter_int     = hisi_ddrc_pmu_v1_enable_counter_int,
+       .disable_counter_int    = hisi_ddrc_pmu_v1_disable_counter_int,
+       .write_counter          = hisi_ddrc_pmu_v1_write_counter,
+       .read_counter           = hisi_ddrc_pmu_v1_read_counter,
+       .get_int_status         = hisi_ddrc_pmu_v1_get_int_status,
+       .clear_int_status       = hisi_ddrc_pmu_v1_clear_int_status,
+};
+
+static const struct hisi_uncore_ops hisi_uncore_ddrc_v2_ops = {
        .write_evtype           = hisi_ddrc_pmu_write_evtype,
-       .get_event_idx          = hisi_ddrc_pmu_get_event_idx,
-       .start_counters         = hisi_ddrc_pmu_start_counters,
-       .stop_counters          = hisi_ddrc_pmu_stop_counters,
-       .enable_counter         = hisi_ddrc_pmu_enable_counter,
-       .disable_counter        = hisi_ddrc_pmu_disable_counter,
-       .enable_counter_int     = hisi_ddrc_pmu_enable_counter_int,
-       .disable_counter_int    = hisi_ddrc_pmu_disable_counter_int,
-       .write_counter          = hisi_ddrc_pmu_write_counter,
-       .read_counter           = hisi_ddrc_pmu_read_counter,
+       .get_event_idx          = hisi_ddrc_pmu_v2_get_event_idx,
+       .start_counters         = hisi_ddrc_pmu_v2_start_counters,
+       .stop_counters          = hisi_ddrc_pmu_v2_stop_counters,
+       .enable_counter         = hisi_ddrc_pmu_v2_enable_counter,
+       .disable_counter        = hisi_ddrc_pmu_v2_disable_counter,
+       .enable_counter_int     = hisi_ddrc_pmu_v2_enable_counter_int,
+       .disable_counter_int    = hisi_ddrc_pmu_v2_disable_counter_int,
+       .write_counter          = hisi_ddrc_pmu_v2_write_counter,
+       .read_counter           = hisi_ddrc_pmu_v2_read_counter,
+       .get_int_status         = hisi_ddrc_pmu_v2_get_int_status,
+       .clear_int_status       = hisi_ddrc_pmu_v2_clear_int_status,
 };
 
 static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
@@ -353,16 +460,25 @@ static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
        if (ret)
                return ret;
 
-       ret = hisi_ddrc_pmu_init_irq(ddrc_pmu, pdev);
+       ret = hisi_uncore_pmu_init_irq(ddrc_pmu, pdev);
        if (ret)
                return ret;
 
+       if (ddrc_pmu->identifier >= HISI_PMU_V2) {
+               ddrc_pmu->counter_bits = 48;
+               ddrc_pmu->check_event = DDRC_V2_NR_EVENTS;
+               ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v2_attr_groups;
+               ddrc_pmu->ops = &hisi_uncore_ddrc_v2_ops;
+       } else {
+               ddrc_pmu->counter_bits = 32;
+               ddrc_pmu->check_event = DDRC_V1_NR_EVENTS;
+               ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v1_attr_groups;
+               ddrc_pmu->ops = &hisi_uncore_ddrc_v1_ops;
+       }
+
        ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
-       ddrc_pmu->counter_bits = 32;
-       ddrc_pmu->ops = &hisi_uncore_ddrc_ops;
        ddrc_pmu->dev = &pdev->dev;
        ddrc_pmu->on_cpu = -1;
-       ddrc_pmu->check_event = 7;
 
        return 0;
 }
@@ -390,8 +506,16 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
                return ret;
        }
 
-       name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u",
-                             ddrc_pmu->sccl_id, ddrc_pmu->index_id);
+       if (ddrc_pmu->identifier >= HISI_PMU_V2)
+               name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+                                     "hisi_sccl%u_ddrc%u_%u",
+                                     ddrc_pmu->sccl_id, ddrc_pmu->index_id,
+                                     ddrc_pmu->sub_id);
+       else
+               name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+                                     "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
+                                     ddrc_pmu->index_id);
+
        ddrc_pmu->pmu = (struct pmu) {
                .name           = name,
                .module         = THIS_MODULE,
@@ -404,7 +528,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
                .start          = hisi_uncore_pmu_start,
                .stop           = hisi_uncore_pmu_stop,
                .read           = hisi_uncore_pmu_read,
-               .attr_groups    = hisi_ddrc_pmu_attr_groups,
+               .attr_groups    = ddrc_pmu->pmu_events.attr_groups,
                .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
        };
 
index 3402f1a395a89e3196a6fecc664d41e2887dc30c..0316fabe32f1a1fc79928e9bd92a19af8e4dd35b 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/list.h>
-#include <linux/platform_device.h>
 #include <linux/smp.h>
 
 #include "hisi_uncore_pmu.h"
 #define HHA_VERSION            0x1cf0
 #define HHA_PERF_CTRL          0x1E00
 #define HHA_EVENT_CTRL         0x1E04
+#define HHA_SRCID_CTRL         0x1E08
+#define HHA_DATSRC_CTRL                0x1BF0
 #define HHA_EVENT_TYPE0                0x1E80
 /*
- * Each counter is 48-bits and [48:63] are reserved
- * which are Read-As-Zero and Writes-Ignored.
+ * If the HW version only supports a 48-bit counter, then
+ * bits [63:48] are reserved, which are Read-As-Zero and
+ * Writes-Ignored.
  */
 #define HHA_CNT0_LOWER         0x1F00
 
-/* HHA has 16-counters */
-#define HHA_NR_COUNTERS                0x10
+/* HHA PMU v1 has 16 counters and v2 only has 8 counters */
+#define HHA_V1_NR_COUNTERS     0x10
+#define HHA_V2_NR_COUNTERS     0x8
 
 #define HHA_PERF_CTRL_EN       0x1
+#define HHA_TRACETAG_EN                BIT(31)
+#define HHA_SRCID_EN           BIT(2)
+#define HHA_SRCID_CMD_SHIFT    6
+#define HHA_SRCID_MSK_SHIFT    20
+#define HHA_SRCID_CMD          GENMASK(16, 6)
+#define HHA_SRCID_MSK          GENMASK(30, 20)
+#define HHA_DATSRC_SKT_EN      BIT(23)
 #define HHA_EVTYPE_NONE                0xff
+#define HHA_V1_NR_EVENT                0x65
+#define HHA_V2_NR_EVENT                0xCE
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 10, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 21, 11);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 22, 22);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 23, 23);
+
+static void hisi_hha_pmu_enable_tracetag(struct perf_event *event)
+{
+       struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+       u32 tt_en = hisi_get_tracetag_en(event);
+
+       if (tt_en) {
+               u32 val;
+
+               val = readl(hha_pmu->base + HHA_SRCID_CTRL);
+               val |= HHA_TRACETAG_EN;
+               writel(val, hha_pmu->base + HHA_SRCID_CTRL);
+       }
+}
+
+static void hisi_hha_pmu_clear_tracetag(struct perf_event *event)
+{
+       struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+       u32 val;
+
+       val = readl(hha_pmu->base + HHA_SRCID_CTRL);
+       val &= ~HHA_TRACETAG_EN;
+       writel(val, hha_pmu->base + HHA_SRCID_CTRL);
+}
+
+static void hisi_hha_pmu_config_ds(struct perf_event *event)
+{
+       struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+       u32 ds_skt = hisi_get_datasrc_skt(event);
+
+       if (ds_skt) {
+               u32 val;
+
+               val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
+               val |= HHA_DATSRC_SKT_EN;
+               writel(ds_skt, hha_pmu->base + HHA_DATSRC_CTRL);
+       }
+}
+
+static void hisi_hha_pmu_clear_ds(struct perf_event *event)
+{
+       struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+       u32 ds_skt = hisi_get_datasrc_skt(event);
+
+       if (ds_skt) {
+               u32 val;
+
+               val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
+               val &= ~HHA_DATSRC_SKT_EN;
+               writel(ds_skt, hha_pmu->base + HHA_DATSRC_CTRL);
+       }
+}
+
+static void hisi_hha_pmu_config_srcid(struct perf_event *event)
+{
+       struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+       u32 cmd = hisi_get_srcid_cmd(event);
+
+       if (cmd) {
+               u32 val, msk;
+
+               msk = hisi_get_srcid_msk(event);
+               val = readl(hha_pmu->base + HHA_SRCID_CTRL);
+               val |= HHA_SRCID_EN | (cmd << HHA_SRCID_CMD_SHIFT) |
+                       (msk << HHA_SRCID_MSK_SHIFT);
+               writel(val, hha_pmu->base + HHA_SRCID_CTRL);
+       }
+}
+
+static void hisi_hha_pmu_disable_srcid(struct perf_event *event)
+{
+       struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
+       u32 cmd = hisi_get_srcid_cmd(event);
+
+       if (cmd) {
+               u32 val;
+
+               val = readl(hha_pmu->base + HHA_SRCID_CTRL);
+               val &= ~(HHA_SRCID_EN | HHA_SRCID_MSK | HHA_SRCID_CMD);
+               writel(val, hha_pmu->base + HHA_SRCID_CTRL);
+       }
+}
+
+static void hisi_hha_pmu_enable_filter(struct perf_event *event)
+{
+       if (event->attr.config1 != 0x0) {
+               hisi_hha_pmu_enable_tracetag(event);
+               hisi_hha_pmu_config_ds(event);
+               hisi_hha_pmu_config_srcid(event);
+       }
+}
+
+static void hisi_hha_pmu_disable_filter(struct perf_event *event)
+{
+       if (event->attr.config1 != 0x0) {
+               hisi_hha_pmu_disable_srcid(event);
+               hisi_hha_pmu_clear_ds(event);
+               hisi_hha_pmu_clear_tracetag(event);
+       }
+}
 
 /*
  * Select the counter register offset using the counter index
@@ -51,29 +168,15 @@ static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx)
 static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu,
                                     struct hw_perf_event *hwc)
 {
-       u32 idx = hwc->idx;
-
-       if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
-               dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
-               return 0;
-       }
-
        /* Read 64 bits and like L3C, top 16 bits are RAZ */
-       return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
+       return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(hwc->idx));
 }
 
 static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu,
                                       struct hw_perf_event *hwc, u64 val)
 {
-       u32 idx = hwc->idx;
-
-       if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
-               dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
-               return;
-       }
-
        /* Write 64 bits and like L3C, top 16 bits are WI */
-       writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
+       writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(hwc->idx));
 }
 
 static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
@@ -169,65 +272,20 @@ static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu,
        writel(val, hha_pmu->base + HHA_INT_MASK);
 }
 
-static irqreturn_t hisi_hha_pmu_isr(int irq, void *dev_id)
+static u32 hisi_hha_pmu_get_int_status(struct hisi_pmu *hha_pmu)
 {
-       struct hisi_pmu *hha_pmu = dev_id;
-       struct perf_event *event;
-       unsigned long overflown;
-       int idx;
-
-       /* Read HHA_INT_STATUS register */
-       overflown = readl(hha_pmu->base + HHA_INT_STATUS);
-       if (!overflown)
-               return IRQ_NONE;
-
-       /*
-        * Find the counter index which overflowed if the bit was set
-        * and handle it
-        */
-       for_each_set_bit(idx, &overflown, HHA_NR_COUNTERS) {
-               /* Write 1 to clear the IRQ status flag */
-               writel((1 << idx), hha_pmu->base + HHA_INT_CLEAR);
-
-               /* Get the corresponding event struct */
-               event = hha_pmu->pmu_events.hw_events[idx];
-               if (!event)
-                       continue;
-
-               hisi_uncore_pmu_event_update(event);
-               hisi_uncore_pmu_set_event_period(event);
-       }
-
-       return IRQ_HANDLED;
+       return readl(hha_pmu->base + HHA_INT_STATUS);
 }
 
-static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
-                                struct platform_device *pdev)
+static void hisi_hha_pmu_clear_int_status(struct hisi_pmu *hha_pmu, int idx)
 {
-       int irq, ret;
-
-       /* Read and init IRQ */
-       irq = platform_get_irq(pdev, 0);
-       if (irq < 0)
-               return irq;
-
-       ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
-                             IRQF_NOBALANCING | IRQF_NO_THREAD,
-                             dev_name(&pdev->dev), hha_pmu);
-       if (ret < 0) {
-               dev_err(&pdev->dev,
-                       "Fail to request IRQ:%d ret:%d\n", irq, ret);
-               return ret;
-       }
-
-       hha_pmu->irq = irq;
-
-       return 0;
+       writel(1 << idx, hha_pmu->base + HHA_INT_CLEAR);
 }
 
 static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = {
        { "HISI0243", },
-       {},
+       { "HISI0244", },
+       {}
 };
 MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match);
 
@@ -237,13 +295,6 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
        unsigned long long id;
        acpi_status status;
 
-       status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
-                                      "_UID", NULL, &id);
-       if (ACPI_FAILURE(status))
-               return -EINVAL;
-
-       hha_pmu->index_id = id;
-
        /*
         * Use SCCL_ID and UID to identify the HHA PMU, while
         * SCCL_ID is in MPIDR[aff2].
@@ -253,6 +304,22 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
                dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
                return -EINVAL;
        }
+
+       /*
+        * Early BIOS versions exposed _UID by mistake, so prefer the
+        * "hisilicon,idx-id" property when it is available and fall
+        * back to _UID otherwise.
+        */
+       if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
+                                    &hha_pmu->index_id)) {
+               status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+                                              "_UID", NULL, &id);
+               if (ACPI_FAILURE(status)) {
+                       dev_err(&pdev->dev, "Cannot read idx-id!\n");
+                       return -EINVAL;
+               }
+
+               hha_pmu->index_id = id;
+       }
        /* HHA PMUs only share the same SCCL */
        hha_pmu->ccl_id = -1;
 
@@ -267,17 +334,31 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
        return 0;
 }
 
-static struct attribute *hisi_hha_pmu_format_attr[] = {
+static struct attribute *hisi_hha_pmu_v1_format_attr[] = {
        HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
        NULL,
 };
 
-static const struct attribute_group hisi_hha_pmu_format_group = {
+static const struct attribute_group hisi_hha_pmu_v1_format_group = {
+       .name = "format",
+       .attrs = hisi_hha_pmu_v1_format_attr,
+};
+
+static struct attribute *hisi_hha_pmu_v2_format_attr[] = {
+       HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+       HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:0-10"),
+       HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:11-21"),
+       HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:22"),
+       HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:23"),
+       NULL
+};
+
+static const struct attribute_group hisi_hha_pmu_v2_format_group = {
        .name = "format",
-       .attrs = hisi_hha_pmu_format_attr,
+       .attrs = hisi_hha_pmu_v2_format_attr,
 };
 
-static struct attribute *hisi_hha_pmu_events_attr[] = {
+static struct attribute *hisi_hha_pmu_v1_events_attr[] = {
        HISI_PMU_EVENT_ATTR(rx_ops_num,         0x00),
        HISI_PMU_EVENT_ATTR(rx_outer,           0x01),
        HISI_PMU_EVENT_ATTR(rx_sccl,            0x02),
@@ -307,9 +388,23 @@ static struct attribute *hisi_hha_pmu_events_attr[] = {
        NULL,
 };
 
-static const struct attribute_group hisi_hha_pmu_events_group = {
+static const struct attribute_group hisi_hha_pmu_v1_events_group = {
        .name = "events",
-       .attrs = hisi_hha_pmu_events_attr,
+       .attrs = hisi_hha_pmu_v1_events_attr,
+};
+
+static struct attribute *hisi_hha_pmu_v2_events_attr[] = {
+       HISI_PMU_EVENT_ATTR(rx_ops_num,         0x00),
+       HISI_PMU_EVENT_ATTR(rx_outer,           0x01),
+       HISI_PMU_EVENT_ATTR(rx_sccl,            0x02),
+       HISI_PMU_EVENT_ATTR(hha_retry,          0x2e),
+       HISI_PMU_EVENT_ATTR(cycles,             0x55),
+       NULL
+};
+
+static const struct attribute_group hisi_hha_pmu_v2_events_group = {
+       .name = "events",
+       .attrs = hisi_hha_pmu_v2_events_attr,
 };
 
 static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
@@ -335,14 +430,22 @@ static const struct attribute_group hisi_hha_pmu_identifier_group = {
        .attrs = hisi_hha_pmu_identifier_attrs,
 };
 
-static const struct attribute_group *hisi_hha_pmu_attr_groups[] = {
-       &hisi_hha_pmu_format_group,
-       &hisi_hha_pmu_events_group,
+static const struct attribute_group *hisi_hha_pmu_v1_attr_groups[] = {
+       &hisi_hha_pmu_v1_format_group,
+       &hisi_hha_pmu_v1_events_group,
        &hisi_hha_pmu_cpumask_attr_group,
        &hisi_hha_pmu_identifier_group,
        NULL,
 };
 
+static const struct attribute_group *hisi_hha_pmu_v2_attr_groups[] = {
+       &hisi_hha_pmu_v2_format_group,
+       &hisi_hha_pmu_v2_events_group,
+       &hisi_hha_pmu_cpumask_attr_group,
+       &hisi_hha_pmu_identifier_group,
+       NULL
+};
+
 static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
        .write_evtype           = hisi_hha_pmu_write_evtype,
        .get_event_idx          = hisi_uncore_pmu_get_event_idx,
@@ -354,6 +457,10 @@ static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
        .disable_counter_int    = hisi_hha_pmu_disable_counter_int,
        .write_counter          = hisi_hha_pmu_write_counter,
        .read_counter           = hisi_hha_pmu_read_counter,
+       .get_int_status         = hisi_hha_pmu_get_int_status,
+       .clear_int_status       = hisi_hha_pmu_clear_int_status,
+       .enable_filter          = hisi_hha_pmu_enable_filter,
+       .disable_filter         = hisi_hha_pmu_disable_filter,
 };
 
 static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
@@ -365,16 +472,24 @@ static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
        if (ret)
                return ret;
 
-       ret = hisi_hha_pmu_init_irq(hha_pmu, pdev);
+       ret = hisi_uncore_pmu_init_irq(hha_pmu, pdev);
        if (ret)
                return ret;
 
-       hha_pmu->num_counters = HHA_NR_COUNTERS;
-       hha_pmu->counter_bits = 48;
+       if (hha_pmu->identifier >= HISI_PMU_V2) {
+               hha_pmu->counter_bits = 64;
+               hha_pmu->check_event = HHA_V2_NR_EVENT;
+               hha_pmu->pmu_events.attr_groups = hisi_hha_pmu_v2_attr_groups;
+               hha_pmu->num_counters = HHA_V2_NR_COUNTERS;
+       } else {
+               hha_pmu->counter_bits = 48;
+               hha_pmu->check_event = HHA_V1_NR_EVENT;
+               hha_pmu->pmu_events.attr_groups = hisi_hha_pmu_v1_attr_groups;
+               hha_pmu->num_counters = HHA_V1_NR_COUNTERS;
+       }
        hha_pmu->ops = &hisi_uncore_hha_ops;
        hha_pmu->dev = &pdev->dev;
        hha_pmu->on_cpu = -1;
-       hha_pmu->check_event = 0x65;
 
        return 0;
 }
@@ -416,7 +531,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
                .start          = hisi_uncore_pmu_start,
                .stop           = hisi_uncore_pmu_stop,
                .read           = hisi_uncore_pmu_read,
-               .attr_groups    = hisi_hha_pmu_attr_groups,
+               .attr_groups    = hha_pmu->pmu_events.attr_groups,
                .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
        };
 
index 7d792435c2aa4e2f7889a8a022777ae90f3461e3..bf9f7772cac96e30e73e92dddb44f9daedbf350e 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/list.h>
-#include <linux/platform_device.h>
 #include <linux/smp.h>
 
 #include "hisi_uncore_pmu.h"
 #define L3C_INT_MASK           0x0800
 #define L3C_INT_STATUS         0x0808
 #define L3C_INT_CLEAR          0x080c
+#define L3C_CORE_CTRL           0x1b04
+#define L3C_TRACETAG_CTRL       0x1b20
+#define L3C_DATSRC_TYPE         0x1b48
+#define L3C_DATSRC_CTRL         0x1bf0
 #define L3C_EVENT_CTRL         0x1c00
 #define L3C_VERSION            0x1cf0
 #define L3C_EVENT_TYPE0                0x1d00
 /*
- * Each counter is 48-bits and [48:63] are reserved
- * which are Read-As-Zero and Writes-Ignored.
+ * If the HW version only supports a 48-bit counter, then
+ * bits [63:48] are reserved, which are Read-As-Zero and
+ * Writes-Ignored.
  */
 #define L3C_CNTR0_LOWER                0x1e00
 
 #define L3C_NR_COUNTERS                0x8
 
 #define L3C_PERF_CTRL_EN       0x10000
+#define L3C_TRACETAG_EN                BIT(31)
+#define L3C_TRACETAG_REQ_SHIFT 7
+#define L3C_TRACETAG_MARK_EN   BIT(0)
+#define L3C_TRACETAG_REQ_EN    (L3C_TRACETAG_MARK_EN | BIT(2))
+#define L3C_TRACETAG_CORE_EN   (L3C_TRACETAG_MARK_EN | BIT(3))
+#define L3C_CORE_EN            BIT(20)
+#define L3C_COER_NONE          0x0
+#define L3C_DATSRC_MASK                0xFF
+#define L3C_DATSRC_SKT_EN      BIT(23)
+#define L3C_DATSRC_NONE                0x0
 #define L3C_EVTYPE_NONE                0xff
+#define L3C_V1_NR_EVENTS       0x59
+#define L3C_V2_NR_EVENTS       0xFF
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core, config1, 7, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_req, config1, 10, 8);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_cfg, config1, 15, 11);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 16, 16);
+
+/*
+ * Enable request-type trace tagging: program the requested type into
+ * L3C_TRACETAG_CTRL and turn on tracetag statistics in L3C_PERF_CTRL.
+ * No-op when the event's tt_req config1 field is zero.
+ */
+static void hisi_l3c_pmu_config_req_tracetag(struct perf_event *event)
+{
+       struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+       u32 tt_req = hisi_get_tt_req(event);
+
+       if (tt_req) {
+               u32 val;
+
+               /* Set request-type for tracetag */
+               val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+               val |= tt_req << L3C_TRACETAG_REQ_SHIFT;
+               val |= L3C_TRACETAG_REQ_EN;
+               writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+
+               /* Enable request-tracetag statistics */
+               val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+               val |= L3C_TRACETAG_EN;
+               writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+       }
+}
+
+/* Undo hisi_l3c_pmu_config_req_tracetag() for this event */
+static void hisi_l3c_pmu_clear_req_tracetag(struct perf_event *event)
+{
+       struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+       u32 tt_req = hisi_get_tt_req(event);
+       u32 ctrl;
+
+       if (!tt_req)
+               return;
+
+       /* Drop the request-type bits and the request-tracetag enable */
+       ctrl = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+       ctrl &= ~((tt_req << L3C_TRACETAG_REQ_SHIFT) | L3C_TRACETAG_REQ_EN);
+       writel(ctrl, l3c_pmu->base + L3C_TRACETAG_CTRL);
+
+       /* Stop request-tracetag statistics */
+       ctrl = readl(l3c_pmu->base + L3C_PERF_CTRL);
+       ctrl &= ~L3C_TRACETAG_EN;
+       writel(ctrl, l3c_pmu->base + L3C_PERF_CTRL);
+}
+
+/*
+ * Program the 8-bit datasource field for the event's hardware counter.
+ *
+ * @event:  perf event whose counter index (hwc->idx) selects the field
+ * @ds_cfg: datasource value to program (L3C_DATSRC_NONE to clear)
+ */
+static void hisi_l3c_pmu_write_ds(struct perf_event *event, u32 ds_cfg)
+{
+       struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+       u32 reg, reg_idx, shift, val;
+       int idx = hwc->idx;
+
+       /*
+        * Select the appropriate datasource register (L3C_DATSRC_TYPE0/1).
+        * There are 2 datasource ctrl registers for the 8 hardware counters.
+        * Each datasrc field is 8 bits wide; the first 4 hardware counters
+        * use L3C_DATSRC_TYPE0 and the last 4 use L3C_DATSRC_TYPE1.
+        */
+       reg = L3C_DATSRC_TYPE + (idx / 4) * 4;
+       reg_idx = idx % 4;
+       shift = 8 * reg_idx;
+
+       /* Read-modify-write only this counter's 8-bit lane */
+       val = readl(l3c_pmu->base + reg);
+       val &= ~(L3C_DATSRC_MASK << shift);
+       val |= ds_cfg << shift;
+       writel(val, l3c_pmu->base + reg);
+}
+
+/* Apply the event's datasource configuration (type field and/or socket bit) */
+static void hisi_l3c_pmu_config_ds(struct perf_event *event)
+{
+       struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+       u32 ds_cfg = hisi_get_datasrc_cfg(event);
+
+       if (ds_cfg)
+               hisi_l3c_pmu_write_ds(event, ds_cfg);
+
+       if (hisi_get_datasrc_skt(event)) {
+               u32 ctrl;
+
+               /* Turn on the cross-socket datasource enable bit */
+               ctrl = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
+               ctrl |= L3C_DATSRC_SKT_EN;
+               writel(ctrl, l3c_pmu->base + L3C_DATSRC_CTRL);
+       }
+}
+
+/*
+ * Undo hisi_l3c_pmu_config_ds(): reset this counter's datasource type
+ * field and clear the cross-socket enable, but only for the pieces the
+ * event actually configured.
+ */
+static void hisi_l3c_pmu_clear_ds(struct perf_event *event)
+{
+       struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+       u32 ds_cfg = hisi_get_datasrc_cfg(event);
+       u32 ds_skt = hisi_get_datasrc_skt(event);
+
+       /* Reset the counter's datasource field to "none" */
+       if (ds_cfg)
+               hisi_l3c_pmu_write_ds(event, L3C_DATSRC_NONE);
+
+       if (ds_skt) {
+               u32 val;
+
+               /* Clear the cross-socket datasource enable bit */
+               val = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
+               val &= ~L3C_DATSRC_SKT_EN;
+               writel(val, l3c_pmu->base + L3C_DATSRC_CTRL);
+       }
+}
+
+/*
+ * Enable per-core trace tagging: write the core mask to L3C_CORE_CTRL,
+ * set the core-filter enable in L3C_PERF_CTRL and turn on core-tracetag
+ * statistics.  No-op when the event's tt_core config1 field is zero.
+ */
+static void hisi_l3c_pmu_config_core_tracetag(struct perf_event *event)
+{
+       struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+       u32 core = hisi_get_tt_core(event);
+
+       if (core) {
+               u32 val;
+
+               /* Config and enable core information */
+               writel(core, l3c_pmu->base + L3C_CORE_CTRL);
+               val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+               val |= L3C_CORE_EN;
+               writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+
+               /* Enable core-tracetag statistics */
+               val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+               val |= L3C_TRACETAG_CORE_EN;
+               writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+       }
+}
+
+/* Undo hisi_l3c_pmu_config_core_tracetag() for this event */
+static void hisi_l3c_pmu_clear_core_tracetag(struct perf_event *event)
+{
+       struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+       u32 core = hisi_get_tt_core(event);
+
+       if (core) {
+               u32 val;
+
+               /*
+                * Clear core information.  NOTE(review): L3C_COER_NONE
+                * (0x0) looks like a typo for "CORE" in the macro name.
+                */
+               writel(L3C_COER_NONE, l3c_pmu->base + L3C_CORE_CTRL);
+               val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+               val &= ~L3C_CORE_EN;
+               writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+
+               /* Disable core-tracetag statistics */
+               val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+               val &= ~L3C_TRACETAG_CORE_EN;
+               writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+       }
+}
+
+/* Apply every config1-driven filter the event requested */
+static void hisi_l3c_pmu_enable_filter(struct perf_event *event)
+{
+       if (event->attr.config1 == 0x0)
+               return;
+
+       hisi_l3c_pmu_config_req_tracetag(event);
+       hisi_l3c_pmu_config_core_tracetag(event);
+       hisi_l3c_pmu_config_ds(event);
+}
+
+/*
+ * Tear down the config1-driven filters, in reverse order of
+ * hisi_l3c_pmu_enable_filter().
+ */
+static void hisi_l3c_pmu_disable_filter(struct perf_event *event)
+{
+       if (event->attr.config1 != 0x0) {
+               hisi_l3c_pmu_clear_ds(event);
+               hisi_l3c_pmu_clear_core_tracetag(event);
+               hisi_l3c_pmu_clear_req_tracetag(event);
+       }
+}
 
 /*
  * Select the counter register offset using the counter index
@@ -50,29 +233,13 @@ static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx)
 static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu,
                                     struct hw_perf_event *hwc)
 {
-       u32 idx = hwc->idx;
-
-       if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
-               dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
-               return 0;
-       }
-
-       /* Read 64-bits and the upper 16 bits are RAZ */
-       return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx));
+       return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
 }
 
 static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu,
                                       struct hw_perf_event *hwc, u64 val)
 {
-       u32 idx = hwc->idx;
-
-       if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
-               dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
-               return;
-       }
-
-       /* Write 64-bits and the upper 16 bits are WI */
-       writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx));
+       writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
 }
 
 static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
@@ -168,81 +335,26 @@ static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
        writel(val, l3c_pmu->base + L3C_INT_MASK);
 }
 
-static irqreturn_t hisi_l3c_pmu_isr(int irq, void *dev_id)
+static u32 hisi_l3c_pmu_get_int_status(struct hisi_pmu *l3c_pmu)
 {
-       struct hisi_pmu *l3c_pmu = dev_id;
-       struct perf_event *event;
-       unsigned long overflown;
-       int idx;
-
-       /* Read L3C_INT_STATUS register */
-       overflown = readl(l3c_pmu->base + L3C_INT_STATUS);
-       if (!overflown)
-               return IRQ_NONE;
-
-       /*
-        * Find the counter index which overflowed if the bit was set
-        * and handle it.
-        */
-       for_each_set_bit(idx, &overflown, L3C_NR_COUNTERS) {
-               /* Write 1 to clear the IRQ status flag */
-               writel((1 << idx), l3c_pmu->base + L3C_INT_CLEAR);
-
-               /* Get the corresponding event struct */
-               event = l3c_pmu->pmu_events.hw_events[idx];
-               if (!event)
-                       continue;
-
-               hisi_uncore_pmu_event_update(event);
-               hisi_uncore_pmu_set_event_period(event);
-       }
-
-       return IRQ_HANDLED;
+       return readl(l3c_pmu->base + L3C_INT_STATUS);
 }
 
-static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu,
-                                struct platform_device *pdev)
+static void hisi_l3c_pmu_clear_int_status(struct hisi_pmu *l3c_pmu, int idx)
 {
-       int irq, ret;
-
-       /* Read and init IRQ */
-       irq = platform_get_irq(pdev, 0);
-       if (irq < 0)
-               return irq;
-
-       ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr,
-                              IRQF_NOBALANCING | IRQF_NO_THREAD,
-                              dev_name(&pdev->dev), l3c_pmu);
-       if (ret < 0) {
-               dev_err(&pdev->dev,
-                       "Fail to request IRQ:%d ret:%d\n", irq, ret);
-               return ret;
-       }
-
-       l3c_pmu->irq = irq;
-
-       return 0;
+       writel(1 << idx, l3c_pmu->base + L3C_INT_CLEAR);
 }
 
 static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
        { "HISI0213", },
-       {},
+       { "HISI0214", },
+       {}
 };
 MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
 
 static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
                                  struct hisi_pmu *l3c_pmu)
 {
-       unsigned long long id;
-       acpi_status status;
-
-       status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
-                                      "_UID", NULL, &id);
-       if (ACPI_FAILURE(status))
-               return -EINVAL;
-
-       l3c_pmu->index_id = id;
-
        /*
         * Use the SCCL_ID and CCL_ID to identify the L3C PMU, while
         * SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1].
@@ -270,17 +382,31 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
        return 0;
 }
 
-static struct attribute *hisi_l3c_pmu_format_attr[] = {
+static struct attribute *hisi_l3c_pmu_v1_format_attr[] = {
        HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
        NULL,
 };
 
-static const struct attribute_group hisi_l3c_pmu_format_group = {
+static const struct attribute_group hisi_l3c_pmu_v1_format_group = {
+       .name = "format",
+       .attrs = hisi_l3c_pmu_v1_format_attr,
+};
+
+static struct attribute *hisi_l3c_pmu_v2_format_attr[] = {
+       HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+       HISI_PMU_FORMAT_ATTR(tt_core, "config1:0-7"),
+       HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"),
+       HISI_PMU_FORMAT_ATTR(datasrc_cfg, "config1:11-15"),
+       HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:16"),
+       NULL
+};
+
+static const struct attribute_group hisi_l3c_pmu_v2_format_group = {
        .name = "format",
-       .attrs = hisi_l3c_pmu_format_attr,
+       .attrs = hisi_l3c_pmu_v2_format_attr,
 };
 
-static struct attribute *hisi_l3c_pmu_events_attr[] = {
+static struct attribute *hisi_l3c_pmu_v1_events_attr[] = {
        HISI_PMU_EVENT_ATTR(rd_cpipe,           0x00),
        HISI_PMU_EVENT_ATTR(wr_cpipe,           0x01),
        HISI_PMU_EVENT_ATTR(rd_hit_cpipe,       0x02),
@@ -297,9 +423,22 @@ static struct attribute *hisi_l3c_pmu_events_attr[] = {
        NULL,
 };
 
-static const struct attribute_group hisi_l3c_pmu_events_group = {
+static const struct attribute_group hisi_l3c_pmu_v1_events_group = {
+       .name = "events",
+       .attrs = hisi_l3c_pmu_v1_events_attr,
+};
+
+static struct attribute *hisi_l3c_pmu_v2_events_attr[] = {
+       HISI_PMU_EVENT_ATTR(l3c_hit,            0x48),
+       HISI_PMU_EVENT_ATTR(cycles,             0x7f),
+       HISI_PMU_EVENT_ATTR(l3c_ref,            0xb8),
+       HISI_PMU_EVENT_ATTR(dat_access,         0xb9),
+       NULL
+};
+
+static const struct attribute_group hisi_l3c_pmu_v2_events_group = {
        .name = "events",
-       .attrs = hisi_l3c_pmu_events_attr,
+       .attrs = hisi_l3c_pmu_v2_events_attr,
 };
 
 static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
@@ -325,14 +464,22 @@ static const struct attribute_group hisi_l3c_pmu_identifier_group = {
        .attrs = hisi_l3c_pmu_identifier_attrs,
 };
 
-static const struct attribute_group *hisi_l3c_pmu_attr_groups[] = {
-       &hisi_l3c_pmu_format_group,
-       &hisi_l3c_pmu_events_group,
+static const struct attribute_group *hisi_l3c_pmu_v1_attr_groups[] = {
+       &hisi_l3c_pmu_v1_format_group,
+       &hisi_l3c_pmu_v1_events_group,
        &hisi_l3c_pmu_cpumask_attr_group,
        &hisi_l3c_pmu_identifier_group,
        NULL,
 };
 
+static const struct attribute_group *hisi_l3c_pmu_v2_attr_groups[] = {
+       &hisi_l3c_pmu_v2_format_group,
+       &hisi_l3c_pmu_v2_events_group,
+       &hisi_l3c_pmu_cpumask_attr_group,
+       &hisi_l3c_pmu_identifier_group,
+       NULL
+};
+
 static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
        .write_evtype           = hisi_l3c_pmu_write_evtype,
        .get_event_idx          = hisi_uncore_pmu_get_event_idx,
@@ -344,6 +491,10 @@ static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
        .disable_counter_int    = hisi_l3c_pmu_disable_counter_int,
        .write_counter          = hisi_l3c_pmu_write_counter,
        .read_counter           = hisi_l3c_pmu_read_counter,
+       .get_int_status         = hisi_l3c_pmu_get_int_status,
+       .clear_int_status       = hisi_l3c_pmu_clear_int_status,
+       .enable_filter          = hisi_l3c_pmu_enable_filter,
+       .disable_filter         = hisi_l3c_pmu_disable_filter,
 };
 
 static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
@@ -355,16 +506,24 @@ static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
        if (ret)
                return ret;
 
-       ret = hisi_l3c_pmu_init_irq(l3c_pmu, pdev);
+       ret = hisi_uncore_pmu_init_irq(l3c_pmu, pdev);
        if (ret)
                return ret;
 
+       if (l3c_pmu->identifier >= HISI_PMU_V2) {
+               l3c_pmu->counter_bits = 64;
+               l3c_pmu->check_event = L3C_V2_NR_EVENTS;
+               l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v2_attr_groups;
+       } else {
+               l3c_pmu->counter_bits = 48;
+               l3c_pmu->check_event = L3C_V1_NR_EVENTS;
+               l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v1_attr_groups;
+       }
+
        l3c_pmu->num_counters = L3C_NR_COUNTERS;
-       l3c_pmu->counter_bits = 48;
        l3c_pmu->ops = &hisi_uncore_l3c_ops;
        l3c_pmu->dev = &pdev->dev;
        l3c_pmu->on_cpu = -1;
-       l3c_pmu->check_event = 0x59;
 
        return 0;
 }
@@ -392,8 +551,12 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
                return ret;
        }
 
+       /*
+        * Use CCL_ID to distinguish L3Cs within the same SCCL; earlier
+        * code mistakenly used _UID for this purpose.
+        */
        name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
-                             l3c_pmu->sccl_id, l3c_pmu->index_id);
+                             l3c_pmu->sccl_id, l3c_pmu->ccl_id);
        l3c_pmu->pmu = (struct pmu) {
                .name           = name,
                .module         = THIS_MODULE,
@@ -406,7 +569,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
                .start          = hisi_uncore_pmu_start,
                .stop           = hisi_uncore_pmu_stop,
                .read           = hisi_uncore_pmu_read,
-               .attr_groups    = hisi_l3c_pmu_attr_groups,
+               .attr_groups    = l3c_pmu->pmu_events.attr_groups,
                .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
        };
 
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
new file mode 100644 (file)
index 0000000..14f23eb
--- /dev/null
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon PA uncore Hardware event counters support
+ *
+ * Copyright (C) 2020 HiSilicon Limited
+ * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ */
+#include <linux/acpi.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* PA register definition */
+#define PA_PERF_CTRL                   0x1c00
+#define PA_EVENT_CTRL                  0x1c04
+#define PA_TT_CTRL                     0x1c08
+#define PA_TGTID_CTRL                  0x1c14
+#define PA_SRCID_CTRL                  0x1c18
+#define PA_INT_MASK                    0x1c70
+#define PA_INT_STATUS                  0x1c78
+#define PA_INT_CLEAR                   0x1c7c
+#define PA_EVENT_TYPE0                 0x1c80
+#define PA_PMU_VERSION                 0x1cf0
+#define PA_EVENT_CNT0_L                        0x1f00
+
+#define PA_EVTYPE_MASK                 0xff
+#define PA_NR_COUNTERS                 0x8
+#define PA_PERF_CTRL_EN                        BIT(0)
+#define PA_TRACETAG_EN                 BIT(4)
+#define PA_TGTID_EN                    BIT(11)
+#define PA_SRCID_EN                    BIT(11)
+#define PA_TGTID_NONE                  0
+#define PA_SRCID_NONE                  0
+#define PA_TGTID_MSK_SHIFT             12
+#define PA_SRCID_MSK_SHIFT             12
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_cmd, config1, 10, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_msk, config1, 21, 11);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44);
+
+/* Turn on the PA trace-tag feature if the event's config1 requested it. */
+static void hisi_pa_pmu_enable_tracetag(struct perf_event *event)
+{
+       struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+       u32 tt_en = hisi_get_tracetag_en(event);
+
+       if (tt_en) {
+               u32 val;
+
+               /* Read-modify-write so other PA_TT_CTRL bits are preserved */
+               val = readl(pa_pmu->base + PA_TT_CTRL);
+               val |= PA_TRACETAG_EN;
+               writel(val, pa_pmu->base + PA_TT_CTRL);
+       }
+}
+
+/* Turn the PA trace-tag feature back off for a tracetag-enabled event. */
+static void hisi_pa_pmu_clear_tracetag(struct perf_event *event)
+{
+       struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+       u32 tt_en = hisi_get_tracetag_en(event);
+
+       if (tt_en) {
+               u32 val;
+
+               /* Clear only the tracetag bit; keep the rest of PA_TT_CTRL */
+               val = readl(pa_pmu->base + PA_TT_CTRL);
+               val &= ~PA_TRACETAG_EN;
+               writel(val, pa_pmu->base + PA_TT_CTRL);
+       }
+}
+
+/*
+ * Program target-ID filtering: the command and mask come from the
+ * event's config1 and are written to PA_TGTID_CTRL with the enable bit.
+ * No-op when no tgtid command was supplied.
+ */
+static void hisi_pa_pmu_config_tgtid(struct perf_event *event)
+{
+       struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+       u32 cmd = hisi_get_tgtid_cmd(event);
+
+       if (cmd) {
+               u32 msk = hisi_get_tgtid_msk(event);
+               u32 val = cmd | PA_TGTID_EN | (msk << PA_TGTID_MSK_SHIFT);
+
+               writel(val, pa_pmu->base + PA_TGTID_CTRL);
+       }
+}
+
+/* Reset PA_TGTID_CTRL if the event had target-ID filtering enabled. */
+static void hisi_pa_pmu_clear_tgtid(struct perf_event *event)
+{
+       struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+       u32 cmd = hisi_get_tgtid_cmd(event);
+
+       if (cmd)
+               writel(PA_TGTID_NONE, pa_pmu->base + PA_TGTID_CTRL);
+}
+
+/*
+ * Program source-ID filtering from the event's config1: command, mask
+ * and the enable bit are written to PA_SRCID_CTRL in one go.
+ */
+static void hisi_pa_pmu_config_srcid(struct perf_event *event)
+{
+       struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+       u32 cmd = hisi_get_srcid_cmd(event);
+
+       if (cmd) {
+               u32 msk = hisi_get_srcid_msk(event);
+               u32 val = cmd | PA_SRCID_EN | (msk << PA_SRCID_MSK_SHIFT);
+
+               writel(val, pa_pmu->base + PA_SRCID_CTRL);
+       }
+}
+
+/* Reset PA_SRCID_CTRL if the event had source-ID filtering enabled. */
+static void hisi_pa_pmu_clear_srcid(struct perf_event *event)
+{
+       struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
+       u32 cmd = hisi_get_srcid_cmd(event);
+
+       if (cmd)
+               writel(PA_SRCID_NONE, pa_pmu->base + PA_SRCID_CTRL);
+}
+
+/* Apply every filter option (tracetag/srcid/tgtid) encoded in config1. */
+static void hisi_pa_pmu_enable_filter(struct perf_event *event)
+{
+       if (event->attr.config1 != 0x0) {
+               hisi_pa_pmu_enable_tracetag(event);
+               hisi_pa_pmu_config_srcid(event);
+               hisi_pa_pmu_config_tgtid(event);
+       }
+}
+
+/* Undo the filter options applied by hisi_pa_pmu_enable_filter(). */
+static void hisi_pa_pmu_disable_filter(struct perf_event *event)
+{
+       if (event->attr.config1 != 0x0) {
+               hisi_pa_pmu_clear_tgtid(event);
+               hisi_pa_pmu_clear_srcid(event);
+               hisi_pa_pmu_clear_tracetag(event);
+       }
+}
+
+/* Register offset of counter @idx; the 64-bit counters are 8 bytes apart. */
+static u32 hisi_pa_pmu_get_counter_offset(int idx)
+{
+       return (PA_EVENT_CNT0_L + idx * 8);
+}
+
+/* Read the 64-bit hardware counter backing this event. */
+static u64 hisi_pa_pmu_read_counter(struct hisi_pmu *pa_pmu,
+                                   struct hw_perf_event *hwc)
+{
+       return readq(pa_pmu->base + hisi_pa_pmu_get_counter_offset(hwc->idx));
+}
+
+/* Write an initial value into the 64-bit hardware counter. */
+static void hisi_pa_pmu_write_counter(struct hisi_pmu *pa_pmu,
+                                     struct hw_perf_event *hwc, u64 val)
+{
+       writeq(val, pa_pmu->base + hisi_pa_pmu_get_counter_offset(hwc->idx));
+}
+
+/* Program the 8-bit event code of counter @idx into its type register. */
+static void hisi_pa_pmu_write_evtype(struct hisi_pmu *pa_pmu, int idx,
+                                    u32 type)
+{
+       u32 reg, reg_idx, shift, val;
+
+       /*
+        * Select the appropriate event select register(PA_EVENT_TYPE0/1).
+        * There are 2 event select registers for the 8 hardware counters.
+        * Event code is 8-bits and for the former 4 hardware counters,
+        * PA_EVENT_TYPE0 is chosen. For the latter 4 hardware counters,
+        * PA_EVENT_TYPE1 is chosen.
+        */
+       reg = PA_EVENT_TYPE0 + (idx / 4) * 4;
+       reg_idx = idx % 4;
+       shift = 8 * reg_idx;
+
+       /* Write event code to pa_EVENT_TYPEx Register */
+       val = readl(pa_pmu->base + reg);
+       val &= ~(PA_EVTYPE_MASK << shift);
+       val |= (type << shift);
+       writel(val, pa_pmu->base + reg);
+}
+
+/* Globally start all PA PMU counters (global enable bit). */
+static void hisi_pa_pmu_start_counters(struct hisi_pmu *pa_pmu)
+{
+       u32 val;
+
+       val = readl(pa_pmu->base + PA_PERF_CTRL);
+       val |= PA_PERF_CTRL_EN;
+       writel(val, pa_pmu->base + PA_PERF_CTRL);
+}
+
+/* Globally stop all PA PMU counters (clear the global enable bit). */
+static void hisi_pa_pmu_stop_counters(struct hisi_pmu *pa_pmu)
+{
+       u32 val;
+
+       val = readl(pa_pmu->base + PA_PERF_CTRL);
+       val &= ~(PA_PERF_CTRL_EN);
+       writel(val, pa_pmu->base + PA_PERF_CTRL);
+}
+
+/* Enable the individual counter selected by hwc->idx. */
+static void hisi_pa_pmu_enable_counter(struct hisi_pmu *pa_pmu,
+                                      struct hw_perf_event *hwc)
+{
+       u32 val;
+
+       /* Enable counter index in PA_EVENT_CTRL register */
+       val = readl(pa_pmu->base + PA_EVENT_CTRL);
+       val |= 1 << hwc->idx;
+       writel(val, pa_pmu->base + PA_EVENT_CTRL);
+}
+
+/* Disable the individual counter selected by hwc->idx. */
+static void hisi_pa_pmu_disable_counter(struct hisi_pmu *pa_pmu,
+                                       struct hw_perf_event *hwc)
+{
+       u32 val;
+
+       /* Clear counter index in PA_EVENT_CTRL register */
+       val = readl(pa_pmu->base + PA_EVENT_CTRL);
+       val &= ~(1 << hwc->idx);
+       writel(val, pa_pmu->base + PA_EVENT_CTRL);
+}
+
+/* Unmask the overflow interrupt of this counter (mask bit 0 = enabled). */
+static void hisi_pa_pmu_enable_counter_int(struct hisi_pmu *pa_pmu,
+                                          struct hw_perf_event *hwc)
+{
+       u32 val;
+
+       /* Write 0 to enable interrupt */
+       val = readl(pa_pmu->base + PA_INT_MASK);
+       val &= ~(1 << hwc->idx);
+       writel(val, pa_pmu->base + PA_INT_MASK);
+}
+
+/* Mask the overflow interrupt of this counter (mask bit 1 = disabled). */
+static void hisi_pa_pmu_disable_counter_int(struct hisi_pmu *pa_pmu,
+                                           struct hw_perf_event *hwc)
+{
+       u32 val;
+
+       /* Write 1 to mask interrupt */
+       val = readl(pa_pmu->base + PA_INT_MASK);
+       val |= 1 << hwc->idx;
+       writel(val, pa_pmu->base + PA_INT_MASK);
+}
+
+/* Return the counter-overflow interrupt status bitmap. */
+static u32 hisi_pa_pmu_get_int_status(struct hisi_pmu *pa_pmu)
+{
+       return readl(pa_pmu->base + PA_INT_STATUS);
+}
+
+/* Acknowledge the overflow of counter @idx (write-1-to-clear register). */
+static void hisi_pa_pmu_clear_int_status(struct hisi_pmu *pa_pmu, int idx)
+{
+       writel(1 << idx, pa_pmu->base + PA_INT_CLEAR);
+}
+
+/* ACPI HID that identifies the PA PMU device in the DSDT. */
+static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = {
+       { "HISI0273", },
+       {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_pa_pmu_acpi_match);
+
+/*
+ * Read the PMU's topology properties and map its registers.
+ * Returns 0 on success or a negative errno.
+ */
+static int hisi_pa_pmu_init_data(struct platform_device *pdev,
+                                  struct hisi_pmu *pa_pmu)
+{
+       /*
+        * Use the SCCL_ID and the index ID to identify the PA PMU,
+        * while SCCL_ID is the nearest SCCL_ID to this SICL and a
+        * CPU core is chosen from that SCCL to manage this PMU.
+        */
+       if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+                                    &pa_pmu->sccl_id)) {
+               dev_err(&pdev->dev, "Cannot read sccl-id!\n");
+               return -EINVAL;
+       }
+
+       if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
+                                    &pa_pmu->index_id)) {
+               dev_err(&pdev->dev, "Cannot read idx-id!\n");
+               return -EINVAL;
+       }
+
+       /* PA is not tied to a CCL; mark ccl_id invalid for CPU matching */
+       pa_pmu->ccl_id = -1;
+
+       pa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(pa_pmu->base)) {
+               dev_err(&pdev->dev, "ioremap failed for pa_pmu resource.\n");
+               return PTR_ERR(pa_pmu->base);
+       }
+
+       /* Cache the HW version register, exposed via the sysfs "identifier" */
+       pa_pmu->identifier = readl(pa_pmu->base + PA_PMU_VERSION);
+
+       return 0;
+}
+
+/* sysfs "format" attributes: how config/config1 bits are laid out */
+static struct attribute *hisi_pa_pmu_v2_format_attr[] = {
+       HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+       HISI_PMU_FORMAT_ATTR(tgtid_cmd, "config1:0-10"),
+       HISI_PMU_FORMAT_ATTR(tgtid_msk, "config1:11-21"),
+       HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:22-32"),
+       HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:33-43"),
+       HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:44"),
+       NULL,
+};
+
+static const struct attribute_group hisi_pa_pmu_v2_format_group = {
+       .name = "format",
+       .attrs = hisi_pa_pmu_v2_format_attr,
+};
+
+/* Named hardware events exported under sysfs "events" */
+static struct attribute *hisi_pa_pmu_v2_events_attr[] = {
+       HISI_PMU_EVENT_ATTR(rx_req,             0x40),
+       HISI_PMU_EVENT_ATTR(tx_req,             0x5c),
+       HISI_PMU_EVENT_ATTR(cycle,              0x78),
+       NULL
+};
+
+static const struct attribute_group hisi_pa_pmu_v2_events_group = {
+       .name = "events",
+       .attrs = hisi_pa_pmu_v2_events_attr,
+};
+
+/* "cpumask" attribute: the CPU currently managing this uncore PMU */
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_pa_pmu_cpumask_attrs[] = {
+       &dev_attr_cpumask.attr,
+       NULL
+};
+
+static const struct attribute_group hisi_pa_pmu_cpumask_attr_group = {
+       .attrs = hisi_pa_pmu_cpumask_attrs,
+};
+
+/* "identifier" attribute: the cached PA_PMU_VERSION value */
+static struct device_attribute hisi_pa_pmu_identifier_attr =
+       __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_pa_pmu_identifier_attrs[] = {
+       &hisi_pa_pmu_identifier_attr.attr,
+       NULL
+};
+
+static struct attribute_group hisi_pa_pmu_identifier_group = {
+       .attrs = hisi_pa_pmu_identifier_attrs,
+};
+
+/* All attribute groups handed to perf_pmu_register() via pmu_events */
+static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = {
+       &hisi_pa_pmu_v2_format_group,
+       &hisi_pa_pmu_v2_events_group,
+       &hisi_pa_pmu_cpumask_attr_group,
+       &hisi_pa_pmu_identifier_group,
+       NULL
+};
+
+/* Hook the generic HiSilicon uncore PMU framework onto the PA registers. */
+static const struct hisi_uncore_ops hisi_uncore_pa_ops = {
+       .write_evtype           = hisi_pa_pmu_write_evtype,
+       .get_event_idx          = hisi_uncore_pmu_get_event_idx,
+       .start_counters         = hisi_pa_pmu_start_counters,
+       .stop_counters          = hisi_pa_pmu_stop_counters,
+       .enable_counter         = hisi_pa_pmu_enable_counter,
+       .disable_counter        = hisi_pa_pmu_disable_counter,
+       .enable_counter_int     = hisi_pa_pmu_enable_counter_int,
+       .disable_counter_int    = hisi_pa_pmu_disable_counter_int,
+       .write_counter          = hisi_pa_pmu_write_counter,
+       .read_counter           = hisi_pa_pmu_read_counter,
+       .get_int_status         = hisi_pa_pmu_get_int_status,
+       .clear_int_status       = hisi_pa_pmu_clear_int_status,
+       .enable_filter          = hisi_pa_pmu_enable_filter,
+       .disable_filter         = hisi_pa_pmu_disable_filter,
+};
+
+/*
+ * Initialize the hisi_pmu descriptor: topology data, IRQ, ops and
+ * the static capabilities of the PA PMU.
+ */
+static int hisi_pa_pmu_dev_probe(struct platform_device *pdev,
+                                struct hisi_pmu *pa_pmu)
+{
+       int ret;
+
+       ret = hisi_pa_pmu_init_data(pdev, pa_pmu);
+       if (ret)
+               return ret;
+
+       ret = hisi_uncore_pmu_init_irq(pa_pmu, pdev);
+       if (ret)
+               return ret;
+
+       pa_pmu->pmu_events.attr_groups = hisi_pa_pmu_v2_attr_groups;
+       pa_pmu->num_counters = PA_NR_COUNTERS;
+       pa_pmu->ops = &hisi_uncore_pa_ops;
+       /* Upper bound for valid event codes -- TODO confirm against HW spec */
+       pa_pmu->check_event = 0xB0;
+       pa_pmu->counter_bits = 64;
+       pa_pmu->dev = &pdev->dev;
+       /* No managing CPU chosen yet; set by the cpuhp online callback */
+       pa_pmu->on_cpu = -1;
+
+       return 0;
+}
+
+/*
+ * Platform probe: set up the hisi_pmu, register the CPU-hotplug
+ * instance and finally register the perf PMU.
+ */
+static int hisi_pa_pmu_probe(struct platform_device *pdev)
+{
+       struct hisi_pmu *pa_pmu;
+       char *name;
+       int ret;
+
+       pa_pmu = devm_kzalloc(&pdev->dev, sizeof(*pa_pmu), GFP_KERNEL);
+       if (!pa_pmu)
+               return -ENOMEM;
+
+       ret = hisi_pa_pmu_dev_probe(pdev, pa_pmu);
+       if (ret)
+               return ret;
+       /*
+        * PA is attached to a SICL; the CPU core managing this PMU comes
+        * from the nearest SCCL. The name below uses sccl_id - 1 as the
+        * SICL index (the SCCL_ID is one greater than the SICL_ID).
+        */
+       name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%u_pa%u",
+                             pa_pmu->sccl_id - 1, pa_pmu->index_id);
+       if (!name)
+               return -ENOMEM;
+
+       ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+                                      &pa_pmu->node);
+       if (ret) {
+               dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+               return ret;
+       }
+
+       pa_pmu->pmu = (struct pmu) {
+               .module         = THIS_MODULE,
+               .task_ctx_nr    = perf_invalid_context,
+               .event_init     = hisi_uncore_pmu_event_init,
+               .pmu_enable     = hisi_uncore_pmu_enable,
+               .pmu_disable    = hisi_uncore_pmu_disable,
+               .add            = hisi_uncore_pmu_add,
+               .del            = hisi_uncore_pmu_del,
+               .start          = hisi_uncore_pmu_start,
+               .stop           = hisi_uncore_pmu_stop,
+               .read           = hisi_uncore_pmu_read,
+               .attr_groups    = pa_pmu->pmu_events.attr_groups,
+               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       };
+
+       ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
+       if (ret) {
+               dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
+               /* Roll back the hotplug instance and IRQ affinity hint */
+               cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+                                           &pa_pmu->node);
+               irq_set_affinity_hint(pa_pmu->irq, NULL);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, pa_pmu);
+       return ret;
+}
+
+/* Platform remove: unregister the PMU and tear down hotplug/IRQ state. */
+static int hisi_pa_pmu_remove(struct platform_device *pdev)
+{
+       struct hisi_pmu *pa_pmu = platform_get_drvdata(pdev);
+
+       perf_pmu_unregister(&pa_pmu->pmu);
+       cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+                                           &pa_pmu->node);
+       irq_set_affinity_hint(pa_pmu->irq, NULL);
+
+       return 0;
+}
+
+/* Platform driver glue; bind attrs suppressed to keep perf state sane. */
+static struct platform_driver hisi_pa_pmu_driver = {
+       .driver = {
+               .name = "hisi_pa_pmu",
+               .acpi_match_table = hisi_pa_pmu_acpi_match,
+               .suppress_bind_attrs = true,
+       },
+       .probe = hisi_pa_pmu_probe,
+       .remove = hisi_pa_pmu_remove,
+};
+
+/* Module init: register the cpuhp multi-state first, then the driver. */
+static int __init hisi_pa_pmu_module_init(void)
+{
+       int ret;
+
+       ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+                                     "AP_PERF_ARM_HISI_PA_ONLINE",
+                                     hisi_uncore_pmu_online_cpu,
+                                     hisi_uncore_pmu_offline_cpu);
+       if (ret) {
+               pr_err("PA PMU: cpuhp state setup failed, ret = %d\n", ret);
+               return ret;
+       }
+
+       ret = platform_driver_register(&hisi_pa_pmu_driver);
+       if (ret)
+               /* Roll back the cpuhp state if driver registration failed */
+               cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE);
+
+       return ret;
+}
+module_init(hisi_pa_pmu_module_init);
+
+/* Module exit: unregister in reverse order of hisi_pa_pmu_module_init(). */
+static void __exit hisi_pa_pmu_module_exit(void)
+{
+       platform_driver_unregister(&hisi_pa_pmu_driver);
+       cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE);
+}
+
+MODULE_DESCRIPTION("HiSilicon Protocol Adapter uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
+MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
index 9dbdc3fc3bb4baccf0d91cd86a4bbe4d0a6259ac..13c68b5e39c4c0cc317cd0c3b1e38f1f93cb406e 100644 (file)
@@ -21,7 +21,7 @@
 #include "hisi_uncore_pmu.h"
 
 #define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff)
-#define HISI_MAX_PERIOD(nr) (BIT_ULL(nr) - 1)
+#define HISI_MAX_PERIOD(nr) (GENMASK_ULL((nr) - 1, 0))
 
 /*
  * PMU format attributes
@@ -33,7 +33,7 @@ ssize_t hisi_format_sysfs_show(struct device *dev,
 
        eattr = container_of(attr, struct dev_ext_attribute, attr);
 
-       return sprintf(buf, "%s\n", (char *)eattr->var);
+       return sysfs_emit(buf, "%s\n", (char *)eattr->var);
 }
 EXPORT_SYMBOL_GPL(hisi_format_sysfs_show);
 
@@ -47,7 +47,7 @@ ssize_t hisi_event_sysfs_show(struct device *dev,
 
        eattr = container_of(attr, struct dev_ext_attribute, attr);
 
-       return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var);
+       return sysfs_emit(page, "config=0x%lx\n", (unsigned long)eattr->var);
 }
 EXPORT_SYMBOL_GPL(hisi_event_sysfs_show);
 
@@ -59,7 +59,7 @@ ssize_t hisi_cpumask_sysfs_show(struct device *dev,
 {
        struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
 
-       return sprintf(buf, "%d\n", hisi_pmu->on_cpu);
+       return sysfs_emit(buf, "%d\n", hisi_pmu->on_cpu);
 }
 EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show);
 
@@ -96,12 +96,6 @@ static bool hisi_validate_event_group(struct perf_event *event)
        return counters <= hisi_pmu->num_counters;
 }
 
-int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx)
-{
-       return idx >= 0 && idx < hisi_pmu->num_counters;
-}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_counter_valid);
-
 int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
 {
        struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
@@ -125,19 +119,68 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
 {
        struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
 
-       return snprintf(page, PAGE_SIZE, "0x%08x\n", hisi_pmu->identifier);
+       return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
 }
 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_identifier_attr_show);
 
 static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
 {
-       if (!hisi_uncore_pmu_counter_valid(hisi_pmu, idx)) {
-               dev_err(hisi_pmu->dev, "Unsupported event index:%d!\n", idx);
-               return;
+       clear_bit(idx, hisi_pmu->pmu_events.used_mask);
+}
+
+static irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
+{
+       struct hisi_pmu *hisi_pmu = data;
+       struct perf_event *event;
+       unsigned long overflown;
+       int idx;
+
+       overflown = hisi_pmu->ops->get_int_status(hisi_pmu);
+       if (!overflown)
+               return IRQ_NONE;
+
+       /*
+        * Find the counter index which overflowed if the bit was set
+        * and handle it.
+        */
+       for_each_set_bit(idx, &overflown, hisi_pmu->num_counters) {
+               /* Write 1 to clear the IRQ status flag */
+               hisi_pmu->ops->clear_int_status(hisi_pmu, idx);
+               /* Get the corresponding event struct */
+               event = hisi_pmu->pmu_events.hw_events[idx];
+               if (!event)
+                       continue;
+
+               hisi_uncore_pmu_event_update(event);
+               hisi_uncore_pmu_set_event_period(event);
        }
 
-       clear_bit(idx, hisi_pmu->pmu_events.used_mask);
+       return IRQ_HANDLED;
+}
+
+int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
+                            struct platform_device *pdev)
+{
+       int irq, ret;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       ret = devm_request_irq(&pdev->dev, irq, hisi_uncore_pmu_isr,
+                              IRQF_NOBALANCING | IRQF_NO_THREAD,
+                              dev_name(&pdev->dev), hisi_pmu);
+       if (ret < 0) {
+               dev_err(&pdev->dev,
+                       "Fail to request IRQ: %d ret: %d.\n", irq, ret);
+               return ret;
+       }
+
+       hisi_pmu->irq = irq;
+
+       return 0;
 }
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_init_irq);
 
 int hisi_uncore_pmu_event_init(struct perf_event *event)
 {
@@ -202,6 +245,9 @@ static void hisi_uncore_pmu_enable_event(struct perf_event *event)
        hisi_pmu->ops->write_evtype(hisi_pmu, hwc->idx,
                                    HISI_GET_EVENTID(event));
 
+       if (hisi_pmu->ops->enable_filter)
+               hisi_pmu->ops->enable_filter(event);
+
        hisi_pmu->ops->enable_counter_int(hisi_pmu, hwc);
        hisi_pmu->ops->enable_counter(hisi_pmu, hwc);
 }
@@ -216,6 +262,9 @@ static void hisi_uncore_pmu_disable_event(struct perf_event *event)
 
        hisi_pmu->ops->disable_counter(hisi_pmu, hwc);
        hisi_pmu->ops->disable_counter_int(hisi_pmu, hwc);
+
+       if (hisi_pmu->ops->disable_filter)
+               hisi_pmu->ops->disable_filter(event);
 }
 
 void hisi_uncore_pmu_set_event_period(struct perf_event *event)
index 25b7cbe1f8185e20250749a96182c23daf63af56..ea9d89bbc1ea0559fd1d6a7a36486d02407bb583 100644 (file)
 #ifndef __HISI_UNCORE_PMU_H__
 #define __HISI_UNCORE_PMU_H__
 
+#include <linux/bitfield.h>
 #include <linux/cpumask.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/perf_event.h>
+#include <linux/platform_device.h>
 #include <linux/types.h>
 
 #undef pr_fmt
 #define pr_fmt(fmt)     "hisi_pmu: " fmt
 
+#define HISI_PMU_V2            0x30
 #define HISI_MAX_COUNTERS 0x10
 #define to_hisi_pmu(p) (container_of(p, struct hisi_pmu, pmu))
 
 #define HISI_PMU_EVENT_ATTR(_name, _config)            \
        HISI_PMU_ATTR(_name, hisi_event_sysfs_show, (unsigned long)_config)
 
+#define HISI_PMU_EVENT_ATTR_EXTRACTOR(name, config, hi, lo)        \
+       static inline u32 hisi_get_##name(struct perf_event *event)            \
+       {                                                                  \
+               return FIELD_GET(GENMASK_ULL(hi, lo), event->attr.config);  \
+       }
+
 struct hisi_pmu;
 
 struct hisi_uncore_ops {
@@ -47,11 +56,16 @@ struct hisi_uncore_ops {
        void (*disable_counter_int)(struct hisi_pmu *, struct hw_perf_event *);
        void (*start_counters)(struct hisi_pmu *);
        void (*stop_counters)(struct hisi_pmu *);
+       u32 (*get_int_status)(struct hisi_pmu *hisi_pmu);
+       void (*clear_int_status)(struct hisi_pmu *hisi_pmu, int idx);
+       void (*enable_filter)(struct perf_event *event);
+       void (*disable_filter)(struct perf_event *event);
 };
 
 struct hisi_pmu_hwevents {
        struct perf_event *hw_events[HISI_MAX_COUNTERS];
        DECLARE_BITMAP(used_mask, HISI_MAX_COUNTERS);
+       const struct attribute_group **attr_groups;
 };
 
 /* Generic pmu struct for different pmu types */
@@ -71,6 +85,8 @@ struct hisi_pmu {
        void __iomem *base;
        /* the ID of the PMU modules */
        u32 index_id;
+       /* For DDRC PMU v2: each DDRC has more than one DMC */
+       u32 sub_id;
        int num_counters;
        int counter_bits;
        /* check event code range */
@@ -78,7 +94,6 @@ struct hisi_pmu {
        u32 identifier;
 };
 
-int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx);
 int hisi_uncore_pmu_get_event_idx(struct perf_event *event);
 void hisi_uncore_pmu_read(struct perf_event *event);
 int hisi_uncore_pmu_add(struct perf_event *event, int flags);
@@ -102,6 +117,7 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
 ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
                                             struct device_attribute *attr,
                                             char *page);
-
+int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
+                            struct platform_device *pdev);
 
 #endif /* __HISI_UNCORE_PMU_H__ */
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
new file mode 100644 (file)
index 0000000..46be312
--- /dev/null
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon SLLC uncore Hardware event counters support
+ *
+ * Copyright (C) 2020 Hisilicon Limited
+ * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ */
+#include <linux/acpi.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* SLLC register definition */
+#define SLLC_INT_MASK                  0x0814
+#define SLLC_INT_STATUS                        0x0818
+#define SLLC_INT_CLEAR                 0x081c
+#define SLLC_PERF_CTRL                 0x1c00
+#define SLLC_SRCID_CTRL                        0x1c04
+#define SLLC_TGTID_CTRL                        0x1c08
+#define SLLC_EVENT_CTRL                        0x1c14
+#define SLLC_EVENT_TYPE0               0x1c18
+#define SLLC_VERSION                   0x1cf0
+#define SLLC_EVENT_CNT0_L              0x1d00
+
+#define SLLC_EVTYPE_MASK               0xff
+#define SLLC_PERF_CTRL_EN              BIT(0)
+#define SLLC_FILT_EN                   BIT(1)
+#define SLLC_TRACETAG_EN               BIT(2)
+#define SLLC_SRCID_EN                  BIT(4)
+#define SLLC_SRCID_NONE                        0x0
+#define SLLC_TGTID_EN                  BIT(5)
+#define SLLC_TGTID_NONE                        0x0
+#define SLLC_TGTID_MIN_SHIFT           1
+#define SLLC_TGTID_MAX_SHIFT           12
+#define SLLC_SRCID_CMD_SHIFT           1
+#define SLLC_SRCID_MSK_SHIFT           12
+#define SLLC_NR_EVENTS                 0x80
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_min, config1, 10, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_max, config1, 21, 11);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44);
+
+/* A tgtid range filter is valid when max is non-zero and max >= min. */
+static bool tgtid_is_valid(u32 max, u32 min)
+{
+       return max > 0 && max >= min;
+}
+
+/* Enable SLLC trace-tag (and the filter master switch) if requested. */
+static void hisi_sllc_pmu_enable_tracetag(struct perf_event *event)
+{
+       struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+       u32 tt_en = hisi_get_tracetag_en(event);
+
+       if (tt_en) {
+               u32 val;
+
+               val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+               val |= SLLC_TRACETAG_EN | SLLC_FILT_EN;
+               writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+       }
+}
+
+/* Disable SLLC trace-tag and the filter master switch again. */
+static void hisi_sllc_pmu_disable_tracetag(struct perf_event *event)
+{
+       struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+       u32 tt_en = hisi_get_tracetag_en(event);
+
+       if (tt_en) {
+               u32 val;
+
+               val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+               val &= ~(SLLC_TRACETAG_EN | SLLC_FILT_EN);
+               writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+       }
+}
+
+/* Program the tgtid [min, max] range filter and enable it in PERF_CTRL. */
+static void hisi_sllc_pmu_config_tgtid(struct perf_event *event)
+{
+       struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+       u32 min = hisi_get_tgtid_min(event);
+       u32 max = hisi_get_tgtid_max(event);
+
+       if (tgtid_is_valid(max, min)) {
+               u32 val = (max << SLLC_TGTID_MAX_SHIFT) | (min << SLLC_TGTID_MIN_SHIFT);
+
+               writel(val, sllc_pmu->base + SLLC_TGTID_CTRL);
+               /* Enable the tgtid */
+               val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+               val |= SLLC_TGTID_EN | SLLC_FILT_EN;
+               writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+       }
+}
+
+/* Reset the tgtid range filter and disable it in PERF_CTRL. */
+static void hisi_sllc_pmu_clear_tgtid(struct perf_event *event)
+{
+       struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+       u32 min = hisi_get_tgtid_min(event);
+       u32 max = hisi_get_tgtid_max(event);
+
+       if (tgtid_is_valid(max, min)) {
+               u32 val;
+
+               writel(SLLC_TGTID_NONE, sllc_pmu->base + SLLC_TGTID_CTRL);
+               /* Disable the tgtid */
+               val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+               val &= ~(SLLC_TGTID_EN | SLLC_FILT_EN);
+               writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+       }
+}
+
+/* Program the srcid command/mask filter and enable it in PERF_CTRL. */
+static void hisi_sllc_pmu_config_srcid(struct perf_event *event)
+{
+       struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+       u32 cmd = hisi_get_srcid_cmd(event);
+
+       if (cmd) {
+               u32 val, msk;
+
+               msk = hisi_get_srcid_msk(event);
+               val = (cmd << SLLC_SRCID_CMD_SHIFT) | (msk << SLLC_SRCID_MSK_SHIFT);
+               writel(val, sllc_pmu->base + SLLC_SRCID_CTRL);
+               /* Enable the srcid */
+               val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+               val |= SLLC_SRCID_EN | SLLC_FILT_EN;
+               writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+       }
+}
+
+/* Reset the srcid filter and disable it in PERF_CTRL. */
+static void hisi_sllc_pmu_clear_srcid(struct perf_event *event)
+{
+       struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+       u32 cmd = hisi_get_srcid_cmd(event);
+
+       if (cmd) {
+               u32 val;
+
+               writel(SLLC_SRCID_NONE, sllc_pmu->base + SLLC_SRCID_CTRL);
+               /* Disable the srcid */
+               val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+               val &= ~(SLLC_SRCID_EN | SLLC_FILT_EN);
+               writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+       }
+}
+
+/* Apply every filter option (tracetag/srcid/tgtid) encoded in config1. */
+static void hisi_sllc_pmu_enable_filter(struct perf_event *event)
+{
+       if (event->attr.config1 != 0x0) {
+               hisi_sllc_pmu_enable_tracetag(event);
+               hisi_sllc_pmu_config_srcid(event);
+               hisi_sllc_pmu_config_tgtid(event);
+       }
+}
+
+/* Undo the filter options applied by hisi_sllc_pmu_enable_filter(). */
+static void hisi_sllc_pmu_clear_filter(struct perf_event *event)
+{
+       if (event->attr.config1 != 0x0) {
+               hisi_sllc_pmu_disable_tracetag(event);
+               hisi_sllc_pmu_clear_srcid(event);
+               hisi_sllc_pmu_clear_tgtid(event);
+       }
+}
+
+/* Register offset of counter @idx; the 64-bit counters are 8 bytes apart. */
+static u32 hisi_sllc_pmu_get_counter_offset(int idx)
+{
+       return (SLLC_EVENT_CNT0_L + idx * 8);
+}
+
+/* Read the 64-bit hardware counter backing this event. */
+static u64 hisi_sllc_pmu_read_counter(struct hisi_pmu *sllc_pmu,
+                                     struct hw_perf_event *hwc)
+{
+       return readq(sllc_pmu->base +
+                    hisi_sllc_pmu_get_counter_offset(hwc->idx));
+}
+
+/* Write an initial value into the 64-bit hardware counter. */
+static void hisi_sllc_pmu_write_counter(struct hisi_pmu *sllc_pmu,
+                                       struct hw_perf_event *hwc, u64 val)
+{
+       writeq(val, sllc_pmu->base +
+              hisi_sllc_pmu_get_counter_offset(hwc->idx));
+}
+
+/* Program the 8-bit event code of counter @idx into its type register. */
+static void hisi_sllc_pmu_write_evtype(struct hisi_pmu *sllc_pmu, int idx,
+                                      u32 type)
+{
+       u32 reg, reg_idx, shift, val;
+
+       /*
+        * Select the appropriate event select register(SLLC_EVENT_TYPE0/1).
+        * There are 2 event select registers for the 8 hardware counters.
+        * Event code is 8-bits and for the former 4 hardware counters,
+        * SLLC_EVENT_TYPE0 is chosen. For the latter 4 hardware counters,
+        * SLLC_EVENT_TYPE1 is chosen.
+        */
+       reg = SLLC_EVENT_TYPE0 + (idx / 4) * 4;
+       reg_idx = idx % 4;
+       shift = 8 * reg_idx;
+
+       /* Write event code to SLLC_EVENT_TYPEx Register */
+       val = readl(sllc_pmu->base + reg);
+       val &= ~(SLLC_EVTYPE_MASK << shift);
+       val |= (type << shift);
+       writel(val, sllc_pmu->base + reg);
+}
+
+static void hisi_sllc_pmu_start_counters(struct hisi_pmu *sllc_pmu)
+{
+       u32 val;
+
+       val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+       val |= SLLC_PERF_CTRL_EN;
+       writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+}
+
+static void hisi_sllc_pmu_stop_counters(struct hisi_pmu *sllc_pmu)
+{
+       u32 val;
+
+       val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+       val &= ~(SLLC_PERF_CTRL_EN);
+       writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+}
+
+static void hisi_sllc_pmu_enable_counter(struct hisi_pmu *sllc_pmu,
+                                        struct hw_perf_event *hwc)
+{
+       u32 val;
+
+       val = readl(sllc_pmu->base + SLLC_EVENT_CTRL);
+       val |= 1 << hwc->idx;
+       writel(val, sllc_pmu->base + SLLC_EVENT_CTRL);
+}
+
+static void hisi_sllc_pmu_disable_counter(struct hisi_pmu *sllc_pmu,
+                                         struct hw_perf_event *hwc)
+{
+       u32 val;
+
+       val = readl(sllc_pmu->base + SLLC_EVENT_CTRL);
+       val &= ~(1 << hwc->idx);
+       writel(val, sllc_pmu->base + SLLC_EVENT_CTRL);
+}
+
+static void hisi_sllc_pmu_enable_counter_int(struct hisi_pmu *sllc_pmu,
+                                            struct hw_perf_event *hwc)
+{
+       u32 val;
+
+       val = readl(sllc_pmu->base + SLLC_INT_MASK);
+       /* Write 0 to enable interrupt */
+       val &= ~(1 << hwc->idx);
+       writel(val, sllc_pmu->base + SLLC_INT_MASK);
+}
+
+static void hisi_sllc_pmu_disable_counter_int(struct hisi_pmu *sllc_pmu,
+                                             struct hw_perf_event *hwc)
+{
+       u32 val;
+
+       val = readl(sllc_pmu->base + SLLC_INT_MASK);
+       /* Write 1 to mask interrupt */
+       val |= 1 << hwc->idx;
+       writel(val, sllc_pmu->base + SLLC_INT_MASK);
+}
+
+static u32 hisi_sllc_pmu_get_int_status(struct hisi_pmu *sllc_pmu)
+{
+       return readl(sllc_pmu->base + SLLC_INT_STATUS);
+}
+
+static void hisi_sllc_pmu_clear_int_status(struct hisi_pmu *sllc_pmu, int idx)
+{
+       writel(1 << idx, sllc_pmu->base + SLLC_INT_CLEAR);
+}
+
+static const struct acpi_device_id hisi_sllc_pmu_acpi_match[] = {
+       { "HISI0263", },
+       {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_sllc_pmu_acpi_match);
+
+static int hisi_sllc_pmu_init_data(struct platform_device *pdev,
+                                  struct hisi_pmu *sllc_pmu)
+{
+       /*
+        * Use the SCCL_ID and the index ID to identify the SLLC PMU,
+        * while SCCL_ID is from MPIDR_EL1 by CPU.
+        */
+       if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+                                    &sllc_pmu->sccl_id)) {
+               dev_err(&pdev->dev, "Cannot read sccl-id!\n");
+               return -EINVAL;
+       }
+
+       if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
+                                    &sllc_pmu->index_id)) {
+               dev_err(&pdev->dev, "Cannot read idx-id!\n");
+               return -EINVAL;
+       }
+
+       /* SLLC PMUs only share the same SCCL */
+       sllc_pmu->ccl_id = -1;
+
+       sllc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(sllc_pmu->base)) {
+               dev_err(&pdev->dev, "ioremap failed for sllc_pmu resource.\n");
+               return PTR_ERR(sllc_pmu->base);
+       }
+
+       sllc_pmu->identifier = readl(sllc_pmu->base + SLLC_VERSION);
+
+       return 0;
+}
+
+static struct attribute *hisi_sllc_pmu_v2_format_attr[] = {
+       HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+       HISI_PMU_FORMAT_ATTR(tgtid_min, "config1:0-10"),
+       HISI_PMU_FORMAT_ATTR(tgtid_max, "config1:11-21"),
+       HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:22-32"),
+       HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:33-43"),
+       HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:44"),
+       NULL
+};
+
+static const struct attribute_group hisi_sllc_pmu_v2_format_group = {
+       .name = "format",
+       .attrs = hisi_sllc_pmu_v2_format_attr,
+};
+
+static struct attribute *hisi_sllc_pmu_v2_events_attr[] = {
+       HISI_PMU_EVENT_ATTR(rx_req,             0x30),
+       HISI_PMU_EVENT_ATTR(rx_data,            0x31),
+       HISI_PMU_EVENT_ATTR(tx_req,             0x34),
+       HISI_PMU_EVENT_ATTR(tx_data,            0x35),
+       HISI_PMU_EVENT_ATTR(cycles,             0x09),
+       NULL
+};
+
+static const struct attribute_group hisi_sllc_pmu_v2_events_group = {
+       .name = "events",
+       .attrs = hisi_sllc_pmu_v2_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_sllc_pmu_cpumask_attrs[] = {
+       &dev_attr_cpumask.attr,
+       NULL
+};
+
+static const struct attribute_group hisi_sllc_pmu_cpumask_attr_group = {
+       .attrs = hisi_sllc_pmu_cpumask_attrs,
+};
+
+static struct device_attribute hisi_sllc_pmu_identifier_attr =
+       __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_sllc_pmu_identifier_attrs[] = {
+       &hisi_sllc_pmu_identifier_attr.attr,
+       NULL
+};
+
+static struct attribute_group hisi_sllc_pmu_identifier_group = {
+       .attrs = hisi_sllc_pmu_identifier_attrs,
+};
+
+static const struct attribute_group *hisi_sllc_pmu_v2_attr_groups[] = {
+       &hisi_sllc_pmu_v2_format_group,
+       &hisi_sllc_pmu_v2_events_group,
+       &hisi_sllc_pmu_cpumask_attr_group,
+       &hisi_sllc_pmu_identifier_group,
+       NULL
+};
+
+static const struct hisi_uncore_ops hisi_uncore_sllc_ops = {
+       .write_evtype           = hisi_sllc_pmu_write_evtype,
+       .get_event_idx          = hisi_uncore_pmu_get_event_idx,
+       .start_counters         = hisi_sllc_pmu_start_counters,
+       .stop_counters          = hisi_sllc_pmu_stop_counters,
+       .enable_counter         = hisi_sllc_pmu_enable_counter,
+       .disable_counter        = hisi_sllc_pmu_disable_counter,
+       .enable_counter_int     = hisi_sllc_pmu_enable_counter_int,
+       .disable_counter_int    = hisi_sllc_pmu_disable_counter_int,
+       .write_counter          = hisi_sllc_pmu_write_counter,
+       .read_counter           = hisi_sllc_pmu_read_counter,
+       .get_int_status         = hisi_sllc_pmu_get_int_status,
+       .clear_int_status       = hisi_sllc_pmu_clear_int_status,
+       .enable_filter          = hisi_sllc_pmu_enable_filter,
+       .disable_filter         = hisi_sllc_pmu_clear_filter,
+};
+
+static int hisi_sllc_pmu_dev_probe(struct platform_device *pdev,
+                                  struct hisi_pmu *sllc_pmu)
+{
+       int ret;
+
+       ret = hisi_sllc_pmu_init_data(pdev, sllc_pmu);
+       if (ret)
+               return ret;
+
+       ret = hisi_uncore_pmu_init_irq(sllc_pmu, pdev);
+       if (ret)
+               return ret;
+
+       sllc_pmu->pmu_events.attr_groups = hisi_sllc_pmu_v2_attr_groups;
+       sllc_pmu->ops = &hisi_uncore_sllc_ops;
+       sllc_pmu->check_event = SLLC_NR_EVENTS;
+       sllc_pmu->counter_bits = 64;
+       sllc_pmu->num_counters = 8;
+       sllc_pmu->dev = &pdev->dev;
+       sllc_pmu->on_cpu = -1;
+
+       return 0;
+}
+
+static int hisi_sllc_pmu_probe(struct platform_device *pdev)
+{
+       struct hisi_pmu *sllc_pmu;
+       char *name;
+       int ret;
+
+       sllc_pmu = devm_kzalloc(&pdev->dev, sizeof(*sllc_pmu), GFP_KERNEL);
+       if (!sllc_pmu)
+               return -ENOMEM;
+
+       ret = hisi_sllc_pmu_dev_probe(pdev, sllc_pmu);
+       if (ret)
+               return ret;
+
+       name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_sllc%u",
+                             sllc_pmu->sccl_id, sllc_pmu->index_id);
+       if (!name)
+               return -ENOMEM;
+
+       ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+                                      &sllc_pmu->node);
+       if (ret) {
+               dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+               return ret;
+       }
+
+       sllc_pmu->pmu = (struct pmu) {
+               .module         = THIS_MODULE,
+               .task_ctx_nr    = perf_invalid_context,
+               .event_init     = hisi_uncore_pmu_event_init,
+               .pmu_enable     = hisi_uncore_pmu_enable,
+               .pmu_disable    = hisi_uncore_pmu_disable,
+               .add            = hisi_uncore_pmu_add,
+               .del            = hisi_uncore_pmu_del,
+               .start          = hisi_uncore_pmu_start,
+               .stop           = hisi_uncore_pmu_stop,
+               .read           = hisi_uncore_pmu_read,
+               .attr_groups    = sllc_pmu->pmu_events.attr_groups,
+               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       };
+
+       ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
+       if (ret) {
+               dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret);
+               cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+                                           &sllc_pmu->node);
+               irq_set_affinity_hint(sllc_pmu->irq, NULL);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, sllc_pmu);
+
+       return ret;
+}
+
+static int hisi_sllc_pmu_remove(struct platform_device *pdev)
+{
+       struct hisi_pmu *sllc_pmu = platform_get_drvdata(pdev);
+
+       perf_pmu_unregister(&sllc_pmu->pmu);
+       cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+                                           &sllc_pmu->node);
+       irq_set_affinity_hint(sllc_pmu->irq, NULL);
+
+       return 0;
+}
+
+static struct platform_driver hisi_sllc_pmu_driver = {
+       .driver = {
+               .name = "hisi_sllc_pmu",
+               .acpi_match_table = hisi_sllc_pmu_acpi_match,
+               .suppress_bind_attrs = true,
+       },
+       .probe = hisi_sllc_pmu_probe,
+       .remove = hisi_sllc_pmu_remove,
+};
+
+static int __init hisi_sllc_pmu_module_init(void)
+{
+       int ret;
+
+       ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+                                     "AP_PERF_ARM_HISI_SLLC_ONLINE",
+                                     hisi_uncore_pmu_online_cpu,
+                                     hisi_uncore_pmu_offline_cpu);
+       if (ret) {
+               pr_err("SLLC PMU: cpuhp state setup failed, ret = %d\n", ret);
+               return ret;
+       }
+
+       ret = platform_driver_register(&hisi_sllc_pmu_driver);
+       if (ret)
+               cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE);
+
+       return ret;
+}
+module_init(hisi_sllc_pmu_module_init);
+
+static void __exit hisi_sllc_pmu_module_exit(void)
+{
+       platform_driver_unregister(&hisi_sllc_pmu_driver);
+       cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE);
+}
+module_exit(hisi_sllc_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SLLC uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
+MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
index 8883af955a2a3d2f8e59896b11c6d467b726491f..fc54a80f9c5cfc940cacd684c3b1386f92cac619 100644 (file)
@@ -676,7 +676,7 @@ static ssize_t l2cache_pmu_event_show(struct device *dev,
        struct perf_pmu_events_attr *pmu_attr;
 
        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
-       return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+       return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
 }
 
 #define L2CACHE_EVENT_ATTR(_name, _id)                                      \
index fb34b87b94712bf1766ca4775fa13a5290fa7f74..bba078077c93cb417e794f32f5fe8494cfc5c8a5 100644 (file)
@@ -615,7 +615,7 @@ static ssize_t l3cache_pmu_format_show(struct device *dev,
        struct dev_ext_attribute *eattr;
 
        eattr = container_of(attr, struct dev_ext_attribute, attr);
-       return sprintf(buf, "%s\n", (char *) eattr->var);
+       return sysfs_emit(buf, "%s\n", (char *) eattr->var);
 }
 
 #define L3CACHE_PMU_FORMAT_ATTR(_name, _config)                                      \
@@ -643,7 +643,7 @@ static ssize_t l3cache_pmu_event_show(struct device *dev,
        struct perf_pmu_events_attr *pmu_attr;
 
        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
-       return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+       return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
 }
 
 #define L3CACHE_EVENT_ATTR(_name, _id)                                      \
index e116815fa8092fcc1bc96e7957ccf32ac628ceb8..06a6d569b0b56e556ea4917e4717d44821794fb6 100644 (file)
@@ -128,7 +128,7 @@ __tx2_pmu_##_var##_show(struct device *dev,                         \
                               char *page)                              \
 {                                                                      \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
-       return sprintf(page, _format "\n");                             \
+       return sysfs_emit(page, _format "\n");                          \
 }                                                                      \
                                                                        \
 static struct device_attribute format_attr_##_var =                    \
@@ -176,7 +176,7 @@ static ssize_t tx2_pmu_event_show(struct device *dev,
        struct dev_ext_attribute *eattr;
 
        eattr = container_of(attr, struct dev_ext_attribute, attr);
-       return sprintf(buf, "event=0x%lx\n", (unsigned long) eattr->var);
+       return sysfs_emit(buf, "event=0x%lx\n", (unsigned long) eattr->var);
 }
 
 #define TX2_EVENT_ATTR(name, config) \
index 44faa51ba799291c707242a5327fdc300d84e692..ffe3bdeec8459efe26f0f3823b6e210c09c16b7a 100644 (file)
@@ -170,7 +170,7 @@ static ssize_t xgene_pmu_format_show(struct device *dev,
        struct dev_ext_attribute *eattr;
 
        eattr = container_of(attr, struct dev_ext_attribute, attr);
-       return sprintf(buf, "%s\n", (char *) eattr->var);
+       return sysfs_emit(buf, "%s\n", (char *) eattr->var);
 }
 
 #define XGENE_PMU_FORMAT_ATTR(_name, _config)          \
@@ -281,7 +281,7 @@ static ssize_t xgene_pmu_event_show(struct device *dev,
        struct dev_ext_attribute *eattr;
 
        eattr = container_of(attr, struct dev_ext_attribute, attr);
-       return sprintf(buf, "config=0x%lx\n", (unsigned long) eattr->var);
+       return sysfs_emit(buf, "config=0x%lx\n", (unsigned long) eattr->var);
 }
 
 #define XGENE_PMU_EVENT_ATTR(_name, _config)           \
index 68d9c2f6a5cafb17354768c7271934e247e8f20c..54c1f2f0985fc818f4d7fedcd60816a53e0cddc3 100644 (file)
@@ -71,6 +71,7 @@ source "drivers/phy/ingenic/Kconfig"
 source "drivers/phy/lantiq/Kconfig"
 source "drivers/phy/marvell/Kconfig"
 source "drivers/phy/mediatek/Kconfig"
+source "drivers/phy/microchip/Kconfig"
 source "drivers/phy/motorola/Kconfig"
 source "drivers/phy/mscc/Kconfig"
 source "drivers/phy/qualcomm/Kconfig"
index 32261e164abda782ae86c8b4ba158e00091bf077..adac1b1a39d1cded3a2ef03ad043c709aba99b7b 100644 (file)
@@ -20,6 +20,7 @@ obj-y                                 += allwinner/   \
                                           lantiq/      \
                                           marvell/     \
                                           mediatek/    \
+                                          microchip/   \
                                           motorola/    \
                                           mscc/        \
                                           qualcomm/    \
index 09256339bd046694de65b21c37ec88df7eaf84a0..fd92b73b710951fa69848ed76662d2fa18a6906b 100644 (file)
@@ -94,7 +94,7 @@ config PHY_BRCM_USB
        depends on ARCH_BCM4908 || ARCH_BRCMSTB || COMPILE_TEST
        depends on OF
        select GENERIC_PHY
-       select SOC_BRCMSTB
+       select SOC_BRCMSTB if ARCH_BRCMSTB
        default ARCH_BCM4908
        default ARCH_BRCMSTB
        help
index 432832bdbd16d13bb6bdbba788825636febc7ab1..a62910ff55910db5c2bdff61ed416c28a0444362 100644 (file)
@@ -7,6 +7,7 @@ config PHY_CADENCE_TORRENT
        tristate "Cadence Torrent PHY driver"
        depends on OF
        depends on HAS_IOMEM
+       depends on COMMON_CLK
        select GENERIC_PHY
        help
          Support for Cadence Torrent PHY.
@@ -24,6 +25,7 @@ config PHY_CADENCE_DPHY
 config PHY_CADENCE_SIERRA
        tristate "Cadence Sierra PHY Driver"
        depends on OF && HAS_IOMEM && RESET_CONTROLLER
+       depends on COMMON_CLK
        select GENERIC_PHY
        help
          Enable this to support the Cadence Sierra PHY driver
index 26a0badabe38b77852c03279dbe0ad59bcc8db25..5c68e31c5939985a80c1415c3fef9817a3803fb3 100644 (file)
@@ -7,6 +7,7 @@
  *
  */
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-cadence.h>
 
 /* PHY register offsets */
 #define SIERRA_COMMON_CDB_OFFSET                       0x0
 #define SIERRA_MACRO_ID_REG                            0x0
+#define SIERRA_CMN_PLLLC_GEN_PREG                      0x42
 #define SIERRA_CMN_PLLLC_MODE_PREG                     0x48
 #define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG           0x49
 #define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG           0x4A
@@ -31,6 +34,9 @@
 #define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG              0x4F
 #define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG              0x50
 #define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG    0x62
+#define SIERRA_CMN_REFRCV_PREG                         0x98
+#define SIERRA_CMN_REFRCV1_PREG                                0xB8
+#define SIERRA_CMN_PLLLC1_GEN_PREG                     0xC2
 
 #define SIERRA_LANE_CDB_OFFSET(ln, block_offset, reg_offset)   \
                                ((0x4000 << (block_offset)) + \
 #define SIERRA_MAX_LANES                               16
 #define PLL_LOCK_TIME                                  100000
 
+#define CDNS_SIERRA_OUTPUT_CLOCKS                      2
+#define CDNS_SIERRA_INPUT_CLOCKS                       5
+enum cdns_sierra_clock_input {
+       PHY_CLK,
+       CMN_REFCLK_DIG_DIV,
+       CMN_REFCLK1_DIG_DIV,
+       PLL0_REFCLK,
+       PLL1_REFCLK,
+};
+
+#define SIERRA_NUM_CMN_PLLC                            2
+#define SIERRA_NUM_CMN_PLLC_PARENTS                    2
+
 static const struct reg_field macro_id_type =
                                REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15);
 static const struct reg_field phy_pll_cfg_1 =
@@ -151,6 +170,53 @@ static const struct reg_field phy_pll_cfg_1 =
 static const struct reg_field pllctrl_lock =
                                REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0);
 
+static const char * const clk_names[] = {
+       [CDNS_SIERRA_PLL_CMNLC] = "pll_cmnlc",
+       [CDNS_SIERRA_PLL_CMNLC1] = "pll_cmnlc1",
+};
+
+enum cdns_sierra_cmn_plllc {
+       CMN_PLLLC,
+       CMN_PLLLC1,
+};
+
+struct cdns_sierra_pll_mux_reg_fields {
+       struct reg_field        pfdclk_sel_preg;
+       struct reg_field        plllc1en_field;
+       struct reg_field        termen_field;
+};
+
+static const struct cdns_sierra_pll_mux_reg_fields cmn_plllc_pfdclk1_sel_preg[] = {
+       [CMN_PLLLC] = {
+               .pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC_GEN_PREG, 1, 1),
+               .plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 8, 8),
+               .termen_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 0, 0),
+       },
+       [CMN_PLLLC1] = {
+               .pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC1_GEN_PREG, 1, 1),
+               .plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 8, 8),
+               .termen_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 0, 0),
+       },
+};
+
+struct cdns_sierra_pll_mux {
+       struct clk_hw           hw;
+       struct regmap_field     *pfdclk_sel_preg;
+       struct regmap_field     *plllc1en_field;
+       struct regmap_field     *termen_field;
+       struct clk_init_data    clk_data;
+};
+
+#define to_cdns_sierra_pll_mux(_hw)    \
+                       container_of(_hw, struct cdns_sierra_pll_mux, hw)
+
+static const int pll_mux_parent_index[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
+       [CMN_PLLLC] = { PLL0_REFCLK, PLL1_REFCLK },
+       [CMN_PLLLC1] = { PLL1_REFCLK, PLL0_REFCLK },
+};
+
+static u32 cdns_sierra_pll_mux_table[] = { 0, 1 };
+
 struct cdns_sierra_inst {
        struct phy *phy;
        u32 phy_type;
@@ -197,12 +263,15 @@ struct cdns_sierra_phy {
        struct regmap_field *macro_id_type;
        struct regmap_field *phy_pll_cfg_1;
        struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES];
-       struct clk *clk;
-       struct clk *cmn_refclk_dig_div;
-       struct clk *cmn_refclk1_dig_div;
+       struct regmap_field *cmn_refrcv_refclk_plllc1en_preg[SIERRA_NUM_CMN_PLLC];
+       struct regmap_field *cmn_refrcv_refclk_termen_preg[SIERRA_NUM_CMN_PLLC];
+       struct regmap_field *cmn_plllc_pfdclk1_sel_preg[SIERRA_NUM_CMN_PLLC];
+       struct clk *input_clks[CDNS_SIERRA_INPUT_CLOCKS];
        int nsubnodes;
        u32 num_lanes;
        bool autoconf;
+       struct clk_onecell_data clk_data;
+       struct clk *output_clks[CDNS_SIERRA_OUTPUT_CLOCKS];
 };
 
 static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
@@ -281,8 +350,8 @@ static int cdns_sierra_phy_init(struct phy *gphy)
        if (phy->autoconf)
                return 0;
 
-       clk_set_rate(phy->cmn_refclk_dig_div, 25000000);
-       clk_set_rate(phy->cmn_refclk1_dig_div, 25000000);
+       clk_set_rate(phy->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
+       clk_set_rate(phy->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
        if (ins->phy_type == PHY_TYPE_PCIE) {
                num_cmn_regs = phy->init_data->pcie_cmn_regs;
                num_ln_regs = phy->init_data->pcie_ln_regs;
@@ -319,6 +388,12 @@ static int cdns_sierra_phy_on(struct phy *gphy)
        u32 val;
        int ret;
 
+       ret = reset_control_deassert(sp->phy_rst);
+       if (ret) {
+               dev_err(dev, "Failed to take the PHY out of reset\n");
+               return ret;
+       }
+
        /* Take the PHY lane group out of reset */
        ret = reset_control_deassert(ins->lnk_rst);
        if (ret) {
@@ -358,6 +433,153 @@ static const struct phy_ops ops = {
        .owner          = THIS_MODULE,
 };
 
+static u8 cdns_sierra_pll_mux_get_parent(struct clk_hw *hw)
+{
+       struct cdns_sierra_pll_mux *mux = to_cdns_sierra_pll_mux(hw);
+       struct regmap_field *field = mux->pfdclk_sel_preg;
+       unsigned int val;
+
+       regmap_field_read(field, &val);
+       return clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table, 0, val);
+}
+
+static int cdns_sierra_pll_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct cdns_sierra_pll_mux *mux = to_cdns_sierra_pll_mux(hw);
+       struct regmap_field *plllc1en_field = mux->plllc1en_field;
+       struct regmap_field *termen_field = mux->termen_field;
+       struct regmap_field *field = mux->pfdclk_sel_preg;
+       int val, ret;
+
+       ret = regmap_field_write(plllc1en_field, 0);
+       ret |= regmap_field_write(termen_field, 0);
+       if (index == 1) {
+               ret |= regmap_field_write(plllc1en_field, 1);
+               ret |= regmap_field_write(termen_field, 1);
+       }
+
+       val = cdns_sierra_pll_mux_table[index];
+       ret |= regmap_field_write(field, val);
+
+       return ret;
+}
+
+static const struct clk_ops cdns_sierra_pll_mux_ops = {
+       .set_parent = cdns_sierra_pll_mux_set_parent,
+       .get_parent = cdns_sierra_pll_mux_get_parent,
+};
+
+static int cdns_sierra_pll_mux_register(struct cdns_sierra_phy *sp,
+                                       struct regmap_field *pfdclk1_sel_field,
+                                       struct regmap_field *plllc1en_field,
+                                       struct regmap_field *termen_field,
+                                       int clk_index)
+{
+       struct cdns_sierra_pll_mux *mux;
+       struct device *dev = sp->dev;
+       struct clk_init_data *init;
+       const char **parent_names;
+       unsigned int num_parents;
+       char clk_name[100];
+       struct clk *clk;
+       int i;
+
+       mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+       if (!mux)
+               return -ENOMEM;
+
+       num_parents = SIERRA_NUM_CMN_PLLC_PARENTS;
+       parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents), GFP_KERNEL);
+       if (!parent_names)
+               return -ENOMEM;
+
+       for (i = 0; i < num_parents; i++) {
+               clk = sp->input_clks[pll_mux_parent_index[clk_index][i]];
+               if (IS_ERR_OR_NULL(clk)) {
+                       dev_err(dev, "No parent clock for derived_refclk\n");
+                       return PTR_ERR(clk);
+               }
+               parent_names[i] = __clk_get_name(clk);
+       }
+
+       snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev), clk_names[clk_index]);
+
+       init = &mux->clk_data;
+
+       init->ops = &cdns_sierra_pll_mux_ops;
+       init->flags = CLK_SET_RATE_NO_REPARENT;
+       init->parent_names = parent_names;
+       init->num_parents = num_parents;
+       init->name = clk_name;
+
+       mux->pfdclk_sel_preg = pfdclk1_sel_field;
+       mux->plllc1en_field = plllc1en_field;
+       mux->termen_field = termen_field;
+       mux->hw.init = init;
+
+       clk = devm_clk_register(dev, &mux->hw);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       sp->output_clks[clk_index] = clk;
+
+       return 0;
+}
+
+static int cdns_sierra_phy_register_pll_mux(struct cdns_sierra_phy *sp)
+{
+       struct regmap_field *pfdclk1_sel_field;
+       struct regmap_field *plllc1en_field;
+       struct regmap_field *termen_field;
+       struct device *dev = sp->dev;
+       int ret = 0, i, clk_index;
+
+       clk_index = CDNS_SIERRA_PLL_CMNLC;
+       for (i = 0; i < SIERRA_NUM_CMN_PLLC; i++, clk_index++) {
+               pfdclk1_sel_field = sp->cmn_plllc_pfdclk1_sel_preg[i];
+               plllc1en_field = sp->cmn_refrcv_refclk_plllc1en_preg[i];
+               termen_field = sp->cmn_refrcv_refclk_termen_preg[i];
+
+               ret = cdns_sierra_pll_mux_register(sp, pfdclk1_sel_field, plllc1en_field,
+                                                  termen_field, clk_index);
+               if (ret) {
+                       dev_err(dev, "Fail to register cmn plllc mux\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static void cdns_sierra_clk_unregister(struct cdns_sierra_phy *sp)
+{
+       struct device *dev = sp->dev;
+       struct device_node *node = dev->of_node;
+
+       of_clk_del_provider(node);
+}
+
+static int cdns_sierra_clk_register(struct cdns_sierra_phy *sp)
+{
+       struct device *dev = sp->dev;
+       struct device_node *node = dev->of_node;
+       int ret;
+
+       ret = cdns_sierra_phy_register_pll_mux(sp);
+       if (ret) {
+               dev_err(dev, "Failed to pll mux clocks\n");
+               return ret;
+       }
+
+       sp->clk_data.clks = sp->output_clks;
+       sp->clk_data.clk_num = CDNS_SIERRA_OUTPUT_CLOCKS;
+       ret = of_clk_add_provider(node, of_clk_src_onecell_get, &sp->clk_data);
+       if (ret)
+               dev_err(dev, "Failed to add clock provider: %s\n", node->name);
+
+       return ret;
+}
+
 static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst,
                                    struct device_node *child)
 {
@@ -396,6 +618,7 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
 {
        struct device *dev = sp->dev;
        struct regmap_field *field;
+       struct reg_field reg_field;
        struct regmap *regmap;
        int i;
 
@@ -407,6 +630,32 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
        }
        sp->macro_id_type = field;
 
+       for (i = 0; i < SIERRA_NUM_CMN_PLLC; i++) {
+               reg_field = cmn_plllc_pfdclk1_sel_preg[i].pfdclk_sel_preg;
+               field = devm_regmap_field_alloc(dev, regmap, reg_field);
+               if (IS_ERR(field)) {
+                       dev_err(dev, "PLLLC%d_PFDCLK1_SEL failed\n", i);
+                       return PTR_ERR(field);
+               }
+               sp->cmn_plllc_pfdclk1_sel_preg[i] = field;
+
+               reg_field = cmn_plllc_pfdclk1_sel_preg[i].plllc1en_field;
+               field = devm_regmap_field_alloc(dev, regmap, reg_field);
+               if (IS_ERR(field)) {
+                       dev_err(dev, "REFRCV%d_REFCLK_PLLLC1EN failed\n", i);
+                       return PTR_ERR(field);
+               }
+               sp->cmn_refrcv_refclk_plllc1en_preg[i] = field;
+
+               reg_field = cmn_plllc_pfdclk1_sel_preg[i].termen_field;
+               field = devm_regmap_field_alloc(dev, regmap, reg_field);
+               if (IS_ERR(field)) {
+                       dev_err(dev, "REFRCV%d_REFCLK_TERMEN failed\n", i);
+                       return PTR_ERR(field);
+               }
+               sp->cmn_refrcv_refclk_termen_preg[i] = field;
+       }
+
        regmap = sp->regmap_phy_config_ctrl;
        field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1);
        if (IS_ERR(field)) {
@@ -471,6 +720,110 @@ static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
        return 0;
 }
 
+static int cdns_sierra_phy_get_clocks(struct cdns_sierra_phy *sp,
+                                     struct device *dev)
+{
+       struct clk *clk;
+       int ret;
+
+       clk = devm_clk_get_optional(dev, "phy_clk");
+       if (IS_ERR(clk)) {
+               dev_err(dev, "failed to get clock phy_clk\n");
+               return PTR_ERR(clk);
+       }
+       sp->input_clks[PHY_CLK] = clk;
+
+       clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
+       if (IS_ERR(clk)) {
+               dev_err(dev, "cmn_refclk_dig_div clock not found\n");
+               ret = PTR_ERR(clk);
+               return ret;
+       }
+       sp->input_clks[CMN_REFCLK_DIG_DIV] = clk;
+
+       clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
+       if (IS_ERR(clk)) {
+               dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
+               ret = PTR_ERR(clk);
+               return ret;
+       }
+       sp->input_clks[CMN_REFCLK1_DIG_DIV] = clk;
+
+       clk = devm_clk_get_optional(dev, "pll0_refclk");
+       if (IS_ERR(clk)) {
+               dev_err(dev, "pll0_refclk clock not found\n");
+               ret = PTR_ERR(clk);
+               return ret;
+       }
+       sp->input_clks[PLL0_REFCLK] = clk;
+
+       clk = devm_clk_get_optional(dev, "pll1_refclk");
+       if (IS_ERR(clk)) {
+               dev_err(dev, "pll1_refclk clock not found\n");
+               ret = PTR_ERR(clk);
+               return ret;
+       }
+       sp->input_clks[PLL1_REFCLK] = clk;
+
+       return 0;
+}
+
+static int cdns_sierra_phy_enable_clocks(struct cdns_sierra_phy *sp)
+{
+       int ret;
+
+       ret = clk_prepare_enable(sp->input_clks[PHY_CLK]);
+       if (ret)
+               return ret;
+
+       ret = clk_prepare_enable(sp->output_clks[CDNS_SIERRA_PLL_CMNLC]);
+       if (ret)
+               goto err_pll_cmnlc;
+
+       ret = clk_prepare_enable(sp->output_clks[CDNS_SIERRA_PLL_CMNLC1]);
+       if (ret)
+               goto err_pll_cmnlc1;
+
+       return 0;
+
+err_pll_cmnlc1:
+       clk_disable_unprepare(sp->output_clks[CDNS_SIERRA_PLL_CMNLC]);
+
+err_pll_cmnlc:
+       clk_disable_unprepare(sp->input_clks[PHY_CLK]);
+
+       return ret;
+}
+
+static void cdns_sierra_phy_disable_clocks(struct cdns_sierra_phy *sp)
+{
+       clk_disable_unprepare(sp->output_clks[CDNS_SIERRA_PLL_CMNLC1]);
+       clk_disable_unprepare(sp->output_clks[CDNS_SIERRA_PLL_CMNLC]);
+       clk_disable_unprepare(sp->input_clks[PHY_CLK]);
+}
+
+static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp,
+                                     struct device *dev)
+{
+       struct reset_control *rst;
+
+       rst = devm_reset_control_get_exclusive(dev, "sierra_reset");
+       if (IS_ERR(rst)) {
+               dev_err(dev, "failed to get reset\n");
+               return PTR_ERR(rst);
+       }
+       sp->phy_rst = rst;
+
+       rst = devm_reset_control_get_optional_exclusive(dev, "sierra_apb");
+       if (IS_ERR(rst)) {
+               dev_err(dev, "failed to get apb reset\n");
+               return PTR_ERR(rst);
+       }
+       sp->apb_rst = rst;
+
+       return 0;
+}
+
 static int cdns_sierra_phy_probe(struct platform_device *pdev)
 {
        struct cdns_sierra_phy *sp;
@@ -481,7 +834,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
        unsigned int id_value;
        int i, ret, node = 0;
        void __iomem *base;
-       struct clk *clk;
        struct device_node *dn = dev->of_node, *child;
 
        if (of_get_child_count(dn) == 0)
@@ -518,43 +870,21 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, sp);
 
-       sp->clk = devm_clk_get_optional(dev, "phy_clk");
-       if (IS_ERR(sp->clk)) {
-               dev_err(dev, "failed to get clock phy_clk\n");
-               return PTR_ERR(sp->clk);
-       }
-
-       sp->phy_rst = devm_reset_control_get(dev, "sierra_reset");
-       if (IS_ERR(sp->phy_rst)) {
-               dev_err(dev, "failed to get reset\n");
-               return PTR_ERR(sp->phy_rst);
-       }
-
-       sp->apb_rst = devm_reset_control_get_optional(dev, "sierra_apb");
-       if (IS_ERR(sp->apb_rst)) {
-               dev_err(dev, "failed to get apb reset\n");
-               return PTR_ERR(sp->apb_rst);
-       }
-
-       clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
-       if (IS_ERR(clk)) {
-               dev_err(dev, "cmn_refclk_dig_div clock not found\n");
-               ret = PTR_ERR(clk);
+       ret = cdns_sierra_phy_get_clocks(sp, dev);
+       if (ret)
                return ret;
-       }
-       sp->cmn_refclk_dig_div = clk;
 
-       clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
-       if (IS_ERR(clk)) {
-               dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
-               ret = PTR_ERR(clk);
+       ret = cdns_sierra_clk_register(sp);
+       if (ret)
                return ret;
-       }
-       sp->cmn_refclk1_dig_div = clk;
 
-       ret = clk_prepare_enable(sp->clk);
+       ret = cdns_sierra_phy_get_resets(sp, dev);
        if (ret)
-               return ret;
+               goto unregister_clk;
+
+       ret = cdns_sierra_phy_enable_clocks(sp);
+       if (ret)
+               goto unregister_clk;
 
        /* Enable APB */
        reset_control_deassert(sp->apb_rst);
@@ -571,6 +901,10 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
        for_each_available_child_of_node(dn, child) {
                struct phy *gphy;
 
+               if (!(of_node_name_eq(child, "phy") ||
+                     of_node_name_eq(child, "link")))
+                       continue;
+
                sp->phys[node].lnk_rst =
                        of_reset_control_array_get_exclusive(child);
 
@@ -616,7 +950,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
 
        pm_runtime_enable(dev);
        phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-       reset_control_deassert(sp->phy_rst);
        return PTR_ERR_OR_ZERO(phy_provider);
 
 put_child:
@@ -626,8 +959,10 @@ put_child2:
                reset_control_put(sp->phys[i].lnk_rst);
        of_node_put(child);
 clk_disable:
-       clk_disable_unprepare(sp->clk);
+       cdns_sierra_phy_disable_clocks(sp);
        reset_control_assert(sp->apb_rst);
+unregister_clk:
+       cdns_sierra_clk_unregister(sp);
        return ret;
 }
 
@@ -640,6 +975,7 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev)
        reset_control_assert(phy->apb_rst);
        pm_runtime_disable(&pdev->dev);
 
+       cdns_sierra_phy_disable_clocks(phy);
        /*
         * The device level resets will be put automatically.
         * Need to put the subnode resets here though.
@@ -648,6 +984,9 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev)
                reset_control_assert(phy->phys[i].lnk_rst);
                reset_control_put(phy->phys[i].lnk_rst);
        }
+
+       cdns_sierra_clk_unregister(phy);
+
        return 0;
 }
 
index 591a15834b48f7ba5a06f9fe4c2faa2dd5162d32..0477e7beebbf594ea72f1b7eaf55699b360416d9 100644 (file)
@@ -7,7 +7,9 @@
  */
 
 #include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-cadence.h>
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
@@ -84,6 +86,8 @@
 #define CMN_PLLSM1_PLLLOCK_TMR         0x0034U
 #define CMN_CDIAG_CDB_PWRI_OVRD                0x0041U
 #define CMN_CDIAG_XCVRC_PWRI_OVRD      0x0047U
+#define CMN_CDIAG_REFCLK_OVRD          0x004CU
+#define CMN_CDIAG_REFCLK_DRV0_CTRL     0x0050U
 #define CMN_BGCAL_INIT_TMR             0x0064U
 #define CMN_BGCAL_ITER_TMR             0x0065U
 #define CMN_IBCAL_INIT_TMR             0x0074U
 #define CMN_PLL1_FRACDIVH_M0           0x00D2U
 #define CMN_PLL1_HIGH_THR_M0           0x00D3U
 #define CMN_PLL1_DSM_DIAG_M0           0x00D4U
+#define CMN_PLL1_DSM_FBH_OVRD_M0       0x00D5U
+#define CMN_PLL1_DSM_FBL_OVRD_M0       0x00D6U
 #define CMN_PLL1_SS_CTRL1_M0           0x00D8U
 #define CMN_PLL1_SS_CTRL2_M0            0x00D9U
 #define CMN_PLL1_SS_CTRL3_M0            0x00DAU
 #define TX_TXCC_CPOST_MULT_00          0x004CU
 #define TX_TXCC_CPOST_MULT_01          0x004DU
 #define TX_TXCC_MGNFS_MULT_000         0x0050U
+#define TX_TXCC_MGNFS_MULT_100         0x0054U
 #define DRV_DIAG_TX_DRV                        0x00C6U
 #define XCVR_DIAG_PLLDRC_CTRL          0x00E5U
 #define XCVR_DIAG_HSCLK_SEL            0x00E6U
 #define XCVR_DIAG_HSCLK_DIV            0x00E7U
+#define XCVR_DIAG_RXCLK_CTRL           0x00E9U
 #define XCVR_DIAG_BIDI_CTRL            0x00EAU
 #define XCVR_DIAG_PSC_OVRD             0x00EBU
 #define TX_PSC_A0                      0x0100U
 #define RX_DIAG_ACYA                   0x01FFU
 
 /* PHY PCS common registers */
+#define PHY_PIPE_CMN_CTRL1             0x0000U
 #define PHY_PLL_CFG                    0x000EU
 #define PHY_PIPE_USB3_GEN2_PRE_CFG0    0x0020U
 #define PHY_PIPE_USB3_GEN2_POST_CFG0   0x0022U
 #define PHY_PMA_CMN_CTRL2              0x0001U
 #define PHY_PMA_PLL_RAW_CTRL           0x0003U
 
+static const char * const clk_names[] = {
+       [CDNS_TORRENT_REFCLK_DRIVER] = "refclk-driver",
+};
+
 static const struct reg_field phy_pll_cfg =
                                REG_FIELD(PHY_PLL_CFG, 0, 1);
 
@@ -231,6 +244,26 @@ static const struct reg_field phy_pma_pll_raw_ctrl =
 static const struct reg_field phy_reset_ctrl =
                                REG_FIELD(PHY_RESET, 8, 8);
 
+static const struct reg_field phy_pipe_cmn_ctrl1_0 = REG_FIELD(PHY_PIPE_CMN_CTRL1, 0, 0);
+
+#define REFCLK_OUT_NUM_CMN_CONFIG      5
+
+enum cdns_torrent_refclk_out_cmn {
+       CMN_CDIAG_REFCLK_OVRD_4,
+       CMN_CDIAG_REFCLK_DRV0_CTRL_1,
+       CMN_CDIAG_REFCLK_DRV0_CTRL_4,
+       CMN_CDIAG_REFCLK_DRV0_CTRL_5,
+       CMN_CDIAG_REFCLK_DRV0_CTRL_6,
+};
+
+static const struct reg_field refclk_out_cmn_cfg[] = {
+       [CMN_CDIAG_REFCLK_OVRD_4]       = REG_FIELD(CMN_CDIAG_REFCLK_OVRD, 4, 4),
+       [CMN_CDIAG_REFCLK_DRV0_CTRL_1]  = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 1, 1),
+       [CMN_CDIAG_REFCLK_DRV0_CTRL_4]  = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 4, 4),
+       [CMN_CDIAG_REFCLK_DRV0_CTRL_5]  = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 5, 5),
+       [CMN_CDIAG_REFCLK_DRV0_CTRL_6]  = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 6, 6),
+};
+
 enum cdns_torrent_phy_type {
        TYPE_NONE,
        TYPE_DP,
@@ -279,6 +312,8 @@ struct cdns_torrent_phy {
        struct regmap_field *phy_pma_cmn_ctrl_2;
        struct regmap_field *phy_pma_pll_raw_ctrl;
        struct regmap_field *phy_reset_ctrl;
+       struct clk *clks[CDNS_TORRENT_REFCLK_DRIVER + 1];
+       struct clk_onecell_data clk_data;
 };
 
 enum phy_powerstate {
@@ -288,6 +323,16 @@ enum phy_powerstate {
        POWERSTATE_A3 = 3,
 };
 
+struct cdns_torrent_derived_refclk {
+       struct clk_hw           hw;
+       struct regmap_field     *phy_pipe_cmn_ctrl1_0;
+       struct regmap_field     *cmn_fields[REFCLK_OUT_NUM_CMN_CONFIG];
+       struct clk_init_data    clk_data;
+};
+
+#define to_cdns_torrent_derived_refclk(_hw)    \
+                       container_of(_hw, struct cdns_torrent_derived_refclk, hw)
+
 static int cdns_torrent_phy_init(struct phy *phy);
 static int cdns_torrent_dp_init(struct phy *phy);
 static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy,
@@ -326,6 +371,19 @@ static const struct phy_ops cdns_torrent_phy_ops = {
        .owner          = THIS_MODULE,
 };
 
+static int cdns_torrent_noop_phy_on(struct phy *phy)
+{
+       /* Give 5ms to 10ms delay for the PIPE clock to be stable */
+       usleep_range(5000, 10000);
+
+       return 0;
+}
+
+static const struct phy_ops noop_ops = {
+       .power_on       = cdns_torrent_noop_phy_on,
+       .owner          = THIS_MODULE,
+};
+
 struct cdns_reg_pairs {
        u32 val;
        u32 off;
@@ -1604,6 +1662,108 @@ static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes)
        return ret;
 }
 
+static int cdns_torrent_derived_refclk_enable(struct clk_hw *hw)
+{
+       struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
+
+       regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_6], 0);
+       regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_4], 1);
+       regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_5], 1);
+       regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_1], 0);
+       regmap_field_write(derived_refclk->cmn_fields[CMN_CDIAG_REFCLK_OVRD_4], 1);
+       regmap_field_write(derived_refclk->phy_pipe_cmn_ctrl1_0, 1);
+
+       return 0;
+}
+
+static void cdns_torrent_derived_refclk_disable(struct clk_hw *hw)
+{
+       struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
+
+       regmap_field_write(derived_refclk->phy_pipe_cmn_ctrl1_0, 0);
+}
+
+static int cdns_torrent_derived_refclk_is_enabled(struct clk_hw *hw)
+{
+       struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
+       int val;
+
+       regmap_field_read(derived_refclk->phy_pipe_cmn_ctrl1_0, &val);
+
+       return !!val;
+}
+
+static const struct clk_ops cdns_torrent_derived_refclk_ops = {
+       .enable = cdns_torrent_derived_refclk_enable,
+       .disable = cdns_torrent_derived_refclk_disable,
+       .is_enabled = cdns_torrent_derived_refclk_is_enabled,
+};
+
+static int cdns_torrent_derived_refclk_register(struct cdns_torrent_phy *cdns_phy)
+{
+       struct cdns_torrent_derived_refclk *derived_refclk;
+       struct device *dev = cdns_phy->dev;
+       struct regmap_field *field;
+       struct clk_init_data *init;
+       const char *parent_name;
+       struct regmap *regmap;
+       char clk_name[100];
+       struct clk *clk;
+       int i;
+
+       derived_refclk = devm_kzalloc(dev, sizeof(*derived_refclk), GFP_KERNEL);
+       if (!derived_refclk)
+               return -ENOMEM;
+
+       snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+                clk_names[CDNS_TORRENT_REFCLK_DRIVER]);
+
+       clk = devm_clk_get_optional(dev, "phy_en_refclk");
+       if (IS_ERR(clk)) {
+               dev_err(dev, "No parent clock for derived_refclk\n");
+               return PTR_ERR(clk);
+       }
+
+       init = &derived_refclk->clk_data;
+
+       if (clk) {
+               parent_name = __clk_get_name(clk);
+               init->parent_names = &parent_name;
+               init->num_parents = 1;
+       }
+       init->ops = &cdns_torrent_derived_refclk_ops;
+       init->flags = 0;
+       init->name = clk_name;
+
+       regmap = cdns_phy->regmap_phy_pcs_common_cdb;
+       field = devm_regmap_field_alloc(dev, regmap, phy_pipe_cmn_ctrl1_0);
+       if (IS_ERR(field)) {
+               dev_err(dev, "phy_pipe_cmn_ctrl1_0 reg field init failed\n");
+               return PTR_ERR(field);
+       }
+       derived_refclk->phy_pipe_cmn_ctrl1_0 = field;
+
+       regmap = cdns_phy->regmap_common_cdb;
+       for (i = 0; i < REFCLK_OUT_NUM_CMN_CONFIG; i++) {
+               field = devm_regmap_field_alloc(dev, regmap, refclk_out_cmn_cfg[i]);
+               if (IS_ERR(field)) {
+                       dev_err(dev, "CMN reg field init failed\n");
+                       return PTR_ERR(field);
+               }
+               derived_refclk->cmn_fields[i] = field;
+       }
+
+       derived_refclk->hw.init = init;
+
+       clk = devm_clk_register(dev, &derived_refclk->hw);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       cdns_phy->clks[CDNS_TORRENT_REFCLK_DRIVER] = clk;
+
+       return 0;
+}
+
 static int cdns_torrent_phy_on(struct phy *phy)
 {
        struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
@@ -2071,6 +2231,85 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
        return 0;
 }
 
+static void cdns_torrent_clk_cleanup(struct cdns_torrent_phy *cdns_phy)
+{
+       struct device *dev = cdns_phy->dev;
+
+       of_clk_del_provider(dev->of_node);
+}
+
+static int cdns_torrent_clk_register(struct cdns_torrent_phy *cdns_phy)
+{
+       struct device *dev = cdns_phy->dev;
+       struct device_node *node = dev->of_node;
+       int ret;
+
+       ret = cdns_torrent_derived_refclk_register(cdns_phy);
+       if (ret) {
+               dev_err(dev, "failed to register derived refclk\n");
+               return ret;
+       }
+
+       cdns_phy->clk_data.clks = cdns_phy->clks;
+       cdns_phy->clk_data.clk_num = CDNS_TORRENT_REFCLK_DRIVER + 1;
+
+       ret = of_clk_add_provider(node, of_clk_src_onecell_get, &cdns_phy->clk_data);
+       if (ret) {
+               dev_err(dev, "Failed to add clock provider: %s\n", node->name);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int cdns_torrent_reset(struct cdns_torrent_phy *cdns_phy)
+{
+       struct device *dev = cdns_phy->dev;
+
+       cdns_phy->phy_rst = devm_reset_control_get_exclusive_by_index(dev, 0);
+       if (IS_ERR(cdns_phy->phy_rst)) {
+               dev_err(dev, "%s: failed to get reset\n",
+                       dev->of_node->full_name);
+               return PTR_ERR(cdns_phy->phy_rst);
+       }
+
+       cdns_phy->apb_rst = devm_reset_control_get_optional_exclusive(dev, "torrent_apb");
+       if (IS_ERR(cdns_phy->apb_rst)) {
+               dev_err(dev, "%s: failed to get apb reset\n",
+                       dev->of_node->full_name);
+               return PTR_ERR(cdns_phy->apb_rst);
+       }
+
+       return 0;
+}
+
+static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy)
+{
+       struct device *dev = cdns_phy->dev;
+       int ret;
+
+       cdns_phy->clk = devm_clk_get(dev, "refclk");
+       if (IS_ERR(cdns_phy->clk)) {
+               dev_err(dev, "phy ref clock not found\n");
+               return PTR_ERR(cdns_phy->clk);
+       }
+
+       ret = clk_prepare_enable(cdns_phy->clk);
+       if (ret) {
+               dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
+               return ret;
+       }
+
+       cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk);
+       if (!(cdns_phy->ref_clk_rate)) {
+               dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
+               clk_disable_unprepare(cdns_phy->clk);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int cdns_torrent_phy_probe(struct platform_device *pdev)
 {
        struct cdns_torrent_phy *cdns_phy;
@@ -2080,6 +2319,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
        struct device_node *child;
        int ret, subnodes, node = 0, i;
        u32 total_num_lanes = 0;
+       int already_configured;
        u8 init_dp_regmap = 0;
        u32 phy_type;
 
@@ -2096,26 +2336,6 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
        cdns_phy->dev = dev;
        cdns_phy->init_data = data;
 
-       cdns_phy->phy_rst = devm_reset_control_get_exclusive_by_index(dev, 0);
-       if (IS_ERR(cdns_phy->phy_rst)) {
-               dev_err(dev, "%s: failed to get reset\n",
-                       dev->of_node->full_name);
-               return PTR_ERR(cdns_phy->phy_rst);
-       }
-
-       cdns_phy->apb_rst = devm_reset_control_get_optional(dev, "torrent_apb");
-       if (IS_ERR(cdns_phy->apb_rst)) {
-               dev_err(dev, "%s: failed to get apb reset\n",
-                       dev->of_node->full_name);
-               return PTR_ERR(cdns_phy->apb_rst);
-       }
-
-       cdns_phy->clk = devm_clk_get(dev, "refclk");
-       if (IS_ERR(cdns_phy->clk)) {
-               dev_err(dev, "phy ref clock not found\n");
-               return PTR_ERR(cdns_phy->clk);
-       }
-
        cdns_phy->sd_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(cdns_phy->sd_base))
                return PTR_ERR(cdns_phy->sd_base);
@@ -2134,21 +2354,24 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       ret = clk_prepare_enable(cdns_phy->clk);
-       if (ret) {
-               dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
+       ret = cdns_torrent_clk_register(cdns_phy);
+       if (ret)
                return ret;
-       }
 
-       cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk);
-       if (!(cdns_phy->ref_clk_rate)) {
-               dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
-               clk_disable_unprepare(cdns_phy->clk);
-               return -EINVAL;
-       }
+       regmap_field_read(cdns_phy->phy_pma_cmn_ctrl_1, &already_configured);
 
-       /* Enable APB */
-       reset_control_deassert(cdns_phy->apb_rst);
+       if (!already_configured) {
+               ret = cdns_torrent_reset(cdns_phy);
+               if (ret)
+                       goto clk_cleanup;
+
+               ret = cdns_torrent_clk(cdns_phy);
+               if (ret)
+                       goto clk_cleanup;
+
+               /* Enable APB */
+               reset_control_deassert(cdns_phy->apb_rst);
+       }
 
        for_each_available_child_of_node(dev->of_node, child) {
                struct phy *gphy;
@@ -2218,7 +2441,10 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
                of_property_read_u32(child, "cdns,ssc-mode",
                                     &cdns_phy->phys[node].ssc_mode);
 
-               gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops);
+               if (!already_configured)
+                       gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops);
+               else
+                       gphy = devm_phy_create(dev, child, &noop_ops);
                if (IS_ERR(gphy)) {
                        ret = PTR_ERR(gphy);
                        goto put_child;
@@ -2302,7 +2528,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
                goto put_lnk_rst;
        }
 
-       if (cdns_phy->nsubnodes > 1) {
+       if (cdns_phy->nsubnodes > 1 && !already_configured) {
                ret = cdns_torrent_phy_configure_multilink(cdns_phy);
                if (ret)
                        goto put_lnk_rst;
@@ -2324,6 +2550,8 @@ put_lnk_rst:
        of_node_put(child);
        reset_control_assert(cdns_phy->apb_rst);
        clk_disable_unprepare(cdns_phy->clk);
+clk_cleanup:
+       cdns_torrent_clk_cleanup(cdns_phy);
        return ret;
 }
 
@@ -2340,6 +2568,7 @@ static int cdns_torrent_phy_remove(struct platform_device *pdev)
        }
 
        clk_disable_unprepare(cdns_phy->clk);
+       cdns_torrent_clk_cleanup(cdns_phy);
 
        return 0;
 }
@@ -2455,8 +2684,6 @@ static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
        {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
        {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
        {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
-       {0x0003, CMN_PLL0_VCOCAL_TCTRL},
-       {0x0003, CMN_PLL1_VCOCAL_TCTRL},
        {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
        {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
        {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
@@ -2464,7 +2691,9 @@ static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
        {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
        {0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
        {0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
-       {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
+       {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD},
+       {0x007F, CMN_TXPUCAL_TUNE},
+       {0x007F, CMN_TXPDCAL_TUNE}
 };
 
 static struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = {
@@ -2507,13 +2736,28 @@ static struct cdns_torrent_vals usb_phy_pcs_cmn_vals = {
 };
 
 /* USB 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = {
+static struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = {
+       {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+       {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+       {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
        {0x0003, CMN_PLL0_VCOCAL_TCTRL},
        {0x0003, CMN_PLL1_VCOCAL_TCTRL},
        {0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
        {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
 };
 
+static struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = {
+       .reg_pairs = sl_usb_100_no_ssc_cmn_regs,
+       .num_regs = ARRAY_SIZE(sl_usb_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = {
+       {0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
+       {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD},
+       {0x007F, CMN_TXPUCAL_TUNE},
+       {0x007F, CMN_TXPDCAL_TUNE}
+};
+
 static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = {
        {0x02FF, TX_PSC_A0},
        {0x06AF, TX_PSC_A1},
@@ -2645,12 +2889,22 @@ static struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = {
 };
 
 /* SGMII 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = {
+static struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = {
+       {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+       {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+       {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
        {0x0003, CMN_PLL0_VCOCAL_TCTRL},
-       {0x0003, CMN_PLL1_VCOCAL_TCTRL},
-       {0x3700, CMN_DIAG_BIAS_OVRD1},
-       {0x0008, CMN_TXPUCAL_TUNE},
-       {0x0008, CMN_TXPDCAL_TUNE}
+       {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = {
+       .reg_pairs = sl_sgmii_100_no_ssc_cmn_regs,
+       .num_regs = ARRAY_SIZE(sl_sgmii_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = {
+       {0x007F, CMN_TXPUCAL_TUNE},
+       {0x007F, CMN_TXPDCAL_TUNE}
 };
 
 static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
@@ -2661,6 +2915,15 @@ static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
        {0x00B3, DRV_DIAG_TX_DRV}
 };
 
+static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = {
+       {0x00F3, TX_PSC_A0},
+       {0x04A2, TX_PSC_A2},
+       {0x04A2, TX_PSC_A3},
+       {0x0000, TX_TXCC_CPOST_MULT_00},
+       {0x00B3, DRV_DIAG_TX_DRV},
+       {0x4000, XCVR_DIAG_RXCLK_CTRL},
+};
+
 static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = {
        {0x091D, RX_PSC_A0},
        {0x0900, RX_PSC_A2},
@@ -2689,6 +2952,11 @@ static struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = {
        .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_tx_ln_regs),
 };
 
+static struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = {
+       .reg_pairs = ti_sgmii_100_no_ssc_tx_ln_regs,
+       .num_regs = ARRAY_SIZE(ti_sgmii_100_no_ssc_tx_ln_regs),
+};
+
 static struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = {
        .reg_pairs = sgmii_100_no_ssc_rx_ln_regs,
        .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_rx_ln_regs),
@@ -2736,17 +3004,14 @@ static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
        {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
        {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
        {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
-       {0x0003, CMN_PLL0_VCOCAL_TCTRL},
-       {0x0003, CMN_PLL1_VCOCAL_TCTRL},
        {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
        {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
        {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
        {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
        {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
        {0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
-       {0x3700, CMN_DIAG_BIAS_OVRD1},
-       {0x0008, CMN_TXPUCAL_TUNE},
-       {0x0008, CMN_TXPDCAL_TUNE}
+       {0x007F, CMN_TXPUCAL_TUNE},
+       {0x007F, CMN_TXPDCAL_TUNE}
 };
 
 static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = {
@@ -2755,19 +3020,43 @@ static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = {
 };
 
 /* QSGMII 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = {
+static struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = {
+       {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+       {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+       {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
        {0x0003, CMN_PLL0_VCOCAL_TCTRL},
        {0x0003, CMN_PLL1_VCOCAL_TCTRL}
 };
 
+static struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = {
+       .reg_pairs = sl_qsgmii_100_no_ssc_cmn_regs,
+       .num_regs = ARRAY_SIZE(sl_qsgmii_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = {
+       {0x007F, CMN_TXPUCAL_TUNE},
+       {0x007F, CMN_TXPDCAL_TUNE}
+};
+
 static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = {
        {0x00F3, TX_PSC_A0},
        {0x04A2, TX_PSC_A2},
        {0x04A2, TX_PSC_A3},
        {0x0000, TX_TXCC_CPOST_MULT_00},
+       {0x0011, TX_TXCC_MGNFS_MULT_100},
        {0x0003, DRV_DIAG_TX_DRV}
 };
 
+static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = {
+       {0x00F3, TX_PSC_A0},
+       {0x04A2, TX_PSC_A2},
+       {0x04A2, TX_PSC_A3},
+       {0x0000, TX_TXCC_CPOST_MULT_00},
+       {0x0011, TX_TXCC_MGNFS_MULT_100},
+       {0x0003, DRV_DIAG_TX_DRV},
+       {0x4000, XCVR_DIAG_RXCLK_CTRL},
+};
+
 static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = {
        {0x091D, RX_PSC_A0},
        {0x0900, RX_PSC_A2},
@@ -2796,6 +3085,11 @@ static struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = {
        .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_tx_ln_regs),
 };
 
+static struct cdns_torrent_vals ti_qsgmii_100_no_ssc_tx_ln_vals = {
+       .reg_pairs = ti_qsgmii_100_no_ssc_tx_ln_regs,
+       .num_regs = ARRAY_SIZE(ti_qsgmii_100_no_ssc_tx_ln_regs),
+};
+
 static struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = {
        .reg_pairs = qsgmii_100_no_ssc_rx_ln_regs,
        .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_rx_ln_regs),
@@ -2843,14 +3137,14 @@ static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
        {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
        {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
        {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
-       {0x0003, CMN_PLL0_VCOCAL_TCTRL},
-       {0x0003, CMN_PLL1_VCOCAL_TCTRL},
        {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
        {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
        {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
        {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
        {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
-       {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
+       {0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
+       {0x007F, CMN_TXPUCAL_TUNE},
+       {0x007F, CMN_TXPDCAL_TUNE}
 };
 
 static struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = {
@@ -2922,8 +3216,6 @@ static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = {
        {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
        {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
        {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
-       {0x0003, CMN_PLL0_VCOCAL_TCTRL},
-       {0x0003, CMN_PLL1_VCOCAL_TCTRL},
        {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
        {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
        {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
@@ -2979,8 +3271,6 @@ static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = {
        {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
        {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
        {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
-       {0x0003, CMN_PLL0_VCOCAL_TCTRL},
-       {0x0003, CMN_PLL1_VCOCAL_TCTRL},
        {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
        {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
        {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
@@ -2996,8 +3286,9 @@ static struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = {
 
 /* PCIe, 100 MHz Ref clk, no SSC & external SSC */
 static struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = {
-       {0x0003, CMN_PLL0_VCOCAL_TCTRL},
-       {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+       {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+       {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+       {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}
 };
 
 static struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = {
@@ -3198,8 +3489,8 @@ static const struct cdns_torrent_data cdns_map_torrent = {
        .cmn_vals = {
                [TYPE_PCIE] = {
                        [TYPE_NONE] = {
-                               [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
-                               [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+                               [NO_SSC] = NULL,
+                               [EXTERNAL_SSC] = NULL,
                                [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
                        },
                        [TYPE_SGMII] = {
@@ -3220,7 +3511,7 @@ static const struct cdns_torrent_data cdns_map_torrent = {
                },
                [TYPE_SGMII] = {
                        [TYPE_NONE] = {
-                               [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+                               [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals,
                        },
                        [TYPE_PCIE] = {
                                [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
@@ -3235,7 +3526,7 @@ static const struct cdns_torrent_data cdns_map_torrent = {
                },
                [TYPE_QSGMII] = {
                        [TYPE_NONE] = {
-                               [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+                               [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals,
                        },
                        [TYPE_PCIE] = {
                                [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
@@ -3250,8 +3541,8 @@ static const struct cdns_torrent_data cdns_map_torrent = {
                },
                [TYPE_USB] = {
                        [TYPE_NONE] = {
-                               [NO_SSC] = &usb_100_no_ssc_cmn_vals,
-                               [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+                               [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+                               [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
                                [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
                        },
                        [TYPE_PCIE] = {
@@ -3260,13 +3551,13 @@ static const struct cdns_torrent_data cdns_map_torrent = {
                                [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
                        },
                        [TYPE_SGMII] = {
-                               [NO_SSC] = &usb_100_no_ssc_cmn_vals,
-                               [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+                               [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+                               [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
                                [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
                        },
                        [TYPE_QSGMII] = {
-                               [NO_SSC] = &usb_100_no_ssc_cmn_vals,
-                               [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+                               [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+                               [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
                                [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
                        },
                },
@@ -3607,8 +3898,8 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
        .cmn_vals = {
                [TYPE_PCIE] = {
                        [TYPE_NONE] = {
-                               [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
-                               [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+                               [NO_SSC] = NULL,
+                               [EXTERNAL_SSC] = NULL,
                                [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
                        },
                        [TYPE_SGMII] = {
@@ -3629,7 +3920,7 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
                },
                [TYPE_SGMII] = {
                        [TYPE_NONE] = {
-                               [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+                               [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals,
                        },
                        [TYPE_PCIE] = {
                                [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
@@ -3644,7 +3935,7 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
                },
                [TYPE_QSGMII] = {
                        [TYPE_NONE] = {
-                               [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+                               [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals,
                        },
                        [TYPE_PCIE] = {
                                [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
@@ -3659,8 +3950,8 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
                },
                [TYPE_USB] = {
                        [TYPE_NONE] = {
-                               [NO_SSC] = &usb_100_no_ssc_cmn_vals,
-                               [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+                               [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+                               [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
                                [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
                        },
                        [TYPE_PCIE] = {
@@ -3669,13 +3960,13 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
                                [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
                        },
                        [TYPE_SGMII] = {
-                               [NO_SSC] = &usb_100_no_ssc_cmn_vals,
-                               [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+                               [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+                               [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
                                [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
                        },
                        [TYPE_QSGMII] = {
-                               [NO_SSC] = &usb_100_no_ssc_cmn_vals,
-                               [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+                               [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+                               [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
                                [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
                        },
                },
@@ -3705,32 +3996,32 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
                },
                [TYPE_SGMII] = {
                        [TYPE_NONE] = {
-                               [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+                               [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
                        },
                        [TYPE_PCIE] = {
-                               [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
-                               [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
-                               [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+                               [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+                               [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+                               [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
                        },
                        [TYPE_USB] = {
-                               [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
-                               [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
-                               [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+                               [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+                               [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+                               [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
                        },
                },
                [TYPE_QSGMII] = {
                        [TYPE_NONE] = {
-                               [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+                               [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
                        },
                        [TYPE_PCIE] = {
-                               [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
-                               [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
-                               [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+                               [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+                               [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+                               [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
                        },
                        [TYPE_USB] = {
-                               [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
-                               [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
-                               [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+                               [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+                               [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+                               [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
                        },
                },
                [TYPE_USB] = {
index be05292df8b817925709ed38dbc587e770046af4..e92ba78da4c83076cd0857d817319bd3fe5bd17e 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) 2015 Linaro Ltd.
- * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 HiSilicon Limited.
  */
 
 #include <linux/mfd/syscon.h>
index c67b78cd2602029fd163cf8082aee8f53c4143ff..b0f99a9ac857769add79ae6897c0653b88835355 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) 2014 Linaro Ltd.
- * Copyright (c) 2014 Hisilicon Limited.
+ * Copyright (c) 2014 HiSilicon Limited.
  */
 
 #include <linux/delay.h>
index ea127b177f46b3a4f69a962a9890af489cbe55c5..28c28d81648492b9e560c45fda35e38276002b3f 100644 (file)
@@ -352,8 +352,8 @@ static int ingenic_usb_phy_probe(struct platform_device *pdev)
        }
 
        priv->phy = devm_phy_create(dev, NULL, &ingenic_usb_phy_ops);
-       if (IS_ERR(priv))
-               return PTR_ERR(priv);
+       if (IS_ERR(priv->phy))
+               return PTR_ERR(priv->phy);
 
        phy_set_drvdata(priv->phy, priv);
 
index 360b1eb2ebd694a47608ef307df054290a3e685b..157683d10367014e472446b3402032fd9d7dc986 100644 (file)
@@ -462,7 +462,7 @@ static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
 
        /*
         * syscfg and hsiocfg variables stores the handle of the registers set
-        * in which ComboPhy subsytem specific registers are subset. Using
+        * in which ComboPhy subsystem specific registers are subset. Using
         * Register map framework to access the registers set.
         */
        ret = fwnode_property_get_reference_args(fwnode, "intel,syscfg", NULL,
index 6c96f2bf52665789ff67fb391b8dfc877e3df544..bdb87c976243599d61131b7b3d21365c56f825dd 100644 (file)
@@ -3,8 +3,8 @@
 # Phy drivers for Marvell platforms
 #
 config ARMADA375_USBCLUSTER_PHY
-       def_bool y
-       depends on MACH_ARMADA_375 || COMPILE_TEST
+       bool "Armada 375 USB cluster PHY support" if COMPILE_TEST
+       default y if MACH_ARMADA_375
        depends on OF && HAS_IOMEM
        select GENERIC_PHY
 
@@ -67,6 +67,14 @@ config PHY_MVEBU_CP110_COMPHY
          lanes can be used by various controllers (Ethernet, sata, usb,
          PCIe...).
 
+config PHY_MVEBU_CP110_UTMI
+       tristate "Marvell CP110 UTMI driver"
+       depends on ARCH_MVEBU || COMPILE_TEST
+       depends on OF && USB_COMMON
+       select GENERIC_PHY
+       help
+         Enable this to support Marvell CP110 UTMI PHY driver.
+
 config PHY_MVEBU_SATA
        def_bool y
        depends on ARCH_DOVE || MACH_DOVE || MACH_KIRKWOOD
index 7f296ef028292d610e71d2918ea6bf62e8a248b7..90862c4daa262e4ef8525ca412c046e669c98670 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_PHY_MVEBU_A3700_COMPHY)    += phy-mvebu-a3700-comphy.o
 obj-$(CONFIG_PHY_MVEBU_A3700_UTMI)     += phy-mvebu-a3700-utmi.o
 obj-$(CONFIG_PHY_MVEBU_A38X_COMPHY)    += phy-armada38x-comphy.o
 obj-$(CONFIG_PHY_MVEBU_CP110_COMPHY)   += phy-mvebu-cp110-comphy.o
+obj-$(CONFIG_PHY_MVEBU_CP110_UTMI)     += phy-mvebu-cp110-utmi.o
 obj-$(CONFIG_PHY_MVEBU_SATA)           += phy-mvebu-sata.o
 obj-$(CONFIG_PHY_PXA_28NM_HSIC)                += phy-pxa-28nm-hsic.o
 obj-$(CONFIG_PHY_PXA_28NM_USB2)                += phy-pxa-28nm-usb2.o
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
new file mode 100644 (file)
index 0000000..08d178a
--- /dev/null
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Marvell
+ *
+ * Authors:
+ *   Konstantin Porotchkin <kostap@marvell.com>
+ *
+ * Marvell CP110 UTMI PHY driver
+ */
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/usb/of.h>
+#include <linux/usb/otg.h>
+
+#define UTMI_PHY_PORTS                         2
+
+/* CP110 UTMI register macro definitions */
+#define SYSCON_USB_CFG_REG                     0x420
+#define   USB_CFG_DEVICE_EN_MASK               BIT(0)
+#define   USB_CFG_DEVICE_MUX_OFFSET            1
+#define   USB_CFG_DEVICE_MUX_MASK              BIT(1)
+#define   USB_CFG_PLL_MASK                     BIT(25)
+
+#define SYSCON_UTMI_CFG_REG(id)                        (0x440 + (id) * 4)
+#define   UTMI_PHY_CFG_PU_MASK                 BIT(5)
+
+#define UTMI_PLL_CTRL_REG                      0x0
+#define   PLL_REFDIV_OFFSET                    0
+#define   PLL_REFDIV_MASK                      GENMASK(6, 0)
+#define   PLL_REFDIV_VAL                       0x5
+#define   PLL_FBDIV_OFFSET                     16
+#define   PLL_FBDIV_MASK                       GENMASK(24, 16)
+#define   PLL_FBDIV_VAL                                0x60
+#define   PLL_SEL_LPFR_MASK                    GENMASK(29, 28)
+#define   PLL_RDY                              BIT(31)
+#define UTMI_CAL_CTRL_REG                      0x8
+#define   IMPCAL_VTH_OFFSET                    8
+#define   IMPCAL_VTH_MASK                      GENMASK(10, 8)
+#define   IMPCAL_VTH_VAL                       0x7
+#define   IMPCAL_DONE                          BIT(23)
+#define   PLLCAL_DONE                          BIT(31)
+#define UTMI_TX_CH_CTRL_REG                    0xC
+#define   DRV_EN_LS_OFFSET                     12
+#define   DRV_EN_LS_MASK                       GENMASK(15, 12)
+#define   IMP_SEL_LS_OFFSET                    16
+#define   IMP_SEL_LS_MASK                      GENMASK(19, 16)
+#define   TX_AMP_OFFSET                                20
+#define   TX_AMP_MASK                          GENMASK(22, 20)
+#define   TX_AMP_VAL                           0x4
+#define UTMI_RX_CH_CTRL0_REG                   0x14
+#define   SQ_DET_EN                            BIT(15)
+#define   SQ_ANA_DTC_SEL                       BIT(28)
+#define UTMI_RX_CH_CTRL1_REG                   0x18
+#define   SQ_AMP_CAL_OFFSET                    0
+#define   SQ_AMP_CAL_MASK                      GENMASK(2, 0)
+#define   SQ_AMP_CAL_VAL                       1
+#define   SQ_AMP_CAL_EN                                BIT(3)
+#define UTMI_CTRL_STATUS0_REG                  0x24
+#define   SUSPENDM                             BIT(22)
+#define   TEST_SEL                             BIT(25)
+#define UTMI_CHGDTC_CTRL_REG                   0x38
+#define   VDAT_OFFSET                          8
+#define   VDAT_MASK                            GENMASK(9, 8)
+#define   VDAT_VAL                             1
+#define   VSRC_OFFSET                          10
+#define   VSRC_MASK                            GENMASK(11, 10)
+#define   VSRC_VAL                             1
+
+#define PLL_LOCK_DELAY_US                      10000
+#define PLL_LOCK_TIMEOUT_US                    1000000
+
+#define PORT_REGS(p)                           ((p)->priv->regs + (p)->id * 0x1000)
+
+/**
+ * struct mvebu_cp110_utmi - PHY driver data
+ *
+ * @regs: PHY registers
+ * @syscon: Regmap with system controller registers
+ * @dev: device driver handle
+ * @ops: PHY operations
+ */
+struct mvebu_cp110_utmi {
+       void __iomem *regs;
+       struct regmap *syscon;
+       struct device *dev;
+       const struct phy_ops *ops;
+};
+
+/**
+ * struct mvebu_cp110_utmi_port - PHY port data
+ *
+ * @priv: PHY driver data
+ * @id: PHY port ID
+ * @dr_mode: PHY connection: USB_DR_MODE_HOST or USB_DR_MODE_PERIPHERAL
+ */
+struct mvebu_cp110_utmi_port {
+       struct mvebu_cp110_utmi *priv;
+       u32 id;
+       enum usb_dr_mode dr_mode;
+};
+
+static void mvebu_cp110_utmi_port_setup(struct mvebu_cp110_utmi_port *port)
+{
+       u32 reg;
+
+       /*
+        * Setup PLL.
+        * The reference clock is the frequency of quartz resonator
+        * connected to pins REFCLK_XIN and REFCLK_XOUT of the SoC.
+        * Register init values are matching the 40MHz default clock.
+        * The crystal used for all platform boards is now 25MHz.
+        * See the functional specification for details.
+        */
+       reg = readl(PORT_REGS(port) + UTMI_PLL_CTRL_REG);
+       reg &= ~(PLL_REFDIV_MASK | PLL_FBDIV_MASK | PLL_SEL_LPFR_MASK);
+       reg |= (PLL_REFDIV_VAL << PLL_REFDIV_OFFSET) |
+              (PLL_FBDIV_VAL << PLL_FBDIV_OFFSET);
+       writel(reg, PORT_REGS(port) + UTMI_PLL_CTRL_REG);
+
+       /* Impedance Calibration Threshold Setting */
+       reg = readl(PORT_REGS(port) + UTMI_CAL_CTRL_REG);
+       reg &= ~IMPCAL_VTH_MASK;
+       reg |= IMPCAL_VTH_VAL << IMPCAL_VTH_OFFSET;
+       writel(reg, PORT_REGS(port) + UTMI_CAL_CTRL_REG);
+
+       /* Set LS TX driver strength coarse control */
+       reg = readl(PORT_REGS(port) + UTMI_TX_CH_CTRL_REG);
+       reg &= ~TX_AMP_MASK;
+       reg |= TX_AMP_VAL << TX_AMP_OFFSET;
+       writel(reg, PORT_REGS(port) + UTMI_TX_CH_CTRL_REG);
+
+       /* Disable SQ and enable analog squelch detect */
+       reg = readl(PORT_REGS(port) + UTMI_RX_CH_CTRL0_REG);
+       reg &= ~SQ_DET_EN;
+       reg |= SQ_ANA_DTC_SEL;
+       writel(reg, PORT_REGS(port) + UTMI_RX_CH_CTRL0_REG);
+
+       /*
+        * Set External squelch calibration number and
+        * enable the External squelch calibration
+        */
+       reg = readl(PORT_REGS(port) + UTMI_RX_CH_CTRL1_REG);
+       reg &= ~SQ_AMP_CAL_MASK;
+       reg |= (SQ_AMP_CAL_VAL << SQ_AMP_CAL_OFFSET) | SQ_AMP_CAL_EN;
+       writel(reg, PORT_REGS(port) + UTMI_RX_CH_CTRL1_REG);
+
+       /*
+        * Set Control VDAT Reference Voltage - 0.325V and
+        * Control VSRC Reference Voltage - 0.6V
+        */
+       reg = readl(PORT_REGS(port) + UTMI_CHGDTC_CTRL_REG);
+       reg &= ~(VDAT_MASK | VSRC_MASK);
+       reg |= (VDAT_VAL << VDAT_OFFSET) | (VSRC_VAL << VSRC_OFFSET);
+       writel(reg, PORT_REGS(port) + UTMI_CHGDTC_CTRL_REG);
+}
+
+static int mvebu_cp110_utmi_phy_power_off(struct phy *phy)
+{
+       struct mvebu_cp110_utmi_port *port = phy_get_drvdata(phy);
+       struct mvebu_cp110_utmi *utmi = port->priv;
+       int i;
+
+       /* Power down UTMI PHY port */
+       regmap_clear_bits(utmi->syscon, SYSCON_UTMI_CFG_REG(port->id),
+                         UTMI_PHY_CFG_PU_MASK);
+
+       for (i = 0; i < UTMI_PHY_PORTS; i++) {
+               int test = regmap_test_bits(utmi->syscon,
+                                           SYSCON_UTMI_CFG_REG(i),
+                                           UTMI_PHY_CFG_PU_MASK);
+               /* skip PLL shutdown if there are active UTMI PHY ports */
+               if (test != 0)
+                       return 0;
+       }
+
+       /* PLL Power down if all UTMI PHYs are down */
+       regmap_clear_bits(utmi->syscon, SYSCON_USB_CFG_REG, USB_CFG_PLL_MASK);
+
+       return 0;
+}
+
+static int mvebu_cp110_utmi_phy_power_on(struct phy *phy)
+{
+       struct mvebu_cp110_utmi_port *port = phy_get_drvdata(phy);
+       struct mvebu_cp110_utmi *utmi = port->priv;
+       struct device *dev = &phy->dev;
+       int ret;
+       u32 reg;
+
+       /* It is necessary to power off UTMI before configuration */
+       ret = mvebu_cp110_utmi_phy_power_off(phy);
+       if (ret) {
+               dev_err(dev, "UTMI power OFF before power ON failed\n");
+               return ret;
+       }
+
+       /*
+        * If UTMI port is connected to USB Device controller,
+        * configure the USB MUX prior to UTMI PHY initialization.
+        * The single USB device controller can be connected
+        * to UTMI0 or to UTMI1 PHY port, but not to both.
+        */
+       if (port->dr_mode == USB_DR_MODE_PERIPHERAL) {
+               regmap_update_bits(utmi->syscon, SYSCON_USB_CFG_REG,
+                                  USB_CFG_DEVICE_EN_MASK | USB_CFG_DEVICE_MUX_MASK,
+                                  USB_CFG_DEVICE_EN_MASK |
+                                  (port->id << USB_CFG_DEVICE_MUX_OFFSET));
+       }
+
+       /* Set Test suspendm mode and enable Test UTMI select */
+       reg = readl(PORT_REGS(port) + UTMI_CTRL_STATUS0_REG);
+       reg |= SUSPENDM | TEST_SEL;
+       writel(reg, PORT_REGS(port) + UTMI_CTRL_STATUS0_REG);
+
+       /* Wait for UTMI power down */
+       mdelay(1);
+
+       /* PHY port setup first */
+       mvebu_cp110_utmi_port_setup(port);
+
+       /* Power UP UTMI PHY */
+       regmap_set_bits(utmi->syscon, SYSCON_UTMI_CFG_REG(port->id),
+                       UTMI_PHY_CFG_PU_MASK);
+
+       /* Disable Test UTMI select */
+       reg = readl(PORT_REGS(port) + UTMI_CTRL_STATUS0_REG);
+       reg &= ~TEST_SEL;
+       writel(reg, PORT_REGS(port) + UTMI_CTRL_STATUS0_REG);
+
+       /* Wait for impedance calibration */
+       ret = readl_poll_timeout(PORT_REGS(port) + UTMI_CAL_CTRL_REG, reg,
+                                reg & IMPCAL_DONE,
+                                PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US);
+       if (ret) {
+               dev_err(dev, "Failed to end UTMI impedance calibration\n");
+               return ret;
+       }
+
+       /* Wait for PLL calibration */
+       ret = readl_poll_timeout(PORT_REGS(port) + UTMI_CAL_CTRL_REG, reg,
+                                reg & PLLCAL_DONE,
+                                PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US);
+       if (ret) {
+               dev_err(dev, "Failed to end UTMI PLL calibration\n");
+               return ret;
+       }
+
+       /* Wait for PLL ready */
+       ret = readl_poll_timeout(PORT_REGS(port) + UTMI_PLL_CTRL_REG, reg,
+                                reg & PLL_RDY,
+                                PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US);
+       if (ret) {
+               dev_err(dev, "PLL is not ready\n");
+               return ret;
+       }
+
+       /* PLL Power up */
+       regmap_set_bits(utmi->syscon, SYSCON_USB_CFG_REG, USB_CFG_PLL_MASK);
+
+       return 0;
+}
+
+static const struct phy_ops mvebu_cp110_utmi_phy_ops = {
+       .power_on = mvebu_cp110_utmi_phy_power_on,
+       .power_off = mvebu_cp110_utmi_phy_power_off,
+       .owner = THIS_MODULE,
+};
+
+static const struct of_device_id mvebu_cp110_utmi_of_match[] = {
+       { .compatible = "marvell,cp110-utmi-phy" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, mvebu_cp110_utmi_of_match);
+
+static int mvebu_cp110_utmi_phy_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct mvebu_cp110_utmi *utmi;
+       struct phy_provider *provider;
+       struct device_node *child;
+       u32 usb_devices = 0;
+
+       utmi = devm_kzalloc(dev, sizeof(*utmi), GFP_KERNEL);
+       if (!utmi)
+               return -ENOMEM;
+
+       utmi->dev = dev;
+
+       /* Get system controller region */
+       utmi->syscon = syscon_regmap_lookup_by_phandle(dev->of_node,
+                                                      "marvell,system-controller");
+       if (IS_ERR(utmi->syscon)) {
+               dev_err(dev, "Missing UTMI system controller\n");
+               return PTR_ERR(utmi->syscon);
+       }
+
+       /* Get UTMI memory region */
+       utmi->regs = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(utmi->regs))
+               return PTR_ERR(utmi->regs);
+
+       for_each_available_child_of_node(dev->of_node, child) {
+               struct mvebu_cp110_utmi_port *port;
+               struct phy *phy;
+               int ret;
+               u32 port_id;
+
+               ret = of_property_read_u32(child, "reg", &port_id);
+               if ((ret < 0) || (port_id >= UTMI_PHY_PORTS)) {
+                       dev_err(dev,
+                               "invalid 'reg' property on child %pOF\n",
+                               child);
+                       continue;
+               }
+
+               port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+               if (!port) {
+                       of_node_put(child);
+                       return -ENOMEM;
+               }
+
+               port->dr_mode = of_usb_get_dr_mode_by_phy(child, -1);
+               if ((port->dr_mode != USB_DR_MODE_HOST) &&
+                   (port->dr_mode != USB_DR_MODE_PERIPHERAL)) {
+                       dev_err(&pdev->dev,
+                               "Missing dual role setting of the port%d, will use HOST mode\n",
+                               port_id);
+                       port->dr_mode = USB_DR_MODE_HOST;
+               }
+
+               if (port->dr_mode == USB_DR_MODE_PERIPHERAL) {
+                       usb_devices++;
+                       if (usb_devices > 1) {
+                               dev_err(dev,
+                                       "Single USB device allowed! Port%d will use HOST mode\n",
+                                       port_id);
+                               port->dr_mode = USB_DR_MODE_HOST;
+                       }
+               }
+
+               /* Retrieve PHY capabilities */
+               utmi->ops = &mvebu_cp110_utmi_phy_ops;
+
+               /* Instantiate the PHY */
+               phy = devm_phy_create(dev, child, utmi->ops);
+               if (IS_ERR(phy)) {
+                       dev_err(dev, "Failed to create the UTMI PHY\n");
+                       of_node_put(child);
+                       return PTR_ERR(phy);
+               }
+
+               port->priv = utmi;
+               port->id = port_id;
+               phy_set_drvdata(phy, port);
+
+               /* Ensure the PHY is powered off */
+               mvebu_cp110_utmi_phy_power_off(phy);
+       }
+
+       dev_set_drvdata(dev, utmi);
+       provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+       return PTR_ERR_OR_ZERO(provider);
+}
+
+static struct platform_driver mvebu_cp110_utmi_driver = {
+       .probe  = mvebu_cp110_utmi_phy_probe,
+       .driver = {
+               .name           = "mvebu-cp110-utmi-phy",
+               .of_match_table = mvebu_cp110_utmi_of_match,
+        },
+};
+module_platform_driver(mvebu_cp110_utmi_driver);
+
+MODULE_AUTHOR("Konstantin Porotchkin <kostap@marvell.com>");
+MODULE_DESCRIPTION("Marvell Armada CP110 UTMI PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/microchip/Kconfig b/drivers/phy/microchip/Kconfig
new file mode 100644 (file)
index 0000000..3728a28
--- /dev/null
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Phy drivers for Microchip devices
+#
+
+config PHY_SPARX5_SERDES
+       tristate "Microchip Sparx5 SerDes PHY driver"
+       select GENERIC_PHY
+       depends on ARCH_SPARX5 || COMPILE_TEST
+       depends on OF
+       depends on HAS_IOMEM
+       help
+         Enable this for support of the 10G/25G SerDes on Microchip Sparx5.
diff --git a/drivers/phy/microchip/Makefile b/drivers/phy/microchip/Makefile
new file mode 100644 (file)
index 0000000..7b98345
--- /dev/null
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip phy drivers.
+#
+
+obj-$(CONFIG_PHY_SPARX5_SERDES) += sparx5_serdes.o
diff --git a/drivers/phy/microchip/sparx5_serdes.c b/drivers/phy/microchip/sparx5_serdes.c
new file mode 100644 (file)
index 0000000..c8a7d09
--- /dev/null
@@ -0,0 +1,2513 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Microchip Sparx5 Switch SerDes driver
+ *
+ * Copyright (c) 2020 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ * and the datasheet is available here:
+ * https://ww1.microchip.com/downloads/en/DeviceDoc/SparX-5_Family_L2L3_Enterprise_10G_Ethernet_Switches_Datasheet_00003822B.pdf
+ */
+#include <linux/printk.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+
+#include "sparx5_serdes.h"
+
+#define SPX5_CMU_MAX          14
+
+#define SPX5_SERDES_10G_START 13
+#define SPX5_SERDES_25G_START 25
+
+enum sparx5_10g28cmu_mode {
+       SPX5_SD10G28_CMU_MAIN = 0,
+       SPX5_SD10G28_CMU_AUX1 = 1,
+       SPX5_SD10G28_CMU_AUX2 = 3,
+       SPX5_SD10G28_CMU_NONE = 4,
+};
+
+enum sparx5_sd25g28_mode_preset_type {
+       SPX5_SD25G28_MODE_PRESET_25000,
+       SPX5_SD25G28_MODE_PRESET_10000,
+       SPX5_SD25G28_MODE_PRESET_5000,
+       SPX5_SD25G28_MODE_PRESET_SD_2G5,
+       SPX5_SD25G28_MODE_PRESET_1000BASEX,
+};
+
+enum sparx5_sd10g28_mode_preset_type {
+       SPX5_SD10G28_MODE_PRESET_10000,
+       SPX5_SD10G28_MODE_PRESET_SFI_5000_6G,
+       SPX5_SD10G28_MODE_PRESET_SFI_5000_10G,
+       SPX5_SD10G28_MODE_PRESET_QSGMII,
+       SPX5_SD10G28_MODE_PRESET_SD_2G5,
+       SPX5_SD10G28_MODE_PRESET_1000BASEX,
+};
+
+struct sparx5_serdes_io_resource {
+       enum sparx5_serdes_target id;
+       phys_addr_t offset;
+};
+
+struct sparx5_sd25g28_mode_preset {
+       u8 bitwidth;
+       u8 tx_pre_div;
+       u8 fifo_ck_div;
+       u8 pre_divsel;
+       u8 vco_div_mode;
+       u8 sel_div;
+       u8 ck_bitwidth;
+       u8 subrate;
+       u8 com_txcal_en;
+       u8 com_tx_reserve_msb;
+       u8 com_tx_reserve_lsb;
+       u8 cfg_itx_ipcml_base;
+       u8 tx_reserve_lsb;
+       u8 tx_reserve_msb;
+       u8 bw;
+       u8 rxterm;
+       u8 dfe_tap;
+       u8 dfe_enable;
+       bool txmargin;
+       u8 cfg_ctle_rstn;
+       u8 r_dfe_rstn;
+       u8 cfg_pi_bw_3_0;
+       u8 tx_tap_dly;
+       u8 tx_tap_adv;
+};
+
+struct sparx5_sd25g28_media_preset {
+       u8 cfg_eq_c_force_3_0;
+       u8 cfg_vga_ctrl_byp_4_0;
+       u8 cfg_eq_r_force_3_0;
+       u8 cfg_en_adv;
+       u8 cfg_en_main;
+       u8 cfg_en_dly;
+       u8 cfg_tap_adv_3_0;
+       u8 cfg_tap_main;
+       u8 cfg_tap_dly_4_0;
+       u8 cfg_alos_thr_2_0;
+};
+
+struct sparx5_sd25g28_args {
+       u8 if_width; /* UDL if-width: 10/16/20/32/64 */
+       bool skip_cmu_cfg:1; /* Enable/disable CMU cfg */
+       enum sparx5_10g28cmu_mode cmu_sel; /* Device/Mode serdes uses */
+       bool no_pwrcycle:1; /* Omit initial power-cycle */
+       bool txinvert:1; /* Enable inversion of output data */
+       bool rxinvert:1; /* Enable inversion of input data */
+       u16 txswing; /* Set output level */
+       u8 rate; /* Rate of network interface */
+       u8 pi_bw_gen1;
+       u8 duty_cycle; /* Set output level to half/full */
+       bool mute:1; /* Mute Output Buffer */
+       bool reg_rst:1;
+       u8 com_pll_reserve;
+};
+
+struct sparx5_sd25g28_params {
+       u8 reg_rst;
+       u8 cfg_jc_byp;
+       u8 cfg_common_reserve_7_0;
+       u8 r_reg_manual;
+       u8 r_d_width_ctrl_from_hwt;
+       u8 r_d_width_ctrl_2_0;
+       u8 r_txfifo_ck_div_pmad_2_0;
+       u8 r_rxfifo_ck_div_pmad_2_0;
+       u8 cfg_pll_lol_set;
+       u8 cfg_vco_div_mode_1_0;
+       u8 cfg_pre_divsel_1_0;
+       u8 cfg_sel_div_3_0;
+       u8 cfg_vco_start_code_3_0;
+       u8 cfg_pma_tx_ck_bitwidth_2_0;
+       u8 cfg_tx_prediv_1_0;
+       u8 cfg_rxdiv_sel_2_0;
+       u8 cfg_tx_subrate_2_0;
+       u8 cfg_rx_subrate_2_0;
+       u8 r_multi_lane_mode;
+       u8 cfg_cdrck_en;
+       u8 cfg_dfeck_en;
+       u8 cfg_dfe_pd;
+       u8 cfg_dfedmx_pd;
+       u8 cfg_dfetap_en_5_1;
+       u8 cfg_dmux_pd;
+       u8 cfg_dmux_clk_pd;
+       u8 cfg_erramp_pd;
+       u8 cfg_pi_dfe_en;
+       u8 cfg_pi_en;
+       u8 cfg_pd_ctle;
+       u8 cfg_summer_en;
+       u8 cfg_pmad_ck_pd;
+       u8 cfg_pd_clk;
+       u8 cfg_pd_cml;
+       u8 cfg_pd_driver;
+       u8 cfg_rx_reg_pu;
+       u8 cfg_pd_rms_det;
+       u8 cfg_dcdr_pd;
+       u8 cfg_ecdr_pd;
+       u8 cfg_pd_sq;
+       u8 cfg_itx_ipdriver_base_2_0;
+       u8 cfg_tap_dly_4_0;
+       u8 cfg_tap_main;
+       u8 cfg_en_main;
+       u8 cfg_tap_adv_3_0;
+       u8 cfg_en_adv;
+       u8 cfg_en_dly;
+       u8 cfg_iscan_en;
+       u8 l1_pcs_en_fast_iscan;
+       u8 l0_cfg_bw_1_0;
+       u8 l0_cfg_txcal_en;
+       u8 cfg_en_dummy;
+       u8 cfg_pll_reserve_3_0;
+       u8 l0_cfg_tx_reserve_15_8;
+       u8 l0_cfg_tx_reserve_7_0;
+       u8 cfg_tx_reserve_15_8;
+       u8 cfg_tx_reserve_7_0;
+       u8 cfg_bw_1_0;
+       u8 cfg_txcal_man_en;
+       u8 cfg_phase_man_4_0;
+       u8 cfg_quad_man_1_0;
+       u8 cfg_txcal_shift_code_5_0;
+       u8 cfg_txcal_valid_sel_3_0;
+       u8 cfg_txcal_en;
+       u8 cfg_cdr_kf_2_0;
+       u8 cfg_cdr_m_7_0;
+       u8 cfg_pi_bw_3_0;
+       u8 cfg_pi_steps_1_0;
+       u8 cfg_dis_2ndorder;
+       u8 cfg_ctle_rstn;
+       u8 r_dfe_rstn;
+       u8 cfg_alos_thr_2_0;
+       u8 cfg_itx_ipcml_base_1_0;
+       u8 cfg_rx_reserve_7_0;
+       u8 cfg_rx_reserve_15_8;
+       u8 cfg_rxterm_2_0;
+       u8 cfg_fom_selm;
+       u8 cfg_rx_sp_ctle_1_0;
+       u8 cfg_isel_ctle_1_0;
+       u8 cfg_vga_ctrl_byp_4_0;
+       u8 cfg_vga_byp;
+       u8 cfg_agc_adpt_byp;
+       u8 cfg_eqr_byp;
+       u8 cfg_eqr_force_3_0;
+       u8 cfg_eqc_force_3_0;
+       u8 cfg_sum_setcm_en;
+       u8 cfg_init_pos_iscan_6_0;
+       u8 cfg_init_pos_ipi_6_0;
+       u8 cfg_dfedig_m_2_0;
+       u8 cfg_en_dfedig;
+       u8 cfg_pi_DFE_en;
+       u8 cfg_tx2rx_lp_en;
+       u8 cfg_txlb_en;
+       u8 cfg_rx2tx_lp_en;
+       u8 cfg_rxlb_en;
+       u8 r_tx_pol_inv;
+       u8 r_rx_pol_inv;
+};
+
+struct sparx5_sd10g28_media_preset {
+       u8 cfg_en_adv;
+       u8 cfg_en_main;
+       u8 cfg_en_dly;
+       u8 cfg_tap_adv_3_0;
+       u8 cfg_tap_main;
+       u8 cfg_tap_dly_4_0;
+       u8 cfg_vga_ctrl_3_0;
+       u8 cfg_vga_cp_2_0;
+       u8 cfg_eq_res_3_0;
+       u8 cfg_eq_r_byp;
+       u8 cfg_eq_c_force_3_0;
+       u8 cfg_alos_thr_3_0;
+};
+
+struct sparx5_sd10g28_mode_preset {
+       u8 bwidth; /* interface width: 10/16/20/32/64 */
+       enum sparx5_10g28cmu_mode cmu_sel; /* Device/Mode serdes uses */
+       u8 rate; /* Rate of network interface */
+       u8 dfe_tap;
+       u8 dfe_enable;
+       u8 pi_bw_gen1;
+       u8 duty_cycle; /* Set output level to half/full */
+};
+
+struct sparx5_sd10g28_args {
+       bool skip_cmu_cfg:1; /* Enable/disable CMU cfg */
+       bool no_pwrcycle:1; /* Omit initial power-cycle */
+       bool txinvert:1; /* Enable inversion of output data */
+       bool rxinvert:1; /* Enable inversion of input data */
+       bool txmargin:1; /* Set output level to  half/full */
+       u16 txswing; /* Set output level */
+       bool mute:1; /* Mute Output Buffer */
+       bool is_6g:1;
+       bool reg_rst:1;
+};
+
+/* Full parameter set used to program one SD10G28 serdes lane. Built from
+ * the media/mode presets and caller args in sparx5_sd10g28_get_params().
+ * NOTE(review): field names mix cases (cfg_pi_DFE_en) - they follow the
+ * hardware register naming; suffixes like _7_0 appear to denote bit
+ * ranges - TODO confirm against the register documentation.
+ */
+struct sparx5_sd10g28_params {
+       u8 cmu_sel;
+       u8 is_6g;
+       u8 skip_cmu_cfg;
+       u8 cfg_lane_reserve_7_0;
+       u8 cfg_ssc_rtl_clk_sel;
+       u8 cfg_lane_reserve_15_8;
+       u8 cfg_txrate_1_0;
+       u8 cfg_rxrate_1_0;
+       u8 r_d_width_ctrl_2_0;
+       u8 cfg_pma_tx_ck_bitwidth_2_0;
+       u8 cfg_rxdiv_sel_2_0;
+       u8 r_pcs2pma_phymode_4_0;
+       u8 cfg_lane_id_2_0;
+       u8 cfg_cdrck_en;
+       u8 cfg_dfeck_en;
+       u8 cfg_dfe_pd;
+       u8 cfg_dfetap_en_5_1;
+       u8 cfg_erramp_pd;
+       u8 cfg_pi_DFE_en;
+       u8 cfg_pi_en;
+       u8 cfg_pd_ctle;
+       u8 cfg_summer_en;
+       u8 cfg_pd_rx_cktree;
+       u8 cfg_pd_clk;
+       u8 cfg_pd_cml;
+       u8 cfg_pd_driver;
+       u8 cfg_rx_reg_pu;
+       u8 cfg_d_cdr_pd;
+       u8 cfg_pd_sq;
+       u8 cfg_rxdet_en;
+       u8 cfg_rxdet_str;
+       u8 r_multi_lane_mode;
+       u8 cfg_en_adv;
+       u8 cfg_en_main;
+       u8 cfg_en_dly;
+       u8 cfg_tap_adv_3_0;
+       u8 cfg_tap_main;
+       u8 cfg_tap_dly_4_0;
+       u8 cfg_vga_ctrl_3_0;
+       u8 cfg_vga_cp_2_0;
+       u8 cfg_eq_res_3_0;
+       u8 cfg_eq_r_byp;
+       u8 cfg_eq_c_force_3_0;
+       u8 cfg_en_dfedig;
+       u8 cfg_sum_setcm_en;
+       u8 cfg_en_preemph;
+       u8 cfg_itx_ippreemp_base_1_0;
+       u8 cfg_itx_ipdriver_base_2_0;
+       u8 cfg_ibias_tune_reserve_5_0;
+       u8 cfg_txswing_half;
+       u8 cfg_dis_2nd_order;
+       u8 cfg_rx_ssc_lh;
+       u8 cfg_pi_floop_steps_1_0;
+       u8 cfg_pi_ext_dac_23_16;
+       u8 cfg_pi_ext_dac_15_8;
+       u8 cfg_iscan_ext_dac_7_0;
+       u8 cfg_cdr_kf_gen1_2_0;
+       u8 cfg_cdr_kf_gen2_2_0;
+       u8 cfg_cdr_kf_gen3_2_0;
+       u8 cfg_cdr_kf_gen4_2_0;
+       u8 r_cdr_m_gen1_7_0;
+       u8 cfg_pi_bw_gen1_3_0;
+       u8 cfg_pi_bw_gen2;
+       u8 cfg_pi_bw_gen3;
+       u8 cfg_pi_bw_gen4;
+       u8 cfg_pi_ext_dac_7_0;
+       u8 cfg_pi_steps;
+       u8 cfg_mp_max_3_0;
+       u8 cfg_rstn_dfedig;
+       u8 cfg_alos_thr_3_0;
+       u8 cfg_predrv_slewrate_1_0;
+       u8 cfg_itx_ipcml_base_1_0;
+       u8 cfg_ip_pre_base_1_0;
+       u8 r_cdr_m_gen2_7_0;
+       u8 r_cdr_m_gen3_7_0;
+       u8 r_cdr_m_gen4_7_0;
+       u8 r_en_auto_cdr_rstn;
+       u8 cfg_oscal_afe;
+       u8 cfg_pd_osdac_afe;
+       u8 cfg_resetb_oscal_afe[2]; /* two-step value: written as [0] then [1] */
+       u8 cfg_center_spreading;
+       u8 cfg_m_cnt_maxval_4_0;
+       u8 cfg_ncnt_maxval_7_0;
+       u8 cfg_ncnt_maxval_10_8;
+       u8 cfg_ssc_en;
+       u8 cfg_tx2rx_lp_en;
+       u8 cfg_txlb_en;
+       u8 cfg_rx2tx_lp_en;
+       u8 cfg_rxlb_en;
+       u8 r_tx_pol_inv;
+       u8 r_rx_pol_inv;
+       u8 fx_100; /* set when the macro runs in 100FX serdes mode */
+};
+
+/* Media-dependent equalizer/tap presets for the 25G serdes, indexed by the
+ * ETH_MEDIA_* value noted on each entry.
+ */
+static struct sparx5_sd25g28_media_preset media_presets_25g[] = {
+       { /* ETH_MEDIA_DEFAULT */
+               .cfg_en_adv               = 0,
+               .cfg_en_main              = 1,
+               .cfg_en_dly               = 0,
+               .cfg_tap_adv_3_0          = 0,
+               .cfg_tap_main             = 1,
+               .cfg_tap_dly_4_0          = 0,
+               .cfg_eq_c_force_3_0       = 0xf,
+               .cfg_vga_ctrl_byp_4_0     = 4,
+               .cfg_eq_r_force_3_0       = 12,
+               .cfg_alos_thr_2_0         = 7,
+       },
+       { /* ETH_MEDIA_SR */
+               .cfg_en_adv               = 1,
+               .cfg_en_main              = 1,
+               .cfg_en_dly               = 1,
+               .cfg_tap_adv_3_0          = 0,
+               .cfg_tap_main             = 1,
+               .cfg_tap_dly_4_0          = 0x10,
+               .cfg_eq_c_force_3_0       = 0xf,
+               .cfg_vga_ctrl_byp_4_0     = 8,
+               .cfg_eq_r_force_3_0       = 4,
+               .cfg_alos_thr_2_0         = 0,
+       },
+       { /* ETH_MEDIA_DAC */
+               .cfg_en_adv               = 0,
+               .cfg_en_main              = 1,
+               .cfg_en_dly               = 0,
+               .cfg_tap_adv_3_0          = 0,
+               .cfg_tap_main             = 1,
+               .cfg_tap_dly_4_0          = 0,
+               .cfg_eq_c_force_3_0       = 0xf,
+               .cfg_vga_ctrl_byp_4_0     = 8,
+               .cfg_eq_r_force_3_0       = 0xc,
+               .cfg_alos_thr_2_0         = 0,
+       },
+};
+
+/* Per-speed/mode presets for the 25G serdes, indexed by the
+ * SPX5_SD25G28_MODE_PRESET_* value noted on each entry and selected in
+ * sparx5_sd10g25_get_mode_preset().
+ */
+static struct sparx5_sd25g28_mode_preset mode_presets_25g[] = {
+       { /* SPX5_SD25G28_MODE_PRESET_25000 */
+               .bitwidth           = 40,
+               .tx_pre_div         = 0,
+               .fifo_ck_div        = 0,
+               .pre_divsel         = 1,
+               .vco_div_mode       = 0,
+               .sel_div            = 15,
+               .ck_bitwidth        = 3,
+               .subrate            = 0,
+               .com_txcal_en       = 0,
+               .com_tx_reserve_msb = (0x26 << 1),
+               .com_tx_reserve_lsb = 0xf0,
+               .cfg_itx_ipcml_base = 0,
+               .tx_reserve_msb     = 0xcc,
+               .tx_reserve_lsb     = 0xfe,
+               .bw                 = 3,
+               .rxterm             = 0,
+               .dfe_enable         = 1,
+               .dfe_tap            = 0x1f,
+               .txmargin           = 1,
+               .cfg_ctle_rstn      = 1,
+               .r_dfe_rstn         = 1,
+               .cfg_pi_bw_3_0      = 0,
+               .tx_tap_dly         = 8,
+               .tx_tap_adv         = 0xc,
+       },
+       { /* SPX5_SD25G28_MODE_PRESET_10000 */
+               .bitwidth           = 64,
+               .tx_pre_div         = 0,
+               .fifo_ck_div        = 2,
+               .pre_divsel         = 0,
+               .vco_div_mode       = 1,
+               .sel_div            = 9,
+               .ck_bitwidth        = 0,
+               .subrate            = 0,
+               .com_txcal_en       = 1,
+               .com_tx_reserve_msb = (0x20 << 1),
+               .com_tx_reserve_lsb = 0x40,
+               .cfg_itx_ipcml_base = 0,
+               .tx_reserve_msb     = 0x4c,
+               .tx_reserve_lsb     = 0x44,
+               .bw                 = 3,
+               .cfg_pi_bw_3_0      = 0,
+               .rxterm             = 3,
+               .dfe_enable         = 1,
+               .dfe_tap            = 0x1f,
+               .txmargin           = 0,
+               .cfg_ctle_rstn      = 1,
+               .r_dfe_rstn         = 1,
+               .tx_tap_dly         = 0,
+               .tx_tap_adv         = 0,
+       },
+       { /* SPX5_SD25G28_MODE_PRESET_5000 */
+               .bitwidth           = 64,
+               .tx_pre_div         = 0,
+               .fifo_ck_div        = 2,
+               .pre_divsel         = 0,
+               .vco_div_mode       = 2,
+               .sel_div            = 9,
+               .ck_bitwidth        = 0,
+               .subrate            = 0,
+               .com_txcal_en       = 1,
+               .com_tx_reserve_msb = (0x20 << 1),
+               .com_tx_reserve_lsb = 0,
+               .cfg_itx_ipcml_base = 0,
+               .tx_reserve_msb     = 0xe,
+               .tx_reserve_lsb     = 0x80,
+               .bw                 = 0,
+               .rxterm             = 0,
+               .cfg_pi_bw_3_0      = 6,
+               .dfe_enable         = 0,
+               .dfe_tap            = 0,
+               .tx_tap_dly         = 0,
+               .tx_tap_adv         = 0,
+       },
+       { /* SPX5_SD25G28_MODE_PRESET_SD_2G5 */
+               .bitwidth           = 10,
+               .tx_pre_div         = 0,
+               .fifo_ck_div        = 0,
+               .pre_divsel         = 0,
+               .vco_div_mode       = 1,
+               .sel_div            = 6,
+               .ck_bitwidth        = 3,
+               .subrate            = 2,
+               .com_txcal_en       = 1,
+               .com_tx_reserve_msb = (0x26 << 1),
+               .com_tx_reserve_lsb = (0xf << 4),
+               .cfg_itx_ipcml_base = 2,
+               .tx_reserve_msb     = 0x8,
+               .tx_reserve_lsb     = 0x8a,
+               .bw                 = 0,
+               .cfg_pi_bw_3_0      = 0,
+               .rxterm             = (1 << 2),
+               .dfe_enable         = 0,
+               .dfe_tap            = 0,
+               .tx_tap_dly         = 0,
+               .tx_tap_adv         = 0,
+       },
+       { /* SPX5_SD25G28_MODE_PRESET_1000BASEX */
+               .bitwidth           = 10,
+               .tx_pre_div         = 0,
+               .fifo_ck_div        = 1,
+               .pre_divsel         = 0,
+               .vco_div_mode       = 1,
+               .sel_div            = 8,
+               .ck_bitwidth        = 3,
+               .subrate            = 3,
+               .com_txcal_en       = 1,
+               .com_tx_reserve_msb = (0x26 << 1),
+               .com_tx_reserve_lsb = 0xf0,
+               .cfg_itx_ipcml_base = 0,
+               .tx_reserve_msb     = 0x8,
+               .tx_reserve_lsb     = 0xce,
+               .bw                 = 0,
+               .rxterm             = 0,
+               .cfg_pi_bw_3_0      = 0,
+               .dfe_enable         = 0,
+               .dfe_tap            = 0,
+               .tx_tap_dly         = 0,
+               .tx_tap_adv         = 0,
+       },
+};
+
+/* Media-dependent equalizer/tap presets for the 10G serdes, indexed by the
+ * ETH_MEDIA_* value noted on each entry.
+ */
+static struct sparx5_sd10g28_media_preset media_presets_10g[] = {
+       { /* ETH_MEDIA_DEFAULT */
+               .cfg_en_adv               = 0,
+               .cfg_en_main              = 1,
+               .cfg_en_dly               = 0,
+               .cfg_tap_adv_3_0          = 0,
+               .cfg_tap_main             = 1,
+               .cfg_tap_dly_4_0          = 0,
+               .cfg_vga_ctrl_3_0         = 5,
+               .cfg_vga_cp_2_0           = 0,
+               .cfg_eq_res_3_0           = 0xa,
+               .cfg_eq_r_byp             = 1,
+               .cfg_eq_c_force_3_0       = 0x8,
+               .cfg_alos_thr_3_0         = 0x3,
+       },
+       { /* ETH_MEDIA_SR */
+               .cfg_en_adv               = 1,
+               .cfg_en_main              = 1,
+               .cfg_en_dly               = 1,
+               .cfg_tap_adv_3_0          = 0,
+               .cfg_tap_main             = 1,
+               .cfg_tap_dly_4_0          = 0xc,
+               .cfg_vga_ctrl_3_0         = 0xa,
+               .cfg_vga_cp_2_0           = 0x4,
+               .cfg_eq_res_3_0           = 0xa,
+               .cfg_eq_r_byp             = 1,
+               .cfg_eq_c_force_3_0       = 0xF,
+               .cfg_alos_thr_3_0         = 0x3,
+       },
+       { /* ETH_MEDIA_DAC */
+               .cfg_en_adv               = 1,
+               .cfg_en_main              = 1,
+               .cfg_en_dly               = 1,
+               .cfg_tap_adv_3_0          = 12,
+               .cfg_tap_main             = 1,
+               .cfg_tap_dly_4_0          = 8,
+               .cfg_vga_ctrl_3_0         = 0xa,
+               .cfg_vga_cp_2_0           = 4,
+               .cfg_eq_res_3_0           = 0xa,
+               .cfg_eq_r_byp             = 1,
+               .cfg_eq_c_force_3_0       = 0xf,
+               .cfg_alos_thr_3_0         = 0x0,
+       }, /* trailing comma added for consistency with the sibling tables */
+};
+
+/* Per-speed/mode presets for the 10G serdes, indexed by the
+ * SPX5_SD10G28_MODE_PRESET_* value noted on each entry and selected in
+ * sparx5_sd10g28_get_mode_preset().
+ */
+static struct sparx5_sd10g28_mode_preset mode_presets_10g[] = {
+       { /* SPX5_SD10G28_MODE_PRESET_10000 */
+               .bwidth           = 64,
+               .cmu_sel          = SPX5_SD10G28_CMU_MAIN,
+               .rate             = 0x0,
+               .dfe_enable       = 1,
+               .dfe_tap          = 0x1f,
+               .pi_bw_gen1       = 0x0,
+               .duty_cycle       = 0x2,
+       },
+       { /* SPX5_SD10G28_MODE_PRESET_SFI_5000_6G */
+               .bwidth           = 16,
+               .cmu_sel          = SPX5_SD10G28_CMU_MAIN,
+               .rate             = 0x1,
+               .dfe_enable       = 0,
+               .dfe_tap          = 0,
+               .pi_bw_gen1       = 0x5,
+               .duty_cycle       = 0x0,
+       },
+       { /* SPX5_SD10G28_MODE_PRESET_SFI_5000_10G */
+               .bwidth           = 64,
+               .cmu_sel          = SPX5_SD10G28_CMU_MAIN,
+               .rate             = 0x1,
+               .dfe_enable       = 0,
+               .dfe_tap          = 0,
+               .pi_bw_gen1       = 0x5,
+               .duty_cycle       = 0x0,
+       },
+       { /* SPX5_SD10G28_MODE_PRESET_QSGMII */
+               .bwidth           = 20,
+               .cmu_sel          = SPX5_SD10G28_CMU_AUX1,
+               .rate             = 0x1,
+               .dfe_enable       = 0,
+               .dfe_tap          = 0,
+               .pi_bw_gen1       = 0x5,
+               .duty_cycle       = 0x0,
+       },
+       { /* SPX5_SD10G28_MODE_PRESET_SD_2G5 */
+               .bwidth           = 10,
+               .cmu_sel          = SPX5_SD10G28_CMU_AUX2,
+               .rate             = 0x2,
+               .dfe_enable       = 0,
+               .dfe_tap          = 0,
+               .pi_bw_gen1       = 0x7,
+               .duty_cycle       = 0x0,
+       },
+       { /* SPX5_SD10G28_MODE_PRESET_1000BASEX */
+               .bwidth           = 10,
+               .cmu_sel          = SPX5_SD10G28_CMU_AUX1,
+               .rate             = 0x3,
+               .dfe_enable       = 0,
+               .dfe_tap          = 0,
+               .pi_bw_gen1       = 0x7,
+               .duty_cycle       = 0x0,
+       },
+};
+
+/* Map a SD25G28 interface width (in bits) to its register encoding.
+ * An unsupported width is reported via dev_err and maps to 0, the same
+ * encoding as width 10.
+ */
+static u8 sd25g28_get_iw_setting(struct device *dev, const u8 interface_width)
+{
+       switch (interface_width) {
+       case 10:
+               return 0;
+       case 16:
+               return 1;
+       case 32:
+               return 3;
+       case 40:
+               return 4;
+       case 64:
+               return 5;
+       default:
+               dev_err(dev, "%s: Illegal value %d for interface width\n",
+                      __func__, interface_width);
+               return 0;
+       }
+}
+
+/* Map a SD10G28 interface width (in bits) to its register encoding.
+ * An unsupported width is reported via dev_err and maps to 0, the same
+ * encoding as width 10.
+ */
+static u8 sd10g28_get_iw_setting(struct device *dev, const u8 interface_width)
+{
+       u8 setting;
+
+       switch (interface_width) {
+       case 10:
+               setting = 0;
+               break;
+       case 16:
+               setting = 1;
+               break;
+       case 20:
+               setting = 2;
+               break;
+       case 32:
+               setting = 3;
+               break;
+       case 40:
+               setting = 4;
+               break;
+       case 64:
+               setting = 7;
+               break;
+       default:
+               dev_err(dev, "%s: Illegal value %d for interface width\n", __func__,
+                      interface_width);
+               setting = 0;
+               break;
+       }
+       return setting;
+}
+
+/* Select the 25G serdes mode preset matching the macro's serdes mode and
+ * speed.
+ *
+ * Returns 0 on success, -EINVAL for unsupported combinations (100FX, or an
+ * SFI speed other than 25G/10G/5G).
+ *
+ * Fix vs. original: an unsupported SFI speed previously fell through,
+ * leaving *mode unwritten while still returning 0 (success); now it logs
+ * and returns -EINVAL, matching sparx5_sd10g28_get_mode_preset().
+ */
+static int sparx5_sd10g25_get_mode_preset(struct sparx5_serdes_macro *macro,
+                                         struct sparx5_sd25g28_mode_preset *mode)
+{
+       switch (macro->serdesmode) {
+       case SPX5_SD_MODE_SFI:
+               if (macro->speed == SPEED_25000) {
+                       *mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_25000];
+               } else if (macro->speed == SPEED_10000) {
+                       *mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_10000];
+               } else if (macro->speed == SPEED_5000) {
+                       *mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_5000];
+               } else {
+                       dev_err(macro->priv->dev, "%s: Illegal speed: %02u, sidx: %02u, mode (%u)",
+                              __func__, macro->speed, macro->sidx,
+                              macro->serdesmode);
+                       return -EINVAL;
+               }
+               break;
+       case SPX5_SD_MODE_2G5:
+               *mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_SD_2G5];
+               break;
+       case SPX5_SD_MODE_1000BASEX:
+               *mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_1000BASEX];
+               break;
+       case SPX5_SD_MODE_100FX:
+                /* Not supported */
+               return -EINVAL;
+       default:
+               *mode = mode_presets_25g[SPX5_SD25G28_MODE_PRESET_25000];
+               break;
+       }
+       return 0;
+}
+
+/* Select the 10G serdes mode preset matching the macro's serdes mode and
+ * speed. For SFI at 5G, args->is_6g picks the 6G variant of the preset.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported SFI speed.
+ */
+static int sparx5_sd10g28_get_mode_preset(struct sparx5_serdes_macro *macro,
+                                         struct sparx5_sd10g28_mode_preset *mode,
+                                         struct sparx5_sd10g28_args *args)
+{
+       int idx;
+
+       switch (macro->serdesmode) {
+       case SPX5_SD_MODE_SFI:
+               if (macro->speed == SPEED_10000) {
+                       idx = SPX5_SD10G28_MODE_PRESET_10000;
+               } else if (macro->speed == SPEED_5000) {
+                       idx = args->is_6g ?
+                               SPX5_SD10G28_MODE_PRESET_SFI_5000_6G :
+                               SPX5_SD10G28_MODE_PRESET_SFI_5000_10G;
+               } else {
+                       dev_err(macro->priv->dev, "%s: Illegal speed: %02u, sidx: %02u, mode (%u)",
+                              __func__, macro->speed, macro->sidx,
+                              macro->serdesmode);
+                       return -EINVAL;
+               }
+               break;
+       case SPX5_SD_MODE_QSGMII:
+               idx = SPX5_SD10G28_MODE_PRESET_QSGMII;
+               break;
+       case SPX5_SD_MODE_2G5:
+               idx = SPX5_SD10G28_MODE_PRESET_SD_2G5;
+               break;
+       case SPX5_SD_MODE_100FX:
+       case SPX5_SD_MODE_1000BASEX:
+               idx = SPX5_SD10G28_MODE_PRESET_1000BASEX;
+               break;
+       default:
+               idx = SPX5_SD10G28_MODE_PRESET_10000;
+               break;
+       }
+       *mode = mode_presets_10g[idx];
+       return 0;
+}
+
+/* Build the full 25G lane parameter set from the media preset, mode preset
+ * and caller arguments. Pure computation - no hardware access; the result
+ * is applied by sparx5_sd25g28_apply_params().
+ */
+static void sparx5_sd25g28_get_params(struct sparx5_serdes_macro *macro,
+                                     struct sparx5_sd25g28_media_preset *media,
+                                     struct sparx5_sd25g28_mode_preset *mode,
+                                     struct sparx5_sd25g28_args *args,
+                                     struct sparx5_sd25g28_params *params)
+{
+       u8 iw = sd25g28_get_iw_setting(macro->priv->dev, mode->bitwidth);
+       struct sparx5_sd25g28_params init = {
+               .r_d_width_ctrl_2_0         = iw,
+               .r_txfifo_ck_div_pmad_2_0   = mode->fifo_ck_div,
+               .r_rxfifo_ck_div_pmad_2_0   = mode->fifo_ck_div,
+               .cfg_vco_div_mode_1_0       = mode->vco_div_mode,
+               .cfg_pre_divsel_1_0         = mode->pre_divsel,
+               .cfg_sel_div_3_0            = mode->sel_div,
+               .cfg_vco_start_code_3_0     = 0,
+               .cfg_pma_tx_ck_bitwidth_2_0 = mode->ck_bitwidth,
+               .cfg_tx_prediv_1_0          = mode->tx_pre_div,
+               /* NOTE(review): rxdiv is taken from ck_bitwidth, not a
+                * dedicated rxdiv preset field - confirm this is intended
+                */
+               .cfg_rxdiv_sel_2_0          = mode->ck_bitwidth,
+               .cfg_tx_subrate_2_0         = mode->subrate,
+               .cfg_rx_subrate_2_0         = mode->subrate,
+               .r_multi_lane_mode          = 0,
+               .cfg_cdrck_en               = 1,
+               .cfg_dfeck_en               = mode->dfe_enable,
+               .cfg_dfe_pd                 = mode->dfe_enable == 1 ? 0 : 1,
+               .cfg_dfedmx_pd              = 1,
+               .cfg_dfetap_en_5_1          = mode->dfe_tap,
+               .cfg_dmux_pd                = 0,
+               .cfg_dmux_clk_pd            = 1,
+               .cfg_erramp_pd              = mode->dfe_enable == 1 ? 0 : 1,
+               .cfg_pi_DFE_en              = mode->dfe_enable,
+               .cfg_pi_en                  = 1,
+               .cfg_pd_ctle                = 0,
+               .cfg_summer_en              = 1,
+               .cfg_pmad_ck_pd             = 0,
+               .cfg_pd_clk                 = 0,
+               .cfg_pd_cml                 = 0,
+               .cfg_pd_driver              = 0,
+               .cfg_rx_reg_pu              = 1,
+               .cfg_pd_rms_det             = 1,
+               .cfg_dcdr_pd                = 0,
+               .cfg_ecdr_pd                = 1,
+               .cfg_pd_sq                  = 1,
+               .cfg_itx_ipdriver_base_2_0  = mode->txmargin,
+               .cfg_tap_dly_4_0            = media->cfg_tap_dly_4_0,
+               .cfg_tap_main               = media->cfg_tap_main,
+               .cfg_en_main                = media->cfg_en_main,
+               .cfg_tap_adv_3_0            = media->cfg_tap_adv_3_0,
+               .cfg_en_adv                 = media->cfg_en_adv,
+               .cfg_en_dly                 = media->cfg_en_dly,
+               .cfg_iscan_en               = 0,
+               .l1_pcs_en_fast_iscan       = 0,
+               .l0_cfg_bw_1_0              = 0,
+               .cfg_en_dummy               = 0,
+               .cfg_pll_reserve_3_0        = args->com_pll_reserve,
+               .l0_cfg_txcal_en            = mode->com_txcal_en,
+               .l0_cfg_tx_reserve_15_8     = mode->com_tx_reserve_msb,
+               .l0_cfg_tx_reserve_7_0      = mode->com_tx_reserve_lsb,
+               .cfg_tx_reserve_15_8        = mode->tx_reserve_msb,
+               .cfg_tx_reserve_7_0         = mode->tx_reserve_lsb,
+               .cfg_bw_1_0                 = mode->bw,
+               .cfg_txcal_man_en           = 1,
+               .cfg_phase_man_4_0          = 0,
+               .cfg_quad_man_1_0           = 0,
+               .cfg_txcal_shift_code_5_0   = 2,
+               .cfg_txcal_valid_sel_3_0    = 4,
+               .cfg_txcal_en               = 0,
+               .cfg_cdr_kf_2_0             = 1,
+               .cfg_cdr_m_7_0              = 6,
+               .cfg_pi_bw_3_0              = mode->cfg_pi_bw_3_0,
+               .cfg_pi_steps_1_0           = 0,
+               .cfg_dis_2ndorder           = 1,
+               .cfg_ctle_rstn              = mode->cfg_ctle_rstn,
+               .r_dfe_rstn                 = mode->r_dfe_rstn,
+               .cfg_alos_thr_2_0           = media->cfg_alos_thr_2_0,
+               .cfg_itx_ipcml_base_1_0     = mode->cfg_itx_ipcml_base,
+               .cfg_rx_reserve_7_0         = 0xbf,
+               .cfg_rx_reserve_15_8        = 0x61,
+               .cfg_rxterm_2_0             = mode->rxterm,
+               .cfg_fom_selm               = 0,
+               .cfg_rx_sp_ctle_1_0         = 0,
+               .cfg_isel_ctle_1_0          = 0,
+               .cfg_vga_ctrl_byp_4_0       = media->cfg_vga_ctrl_byp_4_0,
+               .cfg_vga_byp                = 1,
+               .cfg_agc_adpt_byp           = 1,
+               .cfg_eqr_byp                = 1,
+               .cfg_eqr_force_3_0          = media->cfg_eq_r_force_3_0,
+               .cfg_eqc_force_3_0          = media->cfg_eq_c_force_3_0,
+               .cfg_sum_setcm_en           = 1,
+               /* NOTE(review): cfg_pi_DFE_en (dfe_enable) is also set
+                * above; confirm cfg_pi_dfe_en is a distinct field and
+                * both assignments are intended
+                */
+               .cfg_pi_dfe_en              = 1,
+               .cfg_init_pos_iscan_6_0     = 6,
+               .cfg_init_pos_ipi_6_0       = 9,
+               .cfg_dfedig_m_2_0           = 6,
+               .cfg_en_dfedig              = mode->dfe_enable,
+               .r_d_width_ctrl_from_hwt    = 0,
+               .r_reg_manual               = 1,
+               .reg_rst                    = args->reg_rst,
+               .cfg_jc_byp                 = 1,
+               .cfg_common_reserve_7_0     = 1,
+               .cfg_pll_lol_set            = 1,
+               .cfg_tx2rx_lp_en            = 0,
+               .cfg_txlb_en                = 0,
+               .cfg_rx2tx_lp_en            = 0,
+               .cfg_rxlb_en                = 0,
+               .r_tx_pol_inv               = args->txinvert,
+               .r_rx_pol_inv               = args->rxinvert,
+       };
+
+       *params = init;
+}
+
+/* Build the full 10G lane parameter set from the media preset, mode preset
+ * and caller arguments. Pure computation - no hardware access.
+ */
+static void sparx5_sd10g28_get_params(struct sparx5_serdes_macro *macro,
+                                     struct sparx5_sd10g28_media_preset *media,
+                                     struct sparx5_sd10g28_mode_preset *mode,
+                                     struct sparx5_sd10g28_args *args,
+                                     struct sparx5_sd10g28_params *params)
+{
+       u8 iw = sd10g28_get_iw_setting(macro->priv->dev, mode->bwidth);
+       struct sparx5_sd10g28_params init = {
+               .skip_cmu_cfg                = args->skip_cmu_cfg,
+               .is_6g                       = args->is_6g,
+               .cmu_sel                     = mode->cmu_sel,
+               /* lane reserve bit 6 and ssc clk select are derived from
+                * the CMU instance (MAIN/AUX1/AUX2) parity and index
+                */
+               .cfg_lane_reserve_7_0        = (mode->cmu_sel % 2) << 6,
+               .cfg_ssc_rtl_clk_sel         = (mode->cmu_sel / 2),
+               .cfg_lane_reserve_15_8       = mode->duty_cycle,
+               .cfg_txrate_1_0              = mode->rate,
+               .cfg_rxrate_1_0              = mode->rate,
+               .fx_100                      = macro->serdesmode == SPX5_SD_MODE_100FX,
+               .r_d_width_ctrl_2_0          = iw,
+               .cfg_pma_tx_ck_bitwidth_2_0  = iw,
+               .cfg_rxdiv_sel_2_0           = iw,
+               .r_pcs2pma_phymode_4_0       = 0,
+               .cfg_lane_id_2_0             = 0,
+               .cfg_cdrck_en                = 1,
+               .cfg_dfeck_en                = mode->dfe_enable,
+               .cfg_dfe_pd                  = (mode->dfe_enable == 1) ? 0 : 1,
+               .cfg_dfetap_en_5_1           = mode->dfe_tap,
+               .cfg_erramp_pd               = (mode->dfe_enable == 1) ? 0 : 1,
+               .cfg_pi_DFE_en               = mode->dfe_enable,
+               .cfg_pi_en                   = 1,
+               .cfg_pd_ctle                 = 0,
+               .cfg_summer_en               = 1,
+               .cfg_pd_rx_cktree            = 0,
+               .cfg_pd_clk                  = 0,
+               .cfg_pd_cml                  = 0,
+               .cfg_pd_driver               = 0,
+               .cfg_rx_reg_pu               = 1,
+               .cfg_d_cdr_pd                = 0,
+               .cfg_pd_sq                   = mode->dfe_enable,
+               .cfg_rxdet_en                = 0,
+               .cfg_rxdet_str               = 0,
+               .r_multi_lane_mode           = 0,
+               .cfg_en_adv                  = media->cfg_en_adv,
+               .cfg_en_main                 = 1,
+               .cfg_en_dly                  = media->cfg_en_dly,
+               .cfg_tap_adv_3_0             = media->cfg_tap_adv_3_0,
+               .cfg_tap_main                = media->cfg_tap_main,
+               .cfg_tap_dly_4_0             = media->cfg_tap_dly_4_0,
+               .cfg_vga_ctrl_3_0            = media->cfg_vga_ctrl_3_0,
+               .cfg_vga_cp_2_0              = media->cfg_vga_cp_2_0,
+               .cfg_eq_res_3_0              = media->cfg_eq_res_3_0,
+               .cfg_eq_r_byp                = media->cfg_eq_r_byp,
+               .cfg_eq_c_force_3_0          = media->cfg_eq_c_force_3_0,
+               .cfg_en_dfedig               = mode->dfe_enable,
+               .cfg_sum_setcm_en            = 1,
+               .cfg_en_preemph              = 0,
+               .cfg_itx_ippreemp_base_1_0   = 0,
+               /* txswing is split: bits [8:6] drive the driver base, bits
+                * [5:0] the bias tune
+                */
+               .cfg_itx_ipdriver_base_2_0   = (args->txswing >> 6),
+               .cfg_ibias_tune_reserve_5_0  = (args->txswing & 63),
+               .cfg_txswing_half            = (args->txmargin),
+               .cfg_dis_2nd_order           = 0x1,
+               .cfg_rx_ssc_lh               = 0x0,
+               .cfg_pi_floop_steps_1_0      = 0x0,
+               .cfg_pi_ext_dac_23_16        = (1 << 5),
+               .cfg_pi_ext_dac_15_8         = (0 << 6),
+               .cfg_iscan_ext_dac_7_0       = (1 << 7) + 9,
+               .cfg_cdr_kf_gen1_2_0         = 1,
+               .cfg_cdr_kf_gen2_2_0         = 1,
+               .cfg_cdr_kf_gen3_2_0         = 1,
+               .cfg_cdr_kf_gen4_2_0         = 1,
+               .r_cdr_m_gen1_7_0            = 4,
+               .cfg_pi_bw_gen1_3_0          = mode->pi_bw_gen1,
+               .cfg_pi_bw_gen2              = mode->pi_bw_gen1,
+               .cfg_pi_bw_gen3              = mode->pi_bw_gen1,
+               .cfg_pi_bw_gen4              = mode->pi_bw_gen1,
+               .cfg_pi_ext_dac_7_0          = 3,
+               .cfg_pi_steps                = 0,
+               .cfg_mp_max_3_0              = 1,
+               .cfg_rstn_dfedig             = mode->dfe_enable,
+               .cfg_alos_thr_3_0            = media->cfg_alos_thr_3_0,
+               .cfg_predrv_slewrate_1_0     = 3,
+               .cfg_itx_ipcml_base_1_0      = 0,
+               .cfg_ip_pre_base_1_0         = 0,
+               .r_cdr_m_gen2_7_0            = 2,
+               .r_cdr_m_gen3_7_0            = 2,
+               .r_cdr_m_gen4_7_0            = 2,
+               .r_en_auto_cdr_rstn          = 0,
+               .cfg_oscal_afe               = 1,
+               .cfg_pd_osdac_afe            = 0,
+               .cfg_resetb_oscal_afe[0]     = 0,
+               .cfg_resetb_oscal_afe[1]     = 1,
+               .cfg_center_spreading        = 0,
+               .cfg_m_cnt_maxval_4_0        = 15,
+               .cfg_ncnt_maxval_7_0         = 32,
+               .cfg_ncnt_maxval_10_8        = 6,
+               .cfg_ssc_en                  = 1,
+               .cfg_tx2rx_lp_en             = 0,
+               .cfg_txlb_en                 = 0,
+               .cfg_rx2tx_lp_en             = 0,
+               .cfg_rxlb_en                 = 0,
+               .r_tx_pol_inv                = args->txinvert,
+               .r_rx_pol_inv                = args->rxinvert,
+       };
+
+       *params = init;
+}
+
+/* Pulse the external config reset of a 25G serdes lane: assert, wait
+ * 1-2 ms, de-assert. No-op unless params->reg_rst is set.
+ */
+static void sparx5_sd25g28_reset(void __iomem *regs[],
+                                struct sparx5_sd25g28_params *params,
+                                u32 sd_index)
+{
+       void __iomem *lane_cfg;
+
+       if (params->reg_rst != 1)
+               return;
+
+       lane_cfg = sdx5_addr(regs, SD_LANE_25G_SD_LANE_CFG(sd_index));
+
+       sdx5_rmw_addr(SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST_SET(1),
+                SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST, lane_cfg);
+
+       usleep_range(1000, 2000);
+
+       sdx5_rmw_addr(SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST_SET(0),
+                SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST, lane_cfg);
+}
+
+static int sparx5_sd25g28_apply_params(struct sparx5_serdes_macro *macro,
+                                      struct sparx5_sd25g28_params *params)
+{
+       struct sparx5_serdes_private *priv = macro->priv;
+       void __iomem **regs = priv->regs;
+       struct device *dev = priv->dev;
+       u32 sd_index = macro->stpidx;
+       u32 value;
+
+       sdx5_rmw(SD_LANE_25G_SD_LANE_CFG_MACRO_RST_SET(1),
+                SD_LANE_25G_SD_LANE_CFG_MACRO_RST,
+                priv,
+                SD_LANE_25G_SD_LANE_CFG(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(0xFF),
+                SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX,
+                priv,
+                SD25G_LANE_CMU_FF(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT_SET
+                (params->r_d_width_ctrl_from_hwt) |
+                SD25G_LANE_CMU_1A_R_REG_MANUAL_SET(params->r_reg_manual),
+                SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT |
+                SD25G_LANE_CMU_1A_R_REG_MANUAL,
+                priv,
+                SD25G_LANE_CMU_1A(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0_SET
+                (params->cfg_common_reserve_7_0),
+                SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0,
+                priv,
+                SD25G_LANE_CMU_31(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_09_CFG_EN_DUMMY_SET(params->cfg_en_dummy),
+                SD25G_LANE_CMU_09_CFG_EN_DUMMY,
+                priv,
+                SD25G_LANE_CMU_09(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0_SET
+                (params->cfg_pll_reserve_3_0),
+                SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0,
+                priv,
+                SD25G_LANE_CMU_13(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN_SET(params->l0_cfg_txcal_en),
+                SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN,
+                priv,
+                SD25G_LANE_CMU_40(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8_SET
+                (params->l0_cfg_tx_reserve_15_8),
+                SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8,
+                priv,
+                SD25G_LANE_CMU_46(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0_SET
+                (params->l0_cfg_tx_reserve_7_0),
+                SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0,
+                priv,
+                SD25G_LANE_CMU_45(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN_SET(0),
+                SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN,
+                priv,
+                SD25G_LANE_CMU_0B(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN_SET(1),
+                SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN,
+                priv,
+                SD25G_LANE_CMU_0B(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_19_R_CK_RESETB_SET(0),
+                SD25G_LANE_CMU_19_R_CK_RESETB,
+                priv,
+                SD25G_LANE_CMU_19(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_19_R_CK_RESETB_SET(1),
+                SD25G_LANE_CMU_19_R_CK_RESETB,
+                priv,
+                SD25G_LANE_CMU_19(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_18_R_PLL_RSTN_SET(0),
+                SD25G_LANE_CMU_18_R_PLL_RSTN,
+                priv,
+                SD25G_LANE_CMU_18(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_18_R_PLL_RSTN_SET(1),
+                SD25G_LANE_CMU_18_R_PLL_RSTN,
+                priv,
+                SD25G_LANE_CMU_18(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0_SET(params->r_d_width_ctrl_2_0),
+                SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0,
+                priv,
+                SD25G_LANE_CMU_1A(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0_SET
+                (params->r_txfifo_ck_div_pmad_2_0) |
+                SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0_SET
+                (params->r_rxfifo_ck_div_pmad_2_0),
+                SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0 |
+                SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0,
+                priv,
+                SD25G_LANE_CMU_30(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET_SET(params->cfg_pll_lol_set) |
+                SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0_SET
+                (params->cfg_vco_div_mode_1_0),
+                SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET |
+                SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0,
+                priv,
+                SD25G_LANE_CMU_0C(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0_SET
+                (params->cfg_pre_divsel_1_0),
+                SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0,
+                priv,
+                SD25G_LANE_CMU_0D(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0_SET(params->cfg_sel_div_3_0),
+                SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0,
+                priv,
+                SD25G_LANE_CMU_0E(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(0x00),
+                SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX,
+                priv,
+                SD25G_LANE_CMU_FF(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0_SET
+                (params->cfg_pma_tx_ck_bitwidth_2_0),
+                SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0,
+                priv,
+                SD25G_LANE_LANE_0C(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0_SET
+                (params->cfg_tx_prediv_1_0),
+                SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0,
+                priv,
+                SD25G_LANE_LANE_01(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0_SET
+                (params->cfg_rxdiv_sel_2_0),
+                SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0,
+                priv,
+                SD25G_LANE_LANE_18(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0_SET
+                (params->cfg_tx_subrate_2_0),
+                SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0,
+                priv,
+                SD25G_LANE_LANE_2C(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0_SET
+                (params->cfg_rx_subrate_2_0),
+                SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0,
+                priv,
+                SD25G_LANE_LANE_28(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN_SET(params->cfg_cdrck_en),
+                SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN,
+                priv,
+                SD25G_LANE_LANE_18(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1_SET
+                (params->cfg_dfetap_en_5_1),
+                SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1,
+                priv,
+                SD25G_LANE_LANE_0F(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD_SET(params->cfg_erramp_pd),
+                SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD,
+                priv,
+                SD25G_LANE_LANE_18(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN_SET(params->cfg_pi_dfe_en),
+                SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN,
+                priv,
+                SD25G_LANE_LANE_1D(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_19_LN_CFG_ECDR_PD_SET(params->cfg_ecdr_pd),
+                SD25G_LANE_LANE_19_LN_CFG_ECDR_PD,
+                priv,
+                SD25G_LANE_LANE_19(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0_SET
+                (params->cfg_itx_ipdriver_base_2_0),
+                SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0,
+                priv,
+                SD25G_LANE_LANE_01(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0_SET(params->cfg_tap_dly_4_0),
+                SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0,
+                priv,
+                SD25G_LANE_LANE_03(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0_SET(params->cfg_tap_adv_3_0),
+                SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0,
+                priv,
+                SD25G_LANE_LANE_06(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_07_LN_CFG_EN_ADV_SET(params->cfg_en_adv) |
+                SD25G_LANE_LANE_07_LN_CFG_EN_DLY_SET(params->cfg_en_dly),
+                SD25G_LANE_LANE_07_LN_CFG_EN_ADV |
+                SD25G_LANE_LANE_07_LN_CFG_EN_DLY,
+                priv,
+                SD25G_LANE_LANE_07(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8_SET
+                (params->cfg_tx_reserve_15_8),
+                SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8,
+                priv,
+                SD25G_LANE_LANE_43(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0_SET
+                (params->cfg_tx_reserve_7_0),
+                SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0,
+                priv,
+                SD25G_LANE_LANE_42(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_05_LN_CFG_BW_1_0_SET(params->cfg_bw_1_0),
+                SD25G_LANE_LANE_05_LN_CFG_BW_1_0,
+                priv,
+                SD25G_LANE_LANE_05(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN_SET
+                (params->cfg_txcal_man_en),
+                SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN,
+                priv,
+                SD25G_LANE_LANE_0B(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0_SET
+                (params->cfg_txcal_shift_code_5_0),
+                SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0,
+                priv,
+                SD25G_LANE_LANE_0A(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0_SET
+                (params->cfg_txcal_valid_sel_3_0),
+                SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0,
+                priv,
+                SD25G_LANE_LANE_09(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0_SET(params->cfg_cdr_kf_2_0),
+                SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0,
+                priv,
+                SD25G_LANE_LANE_1A(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0_SET(params->cfg_cdr_m_7_0),
+                SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0,
+                priv,
+                SD25G_LANE_LANE_1B(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0_SET(params->cfg_pi_bw_3_0),
+                SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0,
+                priv,
+                SD25G_LANE_LANE_2B(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER_SET
+                (params->cfg_dis_2ndorder),
+                SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER,
+                priv,
+                SD25G_LANE_LANE_2C(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN_SET(params->cfg_ctle_rstn),
+                SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN,
+                priv,
+                SD25G_LANE_LANE_2E(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0_SET
+                (params->cfg_itx_ipcml_base_1_0),
+                SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0,
+                priv,
+                SD25G_LANE_LANE_00(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0_SET
+                (params->cfg_rx_reserve_7_0),
+                SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0,
+                priv,
+                SD25G_LANE_LANE_44(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8_SET
+                (params->cfg_rx_reserve_15_8),
+                SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8,
+                priv,
+                SD25G_LANE_LANE_45(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN_SET(params->cfg_dfeck_en) |
+                SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0_SET(params->cfg_rxterm_2_0),
+                SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN |
+                SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0,
+                priv,
+                SD25G_LANE_LANE_0D(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0_SET
+                (params->cfg_vga_ctrl_byp_4_0),
+                SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0,
+                priv,
+                SD25G_LANE_LANE_21(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0_SET
+                (params->cfg_eqr_force_3_0),
+                SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0,
+                priv,
+                SD25G_LANE_LANE_22(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0_SET
+                (params->cfg_eqc_force_3_0) |
+                SD25G_LANE_LANE_1C_LN_CFG_DFE_PD_SET(params->cfg_dfe_pd),
+                SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0 |
+                SD25G_LANE_LANE_1C_LN_CFG_DFE_PD,
+                priv,
+                SD25G_LANE_LANE_1C(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN_SET
+                (params->cfg_sum_setcm_en),
+                SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN,
+                priv,
+                SD25G_LANE_LANE_1E(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0_SET
+                (params->cfg_init_pos_iscan_6_0),
+                SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0,
+                priv,
+                SD25G_LANE_LANE_25(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0_SET
+                (params->cfg_init_pos_ipi_6_0),
+                SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0,
+                priv,
+                SD25G_LANE_LANE_26(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD_SET(params->cfg_erramp_pd),
+                SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD,
+                priv,
+                SD25G_LANE_LANE_18(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0_SET
+                (params->cfg_dfedig_m_2_0),
+                SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0,
+                priv,
+                SD25G_LANE_LANE_0E(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG_SET(params->cfg_en_dfedig),
+                SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG,
+                priv,
+                SD25G_LANE_LANE_0E(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_40_LN_R_TX_POL_INV_SET(params->r_tx_pol_inv) |
+                SD25G_LANE_LANE_40_LN_R_RX_POL_INV_SET(params->r_rx_pol_inv),
+                SD25G_LANE_LANE_40_LN_R_TX_POL_INV |
+                SD25G_LANE_LANE_40_LN_R_RX_POL_INV,
+                priv,
+                SD25G_LANE_LANE_40(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN_SET(params->cfg_rx2tx_lp_en) |
+                SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN_SET(params->cfg_tx2rx_lp_en),
+                SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN |
+                SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN,
+                priv,
+                SD25G_LANE_LANE_04(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN_SET(params->cfg_rxlb_en),
+                SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN,
+                priv,
+                SD25G_LANE_LANE_1E(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_19_LN_CFG_TXLB_EN_SET(params->cfg_txlb_en),
+                SD25G_LANE_LANE_19_LN_CFG_TXLB_EN,
+                priv,
+                SD25G_LANE_LANE_19(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG_SET(0),
+                SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG,
+                priv,
+                SD25G_LANE_LANE_2E(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG_SET(1),
+                SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG,
+                priv,
+                SD25G_LANE_LANE_2E(sd_index));
+
+       sdx5_rmw(SD_LANE_25G_SD_LANE_CFG_MACRO_RST_SET(0),
+                SD_LANE_25G_SD_LANE_CFG_MACRO_RST,
+                priv,
+                SD_LANE_25G_SD_LANE_CFG(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN_SET(0),
+                SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN,
+                priv,
+                SD25G_LANE_LANE_1C(sd_index));
+
+       usleep_range(1000, 2000);
+
+       sdx5_rmw(SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN_SET(1),
+                SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN,
+                priv,
+                SD25G_LANE_LANE_1C(sd_index));
+
+       usleep_range(10000, 20000);
+
+       sdx5_rmw(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(0xff),
+                SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX,
+                priv,
+                SD25G_LANE_CMU_FF(sd_index));
+
+       value = readl(sdx5_addr(regs, SD25G_LANE_CMU_C0(sd_index)));
+       value = SD25G_LANE_CMU_C0_PLL_LOL_UDL_GET(value);
+
+       if (value) {
+               dev_err(dev, "25G PLL Loss of Lock: 0x%x\n", value);
+               return -EINVAL;
+       }
+
+       value = readl(sdx5_addr(regs, SD_LANE_25G_SD_LANE_STAT(sd_index)));
+       value = SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE_GET(value);
+
+       if (value != 0x1) {
+               dev_err(dev, "25G PMA Reset failed: 0x%x\n", value);
+               return -EINVAL;
+       }
+       sdx5_rmw(SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS_SET(0x1),
+                SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS,
+                priv,
+                SD25G_LANE_CMU_2A(sd_index));
+
+       sdx5_rmw(SD_LANE_25G_SD_SER_RST_SER_RST_SET(0x0),
+                SD_LANE_25G_SD_SER_RST_SER_RST,
+                priv,
+                SD_LANE_25G_SD_SER_RST(sd_index));
+
+       sdx5_rmw(SD_LANE_25G_SD_DES_RST_DES_RST_SET(0x0),
+                SD_LANE_25G_SD_DES_RST_DES_RST,
+                priv,
+                SD_LANE_25G_SD_DES_RST(sd_index));
+
+       sdx5_rmw(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(0),
+                SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX,
+                priv,
+                SD25G_LANE_CMU_FF(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0_SET
+                (params->cfg_alos_thr_2_0),
+                SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0,
+                priv,
+                SD25G_LANE_LANE_2D(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ_SET(0),
+                SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ,
+                priv,
+                SD25G_LANE_LANE_2E(sd_index));
+
+       sdx5_rmw(SD25G_LANE_LANE_2E_LN_CFG_PD_SQ_SET(0),
+                SD25G_LANE_LANE_2E_LN_CFG_PD_SQ,
+                priv,
+                SD25G_LANE_LANE_2E(sd_index));
+
+       return 0;
+}
+
+/* Pulse the external configuration reset of a 10G/6G serdes lane.
+ *
+ * The EXT_CFG_RST bit in the lane configuration register is asserted,
+ * held for at least 1 ms, and then released again.
+ * Note: SerDes SD10G_LANE_1 is configured in 10G_LAN mode.
+ */
+static void sparx5_sd10g28_reset(void __iomem *regs[], u32 lane_index)
+{
+       void __iomem *lane_cfg = sdx5_addr(regs, SD_LANE_SD_LANE_CFG(lane_index));
+
+       /* Assert the external configuration reset */
+       sdx5_rmw_addr(SD_LANE_SD_LANE_CFG_EXT_CFG_RST_SET(1),
+                     SD_LANE_SD_LANE_CFG_EXT_CFG_RST,
+                     lane_cfg);
+
+       usleep_range(1000, 2000);
+
+       /* Release the reset */
+       sdx5_rmw_addr(SD_LANE_SD_LANE_CFG_EXT_CFG_RST_SET(0),
+                     SD_LANE_SD_LANE_CFG_EXT_CFG_RST,
+                     lane_cfg);
+}
+
+static int sparx5_sd10g28_apply_params(struct sparx5_serdes_macro *macro,
+                                      struct sparx5_sd10g28_params *params)
+{
+       struct sparx5_serdes_private *priv = macro->priv;
+       void __iomem **regs = priv->regs;
+       struct device *dev = priv->dev;
+       u32 lane_index = macro->sidx;
+       u32 sd_index = macro->stpidx;
+       void __iomem *sd_inst;
+       u32 value;
+
+       if (params->is_6g)
+               sd_inst = sdx5_inst_get(priv, TARGET_SD6G_LANE, sd_index);
+       else
+               sd_inst = sdx5_inst_get(priv, TARGET_SD10G_LANE, sd_index);
+
+       sdx5_rmw(SD_LANE_SD_LANE_CFG_MACRO_RST_SET(1),
+                SD_LANE_SD_LANE_CFG_MACRO_RST,
+                priv,
+                SD_LANE_SD_LANE_CFG(lane_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT_SET(0x0) |
+                     SD10G_LANE_LANE_93_R_REG_MANUAL_SET(0x1) |
+                     SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT_SET(0x1) |
+                     SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT_SET(0x1) |
+                     SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL_SET(0x0),
+                     SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT |
+                     SD10G_LANE_LANE_93_R_REG_MANUAL |
+                     SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT |
+                     SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT |
+                     SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL,
+                     sd_inst,
+                     SD10G_LANE_LANE_93(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_94_R_ISCAN_REG_SET(0x1) |
+                     SD10G_LANE_LANE_94_R_TXEQ_REG_SET(0x1) |
+                     SD10G_LANE_LANE_94_R_MISC_REG_SET(0x1) |
+                     SD10G_LANE_LANE_94_R_SWING_REG_SET(0x1),
+                     SD10G_LANE_LANE_94_R_ISCAN_REG |
+                     SD10G_LANE_LANE_94_R_TXEQ_REG |
+                     SD10G_LANE_LANE_94_R_MISC_REG |
+                     SD10G_LANE_LANE_94_R_SWING_REG,
+                     sd_inst,
+                     SD10G_LANE_LANE_94(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_9E_R_RXEQ_REG_SET(0x1),
+                     SD10G_LANE_LANE_9E_R_RXEQ_REG,
+                     sd_inst,
+                     SD10G_LANE_LANE_9E(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_A1_R_SSC_FROM_HWT_SET(0x0) |
+                     SD10G_LANE_LANE_A1_R_CDR_FROM_HWT_SET(0x0) |
+                     SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT_SET(0x1),
+                     SD10G_LANE_LANE_A1_R_SSC_FROM_HWT |
+                     SD10G_LANE_LANE_A1_R_CDR_FROM_HWT |
+                     SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT,
+                     sd_inst,
+                     SD10G_LANE_LANE_A1(sd_index));
+
+       sdx5_rmw(SD_LANE_SD_LANE_CFG_RX_REF_SEL_SET(params->cmu_sel) |
+                SD_LANE_SD_LANE_CFG_TX_REF_SEL_SET(params->cmu_sel),
+                SD_LANE_SD_LANE_CFG_RX_REF_SEL |
+                SD_LANE_SD_LANE_CFG_TX_REF_SEL,
+                priv,
+                SD_LANE_SD_LANE_CFG(lane_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0_SET
+                     (params->cfg_lane_reserve_7_0),
+                     SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_40(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL_SET
+                     (params->cfg_ssc_rtl_clk_sel),
+                     SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL,
+                     sd_inst,
+                     SD10G_LANE_LANE_50(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_35_CFG_TXRATE_1_0_SET
+                     (params->cfg_txrate_1_0) |
+                     SD10G_LANE_LANE_35_CFG_RXRATE_1_0_SET
+                     (params->cfg_rxrate_1_0),
+                     SD10G_LANE_LANE_35_CFG_TXRATE_1_0 |
+                     SD10G_LANE_LANE_35_CFG_RXRATE_1_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_35(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0_SET
+                     (params->r_d_width_ctrl_2_0),
+                     SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_94(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0_SET
+                     (params->cfg_pma_tx_ck_bitwidth_2_0),
+                     SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_01(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0_SET
+                     (params->cfg_rxdiv_sel_2_0),
+                     SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_30(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0_SET
+                     (params->r_pcs2pma_phymode_4_0),
+                     SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_A2(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_13_CFG_CDRCK_EN_SET(params->cfg_cdrck_en),
+                     SD10G_LANE_LANE_13_CFG_CDRCK_EN,
+                     sd_inst,
+                     SD10G_LANE_LANE_13(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_23_CFG_DFECK_EN_SET
+                     (params->cfg_dfeck_en) |
+                     SD10G_LANE_LANE_23_CFG_DFE_PD_SET(params->cfg_dfe_pd) |
+                     SD10G_LANE_LANE_23_CFG_ERRAMP_PD_SET
+                     (params->cfg_erramp_pd),
+                     SD10G_LANE_LANE_23_CFG_DFECK_EN |
+                     SD10G_LANE_LANE_23_CFG_DFE_PD |
+                     SD10G_LANE_LANE_23_CFG_ERRAMP_PD,
+                     sd_inst,
+                     SD10G_LANE_LANE_23(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1_SET
+                     (params->cfg_dfetap_en_5_1),
+                     SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1,
+                     sd_inst,
+                     SD10G_LANE_LANE_22(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_1A_CFG_PI_DFE_EN_SET
+                     (params->cfg_pi_DFE_en),
+                     SD10G_LANE_LANE_1A_CFG_PI_DFE_EN,
+                     sd_inst,
+                     SD10G_LANE_LANE_1A(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_02_CFG_EN_ADV_SET(params->cfg_en_adv) |
+                     SD10G_LANE_LANE_02_CFG_EN_MAIN_SET(params->cfg_en_main) |
+                     SD10G_LANE_LANE_02_CFG_EN_DLY_SET(params->cfg_en_dly) |
+                     SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0_SET
+                     (params->cfg_tap_adv_3_0),
+                     SD10G_LANE_LANE_02_CFG_EN_ADV |
+                     SD10G_LANE_LANE_02_CFG_EN_MAIN |
+                     SD10G_LANE_LANE_02_CFG_EN_DLY |
+                     SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_02(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_03_CFG_TAP_MAIN_SET(params->cfg_tap_main),
+                     SD10G_LANE_LANE_03_CFG_TAP_MAIN,
+                     sd_inst,
+                     SD10G_LANE_LANE_03(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0_SET
+                     (params->cfg_tap_dly_4_0),
+                     SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_04(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0_SET
+                     (params->cfg_vga_ctrl_3_0),
+                     SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_2F(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0_SET
+                     (params->cfg_vga_cp_2_0),
+                     SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_2F(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0_SET
+                     (params->cfg_eq_res_3_0),
+                     SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_0B(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_0D_CFG_EQR_BYP_SET(params->cfg_eq_r_byp),
+                     SD10G_LANE_LANE_0D_CFG_EQR_BYP,
+                     sd_inst,
+                     SD10G_LANE_LANE_0D(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0_SET
+                     (params->cfg_eq_c_force_3_0) |
+                     SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN_SET
+                     (params->cfg_sum_setcm_en),
+                     SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0 |
+                     SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN,
+                     sd_inst,
+                     SD10G_LANE_LANE_0E(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_23_CFG_EN_DFEDIG_SET
+                     (params->cfg_en_dfedig),
+                     SD10G_LANE_LANE_23_CFG_EN_DFEDIG,
+                     sd_inst,
+                     SD10G_LANE_LANE_23(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_06_CFG_EN_PREEMPH_SET
+                     (params->cfg_en_preemph),
+                     SD10G_LANE_LANE_06_CFG_EN_PREEMPH,
+                     sd_inst,
+                     SD10G_LANE_LANE_06(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0_SET
+                     (params->cfg_itx_ippreemp_base_1_0) |
+                     SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0_SET
+                     (params->cfg_itx_ipdriver_base_2_0),
+                     SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0 |
+                     SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_33(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0_SET
+                     (params->cfg_ibias_tune_reserve_5_0),
+                     SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_52(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_37_CFG_TXSWING_HALF_SET
+                     (params->cfg_txswing_half),
+                     SD10G_LANE_LANE_37_CFG_TXSWING_HALF,
+                     sd_inst,
+                     SD10G_LANE_LANE_37(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER_SET
+                     (params->cfg_dis_2nd_order),
+                     SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER,
+                     sd_inst,
+                     SD10G_LANE_LANE_3C(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_39_CFG_RX_SSC_LH_SET
+                     (params->cfg_rx_ssc_lh),
+                     SD10G_LANE_LANE_39_CFG_RX_SSC_LH,
+                     sd_inst,
+                     SD10G_LANE_LANE_39(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0_SET
+                     (params->cfg_pi_floop_steps_1_0),
+                     SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_1A(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16_SET
+                     (params->cfg_pi_ext_dac_23_16),
+                     SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16,
+                     sd_inst,
+                     SD10G_LANE_LANE_16(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8_SET
+                     (params->cfg_pi_ext_dac_15_8),
+                     SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8,
+                     sd_inst,
+                     SD10G_LANE_LANE_15(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0_SET
+                     (params->cfg_iscan_ext_dac_7_0),
+                     SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_26(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0_SET
+                     (params->cfg_cdr_kf_gen1_2_0),
+                     SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_42(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0_SET
+                     (params->r_cdr_m_gen1_7_0),
+                     SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_0F(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0_SET
+                     (params->cfg_pi_bw_gen1_3_0),
+                     SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_24(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0_SET
+                     (params->cfg_pi_ext_dac_7_0),
+                     SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_14(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_1A_CFG_PI_STEPS_SET(params->cfg_pi_steps),
+                     SD10G_LANE_LANE_1A_CFG_PI_STEPS,
+                     sd_inst,
+                     SD10G_LANE_LANE_1A(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0_SET
+                     (params->cfg_mp_max_3_0),
+                     SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_3A(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG_SET
+                     (params->cfg_rstn_dfedig),
+                     SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG,
+                     sd_inst,
+                     SD10G_LANE_LANE_31(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0_SET
+                     (params->cfg_alos_thr_3_0),
+                     SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_48(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0_SET
+                     (params->cfg_predrv_slewrate_1_0),
+                     SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_36(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0_SET
+                     (params->cfg_itx_ipcml_base_1_0),
+                     SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_32(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0_SET
+                     (params->cfg_ip_pre_base_1_0),
+                     SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0,
+                     sd_inst,
+                     SD10G_LANE_LANE_37(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8_SET
+                     (params->cfg_lane_reserve_15_8),
+                     SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8,
+                     sd_inst,
+                     SD10G_LANE_LANE_41(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN_SET
+                     (params->r_en_auto_cdr_rstn),
+                     SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN,
+                     sd_inst,
+                     SD10G_LANE_LANE_9E(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_0C_CFG_OSCAL_AFE_SET
+                     (params->cfg_oscal_afe) |
+                     SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE_SET
+                     (params->cfg_pd_osdac_afe),
+                     SD10G_LANE_LANE_0C_CFG_OSCAL_AFE |
+                     SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE,
+                     sd_inst,
+                     SD10G_LANE_LANE_0C(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE_SET
+                     (params->cfg_resetb_oscal_afe[0]),
+                     SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE,
+                     sd_inst,
+                     SD10G_LANE_LANE_0B(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE_SET
+                     (params->cfg_resetb_oscal_afe[1]),
+                     SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE,
+                     sd_inst,
+                     SD10G_LANE_LANE_0B(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_83_R_TX_POL_INV_SET
+                     (params->r_tx_pol_inv) |
+                     SD10G_LANE_LANE_83_R_RX_POL_INV_SET
+                     (params->r_rx_pol_inv),
+                     SD10G_LANE_LANE_83_R_TX_POL_INV |
+                     SD10G_LANE_LANE_83_R_RX_POL_INV,
+                     sd_inst,
+                     SD10G_LANE_LANE_83(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN_SET
+                     (params->cfg_rx2tx_lp_en) |
+                     SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN_SET
+                     (params->cfg_tx2rx_lp_en),
+                     SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN |
+                     SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN,
+                     sd_inst,
+                     SD10G_LANE_LANE_06(sd_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_0E_CFG_RXLB_EN_SET(params->cfg_rxlb_en) |
+                     SD10G_LANE_LANE_0E_CFG_TXLB_EN_SET(params->cfg_txlb_en),
+                     SD10G_LANE_LANE_0E_CFG_RXLB_EN |
+                     SD10G_LANE_LANE_0E_CFG_TXLB_EN,
+                     sd_inst,
+                     SD10G_LANE_LANE_0E(sd_index));
+
+       sdx5_rmw(SD_LANE_SD_LANE_CFG_MACRO_RST_SET(0),
+                SD_LANE_SD_LANE_CFG_MACRO_RST,
+                priv,
+                SD_LANE_SD_LANE_CFG(lane_index));
+
+       sdx5_inst_rmw(SD10G_LANE_LANE_50_CFG_SSC_RESETB_SET(1),
+                     SD10G_LANE_LANE_50_CFG_SSC_RESETB,
+                     sd_inst,
+                     SD10G_LANE_LANE_50(sd_index));
+
+       sdx5_rmw(SD10G_LANE_LANE_50_CFG_SSC_RESETB_SET(1),
+                SD10G_LANE_LANE_50_CFG_SSC_RESETB,
+                priv,
+                SD10G_LANE_LANE_50(sd_index));
+
+       sdx5_rmw(SD_LANE_MISC_SD_125_RST_DIS_SET(params->fx_100),
+                SD_LANE_MISC_SD_125_RST_DIS,
+                priv,
+                SD_LANE_MISC(lane_index));
+
+       sdx5_rmw(SD_LANE_MISC_RX_ENA_SET(params->fx_100),
+                SD_LANE_MISC_RX_ENA,
+                priv,
+                SD_LANE_MISC(lane_index));
+
+       sdx5_rmw(SD_LANE_MISC_MUX_ENA_SET(params->fx_100),
+                SD_LANE_MISC_MUX_ENA,
+                priv,
+                SD_LANE_MISC(lane_index));
+
+       usleep_range(3000, 6000);
+
+       value = readl(sdx5_addr(regs, SD_LANE_SD_LANE_STAT(lane_index)));
+       value = SD_LANE_SD_LANE_STAT_PMA_RST_DONE_GET(value);
+       if (value != 1) {
+               dev_err(dev, "10G PMA Reset failed: 0x%x\n", value);
+               return -EINVAL;
+       }
+
+       sdx5_rmw(SD_LANE_SD_SER_RST_SER_RST_SET(0x0),
+                SD_LANE_SD_SER_RST_SER_RST,
+                priv,
+                SD_LANE_SD_SER_RST(lane_index));
+
+       sdx5_rmw(SD_LANE_SD_DES_RST_DES_RST_SET(0x0),
+                SD_LANE_SD_DES_RST_DES_RST,
+                priv,
+                SD_LANE_SD_DES_RST(lane_index));
+
+       return 0;
+}
+
+/* Configure a 25G serdes macro for its current media, port mode and speed.
+ * A mode preset is looked up first (fails for unsupported combinations),
+ * then media, mode and the fixed args below are folded into a parameter
+ * set that is applied to the lane registers.
+ * @reset: passed through as .reg_rst to request a register reset sequence.
+ * Returns 0 on success or a negative error code.
+ */
+static int sparx5_sd25g28_config(struct sparx5_serdes_macro *macro, bool reset)
+{
+       struct sparx5_sd25g28_media_preset media = media_presets_25g[macro->media];
+       struct sparx5_sd25g28_mode_preset mode;
+       struct sparx5_sd25g28_args args = {
+               .rxinvert = 1,
+               .txinvert = 0,
+               .txswing = 240,
+               .com_pll_reserve = 0xf,
+               .reg_rst = reset,
+       };
+       struct sparx5_sd25g28_params params;
+       int err;
+
+       /* NOTE(review): helper is named "sd10g25" although it serves the 25G
+        * macros - presumably historical; confirm against its definition.
+        */
+       err = sparx5_sd10g25_get_mode_preset(macro, &mode);
+       if (err)
+               return err;
+       sparx5_sd25g28_get_params(macro, &media, &mode, &args, &params);
+       sparx5_sd25g28_reset(macro->priv->regs, &params, macro->stpidx);
+       return sparx5_sd25g28_apply_params(macro, &params);
+}
+
+/* Configure a 6G or 10G serdes macro (both use the 10G register layout)
+ * for its current media, port mode and speed.
+ * @reset: passed through as .reg_rst to request a register reset sequence.
+ * Returns 0 on success or a negative error code.
+ */
+static int sparx5_sd10g28_config(struct sparx5_serdes_macro *macro, bool reset)
+{
+       struct sparx5_sd10g28_media_preset media = media_presets_10g[macro->media];
+       struct sparx5_sd10g28_mode_preset mode;
+       struct sparx5_sd10g28_params params;
+       struct sparx5_sd10g28_args args = {
+               /* 6G macros share this path but are flagged for the helpers */
+               .is_6g = (macro->serdestype == SPX5_SDT_6G),
+               .txinvert = 0,
+               .rxinvert = 1,
+               .txswing = 240,
+               .reg_rst = reset,
+       };
+       int err;
+
+       err = sparx5_sd10g28_get_mode_preset(macro, &mode, &args);
+       if (err)
+               return err;
+       sparx5_sd10g28_get_params(macro, &media, &mode, &args, &params);
+       sparx5_sd10g28_reset(macro->priv->regs, macro->sidx);
+       return sparx5_sd10g28_apply_params(macro, &params);
+}
+
+/* Power the serdes TX driver down or up.
+ * @pwdn is written to the lane's PD_DRIVER field (non-zero powers down).
+ * Always returns 0.
+ */
+static int sparx5_serdes_power_save(struct sparx5_serdes_macro *macro, u32 pwdn)
+{
+       struct sparx5_serdes_private *priv = macro->priv;
+       void __iomem *sd_inst;
+
+       /* Pick the lane register target matching the macro flavor */
+       switch (macro->serdestype) {
+       case SPX5_SDT_6G:
+               sd_inst = sdx5_inst_get(priv, TARGET_SD6G_LANE, macro->stpidx);
+               break;
+       case SPX5_SDT_10G:
+               sd_inst = sdx5_inst_get(priv, TARGET_SD10G_LANE, macro->stpidx);
+               break;
+       default:
+               sd_inst = sdx5_inst_get(priv, TARGET_SD25G_LANE, macro->stpidx);
+               break;
+       }
+
+       if (macro->serdestype == SPX5_SDT_25G) {
+               sdx5_inst_rmw(SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER_SET(pwdn),
+                             SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER,
+                             sd_inst,
+                             SD25G_LANE_LANE_04(0));
+       } else {
+               /* 6G and 10G lanes share the 10G register layout */
+               sdx5_inst_rmw(SD10G_LANE_LANE_06_CFG_PD_DRIVER_SET(pwdn),
+                             SD10G_LANE_LANE_06_CFG_PD_DRIVER,
+                             sd_inst,
+                             SD10G_LANE_LANE_06(0));
+       }
+       return 0;
+}
+
+/* Program the per-lane core clock frequency selector.
+ * Only needed in 100BASE-FX mode; a no-op otherwise. Always returns 0.
+ */
+static int sparx5_serdes_clock_config(struct sparx5_serdes_macro *macro)
+{
+       struct sparx5_serdes_private *priv = macro->priv;
+       u32 freq;
+
+       if (macro->serdesmode != SPX5_SD_MODE_100FX)
+               return 0;
+
+       /* Encode the core clock rate: 250 MHz -> 2, 500 MHz -> 1, else 0 */
+       if (priv->coreclock == 250000000)
+               freq = 2;
+       else if (priv->coreclock == 500000000)
+               freq = 1;
+       else
+               freq = 0;
+
+       sdx5_rmw(SD_LANE_MISC_CORE_CLK_FREQ_SET(freq),
+                SD_LANE_MISC_CORE_CLK_FREQ,
+                priv,
+                SD_LANE_MISC(macro->sidx));
+       return 0;
+}
+
+/* Configure and start one CMU (clock multiplier unit) PLL and verify that
+ * it locks. The register sequence and its ordering follow the hardware
+ * bring-up procedure; do not reorder.
+ * Returns 0 on success or -EINVAL if the PLL reports loss of lock.
+ *
+ * NOTE(review): the cmu_tgt/cmu_cfg_tgt arguments and, for the listed
+ * indices, spd10g are unconditionally recomputed/overridden below, so the
+ * caller-supplied values are redundant - candidate for cleanup.
+ */
+static int sparx5_cmu_apply_cfg(struct sparx5_serdes_private *priv,
+                               u32 cmu_idx,
+                               void __iomem *cmu_tgt,
+                               void __iomem *cmu_cfg_tgt,
+                               u32 spd10g)
+{
+       void __iomem **regs = priv->regs;
+       struct device *dev = priv->dev;
+       int value;
+
+       cmu_tgt = sdx5_inst_get(priv, TARGET_SD_CMU, cmu_idx);
+       cmu_cfg_tgt = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, cmu_idx);
+
+       /* These CMU instances never run in 10G mode (presumably dedicated
+        * to another reference rate - confirm against the datasheet).
+        */
+       if (cmu_idx == 1 || cmu_idx == 4 || cmu_idx == 7 ||
+           cmu_idx == 10 || cmu_idx == 13) {
+               spd10g = 0;
+       }
+
+       /* Pulse the external configuration reset */
+       sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(1),
+                     SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
+                     cmu_cfg_tgt,
+                     SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+       sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(0),
+                     SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
+                     cmu_cfg_tgt,
+                     SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+       /* Hold the CMU in reset while it is being configured */
+       sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(1),
+                     SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
+                     cmu_cfg_tgt,
+                     SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+       sdx5_inst_rmw(SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT_SET(0x1) |
+                     SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT_SET(0x1) |
+                     SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT_SET(0x1) |
+                     SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT_SET(0x1) |
+                     SD_CMU_CMU_45_R_EN_RATECHG_CTRL_SET(0x0),
+                     SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT |
+                     SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT |
+                     SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT |
+                     SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT |
+                     SD_CMU_CMU_45_R_EN_RATECHG_CTRL,
+                     cmu_tgt,
+                     SD_CMU_CMU_45(cmu_idx));
+
+       sdx5_inst_rmw(SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_SET(0),
+                     SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0,
+                     cmu_tgt,
+                     SD_CMU_CMU_47(cmu_idx));
+
+       sdx5_inst_rmw(SD_CMU_CMU_1B_CFG_RESERVE_7_0_SET(0),
+                     SD_CMU_CMU_1B_CFG_RESERVE_7_0,
+                     cmu_tgt,
+                     SD_CMU_CMU_1B(cmu_idx));
+
+       sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_JC_BYP_SET(0x1),
+                     SD_CMU_CMU_0D_CFG_JC_BYP,
+                     cmu_tgt,
+                     SD_CMU_CMU_0D(cmu_idx));
+
+       sdx5_inst_rmw(SD_CMU_CMU_1F_CFG_VTUNE_SEL_SET(1),
+                     SD_CMU_CMU_1F_CFG_VTUNE_SEL,
+                     cmu_tgt,
+                     SD_CMU_CMU_1F(cmu_idx));
+
+       sdx5_inst_rmw(SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0_SET(3),
+                     SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0,
+                     cmu_tgt,
+                     SD_CMU_CMU_00(cmu_idx));
+
+       sdx5_inst_rmw(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_SET(3),
+                     SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0,
+                     cmu_tgt,
+                     SD_CMU_CMU_05(cmu_idx));
+
+       sdx5_inst_rmw(SD_CMU_CMU_30_R_PLL_DLOL_EN_SET(1),
+                     SD_CMU_CMU_30_R_PLL_DLOL_EN,
+                     cmu_tgt,
+                     SD_CMU_CMU_30(cmu_idx));
+
+       /* Select 10G operation for this CMU (forced off for some indices) */
+       sdx5_inst_rmw(SD_CMU_CMU_09_CFG_SW_10G_SET(spd10g),
+                     SD_CMU_CMU_09_CFG_SW_10G,
+                     cmu_tgt,
+                     SD_CMU_CMU_09(cmu_idx));
+
+       /* Release the CMU reset now that it is configured */
+       sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(0),
+                     SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
+                     cmu_cfg_tgt,
+                     SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+       msleep(20);
+
+       /* Cycle the PLL reset and give the PLL time to lock */
+       sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(0),
+                     SD_CMU_CMU_44_R_PLL_RSTN,
+                     cmu_tgt,
+                     SD_CMU_CMU_44(cmu_idx));
+
+       sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(1),
+                     SD_CMU_CMU_44_R_PLL_RSTN,
+                     cmu_tgt,
+                     SD_CMU_CMU_44(cmu_idx));
+
+       msleep(20);
+
+       /* Check the loss-of-lock status bit */
+       value = readl(sdx5_addr(regs, SD_CMU_CMU_E0(cmu_idx)));
+       value = SD_CMU_CMU_E0_PLL_LOL_UDL_GET(value);
+
+       if (value) {
+               dev_err(dev, "CMU PLL Loss of Lock: 0x%x\n", value);
+               return -EINVAL;
+       }
+       /* Clear the PMA TX clock power-down */
+       sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD_SET(0),
+                     SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD,
+                     cmu_tgt,
+                     SD_CMU_CMU_0D(cmu_idx));
+       return 0;
+}
+
+/* Resolve the register targets for one CMU and apply its configuration.
+ * Certain CMU instances are never switched to 10G mode.
+ */
+static int sparx5_cmu_cfg(struct sparx5_serdes_private *priv, u32 cmu_idx)
+{
+       void __iomem *cmu_tgt, *cmu_cfg_tgt;
+       u32 spd10g;
+
+       switch (cmu_idx) {
+       case 1:
+       case 4:
+       case 7:
+       case 10:
+       case 13:
+               spd10g = 0;
+               break;
+       default:
+               spd10g = 1;
+               break;
+       }
+
+       cmu_tgt = sdx5_inst_get(priv, TARGET_SD_CMU, cmu_idx);
+       cmu_cfg_tgt = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, cmu_idx);
+
+       return sparx5_cmu_apply_cfg(priv, cmu_idx, cmu_tgt, cmu_cfg_tgt, spd10g);
+}
+
+/* Configure all CMUs once; subsequent calls are no-ops.
+ * Returns 0 on success or the first CMU configuration error.
+ */
+static int sparx5_serdes_cmu_enable(struct sparx5_serdes_private *priv)
+{
+       int idx;
+
+       if (priv->cmu_enabled)
+               return 0;
+
+       for (idx = 0; idx < SPX5_CMU_MAX; idx++) {
+               int err = sparx5_cmu_cfg(priv, idx);
+
+               if (err) {
+                       dev_err(priv->dev, "CMU %u, error: %d\n", idx, err);
+                       return err;
+               }
+       }
+       priv->cmu_enabled = true;
+       return 0;
+}
+
+/* Map a phy interface mode and speed to the internal serdes mode.
+ * Returns a SPX5_SD_MODE_* value or -EINVAL for unsupported port modes.
+ */
+static int sparx5_serdes_get_serdesmode(phy_interface_t portmode, int speed)
+{
+       if (portmode == PHY_INTERFACE_MODE_1000BASEX ||
+           portmode == PHY_INTERFACE_MODE_2500BASEX) {
+               if (speed == SPEED_2500)
+                       return SPX5_SD_MODE_2G5;
+               if (speed == SPEED_100)
+                       return SPX5_SD_MODE_100FX;
+               return SPX5_SD_MODE_1000BASEX;
+       }
+       /* The same Serdes mode is used for both SGMII and 1000BaseX */
+       if (portmode == PHY_INTERFACE_MODE_SGMII)
+               return SPX5_SD_MODE_1000BASEX;
+       if (portmode == PHY_INTERFACE_MODE_QSGMII)
+               return SPX5_SD_MODE_QSGMII;
+       if (portmode == PHY_INTERFACE_MODE_10GBASER)
+               return SPX5_SD_MODE_SFI;
+       return -EINVAL;
+}
+
+/* Fully configure a serdes macro for its current port mode and speed:
+ * enable the CMUs (once), derive and store the serdes mode, set up the
+ * lane clock and run the macro-type specific configuration (no reset).
+ * Returns 0 on success or a negative error code.
+ */
+static int sparx5_serdes_config(struct sparx5_serdes_macro *macro)
+{
+       struct device *dev = macro->priv->dev;
+       int serdesmode;
+       int err;
+
+       err = sparx5_serdes_cmu_enable(macro->priv);
+       if (err)
+               return err;
+
+       serdesmode = sparx5_serdes_get_serdesmode(macro->portmode, macro->speed);
+       if (serdesmode < 0) {
+               dev_err(dev, "SerDes %u, interface not supported: %s\n",
+                       macro->sidx,
+                       phy_modes(macro->portmode));
+               return serdesmode;
+       }
+       macro->serdesmode = serdesmode;
+
+       /* Must happen after serdesmode is set: only acts in 100FX mode */
+       sparx5_serdes_clock_config(macro);
+
+       if (macro->serdestype == SPX5_SDT_25G)
+               err = sparx5_sd25g28_config(macro, false);
+       else
+               err = sparx5_sd10g28_config(macro, false);
+       if (err) {
+               dev_err(dev, "SerDes %u, config error: %d\n",
+                       macro->sidx, err);
+       }
+       return err;
+}
+
+/* phy_ops .power_on: enable the serdes TX driver (pwdn = 0) */
+static int sparx5_serdes_power_on(struct phy *phy)
+{
+       return sparx5_serdes_power_save(phy_get_drvdata(phy), 0);
+}
+
+/* phy_ops .power_off: power down the serdes TX driver (pwdn = 1) */
+static int sparx5_serdes_power_off(struct phy *phy)
+{
+       return sparx5_serdes_power_save(phy_get_drvdata(phy), 1);
+}
+
+/* phy_ops .set_mode: accept an Ethernet submode, remember it and
+ * reconfigure the macro.
+ * Returns 0 on success, -EINVAL for unsupported mode/submode, or the
+ * configuration error.
+ */
+static int sparx5_serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+       struct sparx5_serdes_macro *macro;
+
+       if (mode != PHY_MODE_ETHERNET)
+               return -EINVAL;
+
+       switch (submode) {
+       case PHY_INTERFACE_MODE_1000BASEX:
+       case PHY_INTERFACE_MODE_2500BASEX:
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_QSGMII:
+       case PHY_INTERFACE_MODE_10GBASER:
+               macro = phy_get_drvdata(phy);
+               macro->portmode = submode;
+               /* Propagate a configuration failure to the caller instead
+                * of silently returning success.
+                */
+               return sparx5_serdes_config(macro);
+       default:
+               return -EINVAL;
+       }
+}
+
+/* phy_ops .set_media: remember the new media type and reconfigure the
+ * macro if it has already been set up.
+ * NOTE(review): a reconfiguration error is not propagated here -
+ * presumably intentional (media may be set before the mode); confirm.
+ */
+static int sparx5_serdes_set_media(struct phy *phy, enum phy_media media)
+{
+       struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
+
+       if (media != macro->media) {
+               macro->media = media;
+               if (macro->serdesmode != SPX5_SD_MODE_NONE)
+                       sparx5_serdes_config(macro);
+       }
+       return 0;
+}
+
+/* phy_ops .set_speed: validate the speed against the macro's capability,
+ * remember it and reconfigure the macro if it has already been set up.
+ * Returns 0 on success or -EINVAL if the speed exceeds the macro's limit.
+ */
+static int sparx5_serdes_set_speed(struct phy *phy, int speed)
+{
+       struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
+
+       /* Macros below the 10G group are 6G lanes: max 5G */
+       if (macro->sidx < SPX5_SERDES_10G_START && speed > SPEED_5000)
+               return -EINVAL;
+       /* Macros below the 25G group are at most 10G lanes */
+       if (macro->sidx < SPX5_SERDES_25G_START && speed > SPEED_10000)
+               return -EINVAL;
+       if (speed != macro->speed) {
+               macro->speed = speed;
+               if (macro->serdesmode != SPX5_SD_MODE_NONE)
+                       sparx5_serdes_config(macro);
+       }
+       return 0;
+}
+
+/* phy_ops .reset: re-run the macro configuration with the register reset
+ * flag set. Returns 0 on success or a negative error code.
+ */
+static int sparx5_serdes_reset(struct phy *phy)
+{
+       struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
+       int err = sparx5_serdes_cmu_enable(macro->priv);
+
+       if (err)
+               return err;
+       err = macro->serdestype == SPX5_SDT_25G ?
+               sparx5_sd25g28_config(macro, true) :
+               sparx5_sd10g28_config(macro, true);
+       if (err)
+               dev_err(&phy->dev, "SerDes %u, reset error: %d\n",
+                       macro->sidx, err);
+       return err;
+}
+
+/* phy_ops .validate: check whether the currently configured speed is
+ * possible on this macro in the requested submode.
+ * Returns 0 if the combination is valid, -EINVAL otherwise.
+ */
+static int sparx5_serdes_validate(struct phy *phy, enum phy_mode mode,
+                                       int submode,
+                                       union phy_configure_opts *opts)
+{
+       struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
+
+       if (mode != PHY_MODE_ETHERNET)
+               return -EINVAL;
+
+       /* A speed must have been set before validation makes sense */
+       if (macro->speed == 0)
+               return -EINVAL;
+
+       /* Per-macro capability limits: 6G lanes max 5G, 10G lanes max 10G */
+       if (macro->sidx < SPX5_SERDES_10G_START && macro->speed > SPEED_5000)
+               return -EINVAL;
+       if (macro->sidx < SPX5_SERDES_25G_START && macro->speed > SPEED_10000)
+               return -EINVAL;
+
+       switch (submode) {
+       case PHY_INTERFACE_MODE_1000BASEX:
+               if (macro->speed != SPEED_100 && /* This is for 100BASE-FX */
+                   macro->speed != SPEED_1000)
+                       return -EINVAL;
+               break;
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_2500BASEX:
+       case PHY_INTERFACE_MODE_QSGMII:
+               /* These submodes are all below 5G */
+               if (macro->speed >= SPEED_5000)
+                       return -EINVAL;
+               break;
+       case PHY_INTERFACE_MODE_10GBASER:
+               /* 10GBASE-R requires at least 5G */
+               if (macro->speed < SPEED_5000)
+                       return -EINVAL;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/* Generic PHY framework operations implemented by this driver */
+static const struct phy_ops sparx5_serdes_ops = {
+       .power_on       = sparx5_serdes_power_on,
+       .power_off      = sparx5_serdes_power_off,
+       .set_mode       = sparx5_serdes_set_mode,
+       .set_media      = sparx5_serdes_set_media,
+       .set_speed      = sparx5_serdes_set_speed,
+       .reset          = sparx5_serdes_reset,
+       .validate       = sparx5_serdes_validate,
+       .owner          = THIS_MODULE,
+};
+
+/* Create one generic PHY for serdes macro @idx and attach a freshly
+ * allocated macro state as its driver data. The global index determines
+ * the macro flavor (6G/10G/25G) and the index within that flavor group.
+ * Returns 0 on success or a negative error code.
+ */
+static int sparx5_phy_create(struct sparx5_serdes_private *priv,
+                          int idx, struct phy **phy)
+{
+       struct sparx5_serdes_macro *macro;
+
+       *phy = devm_phy_create(priv->dev, NULL, &sparx5_serdes_ops);
+       if (IS_ERR(*phy))
+               return PTR_ERR(*phy);
+
+       macro = devm_kzalloc(priv->dev, sizeof(*macro), GFP_KERNEL);
+       if (!macro)
+               return -ENOMEM;
+
+       macro->sidx = idx;
+       macro->priv = priv;
+       macro->speed = SPEED_UNKNOWN;
+       if (idx >= SPX5_SERDES_25G_START) {
+               macro->serdestype = SPX5_SDT_25G;
+               macro->stpidx = idx - SPX5_SERDES_25G_START;
+       } else if (idx >= SPX5_SERDES_10G_START) {
+               macro->serdestype = SPX5_SDT_10G;
+               macro->stpidx = idx - SPX5_SERDES_10G_START;
+       } else {
+               macro->serdestype = SPX5_SDT_6G;
+               macro->stpidx = idx;
+       }
+
+       phy_set_drvdata(*phy, macro);
+
+       return 0;
+}
+
+/* Mapping from register target id to byte offset within the single serdes
+ * I/O resource; the trailing comments give the corresponding absolute
+ * chip addresses. Consumed once at probe time to fill priv->regs[].
+ */
+static struct sparx5_serdes_io_resource sparx5_serdes_iomap[] =  {
+       { TARGET_SD_CMU,          0x0 },      /* 0x610808000: sd_cmu_0 */
+       { TARGET_SD_CMU + 1,      0x8000 },   /* 0x610810000: sd_cmu_1 */
+       { TARGET_SD_CMU + 2,      0x10000 },  /* 0x610818000: sd_cmu_2 */
+       { TARGET_SD_CMU + 3,      0x18000 },  /* 0x610820000: sd_cmu_3 */
+       { TARGET_SD_CMU + 4,      0x20000 },  /* 0x610828000: sd_cmu_4 */
+       { TARGET_SD_CMU + 5,      0x28000 },  /* 0x610830000: sd_cmu_5 */
+       { TARGET_SD_CMU + 6,      0x30000 },  /* 0x610838000: sd_cmu_6 */
+       { TARGET_SD_CMU + 7,      0x38000 },  /* 0x610840000: sd_cmu_7 */
+       { TARGET_SD_CMU + 8,      0x40000 },  /* 0x610848000: sd_cmu_8 */
+       { TARGET_SD_CMU_CFG,      0x48000 },  /* 0x610850000: sd_cmu_cfg_0 */
+       { TARGET_SD_CMU_CFG + 1,  0x50000 },  /* 0x610858000: sd_cmu_cfg_1 */
+       { TARGET_SD_CMU_CFG + 2,  0x58000 },  /* 0x610860000: sd_cmu_cfg_2 */
+       { TARGET_SD_CMU_CFG + 3,  0x60000 },  /* 0x610868000: sd_cmu_cfg_3 */
+       { TARGET_SD_CMU_CFG + 4,  0x68000 },  /* 0x610870000: sd_cmu_cfg_4 */
+       { TARGET_SD_CMU_CFG + 5,  0x70000 },  /* 0x610878000: sd_cmu_cfg_5 */
+       { TARGET_SD_CMU_CFG + 6,  0x78000 },  /* 0x610880000: sd_cmu_cfg_6 */
+       { TARGET_SD_CMU_CFG + 7,  0x80000 },  /* 0x610888000: sd_cmu_cfg_7 */
+       { TARGET_SD_CMU_CFG + 8,  0x88000 },  /* 0x610890000: sd_cmu_cfg_8 */
+       { TARGET_SD6G_LANE,       0x90000 },  /* 0x610898000: sd6g_lane_0 */
+       { TARGET_SD6G_LANE + 1,   0x98000 },  /* 0x6108a0000: sd6g_lane_1 */
+       { TARGET_SD6G_LANE + 2,   0xa0000 },  /* 0x6108a8000: sd6g_lane_2 */
+       { TARGET_SD6G_LANE + 3,   0xa8000 },  /* 0x6108b0000: sd6g_lane_3 */
+       { TARGET_SD6G_LANE + 4,   0xb0000 },  /* 0x6108b8000: sd6g_lane_4 */
+       { TARGET_SD6G_LANE + 5,   0xb8000 },  /* 0x6108c0000: sd6g_lane_5 */
+       { TARGET_SD6G_LANE + 6,   0xc0000 },  /* 0x6108c8000: sd6g_lane_6 */
+       { TARGET_SD6G_LANE + 7,   0xc8000 },  /* 0x6108d0000: sd6g_lane_7 */
+       { TARGET_SD6G_LANE + 8,   0xd0000 },  /* 0x6108d8000: sd6g_lane_8 */
+       { TARGET_SD6G_LANE + 9,   0xd8000 },  /* 0x6108e0000: sd6g_lane_9 */
+       { TARGET_SD6G_LANE + 10,  0xe0000 },  /* 0x6108e8000: sd6g_lane_10 */
+       { TARGET_SD6G_LANE + 11,  0xe8000 },  /* 0x6108f0000: sd6g_lane_11 */
+       { TARGET_SD6G_LANE + 12,  0xf0000 },  /* 0x6108f8000: sd6g_lane_12 */
+       { TARGET_SD10G_LANE,      0xf8000 },  /* 0x610900000: sd10g_lane_0 */
+       { TARGET_SD10G_LANE + 1,  0x100000 }, /* 0x610908000: sd10g_lane_1 */
+       { TARGET_SD10G_LANE + 2,  0x108000 }, /* 0x610910000: sd10g_lane_2 */
+       { TARGET_SD10G_LANE + 3,  0x110000 }, /* 0x610918000: sd10g_lane_3 */
+       { TARGET_SD_LANE,         0x1a0000 }, /* 0x6109a8000: sd_lane_0 */
+       { TARGET_SD_LANE + 1,     0x1a8000 }, /* 0x6109b0000: sd_lane_1 */
+       { TARGET_SD_LANE + 2,     0x1b0000 }, /* 0x6109b8000: sd_lane_2 */
+       { TARGET_SD_LANE + 3,     0x1b8000 }, /* 0x6109c0000: sd_lane_3 */
+       { TARGET_SD_LANE + 4,     0x1c0000 }, /* 0x6109c8000: sd_lane_4 */
+       { TARGET_SD_LANE + 5,     0x1c8000 }, /* 0x6109d0000: sd_lane_5 */
+       { TARGET_SD_LANE + 6,     0x1d0000 }, /* 0x6109d8000: sd_lane_6 */
+       { TARGET_SD_LANE + 7,     0x1d8000 }, /* 0x6109e0000: sd_lane_7 */
+       { TARGET_SD_LANE + 8,     0x1e0000 }, /* 0x6109e8000: sd_lane_8 */
+       { TARGET_SD_LANE + 9,     0x1e8000 }, /* 0x6109f0000: sd_lane_9 */
+       { TARGET_SD_LANE + 10,    0x1f0000 }, /* 0x6109f8000: sd_lane_10 */
+       { TARGET_SD_LANE + 11,    0x1f8000 }, /* 0x610a00000: sd_lane_11 */
+       { TARGET_SD_LANE + 12,    0x200000 }, /* 0x610a08000: sd_lane_12 */
+       { TARGET_SD_LANE + 13,    0x208000 }, /* 0x610a10000: sd_lane_13 */
+       { TARGET_SD_LANE + 14,    0x210000 }, /* 0x610a18000: sd_lane_14 */
+       { TARGET_SD_LANE + 15,    0x218000 }, /* 0x610a20000: sd_lane_15 */
+       { TARGET_SD_LANE + 16,    0x220000 }, /* 0x610a28000: sd_lane_16 */
+       { TARGET_SD_CMU + 9,      0x400000 }, /* 0x610c08000: sd_cmu_9 */
+       { TARGET_SD_CMU + 10,     0x408000 }, /* 0x610c10000: sd_cmu_10 */
+       { TARGET_SD_CMU + 11,     0x410000 }, /* 0x610c18000: sd_cmu_11 */
+       { TARGET_SD_CMU + 12,     0x418000 }, /* 0x610c20000: sd_cmu_12 */
+       { TARGET_SD_CMU + 13,     0x420000 }, /* 0x610c28000: sd_cmu_13 */
+       { TARGET_SD_CMU_CFG + 9,  0x428000 }, /* 0x610c30000: sd_cmu_cfg_9 */
+       { TARGET_SD_CMU_CFG + 10, 0x430000 }, /* 0x610c38000: sd_cmu_cfg_10 */
+       { TARGET_SD_CMU_CFG + 11, 0x438000 }, /* 0x610c40000: sd_cmu_cfg_11 */
+       { TARGET_SD_CMU_CFG + 12, 0x440000 }, /* 0x610c48000: sd_cmu_cfg_12 */
+       { TARGET_SD_CMU_CFG + 13, 0x448000 }, /* 0x610c50000: sd_cmu_cfg_13 */
+       { TARGET_SD10G_LANE + 4,  0x450000 }, /* 0x610c58000: sd10g_lane_4 */
+       { TARGET_SD10G_LANE + 5,  0x458000 }, /* 0x610c60000: sd10g_lane_5 */
+       { TARGET_SD10G_LANE + 6,  0x460000 }, /* 0x610c68000: sd10g_lane_6 */
+       { TARGET_SD10G_LANE + 7,  0x468000 }, /* 0x610c70000: sd10g_lane_7 */
+       { TARGET_SD10G_LANE + 8,  0x470000 }, /* 0x610c78000: sd10g_lane_8 */
+       { TARGET_SD10G_LANE + 9,  0x478000 }, /* 0x610c80000: sd10g_lane_9 */
+       { TARGET_SD10G_LANE + 10, 0x480000 }, /* 0x610c88000: sd10g_lane_10 */
+       { TARGET_SD10G_LANE + 11, 0x488000 }, /* 0x610c90000: sd10g_lane_11 */
+       { TARGET_SD25G_LANE,      0x490000 }, /* 0x610c98000: sd25g_lane_0 */
+       { TARGET_SD25G_LANE + 1,  0x498000 }, /* 0x610ca0000: sd25g_lane_1 */
+       { TARGET_SD25G_LANE + 2,  0x4a0000 }, /* 0x610ca8000: sd25g_lane_2 */
+       { TARGET_SD25G_LANE + 3,  0x4a8000 }, /* 0x610cb0000: sd25g_lane_3 */
+       { TARGET_SD25G_LANE + 4,  0x4b0000 }, /* 0x610cb8000: sd25g_lane_4 */
+       { TARGET_SD25G_LANE + 5,  0x4b8000 }, /* 0x610cc0000: sd25g_lane_5 */
+       { TARGET_SD25G_LANE + 6,  0x4c0000 }, /* 0x610cc8000: sd25g_lane_6 */
+       { TARGET_SD25G_LANE + 7,  0x4c8000 }, /* 0x610cd0000: sd25g_lane_7 */
+       { TARGET_SD_LANE + 17,    0x550000 }, /* 0x610d58000: sd_lane_17 */
+       { TARGET_SD_LANE + 18,    0x558000 }, /* 0x610d60000: sd_lane_18 */
+       { TARGET_SD_LANE + 19,    0x560000 }, /* 0x610d68000: sd_lane_19 */
+       { TARGET_SD_LANE + 20,    0x568000 }, /* 0x610d70000: sd_lane_20 */
+       { TARGET_SD_LANE + 21,    0x570000 }, /* 0x610d78000: sd_lane_21 */
+       { TARGET_SD_LANE + 22,    0x578000 }, /* 0x610d80000: sd_lane_22 */
+       { TARGET_SD_LANE + 23,    0x580000 }, /* 0x610d88000: sd_lane_23 */
+       { TARGET_SD_LANE + 24,    0x588000 }, /* 0x610d90000: sd_lane_24 */
+       { TARGET_SD_LANE_25G,     0x590000 }, /* 0x610d98000: sd_lane_25g_25 */
+       { TARGET_SD_LANE_25G + 1, 0x598000 }, /* 0x610da0000: sd_lane_25g_26 */
+       { TARGET_SD_LANE_25G + 2, 0x5a0000 }, /* 0x610da8000: sd_lane_25g_27 */
+       { TARGET_SD_LANE_25G + 3, 0x5a8000 }, /* 0x610db0000: sd_lane_25g_28 */
+       { TARGET_SD_LANE_25G + 4, 0x5b0000 }, /* 0x610db8000: sd_lane_25g_29 */
+       { TARGET_SD_LANE_25G + 5, 0x5b8000 }, /* 0x610dc0000: sd_lane_25g_30 */
+       { TARGET_SD_LANE_25G + 6, 0x5c0000 }, /* 0x610dc8000: sd_lane_25g_31 */
+       { TARGET_SD_LANE_25G + 7, 0x5c8000 }, /* 0x610dd0000: sd_lane_25g_32 */
+};
+
+/* Client lookup function, uses serdes index.
+ * Returns the phy whose macro matches the requested index, or
+ * ERR_PTR(-EINVAL)/ERR_PTR(-ENODEV) for bad arguments / unknown index.
+ */
+static struct phy *sparx5_serdes_xlate(struct device *dev,
+                                    struct of_phandle_args *args)
+{
+       struct sparx5_serdes_private *priv = dev_get_drvdata(dev);
+       unsigned int sidx;
+       int idx;
+
+       if (args->args_count != 1)
+               return ERR_PTR(-EINVAL);
+
+       sidx = args->args[0];
+
+       for (idx = 0; idx < SPX5_SERDES_MAX; idx++) {
+               struct sparx5_serdes_macro *macro =
+                       phy_get_drvdata(priv->phys[idx]);
+
+               if (macro->sidx == sidx)
+                       return priv->phys[idx];
+       }
+       return ERR_PTR(-ENODEV);
+}
+
+/* Probe: read the core clock, map the serdes register region, create one
+ * generic PHY per serdes macro and register the phy provider.
+ * Returns 0 on success or a negative error code.
+ */
+static int sparx5_serdes_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct sparx5_serdes_private *priv;
+       struct phy_provider *provider;
+       struct resource *iores;
+       void __iomem *iomem;
+       unsigned long clock;
+       struct clk *clk;
+       int idx;
+       int err;
+
+       if (!np && !pdev->dev.platform_data)
+               return -ENODEV;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, priv);
+       priv->dev = &pdev->dev;
+
+       /* Get coreclock */
+       clk = devm_clk_get(priv->dev, NULL);
+       if (IS_ERR(clk)) {
+               dev_err(priv->dev, "Failed to get coreclock\n");
+               return PTR_ERR(clk);
+       }
+       clock = clk_get_rate(clk);
+       if (clock == 0) {
+               dev_err(priv->dev, "Invalid coreclock %lu\n", clock);
+               return -EINVAL;
+       }
+       priv->coreclock = clock;
+
+       iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!iores) {
+               dev_err(priv->dev, "Unable to get serdes registers\n");
+               return -EINVAL;
+       }
+       /* devm_ioremap() returns NULL on failure - not an ERR_PTR - so the
+        * result must not be checked with IS_ERR()
+        */
+       iomem = devm_ioremap(priv->dev, iores->start, resource_size(iores));
+       if (!iomem) {
+               dev_err(priv->dev, "Unable to get serdes registers: %s\n",
+                       iores->name);
+               return -ENOMEM;
+       }
+       for (idx = 0; idx < ARRAY_SIZE(sparx5_serdes_iomap); idx++) {
+               struct sparx5_serdes_io_resource *iomap = &sparx5_serdes_iomap[idx];
+
+               priv->regs[iomap->id] = iomem + iomap->offset;
+       }
+       for (idx = 0; idx < SPX5_SERDES_MAX; idx++) {
+               err = sparx5_phy_create(priv, idx, &priv->phys[idx]);
+               if (err)
+                       return err;
+       }
+
+       provider = devm_of_phy_provider_register(priv->dev, sparx5_serdes_xlate);
+
+       return PTR_ERR_OR_ZERO(provider);
+}
+
+/* Device-tree match table */
+static const struct of_device_id sparx5_serdes_match[] = {
+       { .compatible = "microchip,sparx5-serdes" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, sparx5_serdes_match);
+
+/* Platform driver registration; no remove callback is needed since all
+ * resources are devm-managed.
+ */
+static struct platform_driver sparx5_serdes_driver = {
+       .probe = sparx5_serdes_probe,
+       .driver = {
+               .name = "sparx5-serdes",
+               .of_match_table = sparx5_serdes_match,
+       },
+};
+
+module_platform_driver(sparx5_serdes_driver);
+
+MODULE_DESCRIPTION("Microchip Sparx5 switch serdes driver");
+MODULE_AUTHOR("Steen Hegelund <steen.hegelund@microchip.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/microchip/sparx5_serdes.h b/drivers/phy/microchip/sparx5_serdes.h
new file mode 100644 (file)
index 0000000..0a3e496
--- /dev/null
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ * Microchip Sparx5 SerDes driver
+ *
+ * Copyright (c) 2020 Microchip Technology Inc.
+ */
+
+#ifndef _SPARX5_SERDES_H_
+#define _SPARX5_SERDES_H_
+
+#include "sparx5_serdes_regs.h"
+
+#define SPX5_SERDES_MAX       33
+
+/* Serdes macro flavors; the enum value mirrors the speed class in Gbps */
+enum sparx5_serdes_type {
+       SPX5_SDT_6G  = 6,
+       SPX5_SDT_10G = 10,
+       SPX5_SDT_25G = 25,
+};
+
+/* Link protocol modes a serdes macro can be configured for
+ * (presumably selected from the phy_interface_t port mode — see
+ * sparx5_serdes_macro::portmode; confirm against the mode-set code)
+ */
+enum sparx5_serdes_mode {
+       SPX5_SD_MODE_NONE,
+       SPX5_SD_MODE_2G5,
+       SPX5_SD_MODE_QSGMII,
+       SPX5_SD_MODE_100FX,
+       SPX5_SD_MODE_1000BASEX,
+       SPX5_SD_MODE_SFI,
+};
+
+/* Per-device driver state, allocated at probe time */
+struct sparx5_serdes_private {
+       struct device *dev;                /* backing platform device */
+       void __iomem *regs[NUM_TARGETS];   /* mapped base per register target */
+       struct phy *phys[SPX5_SERDES_MAX]; /* one generic PHY per serdes instance */
+       bool cmu_enabled;                  /* NOTE(review): looks like a one-shot
+                                           * CMU-setup latch — confirm in users */
+       unsigned long coreclock;           /* core clock rate in Hz (clk_get_rate) */
+};
+
+/* Per-lane state attached to each registered PHY */
+struct sparx5_serdes_macro {
+       struct sparx5_serdes_private *priv;  /* owning device state */
+       u32 sidx;                            /* global serdes index */
+       u32 stpidx;                          /* index within its serdes type group
+                                             * — presumably; TODO confirm */
+       enum sparx5_serdes_type serdestype;  /* 6G/10G/25G macro flavor */
+       enum sparx5_serdes_mode serdesmode;  /* currently configured mode */
+       phy_interface_t portmode;            /* requested MAC interface mode */
+       int speed;                           /* requested link speed */
+       enum phy_media media;                /* attached media (e.g. copper/fiber) */
+};
+
+/* Read, Write and modify registers content.
+ * The register definition macros start at the id
+ */
+/* Compute the virtual address of one register instance.
+ *
+ * The autogenerated macros in sparx5_serdes_regs.h expand to exactly this
+ * argument list: target id + instance/count, register-group base address,
+ * instance, count and width, and register address, instance, count and
+ * width.  Replicated targets are stored consecutively in @base, hence the
+ * base[id + tinst] lookup.  The WARN_ONs catch out-of-range instance
+ * numbers without stopping the calculation.
+ */
+static inline void __iomem *sdx5_addr(void __iomem *base[],
+                                     int id, int tinst, int tcnt,
+                                     int gbase, int ginst,
+                                     int gcnt, int gwidth,
+                                     int raddr, int rinst,
+                                     int rcnt, int rwidth)
+{
+       WARN_ON((tinst) >= tcnt);
+       WARN_ON((ginst) >= gcnt);
+       WARN_ON((rinst) >= rcnt);
+       return base[id + (tinst)] +
+               gbase + ((ginst) * gwidth) +
+               raddr + ((rinst) * rwidth);
+}
+
+/* Same register-offset calculation as sdx5_addr(), but relative to an
+ * already-resolved target base address (so no target id/instance lookup).
+ */
+static inline void __iomem *sdx5_inst_baseaddr(void __iomem *base,
+                                              int gbase, int ginst,
+                                              int gcnt, int gwidth,
+                                              int raddr, int rinst,
+                                              int rcnt, int rwidth)
+{
+       WARN_ON((ginst) >= gcnt);
+       WARN_ON((rinst) >= rcnt);
+       return base +
+               gbase + ((ginst) * gwidth) +
+               raddr + ((rinst) * rwidth);
+}
+
+/* Read-modify-write a register addressed via the macro argument list:
+ * only the bits selected by @mask are replaced with the matching bits
+ * of @val; all other bits are preserved.
+ */
+static inline void sdx5_rmw(u32 val, u32 mask, struct sparx5_serdes_private *priv,
+                           int id, int tinst, int tcnt,
+                           int gbase, int ginst, int gcnt, int gwidth,
+                           int raddr, int rinst, int rcnt, int rwidth)
+{
+       u32 nval;
+       void __iomem *addr =
+               sdx5_addr(priv->regs, id, tinst, tcnt,
+                         gbase, ginst, gcnt, gwidth,
+                         raddr, rinst, rcnt, rwidth);
+       nval = readl(addr);
+       nval = (nval & ~mask) | (val & mask);
+       writel(nval, addr);
+}
+
+/* Read-modify-write relative to a caller-supplied target base (@iomem).
+ * Note: id/tinst/tcnt are accepted for macro-argument compatibility but
+ * are not used in the address calculation.
+ */
+static inline void sdx5_inst_rmw(u32 val, u32 mask, void __iomem *iomem,
+                                int id, int tinst, int tcnt,
+                                int gbase, int ginst, int gcnt, int gwidth,
+                                int raddr, int rinst, int rcnt, int rwidth)
+{
+       u32 nval;
+       void __iomem *addr =
+               sdx5_inst_baseaddr(iomem,
+                                  gbase, ginst, gcnt, gwidth,
+                                  raddr, rinst, rcnt, rwidth);
+       nval = readl(addr);
+       nval = (nval & ~mask) | (val & mask);
+       writel(nval, addr);
+}
+
+/* Read-modify-write a register at a fully-resolved address */
+static inline void sdx5_rmw_addr(u32 val, u32 mask, void __iomem *addr)
+{
+       u32 nval;
+
+       nval = readl(addr);
+       nval = (nval & ~mask) | (val & mask);
+       writel(nval, addr);
+}
+
+/* Look up the mapped base address of target @id, instance @tinst
+ * (replicated targets are stored consecutively in priv->regs)
+ */
+static inline void __iomem *sdx5_inst_get(struct sparx5_serdes_private *priv,
+                                         int id, int tinst)
+{
+       return priv->regs[id + tinst];
+}
+
+/* Resolve a register address relative to a caller-supplied target base.
+ * id/tinst/tcnt are accepted only so the autogenerated register macros
+ * can be passed unchanged; they do not affect the result.
+ */
+static inline void __iomem *sdx5_inst_addr(void __iomem *iomem,
+                                          int id, int tinst, int tcnt,
+                                          int gbase,
+                                          int ginst, int gcnt, int gwidth,
+                                          int raddr,
+                                          int rinst, int rcnt, int rwidth)
+{
+       return sdx5_inst_baseaddr(iomem, gbase, ginst, gcnt, gwidth,
+                                 raddr, rinst, rcnt, rwidth);
+}
+
+
+#endif /* _SPARX5_SERDES_H_ */
diff --git a/drivers/phy/microchip/sparx5_serdes_regs.h b/drivers/phy/microchip/sparx5_serdes_regs.h
new file mode 100644 (file)
index 0000000..b96386a
--- /dev/null
@@ -0,0 +1,2695 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ * Microchip Sparx5 SerDes driver
+ *
+ * Copyright (c) 2020 Microchip Technology Inc.
+ */
+
+/* This file is autogenerated by cml-utils 2020-11-16 13:11:27 +0100.
+ * Commit ID: 13bdf073131d8bf40c54901df6988ae4e9c8f29f
+ */
+
+#ifndef _SPARX5_SERDES_REGS_H_
+#define _SPARX5_SERDES_REGS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+/* Register target ids; used to index sparx5_serdes_private::regs.
+ * The gaps between values leave room for replicated target instances,
+ * which are addressed as regs[id + tinst] (see sdx5_addr/sdx5_inst_get).
+ */
+enum sparx5_serdes_target {
+       TARGET_SD10G_LANE = 200,
+       TARGET_SD25G_LANE = 212,
+       TARGET_SD6G_LANE = 233,
+       TARGET_SD_CMU = 248,
+       TARGET_SD_CMU_CFG = 262,
+       TARGET_SD_LANE = 276,
+       TARGET_SD_LANE_25G = 301,
+       NUM_TARGETS = 332
+};
+
+#define __REG(...)    __VA_ARGS__
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_01 */
+#define SD10G_LANE_LANE_01(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 4, 0, 1, 4)
+
+#define SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0 GENMASK(2, 0)
+#define SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0, x)
+#define SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_01_CFG_PMA_TX_CK_BITWIDTH_2_0, x)
+
+#define SD10G_LANE_LANE_01_CFG_RXDET_EN          BIT(4)
+#define SD10G_LANE_LANE_01_CFG_RXDET_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_01_CFG_RXDET_EN, x)
+#define SD10G_LANE_LANE_01_CFG_RXDET_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_01_CFG_RXDET_EN, x)
+
+#define SD10G_LANE_LANE_01_CFG_RXDET_STR         BIT(5)
+#define SD10G_LANE_LANE_01_CFG_RXDET_STR_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_01_CFG_RXDET_STR, x)
+#define SD10G_LANE_LANE_01_CFG_RXDET_STR_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_01_CFG_RXDET_STR, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_02 */
+#define SD10G_LANE_LANE_02(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 8, 0, 1, 4)
+
+#define SD10G_LANE_LANE_02_CFG_EN_ADV            BIT(0)
+#define SD10G_LANE_LANE_02_CFG_EN_ADV_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_02_CFG_EN_ADV, x)
+#define SD10G_LANE_LANE_02_CFG_EN_ADV_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_02_CFG_EN_ADV, x)
+
+#define SD10G_LANE_LANE_02_CFG_EN_MAIN           BIT(1)
+#define SD10G_LANE_LANE_02_CFG_EN_MAIN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_02_CFG_EN_MAIN, x)
+#define SD10G_LANE_LANE_02_CFG_EN_MAIN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_02_CFG_EN_MAIN, x)
+
+#define SD10G_LANE_LANE_02_CFG_EN_DLY            BIT(2)
+#define SD10G_LANE_LANE_02_CFG_EN_DLY_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_02_CFG_EN_DLY, x)
+#define SD10G_LANE_LANE_02_CFG_EN_DLY_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_02_CFG_EN_DLY, x)
+
+#define SD10G_LANE_LANE_02_CFG_EN_DLY2           BIT(3)
+#define SD10G_LANE_LANE_02_CFG_EN_DLY2_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_02_CFG_EN_DLY2, x)
+#define SD10G_LANE_LANE_02_CFG_EN_DLY2_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_02_CFG_EN_DLY2, x)
+
+#define SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0       GENMASK(7, 4)
+#define SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0, x)
+#define SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_02_CFG_TAP_ADV_3_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_03 */
+#define SD10G_LANE_LANE_03(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 12, 0, 1, 4)
+
+#define SD10G_LANE_LANE_03_CFG_TAP_MAIN          BIT(0)
+#define SD10G_LANE_LANE_03_CFG_TAP_MAIN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_03_CFG_TAP_MAIN, x)
+#define SD10G_LANE_LANE_03_CFG_TAP_MAIN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_03_CFG_TAP_MAIN, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_04 */
+#define SD10G_LANE_LANE_04(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 16, 0, 1, 4)
+
+#define SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0       GENMASK(4, 0)
+#define SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0, x)
+#define SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_04_CFG_TAP_DLY_4_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_06 */
+#define SD10G_LANE_LANE_06(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 24, 0, 1, 4)
+
+#define SD10G_LANE_LANE_06_CFG_PD_DRIVER         BIT(0)
+#define SD10G_LANE_LANE_06_CFG_PD_DRIVER_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_06_CFG_PD_DRIVER, x)
+#define SD10G_LANE_LANE_06_CFG_PD_DRIVER_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_06_CFG_PD_DRIVER, x)
+
+#define SD10G_LANE_LANE_06_CFG_PD_CLK            BIT(1)
+#define SD10G_LANE_LANE_06_CFG_PD_CLK_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_06_CFG_PD_CLK, x)
+#define SD10G_LANE_LANE_06_CFG_PD_CLK_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_06_CFG_PD_CLK, x)
+
+#define SD10G_LANE_LANE_06_CFG_PD_CML            BIT(2)
+#define SD10G_LANE_LANE_06_CFG_PD_CML_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_06_CFG_PD_CML, x)
+#define SD10G_LANE_LANE_06_CFG_PD_CML_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_06_CFG_PD_CML, x)
+
+#define SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN       BIT(3)
+#define SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN, x)
+#define SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_06_CFG_TX2RX_LP_EN, x)
+
+#define SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN       BIT(4)
+#define SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN, x)
+#define SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_06_CFG_RX2TX_LP_EN, x)
+
+#define SD10G_LANE_LANE_06_CFG_EN_PREEMPH        BIT(5)
+#define SD10G_LANE_LANE_06_CFG_EN_PREEMPH_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_06_CFG_EN_PREEMPH, x)
+#define SD10G_LANE_LANE_06_CFG_EN_PREEMPH_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_06_CFG_EN_PREEMPH, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_0B */
+#define SD10G_LANE_LANE_0B(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 44, 0, 1, 4)
+
+#define SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0        GENMASK(3, 0)
+#define SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0, x)
+#define SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0B_CFG_EQ_RES_3_0, x)
+
+#define SD10G_LANE_LANE_0B_CFG_PD_CTLE           BIT(4)
+#define SD10G_LANE_LANE_0B_CFG_PD_CTLE_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0B_CFG_PD_CTLE, x)
+#define SD10G_LANE_LANE_0B_CFG_PD_CTLE_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0B_CFG_PD_CTLE, x)
+
+#define SD10G_LANE_LANE_0B_CFG_CTLE_TP_EN        BIT(5)
+#define SD10G_LANE_LANE_0B_CFG_CTLE_TP_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0B_CFG_CTLE_TP_EN, x)
+#define SD10G_LANE_LANE_0B_CFG_CTLE_TP_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0B_CFG_CTLE_TP_EN, x)
+
+#define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE  BIT(6)
+#define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE, x)
+#define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_AFE, x)
+
+#define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ   BIT(7)
+#define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ, x)
+#define SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0B_CFG_RESETB_OSCAL_SQ, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_0C */
+#define SD10G_LANE_LANE_0C(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 48, 0, 1, 4)
+
+#define SD10G_LANE_LANE_0C_CFG_OSCAL_AFE         BIT(0)
+#define SD10G_LANE_LANE_0C_CFG_OSCAL_AFE_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0C_CFG_OSCAL_AFE, x)
+#define SD10G_LANE_LANE_0C_CFG_OSCAL_AFE_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0C_CFG_OSCAL_AFE, x)
+
+#define SD10G_LANE_LANE_0C_CFG_OSCAL_SQ          BIT(1)
+#define SD10G_LANE_LANE_0C_CFG_OSCAL_SQ_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0C_CFG_OSCAL_SQ, x)
+#define SD10G_LANE_LANE_0C_CFG_OSCAL_SQ_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0C_CFG_OSCAL_SQ, x)
+
+#define SD10G_LANE_LANE_0C_CFG_OSDAC_2X_AFE      BIT(2)
+#define SD10G_LANE_LANE_0C_CFG_OSDAC_2X_AFE_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0C_CFG_OSDAC_2X_AFE, x)
+#define SD10G_LANE_LANE_0C_CFG_OSDAC_2X_AFE_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0C_CFG_OSDAC_2X_AFE, x)
+
+#define SD10G_LANE_LANE_0C_CFG_OSDAC_2X_SQ       BIT(3)
+#define SD10G_LANE_LANE_0C_CFG_OSDAC_2X_SQ_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0C_CFG_OSDAC_2X_SQ, x)
+#define SD10G_LANE_LANE_0C_CFG_OSDAC_2X_SQ_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0C_CFG_OSDAC_2X_SQ, x)
+
+#define SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE      BIT(4)
+#define SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE, x)
+#define SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0C_CFG_PD_OSDAC_AFE, x)
+
+#define SD10G_LANE_LANE_0C_CFG_PD_OSDAC_SQ       BIT(5)
+#define SD10G_LANE_LANE_0C_CFG_PD_OSDAC_SQ_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0C_CFG_PD_OSDAC_SQ, x)
+#define SD10G_LANE_LANE_0C_CFG_PD_OSDAC_SQ_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0C_CFG_PD_OSDAC_SQ, x)
+
+#define SD10G_LANE_LANE_0C_CFG_PD_RX_LS          BIT(6)
+#define SD10G_LANE_LANE_0C_CFG_PD_RX_LS_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0C_CFG_PD_RX_LS, x)
+#define SD10G_LANE_LANE_0C_CFG_PD_RX_LS_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0C_CFG_PD_RX_LS, x)
+
+#define SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12     BIT(7)
+#define SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12, x)
+#define SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0C_CFG_RX_PCIE_GEN12, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_0D */
+#define SD10G_LANE_LANE_0D(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 52, 0, 1, 4)
+
+#define SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0    GENMASK(1, 0)
+#define SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0, x)
+#define SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0D_CFG_CTLE_M_THR_1_0, x)
+
+#define SD10G_LANE_LANE_0D_CFG_EQR_BYP           BIT(4)
+#define SD10G_LANE_LANE_0D_CFG_EQR_BYP_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0D_CFG_EQR_BYP, x)
+#define SD10G_LANE_LANE_0D_CFG_EQR_BYP_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0D_CFG_EQR_BYP, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_0E */
+#define SD10G_LANE_LANE_0E(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 56, 0, 1, 4)
+
+#define SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0     GENMASK(3, 0)
+#define SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0, x)
+#define SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0E_CFG_EQC_FORCE_3_0, x)
+
+#define SD10G_LANE_LANE_0E_CFG_RXLB_EN           BIT(4)
+#define SD10G_LANE_LANE_0E_CFG_RXLB_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0E_CFG_RXLB_EN, x)
+#define SD10G_LANE_LANE_0E_CFG_RXLB_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0E_CFG_RXLB_EN, x)
+
+#define SD10G_LANE_LANE_0E_CFG_TXLB_EN           BIT(5)
+#define SD10G_LANE_LANE_0E_CFG_TXLB_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0E_CFG_TXLB_EN, x)
+#define SD10G_LANE_LANE_0E_CFG_TXLB_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0E_CFG_TXLB_EN, x)
+
+#define SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN      BIT(6)
+#define SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN, x)
+#define SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0E_CFG_SUM_SETCM_EN, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_0F */
+#define SD10G_LANE_LANE_0F(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 60, 0, 1, 4)
+
+#define SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0      GENMASK(7, 0)
+#define SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0, x)
+#define SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_0F_R_CDR_M_GEN1_7_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_13 */
+#define SD10G_LANE_LANE_13(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 76, 0, 1, 4)
+
+#define SD10G_LANE_LANE_13_CFG_DCDR_PD           BIT(0)
+#define SD10G_LANE_LANE_13_CFG_DCDR_PD_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_13_CFG_DCDR_PD, x)
+#define SD10G_LANE_LANE_13_CFG_DCDR_PD_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_13_CFG_DCDR_PD, x)
+
+#define SD10G_LANE_LANE_13_CFG_PHID_1T           BIT(1)
+#define SD10G_LANE_LANE_13_CFG_PHID_1T_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_13_CFG_PHID_1T, x)
+#define SD10G_LANE_LANE_13_CFG_PHID_1T_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_13_CFG_PHID_1T, x)
+
+#define SD10G_LANE_LANE_13_CFG_CDRCK_EN          BIT(2)
+#define SD10G_LANE_LANE_13_CFG_CDRCK_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_13_CFG_CDRCK_EN, x)
+#define SD10G_LANE_LANE_13_CFG_CDRCK_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_13_CFG_CDRCK_EN, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_14 */
+#define SD10G_LANE_LANE_14(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 80, 0, 1, 4)
+
+#define SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0    GENMASK(7, 0)
+#define SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0, x)
+#define SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_14_CFG_PI_EXT_DAC_7_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_15 */
+#define SD10G_LANE_LANE_15(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 84, 0, 1, 4)
+
+#define SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8   GENMASK(7, 0)
+#define SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8, x)
+#define SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_15_CFG_PI_EXT_DAC_15_8, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_16 */
+#define SD10G_LANE_LANE_16(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 88, 0, 1, 4)
+
+#define SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16  GENMASK(7, 0)
+#define SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16, x)
+#define SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_16_CFG_PI_EXT_DAC_23_16, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_1A */
+#define SD10G_LANE_LANE_1A(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 104, 0, 1, 4)
+
+#define SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN      BIT(0)
+#define SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN, x)
+#define SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_1A_CFG_PI_R_SCAN_EN, x)
+
+#define SD10G_LANE_LANE_1A_CFG_PI_EN             BIT(1)
+#define SD10G_LANE_LANE_1A_CFG_PI_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_1A_CFG_PI_EN, x)
+#define SD10G_LANE_LANE_1A_CFG_PI_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_1A_CFG_PI_EN, x)
+
+#define SD10G_LANE_LANE_1A_CFG_PI_DFE_EN         BIT(2)
+#define SD10G_LANE_LANE_1A_CFG_PI_DFE_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_1A_CFG_PI_DFE_EN, x)
+#define SD10G_LANE_LANE_1A_CFG_PI_DFE_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_1A_CFG_PI_DFE_EN, x)
+
+#define SD10G_LANE_LANE_1A_CFG_PI_STEPS          BIT(3)
+#define SD10G_LANE_LANE_1A_CFG_PI_STEPS_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_1A_CFG_PI_STEPS, x)
+#define SD10G_LANE_LANE_1A_CFG_PI_STEPS_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_1A_CFG_PI_STEPS, x)
+
+#define SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0 GENMASK(5, 4)
+#define SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0, x)
+#define SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_1A_CFG_PI_FLOOP_STEPS_1_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_22 */
+#define SD10G_LANE_LANE_22(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 136, 0, 1, 4)
+
+#define SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1     GENMASK(4, 0)
+#define SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1, x)
+#define SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_22_CFG_DFETAP_EN_5_1, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_23 */
+#define SD10G_LANE_LANE_23(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 140, 0, 1, 4)
+
+#define SD10G_LANE_LANE_23_CFG_DFE_PD            BIT(0)
+#define SD10G_LANE_LANE_23_CFG_DFE_PD_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_23_CFG_DFE_PD, x)
+#define SD10G_LANE_LANE_23_CFG_DFE_PD_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_23_CFG_DFE_PD, x)
+
+#define SD10G_LANE_LANE_23_CFG_EN_DFEDIG         BIT(1)
+#define SD10G_LANE_LANE_23_CFG_EN_DFEDIG_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_23_CFG_EN_DFEDIG, x)
+#define SD10G_LANE_LANE_23_CFG_EN_DFEDIG_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_23_CFG_EN_DFEDIG, x)
+
+#define SD10G_LANE_LANE_23_CFG_DFECK_EN          BIT(2)
+#define SD10G_LANE_LANE_23_CFG_DFECK_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_23_CFG_DFECK_EN, x)
+#define SD10G_LANE_LANE_23_CFG_DFECK_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_23_CFG_DFECK_EN, x)
+
+#define SD10G_LANE_LANE_23_CFG_ERRAMP_PD         BIT(3)
+#define SD10G_LANE_LANE_23_CFG_ERRAMP_PD_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_23_CFG_ERRAMP_PD, x)
+#define SD10G_LANE_LANE_23_CFG_ERRAMP_PD_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_23_CFG_ERRAMP_PD, x)
+
+#define SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0      GENMASK(6, 4)
+#define SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0, x)
+#define SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_23_CFG_DFEDIG_M_2_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_24 */
+#define SD10G_LANE_LANE_24(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 144, 0, 1, 4)
+
+#define SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0    GENMASK(3, 0)
+#define SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0, x)
+#define SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_24_CFG_PI_BW_GEN1_3_0, x)
+
+#define SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0    GENMASK(7, 4)
+#define SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0, x)
+#define SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_24_CFG_PI_BW_GEN2_3_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_26 */
+#define SD10G_LANE_LANE_26(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 152, 0, 1, 4)
+
+#define SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0 GENMASK(7, 0)
+#define SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0, x)
+#define SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_26_CFG_ISCAN_EXT_DAC_7_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_2F */
+#define SD10G_LANE_LANE_2F(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 188, 0, 1, 4)
+
+#define SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0        GENMASK(2, 0)
+#define SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0, x)
+#define SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_2F_CFG_VGA_CP_2_0, x)
+
+#define SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0      GENMASK(7, 4)
+#define SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0, x)
+#define SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_2F_CFG_VGA_CTRL_3_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_30 */
+#define SD10G_LANE_LANE_30(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 192, 0, 1, 4)
+
+#define SD10G_LANE_LANE_30_CFG_SUMMER_EN         BIT(0)
+#define SD10G_LANE_LANE_30_CFG_SUMMER_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_30_CFG_SUMMER_EN, x)
+#define SD10G_LANE_LANE_30_CFG_SUMMER_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_30_CFG_SUMMER_EN, x)
+
+#define SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0     GENMASK(6, 4)
+#define SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0, x)
+#define SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_30_CFG_RXDIV_SEL_2_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_31 */
+#define SD10G_LANE_LANE_31(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 196, 0, 1, 4)
+
+#define SD10G_LANE_LANE_31_CFG_PI_RSTN           BIT(0)
+#define SD10G_LANE_LANE_31_CFG_PI_RSTN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_31_CFG_PI_RSTN, x)
+#define SD10G_LANE_LANE_31_CFG_PI_RSTN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_31_CFG_PI_RSTN, x)
+
+#define SD10G_LANE_LANE_31_CFG_CDR_RSTN          BIT(1)
+#define SD10G_LANE_LANE_31_CFG_CDR_RSTN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_31_CFG_CDR_RSTN, x)
+#define SD10G_LANE_LANE_31_CFG_CDR_RSTN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_31_CFG_CDR_RSTN, x)
+
+#define SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG       BIT(2)
+#define SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG, x)
+#define SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_31_CFG_RSTN_DFEDIG, x)
+
+#define SD10G_LANE_LANE_31_CFG_CTLE_RSTN         BIT(3)
+#define SD10G_LANE_LANE_31_CFG_CTLE_RSTN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_31_CFG_CTLE_RSTN, x)
+#define SD10G_LANE_LANE_31_CFG_CTLE_RSTN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_31_CFG_CTLE_RSTN, x)
+
+#define SD10G_LANE_LANE_31_CFG_RSTN_DIV5_8       BIT(4)
+#define SD10G_LANE_LANE_31_CFG_RSTN_DIV5_8_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_31_CFG_RSTN_DIV5_8, x)
+#define SD10G_LANE_LANE_31_CFG_RSTN_DIV5_8_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_31_CFG_RSTN_DIV5_8, x)
+
+#define SD10G_LANE_LANE_31_CFG_R50_EN            BIT(5)
+#define SD10G_LANE_LANE_31_CFG_R50_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_31_CFG_R50_EN, x)
+#define SD10G_LANE_LANE_31_CFG_R50_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_31_CFG_R50_EN, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_32 */
+#define SD10G_LANE_LANE_32(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 200, 0, 1, 4)
+
+#define SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0 GENMASK(1, 0)
+#define SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0, x)
+#define SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_32_CFG_ITX_IPCLK_BASE_1_0, x)
+
+#define SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0 GENMASK(5, 4)
+#define SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0, x)
+#define SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_32_CFG_ITX_IPCML_BASE_1_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_33 */
+#define SD10G_LANE_LANE_33(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 204, 0, 1, 4)
+
+#define SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0 GENMASK(2, 0)
+#define SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0, x)
+#define SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_33_CFG_ITX_IPDRIVER_BASE_2_0, x)
+
+#define SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0 GENMASK(5, 4)
+#define SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0, x)
+#define SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_33_CFG_ITX_IPPREEMP_BASE_1_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_35 */
+#define SD10G_LANE_LANE_35(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 212, 0, 1, 4)
+
+#define SD10G_LANE_LANE_35_CFG_TXRATE_1_0        GENMASK(1, 0)
+#define SD10G_LANE_LANE_35_CFG_TXRATE_1_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_35_CFG_TXRATE_1_0, x)
+#define SD10G_LANE_LANE_35_CFG_TXRATE_1_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_35_CFG_TXRATE_1_0, x)
+
+#define SD10G_LANE_LANE_35_CFG_RXRATE_1_0        GENMASK(5, 4)
+#define SD10G_LANE_LANE_35_CFG_RXRATE_1_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_35_CFG_RXRATE_1_0, x)
+#define SD10G_LANE_LANE_35_CFG_RXRATE_1_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_35_CFG_RXRATE_1_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_36 */
+#define SD10G_LANE_LANE_36(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 216, 0, 1, 4)
+
+#define SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0 GENMASK(1, 0)
+#define SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0, x)
+#define SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_36_CFG_PREDRV_SLEWRATE_1_0, x)
+
+#define SD10G_LANE_LANE_36_CFG_EID_LP            BIT(4)
+#define SD10G_LANE_LANE_36_CFG_EID_LP_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_36_CFG_EID_LP, x)
+#define SD10G_LANE_LANE_36_CFG_EID_LP_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_36_CFG_EID_LP, x)
+
+#define SD10G_LANE_LANE_36_CFG_EN_PREDRV_EMPH    BIT(5)
+#define SD10G_LANE_LANE_36_CFG_EN_PREDRV_EMPH_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_36_CFG_EN_PREDRV_EMPH, x)
+#define SD10G_LANE_LANE_36_CFG_EN_PREDRV_EMPH_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_36_CFG_EN_PREDRV_EMPH, x)
+
+/*
+ * Auto-generated SerDes register/field accessors: each field gets a
+ * mask (BIT()/GENMASK()) plus _SET()/_GET() helpers built on
+ * FIELD_PREP()/FIELD_GET().  NOTE(review): the __REG() argument
+ * layout (target id, instance, group base/repl/size, register
+ * offset/repl/size) is defined elsewhere in this file — confirm
+ * against the __REG() macro before hand-editing any generated value.
+ */
+#define SD10G_LANE_LANE_36_CFG_PRBS_SEL          BIT(6)
+#define SD10G_LANE_LANE_36_CFG_PRBS_SEL_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_36_CFG_PRBS_SEL, x)
+#define SD10G_LANE_LANE_36_CFG_PRBS_SEL_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_36_CFG_PRBS_SEL, x)
+
+#define SD10G_LANE_LANE_36_CFG_PRBS_SETB         BIT(7)
+#define SD10G_LANE_LANE_36_CFG_PRBS_SETB_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_36_CFG_PRBS_SETB, x)
+#define SD10G_LANE_LANE_36_CFG_PRBS_SETB_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_36_CFG_PRBS_SETB, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_37 */
+#define SD10G_LANE_LANE_37(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 220, 0, 1, 4)
+
+#define SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD     BIT(0)
+#define SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD, x)
+#define SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_37_CFG_RXDET_COMP_PD, x)
+
+#define SD10G_LANE_LANE_37_CFG_PD_RX_CKTREE      BIT(1)
+#define SD10G_LANE_LANE_37_CFG_PD_RX_CKTREE_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_37_CFG_PD_RX_CKTREE, x)
+#define SD10G_LANE_LANE_37_CFG_PD_RX_CKTREE_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_37_CFG_PD_RX_CKTREE, x)
+
+#define SD10G_LANE_LANE_37_CFG_TXSWING_HALF      BIT(2)
+#define SD10G_LANE_LANE_37_CFG_TXSWING_HALF_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_37_CFG_TXSWING_HALF, x)
+#define SD10G_LANE_LANE_37_CFG_TXSWING_HALF_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_37_CFG_TXSWING_HALF, x)
+
+#define SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0   GENMASK(5, 4)
+#define SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0, x)
+#define SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_37_CFG_IP_PRE_BASE_1_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_39 */
+#define SD10G_LANE_LANE_39(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 228, 0, 1, 4)
+
+#define SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0      GENMASK(2, 0)
+#define SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0, x)
+#define SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_39_CFG_RXFILT_Y_2_0, x)
+
+#define SD10G_LANE_LANE_39_CFG_RX_SSC_LH         BIT(4)
+#define SD10G_LANE_LANE_39_CFG_RX_SSC_LH_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_39_CFG_RX_SSC_LH, x)
+#define SD10G_LANE_LANE_39_CFG_RX_SSC_LH_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_39_CFG_RX_SSC_LH, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_3A */
+#define SD10G_LANE_LANE_3A(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 232, 0, 1, 4)
+
+#define SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0        GENMASK(3, 0)
+#define SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0, x)
+#define SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_3A_CFG_MP_MIN_3_0, x)
+
+#define SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0        GENMASK(7, 4)
+#define SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0, x)
+#define SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_3A_CFG_MP_MAX_3_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_3C */
+#define SD10G_LANE_LANE_3C(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 240, 0, 1, 4)
+
+#define SD10G_LANE_LANE_3C_CFG_DIS_ACC           BIT(0)
+#define SD10G_LANE_LANE_3C_CFG_DIS_ACC_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_3C_CFG_DIS_ACC, x)
+#define SD10G_LANE_LANE_3C_CFG_DIS_ACC_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_3C_CFG_DIS_ACC, x)
+
+#define SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER      BIT(1)
+#define SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER, x)
+#define SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_3C_CFG_DIS_2NDORDER, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_40 */
+#define SD10G_LANE_LANE_40(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 256, 0, 1, 4)
+
+#define SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0  GENMASK(7, 0)
+#define SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0, x)
+#define SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_40_CFG_LANE_RESERVE_7_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_41 */
+#define SD10G_LANE_LANE_41(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 260, 0, 1, 4)
+
+#define SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8 GENMASK(7, 0)
+#define SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8, x)
+#define SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_41_CFG_LANE_RESERVE_15_8, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_0:LANE_42 */
+#define SD10G_LANE_LANE_42(t)     __REG(TARGET_SD10G_LANE, t, 12, 0, 0, 1, 288, 264, 0, 1, 4)
+
+#define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0   GENMASK(2, 0)
+#define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0, x)
+#define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_42_CFG_CDR_KF_GEN1_2_0, x)
+
+#define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0   GENMASK(6, 4)
+#define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0, x)
+#define SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_42_CFG_CDR_KF_GEN2_2_0, x)
+
+/*
+ * SD10G lane groups 1-8: per-lane analog/control/status fields
+ * (generated).  Do not modify individual bit positions by hand;
+ * they mirror the hardware CSR description.
+ */
+/*      SD10G_LANE_TARGET:LANE_GRP_1:LANE_48 */
+#define SD10G_LANE_LANE_48(t)     __REG(TARGET_SD10G_LANE, t, 12, 288, 0, 1, 40, 0, 0, 1, 4)
+
+#define SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0      GENMASK(3, 0)
+#define SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0, x)
+#define SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_48_CFG_ALOS_THR_3_0, x)
+
+#define SD10G_LANE_LANE_48_CFG_AUX_RXCK_SEL      BIT(4)
+#define SD10G_LANE_LANE_48_CFG_AUX_RXCK_SEL_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_48_CFG_AUX_RXCK_SEL, x)
+#define SD10G_LANE_LANE_48_CFG_AUX_RXCK_SEL_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_48_CFG_AUX_RXCK_SEL, x)
+
+#define SD10G_LANE_LANE_48_CFG_CLK_ENQ           BIT(5)
+#define SD10G_LANE_LANE_48_CFG_CLK_ENQ_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_48_CFG_CLK_ENQ, x)
+#define SD10G_LANE_LANE_48_CFG_CLK_ENQ_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_48_CFG_CLK_ENQ, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_1:LANE_50 */
+#define SD10G_LANE_LANE_50(t)     __REG(TARGET_SD10G_LANE, t, 12, 288, 0, 1, 40, 32, 0, 1, 4)
+
+#define SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0   GENMASK(1, 0)
+#define SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0, x)
+#define SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_50_CFG_SSC_PI_STEP_1_0, x)
+
+#define SD10G_LANE_LANE_50_CFG_SSC_RESETB        BIT(4)
+#define SD10G_LANE_LANE_50_CFG_SSC_RESETB_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_50_CFG_SSC_RESETB, x)
+#define SD10G_LANE_LANE_50_CFG_SSC_RESETB_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_50_CFG_SSC_RESETB, x)
+
+#define SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL   BIT(5)
+#define SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL, x)
+#define SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_50_CFG_SSC_RTL_CLK_SEL, x)
+
+#define SD10G_LANE_LANE_50_CFG_AUX_TXCK_SEL      BIT(6)
+#define SD10G_LANE_LANE_50_CFG_AUX_TXCK_SEL_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_50_CFG_AUX_TXCK_SEL, x)
+#define SD10G_LANE_LANE_50_CFG_AUX_TXCK_SEL_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_50_CFG_AUX_TXCK_SEL, x)
+
+#define SD10G_LANE_LANE_50_CFG_JT_EN             BIT(7)
+#define SD10G_LANE_LANE_50_CFG_JT_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_50_CFG_JT_EN, x)
+#define SD10G_LANE_LANE_50_CFG_JT_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_50_CFG_JT_EN, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_2:LANE_52 */
+#define SD10G_LANE_LANE_52(t)     __REG(TARGET_SD10G_LANE, t, 12, 328, 0, 1, 24, 0, 0, 1, 4)
+
+#define SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0 GENMASK(5, 0)
+#define SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0, x)
+#define SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_52_CFG_IBIAS_TUNE_RESERVE_5_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_4:LANE_83 */
+#define SD10G_LANE_LANE_83(t)     __REG(TARGET_SD10G_LANE, t, 12, 464, 0, 1, 112, 60, 0, 1, 4)
+
+#define SD10G_LANE_LANE_83_R_TX_BIT_REVERSE      BIT(0)
+#define SD10G_LANE_LANE_83_R_TX_BIT_REVERSE_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_83_R_TX_BIT_REVERSE, x)
+#define SD10G_LANE_LANE_83_R_TX_BIT_REVERSE_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_83_R_TX_BIT_REVERSE, x)
+
+#define SD10G_LANE_LANE_83_R_TX_POL_INV          BIT(1)
+#define SD10G_LANE_LANE_83_R_TX_POL_INV_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_83_R_TX_POL_INV, x)
+#define SD10G_LANE_LANE_83_R_TX_POL_INV_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_83_R_TX_POL_INV, x)
+
+#define SD10G_LANE_LANE_83_R_RX_BIT_REVERSE      BIT(2)
+#define SD10G_LANE_LANE_83_R_RX_BIT_REVERSE_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_83_R_RX_BIT_REVERSE, x)
+#define SD10G_LANE_LANE_83_R_RX_BIT_REVERSE_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_83_R_RX_BIT_REVERSE, x)
+
+#define SD10G_LANE_LANE_83_R_RX_POL_INV          BIT(3)
+#define SD10G_LANE_LANE_83_R_RX_POL_INV_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_83_R_RX_POL_INV, x)
+#define SD10G_LANE_LANE_83_R_RX_POL_INV_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_83_R_RX_POL_INV, x)
+
+#define SD10G_LANE_LANE_83_R_DFE_RSTN            BIT(4)
+#define SD10G_LANE_LANE_83_R_DFE_RSTN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_83_R_DFE_RSTN, x)
+#define SD10G_LANE_LANE_83_R_DFE_RSTN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_83_R_DFE_RSTN, x)
+
+#define SD10G_LANE_LANE_83_R_CDR_RSTN            BIT(5)
+#define SD10G_LANE_LANE_83_R_CDR_RSTN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_83_R_CDR_RSTN, x)
+#define SD10G_LANE_LANE_83_R_CDR_RSTN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_83_R_CDR_RSTN, x)
+
+#define SD10G_LANE_LANE_83_R_CTLE_RSTN           BIT(6)
+#define SD10G_LANE_LANE_83_R_CTLE_RSTN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_83_R_CTLE_RSTN, x)
+#define SD10G_LANE_LANE_83_R_CTLE_RSTN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_83_R_CTLE_RSTN, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_5:LANE_93 */
+#define SD10G_LANE_LANE_93(t)     __REG(TARGET_SD10G_LANE, t, 12, 576, 0, 1, 64, 12, 0, 1, 4)
+
+#define SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN    BIT(0)
+#define SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN, x)
+#define SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_93_R_RXEI_FIFO_RST_EN, x)
+
+#define SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT BIT(1)
+#define SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT, x)
+#define SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_93_R_DWIDTHCTRL_FROM_HWT, x)
+
+#define SD10G_LANE_LANE_93_R_DIS_RESTORE_DFE     BIT(2)
+#define SD10G_LANE_LANE_93_R_DIS_RESTORE_DFE_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_93_R_DIS_RESTORE_DFE, x)
+#define SD10G_LANE_LANE_93_R_DIS_RESTORE_DFE_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_93_R_DIS_RESTORE_DFE, x)
+
+#define SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL     BIT(3)
+#define SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL, x)
+#define SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_93_R_EN_RATECHG_CTRL, x)
+
+#define SD10G_LANE_LANE_93_R_REG_MANUAL          BIT(4)
+#define SD10G_LANE_LANE_93_R_REG_MANUAL_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_93_R_REG_MANUAL, x)
+#define SD10G_LANE_LANE_93_R_REG_MANUAL_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_93_R_REG_MANUAL, x)
+
+#define SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT   BIT(5)
+#define SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT, x)
+#define SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_93_R_AUXCKSEL_FROM_HWT, x)
+
+#define SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT    BIT(6)
+#define SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT, x)
+#define SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_93_R_LANE_ID_FROM_HWT, x)
+
+#define SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT BIT(7)
+#define SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT, x)
+#define SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_93_R_RX_PCIE_GEN12_FROM_HWT, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_5:LANE_94 */
+#define SD10G_LANE_LANE_94(t)     __REG(TARGET_SD10G_LANE, t, 12, 576, 0, 1, 64, 16, 0, 1, 4)
+
+#define SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0      GENMASK(2, 0)
+#define SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0, x)
+#define SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_94_R_DWIDTHCTRL_2_0, x)
+
+#define SD10G_LANE_LANE_94_R_ISCAN_REG           BIT(4)
+#define SD10G_LANE_LANE_94_R_ISCAN_REG_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_94_R_ISCAN_REG, x)
+#define SD10G_LANE_LANE_94_R_ISCAN_REG_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_94_R_ISCAN_REG, x)
+
+#define SD10G_LANE_LANE_94_R_TXEQ_REG            BIT(5)
+#define SD10G_LANE_LANE_94_R_TXEQ_REG_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_94_R_TXEQ_REG, x)
+#define SD10G_LANE_LANE_94_R_TXEQ_REG_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_94_R_TXEQ_REG, x)
+
+#define SD10G_LANE_LANE_94_R_MISC_REG            BIT(6)
+#define SD10G_LANE_LANE_94_R_MISC_REG_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_94_R_MISC_REG, x)
+#define SD10G_LANE_LANE_94_R_MISC_REG_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_94_R_MISC_REG, x)
+
+#define SD10G_LANE_LANE_94_R_SWING_REG           BIT(7)
+#define SD10G_LANE_LANE_94_R_SWING_REG_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_94_R_SWING_REG, x)
+#define SD10G_LANE_LANE_94_R_SWING_REG_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_94_R_SWING_REG, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_5:LANE_9E */
+#define SD10G_LANE_LANE_9E(t)     __REG(TARGET_SD10G_LANE, t, 12, 576, 0, 1, 64, 56, 0, 1, 4)
+
+#define SD10G_LANE_LANE_9E_R_RXEQ_REG            BIT(0)
+#define SD10G_LANE_LANE_9E_R_RXEQ_REG_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_9E_R_RXEQ_REG, x)
+#define SD10G_LANE_LANE_9E_R_RXEQ_REG_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_9E_R_RXEQ_REG, x)
+
+#define SD10G_LANE_LANE_9E_R_AUTO_RST_TREE_PD_MAN BIT(1)
+#define SD10G_LANE_LANE_9E_R_AUTO_RST_TREE_PD_MAN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_9E_R_AUTO_RST_TREE_PD_MAN, x)
+#define SD10G_LANE_LANE_9E_R_AUTO_RST_TREE_PD_MAN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_9E_R_AUTO_RST_TREE_PD_MAN, x)
+
+#define SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN    BIT(2)
+#define SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN, x)
+#define SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_9E_R_EN_AUTO_CDR_RSTN, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_6:LANE_A1 */
+#define SD10G_LANE_LANE_A1(t)     __REG(TARGET_SD10G_LANE, t, 12, 640, 0, 1, 128, 4, 0, 1, 4)
+
+#define SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0 GENMASK(1, 0)
+#define SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0, x)
+#define SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_A1_R_PMA_TXCK_DIV_SEL_1_0, x)
+
+#define SD10G_LANE_LANE_A1_R_SSC_FROM_HWT        BIT(4)
+#define SD10G_LANE_LANE_A1_R_SSC_FROM_HWT_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_A1_R_SSC_FROM_HWT, x)
+#define SD10G_LANE_LANE_A1_R_SSC_FROM_HWT_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_A1_R_SSC_FROM_HWT, x)
+
+#define SD10G_LANE_LANE_A1_R_CDR_FROM_HWT        BIT(5)
+#define SD10G_LANE_LANE_A1_R_CDR_FROM_HWT_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_A1_R_CDR_FROM_HWT, x)
+#define SD10G_LANE_LANE_A1_R_CDR_FROM_HWT_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_A1_R_CDR_FROM_HWT, x)
+
+#define SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT BIT(6)
+#define SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT, x)
+#define SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_A1_R_PCLK_GATING_FROM_HWT, x)
+
+#define SD10G_LANE_LANE_A1_R_PCLK_GATING         BIT(7)
+#define SD10G_LANE_LANE_A1_R_PCLK_GATING_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_A1_R_PCLK_GATING, x)
+#define SD10G_LANE_LANE_A1_R_PCLK_GATING_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_A1_R_PCLK_GATING, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_6:LANE_A2 */
+#define SD10G_LANE_LANE_A2(t)     __REG(TARGET_SD10G_LANE, t, 12, 640, 0, 1, 128, 8, 0, 1, 4)
+
+#define SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0 GENMASK(4, 0)
+#define SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0, x)
+#define SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_A2_R_PCS2PMA_PHYMODE_4_0, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_8:LANE_DF */
+#define SD10G_LANE_LANE_DF(t)     __REG(TARGET_SD10G_LANE, t, 12, 832, 0, 1, 84, 60, 0, 1, 4)
+
+#define SD10G_LANE_LANE_DF_LOL_UDL               BIT(0)
+#define SD10G_LANE_LANE_DF_LOL_UDL_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_DF_LOL_UDL, x)
+#define SD10G_LANE_LANE_DF_LOL_UDL_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_DF_LOL_UDL, x)
+
+#define SD10G_LANE_LANE_DF_LOL                   BIT(1)
+#define SD10G_LANE_LANE_DF_LOL_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_DF_LOL, x)
+#define SD10G_LANE_LANE_DF_LOL_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_DF_LOL, x)
+
+#define SD10G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED BIT(2)
+#define SD10G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED, x)
+#define SD10G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED, x)
+
+#define SD10G_LANE_LANE_DF_SQUELCH               BIT(3)
+#define SD10G_LANE_LANE_DF_SQUELCH_SET(x)\
+       FIELD_PREP(SD10G_LANE_LANE_DF_SQUELCH, x)
+#define SD10G_LANE_LANE_DF_SQUELCH_GET(x)\
+       FIELD_GET(SD10G_LANE_LANE_DF_SQUELCH, x)
+
+/*
+ * SD25G CMU (clock management unit) registers, group 0 (generated).
+ * NOTE(review): section comments say "SD25G_TARGET" while the macros
+ * use the SD25G_LANE_/TARGET_SD25G_LANE prefix — this mismatch comes
+ * from the register-map generator; confirm before renaming anything.
+ */
+/*      SD25G_TARGET:CMU_GRP_0:CMU_09 */
+#define SD25G_LANE_CMU_09(t)      __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 36, 0, 1, 4)
+
+#define SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN      BIT(0)
+#define SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN, x)
+#define SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_09_CFG_REFCK_TERM_EN, x)
+
+#define SD25G_LANE_CMU_09_CFG_EN_DUMMY           BIT(1)
+#define SD25G_LANE_CMU_09_CFG_EN_DUMMY_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_09_CFG_EN_DUMMY, x)
+#define SD25G_LANE_CMU_09_CFG_EN_DUMMY_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_09_CFG_EN_DUMMY, x)
+
+#define SD25G_LANE_CMU_09_CFG_PLL_LOS_SET        BIT(2)
+#define SD25G_LANE_CMU_09_CFG_PLL_LOS_SET_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_09_CFG_PLL_LOS_SET, x)
+#define SD25G_LANE_CMU_09_CFG_PLL_LOS_SET_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_09_CFG_PLL_LOS_SET, x)
+
+#define SD25G_LANE_CMU_09_CFG_CTRL_LOGIC_PD      BIT(3)
+#define SD25G_LANE_CMU_09_CFG_CTRL_LOGIC_PD_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_09_CFG_CTRL_LOGIC_PD, x)
+#define SD25G_LANE_CMU_09_CFG_CTRL_LOGIC_PD_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_09_CFG_CTRL_LOGIC_PD, x)
+
+#define SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0     GENMASK(5, 4)
+#define SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0, x)
+#define SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_09_CFG_PLL_TP_SEL_1_0, x)
+
+/*      SD25G_TARGET:CMU_GRP_0:CMU_0B */
+#define SD25G_LANE_CMU_0B(t)      __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 44, 0, 1, 4)
+
+#define SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT      BIT(0)
+#define SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT, x)
+#define SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0B_CFG_FORCE_RX_FILT, x)
+
+#define SD25G_LANE_CMU_0B_CFG_DISLOL             BIT(1)
+#define SD25G_LANE_CMU_0B_CFG_DISLOL_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0B_CFG_DISLOL, x)
+#define SD25G_LANE_CMU_0B_CFG_DISLOL_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0B_CFG_DISLOL, x)
+
+#define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_EN BIT(2)
+#define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_EN, x)
+#define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_EN, x)
+
+#define SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN     BIT(3)
+#define SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN, x)
+#define SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0B_CFG_VCO_CAL_RESETN, x)
+
+#define SD25G_LANE_CMU_0B_CFG_VFILT2PAD          BIT(4)
+#define SD25G_LANE_CMU_0B_CFG_VFILT2PAD_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0B_CFG_VFILT2PAD, x)
+#define SD25G_LANE_CMU_0B_CFG_VFILT2PAD_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0B_CFG_VFILT2PAD, x)
+
+#define SD25G_LANE_CMU_0B_CFG_DISLOS             BIT(5)
+#define SD25G_LANE_CMU_0B_CFG_DISLOS_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0B_CFG_DISLOS, x)
+#define SD25G_LANE_CMU_0B_CFG_DISLOS_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0B_CFG_DISLOS, x)
+
+#define SD25G_LANE_CMU_0B_CFG_DCLOL              BIT(6)
+#define SD25G_LANE_CMU_0B_CFG_DCLOL_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0B_CFG_DCLOL, x)
+#define SD25G_LANE_CMU_0B_CFG_DCLOL_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0B_CFG_DCLOL, x)
+
+#define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN    BIT(7)
+#define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN, x)
+#define SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0B_CFG_RST_TREE_PD_MAN, x)
+
+/*      SD25G_TARGET:CMU_GRP_0:CMU_0C */
+#define SD25G_LANE_CMU_0C(t)      __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 48, 0, 1, 4)
+
+#define SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET        BIT(0)
+#define SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET, x)
+#define SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0C_CFG_PLL_LOL_SET, x)
+
+#define SD25G_LANE_CMU_0C_CFG_EN_TX_CK_DN        BIT(1)
+#define SD25G_LANE_CMU_0C_CFG_EN_TX_CK_DN_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0C_CFG_EN_TX_CK_DN, x)
+#define SD25G_LANE_CMU_0C_CFG_EN_TX_CK_DN_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0C_CFG_EN_TX_CK_DN, x)
+
+#define SD25G_LANE_CMU_0C_CFG_VCO_PD             BIT(2)
+#define SD25G_LANE_CMU_0C_CFG_VCO_PD_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0C_CFG_VCO_PD, x)
+#define SD25G_LANE_CMU_0C_CFG_VCO_PD_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0C_CFG_VCO_PD, x)
+
+#define SD25G_LANE_CMU_0C_CFG_EN_TX_CK_UP        BIT(3)
+#define SD25G_LANE_CMU_0C_CFG_EN_TX_CK_UP_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0C_CFG_EN_TX_CK_UP, x)
+#define SD25G_LANE_CMU_0C_CFG_EN_TX_CK_UP_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0C_CFG_EN_TX_CK_UP, x)
+
+#define SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0   GENMASK(5, 4)
+#define SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0, x)
+#define SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0C_CFG_VCO_DIV_MODE_1_0, x)
+
+/*      SD25G_TARGET:CMU_GRP_0:CMU_0D */
+#define SD25G_LANE_CMU_0D(t)      __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 52, 0, 1, 4)
+
+#define SD25G_LANE_CMU_0D_CFG_CK_TREE_PD         BIT(0)
+#define SD25G_LANE_CMU_0D_CFG_CK_TREE_PD_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0D_CFG_CK_TREE_PD, x)
+#define SD25G_LANE_CMU_0D_CFG_CK_TREE_PD_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0D_CFG_CK_TREE_PD, x)
+
+#define SD25G_LANE_CMU_0D_CFG_EN_RX_CK_DN        BIT(1)
+#define SD25G_LANE_CMU_0D_CFG_EN_RX_CK_DN_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0D_CFG_EN_RX_CK_DN, x)
+#define SD25G_LANE_CMU_0D_CFG_EN_RX_CK_DN_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0D_CFG_EN_RX_CK_DN, x)
+
+#define SD25G_LANE_CMU_0D_CFG_EN_RX_CK_UP        BIT(2)
+#define SD25G_LANE_CMU_0D_CFG_EN_RX_CK_UP_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0D_CFG_EN_RX_CK_UP, x)
+#define SD25G_LANE_CMU_0D_CFG_EN_RX_CK_UP_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0D_CFG_EN_RX_CK_UP, x)
+
+#define SD25G_LANE_CMU_0D_CFG_VCO_CAL_BYP        BIT(3)
+#define SD25G_LANE_CMU_0D_CFG_VCO_CAL_BYP_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0D_CFG_VCO_CAL_BYP, x)
+#define SD25G_LANE_CMU_0D_CFG_VCO_CAL_BYP_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0D_CFG_VCO_CAL_BYP, x)
+
+#define SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0     GENMASK(5, 4)
+#define SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0, x)
+#define SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0D_CFG_PRE_DIVSEL_1_0, x)
+
+/* SD25G CMU groups 0-4 (continued): generated field accessors. */
+/*      SD25G_TARGET:CMU_GRP_0:CMU_0E */
+#define SD25G_LANE_CMU_0E(t)      __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 56, 0, 1, 4)
+
+#define SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0        GENMASK(3, 0)
+#define SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0, x)
+#define SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0E_CFG_SEL_DIV_3_0, x)
+
+#define SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD   BIT(4)
+#define SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD, x)
+#define SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_0E_CFG_PMAA_CENTR_CK_PD, x)
+
+/*      SD25G_TARGET:CMU_GRP_0:CMU_13 */
+#define SD25G_LANE_CMU_13(t)      __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 76, 0, 1, 4)
+
+#define SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0    GENMASK(3, 0)
+#define SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0, x)
+#define SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_13_CFG_PLL_RESERVE_3_0, x)
+
+#define SD25G_LANE_CMU_13_CFG_JT_EN              BIT(4)
+#define SD25G_LANE_CMU_13_CFG_JT_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_13_CFG_JT_EN, x)
+#define SD25G_LANE_CMU_13_CFG_JT_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_13_CFG_JT_EN, x)
+
+/*      SD25G_TARGET:CMU_GRP_0:CMU_18 */
+#define SD25G_LANE_CMU_18(t)      __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 96, 0, 1, 4)
+
+#define SD25G_LANE_CMU_18_R_PLL_RSTN             BIT(0)
+#define SD25G_LANE_CMU_18_R_PLL_RSTN_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_18_R_PLL_RSTN, x)
+#define SD25G_LANE_CMU_18_R_PLL_RSTN_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_18_R_PLL_RSTN, x)
+
+#define SD25G_LANE_CMU_18_R_PLL_LOL_SET          BIT(1)
+#define SD25G_LANE_CMU_18_R_PLL_LOL_SET_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_18_R_PLL_LOL_SET, x)
+#define SD25G_LANE_CMU_18_R_PLL_LOL_SET_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_18_R_PLL_LOL_SET, x)
+
+#define SD25G_LANE_CMU_18_R_PLL_LOS_SET          BIT(2)
+#define SD25G_LANE_CMU_18_R_PLL_LOS_SET_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_18_R_PLL_LOS_SET, x)
+#define SD25G_LANE_CMU_18_R_PLL_LOS_SET_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_18_R_PLL_LOS_SET, x)
+
+#define SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0       GENMASK(5, 4)
+#define SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0, x)
+#define SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_18_R_PLL_TP_SEL_1_0, x)
+
+/*      SD25G_TARGET:CMU_GRP_0:CMU_19 */
+#define SD25G_LANE_CMU_19(t)      __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 100, 0, 1, 4)
+
+#define SD25G_LANE_CMU_19_R_CK_RESETB            BIT(0)
+#define SD25G_LANE_CMU_19_R_CK_RESETB_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_19_R_CK_RESETB, x)
+#define SD25G_LANE_CMU_19_R_CK_RESETB_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_19_R_CK_RESETB, x)
+
+#define SD25G_LANE_CMU_19_R_PLL_DLOL_EN          BIT(1)
+#define SD25G_LANE_CMU_19_R_PLL_DLOL_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_19_R_PLL_DLOL_EN, x)
+#define SD25G_LANE_CMU_19_R_PLL_DLOL_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_19_R_PLL_DLOL_EN, x)
+
+/*      SD25G_TARGET:CMU_GRP_0:CMU_1A */
+#define SD25G_LANE_CMU_1A(t)      __REG(TARGET_SD25G_LANE, t, 8, 0, 0, 1, 132, 104, 0, 1, 4)
+
+#define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0       GENMASK(2, 0)
+#define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0, x)
+#define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_2_0, x)
+
+#define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT  BIT(4)
+#define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT, x)
+#define SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_1A_R_DWIDTHCTRL_FROM_HWT, x)
+
+#define SD25G_LANE_CMU_1A_R_MASK_EI_SOURCE       BIT(5)
+#define SD25G_LANE_CMU_1A_R_MASK_EI_SOURCE_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_1A_R_MASK_EI_SOURCE, x)
+#define SD25G_LANE_CMU_1A_R_MASK_EI_SOURCE_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_1A_R_MASK_EI_SOURCE, x)
+
+#define SD25G_LANE_CMU_1A_R_REG_MANUAL           BIT(6)
+#define SD25G_LANE_CMU_1A_R_REG_MANUAL_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_1A_R_REG_MANUAL, x)
+#define SD25G_LANE_CMU_1A_R_REG_MANUAL_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_1A_R_REG_MANUAL, x)
+
+/*      SD25G_TARGET:CMU_GRP_1:CMU_2A */
+#define SD25G_LANE_CMU_2A(t)      __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 36, 0, 1, 4)
+
+#define SD25G_LANE_CMU_2A_R_DBG_SEL_1_0          GENMASK(1, 0)
+#define SD25G_LANE_CMU_2A_R_DBG_SEL_1_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_2A_R_DBG_SEL_1_0, x)
+#define SD25G_LANE_CMU_2A_R_DBG_SEL_1_0_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_2A_R_DBG_SEL_1_0, x)
+
+#define SD25G_LANE_CMU_2A_R_DBG_LINK_LANE        BIT(4)
+#define SD25G_LANE_CMU_2A_R_DBG_LINK_LANE_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_2A_R_DBG_LINK_LANE, x)
+#define SD25G_LANE_CMU_2A_R_DBG_LINK_LANE_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_2A_R_DBG_LINK_LANE, x)
+
+#define SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS       BIT(5)
+#define SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS, x)
+#define SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_2A_R_DBG_LOL_STATUS, x)
+
+/*      SD25G_TARGET:CMU_GRP_1:CMU_30 */
+#define SD25G_LANE_CMU_30(t)      __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 60, 0, 1, 4)
+
+#define SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0 GENMASK(2, 0)
+#define SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0, x)
+#define SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_30_R_TXFIFO_CK_DIV_PMAD_2_0, x)
+
+#define SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0 GENMASK(6, 4)
+#define SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0, x)
+#define SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_30_R_RXFIFO_CK_DIV_PMAD_2_0, x)
+
+/*      SD25G_TARGET:CMU_GRP_1:CMU_31 */
+#define SD25G_LANE_CMU_31(t)      __REG(TARGET_SD25G_LANE, t, 8, 132, 0, 1, 124, 64, 0, 1, 4)
+
+#define SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0 GENMASK(7, 0)
+#define SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0, x)
+#define SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_31_CFG_COMMON_RESERVE_7_0, x)
+
+/*      SD25G_TARGET:CMU_GRP_2:CMU_40 */
+#define SD25G_LANE_CMU_40(t)      __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 0, 0, 1, 4)
+
+#define SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL     BIT(0)
+#define SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL, x)
+#define SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_CKSKEW_CTRL, x)
+
+#define SD25G_LANE_CMU_40_L0_CFG_ISCAN_HOLD      BIT(1)
+#define SD25G_LANE_CMU_40_L0_CFG_ISCAN_HOLD_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_40_L0_CFG_ISCAN_HOLD, x)
+#define SD25G_LANE_CMU_40_L0_CFG_ISCAN_HOLD_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_ISCAN_HOLD, x)
+
+#define SD25G_LANE_CMU_40_L0_CFG_PD_CLK          BIT(2)
+#define SD25G_LANE_CMU_40_L0_CFG_PD_CLK_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_40_L0_CFG_PD_CLK, x)
+#define SD25G_LANE_CMU_40_L0_CFG_PD_CLK_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_PD_CLK, x)
+
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN        BIT(3)
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN, x)
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_TXCAL_EN, x)
+
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_MAN_EN    BIT(4)
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_MAN_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_40_L0_CFG_TXCAL_MAN_EN, x)
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_MAN_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_TXCAL_MAN_EN, x)
+
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST       BIT(5)
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST, x)
+#define SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_40_L0_CFG_TXCAL_RST, x)
+
+/*      SD25G_TARGET:CMU_GRP_2:CMU_45 */
+#define SD25G_LANE_CMU_45(t)      __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 20, 0, 1, 4)
+
+#define SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0  GENMASK(7, 0)
+#define SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0, x)
+#define SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_45_L0_CFG_TX_RESERVE_7_0, x)
+
+/*      SD25G_TARGET:CMU_GRP_2:CMU_46 */
+#define SD25G_LANE_CMU_46(t)      __REG(TARGET_SD25G_LANE, t, 8, 256, 0, 1, 512, 24, 0, 1, 4)
+
+#define SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8 GENMASK(7, 0)
+#define SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8, x)
+#define SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_46_L0_CFG_TX_RESERVE_15_8, x)
+
+/*      SD25G_TARGET:CMU_GRP_3:CMU_C0 */
+#define SD25G_LANE_CMU_C0(t)      __REG(TARGET_SD25G_LANE, t, 8, 768, 0, 1, 252, 0, 0, 1, 4)
+
+#define SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0     GENMASK(3, 0)
+#define SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0, x)
+#define SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_C0_READ_VCO_CTUNE_3_0, x)
+
+#define SD25G_LANE_CMU_C0_PLL_LOL_UDL            BIT(4)
+#define SD25G_LANE_CMU_C0_PLL_LOL_UDL_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_C0_PLL_LOL_UDL, x)
+#define SD25G_LANE_CMU_C0_PLL_LOL_UDL_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_C0_PLL_LOL_UDL, x)
+
+/*      SD25G_TARGET:CMU_GRP_4:CMU_FF */
+#define SD25G_LANE_CMU_FF(t)      __REG(TARGET_SD25G_LANE, t, 8, 1020, 0, 1, 4, 0, 0, 1, 4)
+
+#define SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX   GENMASK(7, 0)
+#define SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_SET(x)\
+       FIELD_PREP(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX, x)
+#define SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX_GET(x)\
+       FIELD_GET(SD25G_LANE_CMU_FF_REGISTER_TABLE_INDEX, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_00 */
+#define SD25G_LANE_LANE_00(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 0, 0, 1, 4)
+
+#define SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0 GENMASK(3, 0)
+#define SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0, x)
+#define SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_00_LN_CFG_ITX_VC_DRIVER_3_0, x)
+
+#define SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0 GENMASK(5, 4)
+#define SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0, x)
+#define SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_00_LN_CFG_ITX_IPCML_BASE_1_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_01 */
+#define SD25G_LANE_LANE_01(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 4, 0, 1, 4)
+
+#define SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0 GENMASK(2, 0)
+#define SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0, x)
+#define SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_01_LN_CFG_ITX_IPDRIVER_BASE_2_0, x)
+
+#define SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0  GENMASK(5, 4)
+#define SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0, x)
+#define SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_01_LN_CFG_TX_PREDIV_1_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_03 */
+#define SD25G_LANE_LANE_03(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 12, 0, 1, 4)
+
+#define SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0    GENMASK(4, 0)
+#define SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0, x)
+#define SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_03_LN_CFG_TAP_DLY_4_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_04 */
+#define SD25G_LANE_LANE_04(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 16, 0, 1, 4)
+
+#define SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN    BIT(0)
+#define SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN, x)
+#define SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_TX2RX_LP_EN, x)
+
+#define SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN    BIT(1)
+#define SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN, x)
+#define SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_RX2TX_LP_EN, x)
+
+#define SD25G_LANE_LANE_04_LN_CFG_PD_CML         BIT(2)
+#define SD25G_LANE_LANE_04_LN_CFG_PD_CML_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_04_LN_CFG_PD_CML, x)
+#define SD25G_LANE_LANE_04_LN_CFG_PD_CML_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_PD_CML, x)
+
+#define SD25G_LANE_LANE_04_LN_CFG_PD_CLK         BIT(3)
+#define SD25G_LANE_LANE_04_LN_CFG_PD_CLK_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_04_LN_CFG_PD_CLK, x)
+#define SD25G_LANE_LANE_04_LN_CFG_PD_CLK_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_PD_CLK, x)
+
+#define SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER      BIT(4)
+#define SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER, x)
+#define SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER, x)
+
+#define SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN       BIT(5)
+#define SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN, x)
+#define SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_04_LN_CFG_TAP_MAIN, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_05 */
+#define SD25G_LANE_LANE_05(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 20, 0, 1, 4)
+
+#define SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0   GENMASK(3, 0)
+#define SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0, x)
+#define SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_05_LN_CFG_TAP_DLY2_3_0, x)
+
+#define SD25G_LANE_LANE_05_LN_CFG_BW_1_0         GENMASK(5, 4)
+#define SD25G_LANE_LANE_05_LN_CFG_BW_1_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_05_LN_CFG_BW_1_0, x)
+#define SD25G_LANE_LANE_05_LN_CFG_BW_1_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_05_LN_CFG_BW_1_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_06 */
+#define SD25G_LANE_LANE_06(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 24, 0, 1, 4)
+
+#define SD25G_LANE_LANE_06_LN_CFG_EN_MAIN        BIT(0)
+#define SD25G_LANE_LANE_06_LN_CFG_EN_MAIN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_06_LN_CFG_EN_MAIN, x)
+#define SD25G_LANE_LANE_06_LN_CFG_EN_MAIN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_06_LN_CFG_EN_MAIN, x)
+
+#define SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0    GENMASK(7, 4)
+#define SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0, x)
+#define SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_06_LN_CFG_TAP_ADV_3_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_07 */
+#define SD25G_LANE_LANE_07(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 28, 0, 1, 4)
+
+#define SD25G_LANE_LANE_07_LN_CFG_EN_ADV         BIT(0)
+#define SD25G_LANE_LANE_07_LN_CFG_EN_ADV_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_07_LN_CFG_EN_ADV, x)
+#define SD25G_LANE_LANE_07_LN_CFG_EN_ADV_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_07_LN_CFG_EN_ADV, x)
+
+#define SD25G_LANE_LANE_07_LN_CFG_EN_DLY2        BIT(1)
+#define SD25G_LANE_LANE_07_LN_CFG_EN_DLY2_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_07_LN_CFG_EN_DLY2, x)
+#define SD25G_LANE_LANE_07_LN_CFG_EN_DLY2_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_07_LN_CFG_EN_DLY2, x)
+
+#define SD25G_LANE_LANE_07_LN_CFG_EN_DLY         BIT(2)
+#define SD25G_LANE_LANE_07_LN_CFG_EN_DLY_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_07_LN_CFG_EN_DLY, x)
+#define SD25G_LANE_LANE_07_LN_CFG_EN_DLY_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_07_LN_CFG_EN_DLY, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_09 */
+#define SD25G_LANE_LANE_09(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 36, 0, 1, 4)
+
+#define SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0 GENMASK(3, 0)
+#define SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0, x)
+#define SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_09_LN_CFG_TXCAL_VALID_SEL_3_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_0A */
+#define SD25G_LANE_LANE_0A(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 40, 0, 1, 4)
+
+#define SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0 GENMASK(5, 0)
+#define SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0, x)
+#define SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0A_LN_CFG_TXCAL_SHIFT_CODE_5_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_0B */
+#define SD25G_LANE_LANE_0B(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 44, 0, 1, 4)
+
+#define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN   BIT(0)
+#define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN, x)
+#define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0B_LN_CFG_TXCAL_MAN_EN, x)
+
+#define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_RST      BIT(1)
+#define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_RST_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0B_LN_CFG_TXCAL_RST, x)
+#define SD25G_LANE_LANE_0B_LN_CFG_TXCAL_RST_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0B_LN_CFG_TXCAL_RST, x)
+
+#define SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0   GENMASK(5, 4)
+#define SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0, x)
+#define SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0B_LN_CFG_QUAD_MAN_1_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_0C */
+#define SD25G_LANE_LANE_0C(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 48, 0, 1, 4)
+
+#define SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0 GENMASK(2, 0)
+#define SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0, x)
+#define SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0C_LN_CFG_PMA_TX_CK_BITWIDTH_2_0, x)
+
+#define SD25G_LANE_LANE_0C_LN_CFG_TXCAL_EN       BIT(4)
+#define SD25G_LANE_LANE_0C_LN_CFG_TXCAL_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0C_LN_CFG_TXCAL_EN, x)
+#define SD25G_LANE_LANE_0C_LN_CFG_TXCAL_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0C_LN_CFG_TXCAL_EN, x)
+
+#define SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD      BIT(5)
+#define SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD, x)
+#define SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0C_LN_CFG_RXTERM_PD, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_0D */
+#define SD25G_LANE_LANE_0D(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 52, 0, 1, 4)
+
+#define SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0     GENMASK(2, 0)
+#define SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0, x)
+#define SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0D_LN_CFG_RXTERM_2_0, x)
+
+#define SD25G_LANE_LANE_0D_LN_CFG_RSTN_DIV5_8    BIT(4)
+#define SD25G_LANE_LANE_0D_LN_CFG_RSTN_DIV5_8_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0D_LN_CFG_RSTN_DIV5_8, x)
+#define SD25G_LANE_LANE_0D_LN_CFG_RSTN_DIV5_8_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0D_LN_CFG_RSTN_DIV5_8, x)
+
+#define SD25G_LANE_LANE_0D_LN_CFG_SUMMER_EN      BIT(5)
+#define SD25G_LANE_LANE_0D_LN_CFG_SUMMER_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0D_LN_CFG_SUMMER_EN, x)
+#define SD25G_LANE_LANE_0D_LN_CFG_SUMMER_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0D_LN_CFG_SUMMER_EN, x)
+
+#define SD25G_LANE_LANE_0D_LN_CFG_DMUX_PD        BIT(6)
+#define SD25G_LANE_LANE_0D_LN_CFG_DMUX_PD_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0D_LN_CFG_DMUX_PD, x)
+#define SD25G_LANE_LANE_0D_LN_CFG_DMUX_PD_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0D_LN_CFG_DMUX_PD, x)
+
+#define SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN       BIT(7)
+#define SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN, x)
+#define SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0D_LN_CFG_DFECK_EN, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_0E */
+#define SD25G_LANE_LANE_0E(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 56, 0, 1, 4)
+
+#define SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN       BIT(0)
+#define SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN, x)
+#define SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0E_LN_CFG_ISCAN_EN, x)
+
+#define SD25G_LANE_LANE_0E_LN_CFG_DMUX_CLK_PD    BIT(1)
+#define SD25G_LANE_LANE_0E_LN_CFG_DMUX_CLK_PD_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0E_LN_CFG_DMUX_CLK_PD, x)
+#define SD25G_LANE_LANE_0E_LN_CFG_DMUX_CLK_PD_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0E_LN_CFG_DMUX_CLK_PD, x)
+
+#define SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG      BIT(2)
+#define SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG, x)
+#define SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0E_LN_CFG_EN_DFEDIG, x)
+
+#define SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0   GENMASK(6, 4)
+#define SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0, x)
+#define SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0E_LN_CFG_DFEDIG_M_2_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_0F */
+#define SD25G_LANE_LANE_0F(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 60, 0, 1, 4)
+
+#define SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1  GENMASK(4, 0)
+#define SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1, x)
+#define SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_0F_LN_CFG_DFETAP_EN_5_1, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_18 */
+#define SD25G_LANE_LANE_18(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 96, 0, 1, 4)
+
+#define SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN       BIT(0)
+#define SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN, x)
+#define SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_18_LN_CFG_CDRCK_EN, x)
+
+#define SD25G_LANE_LANE_18_LN_CFG_ADD_VOLT       BIT(1)
+#define SD25G_LANE_LANE_18_LN_CFG_ADD_VOLT_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_18_LN_CFG_ADD_VOLT, x)
+#define SD25G_LANE_LANE_18_LN_CFG_ADD_VOLT_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_18_LN_CFG_ADD_VOLT, x)
+
+#define SD25G_LANE_LANE_18_LN_CFG_MAN_VOLT_EN    BIT(2)
+#define SD25G_LANE_LANE_18_LN_CFG_MAN_VOLT_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_18_LN_CFG_MAN_VOLT_EN, x)
+#define SD25G_LANE_LANE_18_LN_CFG_MAN_VOLT_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_18_LN_CFG_MAN_VOLT_EN, x)
+
+#define SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD      BIT(3)
+#define SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD, x)
+#define SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_18_LN_CFG_ERRAMP_PD, x)
+
+#define SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0  GENMASK(6, 4)
+#define SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0, x)
+#define SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_18_LN_CFG_RXDIV_SEL_2_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_19 */
+#define SD25G_LANE_LANE_19(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 100, 0, 1, 4)
+
+#define SD25G_LANE_LANE_19_LN_CFG_DCDR_PD        BIT(0)
+#define SD25G_LANE_LANE_19_LN_CFG_DCDR_PD_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_DCDR_PD, x)
+#define SD25G_LANE_LANE_19_LN_CFG_DCDR_PD_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_DCDR_PD, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_ECDR_PD        BIT(1)
+#define SD25G_LANE_LANE_19_LN_CFG_ECDR_PD_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_ECDR_PD, x)
+#define SD25G_LANE_LANE_19_LN_CFG_ECDR_PD_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_ECDR_PD, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_ISCAN_SEL      BIT(2)
+#define SD25G_LANE_LANE_19_LN_CFG_ISCAN_SEL_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_ISCAN_SEL, x)
+#define SD25G_LANE_LANE_19_LN_CFG_ISCAN_SEL_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_ISCAN_SEL, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_TXLB_EN        BIT(3)
+#define SD25G_LANE_LANE_19_LN_CFG_TXLB_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_TXLB_EN, x)
+#define SD25G_LANE_LANE_19_LN_CFG_TXLB_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_TXLB_EN, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_RX_REG_PU      BIT(4)
+#define SD25G_LANE_LANE_19_LN_CFG_RX_REG_PU_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_RX_REG_PU, x)
+#define SD25G_LANE_LANE_19_LN_CFG_RX_REG_PU_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_RX_REG_PU, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_RX_REG_BYP     BIT(5)
+#define SD25G_LANE_LANE_19_LN_CFG_RX_REG_BYP_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_RX_REG_BYP, x)
+#define SD25G_LANE_LANE_19_LN_CFG_RX_REG_BYP_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_RX_REG_BYP, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_PD_RMS_DET     BIT(6)
+#define SD25G_LANE_LANE_19_LN_CFG_PD_RMS_DET_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_PD_RMS_DET, x)
+#define SD25G_LANE_LANE_19_LN_CFG_PD_RMS_DET_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_PD_RMS_DET, x)
+
+#define SD25G_LANE_LANE_19_LN_CFG_PD_CTLE        BIT(7)
+#define SD25G_LANE_LANE_19_LN_CFG_PD_CTLE_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_19_LN_CFG_PD_CTLE, x)
+#define SD25G_LANE_LANE_19_LN_CFG_PD_CTLE_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_19_LN_CFG_PD_CTLE, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_1A */
+#define SD25G_LANE_LANE_1A(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 104, 0, 1, 4)
+
+#define SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN     BIT(0)
+#define SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN, x)
+#define SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1A_LN_CFG_CTLE_TP_EN, x)
+
+#define SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0     GENMASK(6, 4)
+#define SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0, x)
+#define SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1A_LN_CFG_CDR_KF_2_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_1B */
+#define SD25G_LANE_LANE_1B(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 108, 0, 1, 4)
+
+#define SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0      GENMASK(7, 0)
+#define SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0, x)
+#define SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1B_LN_CFG_CDR_M_7_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_1C */
+#define SD25G_LANE_LANE_1C(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 112, 0, 1, 4)
+
+#define SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN       BIT(0)
+#define SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN, x)
+#define SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1C_LN_CFG_CDR_RSTN, x)
+
+#define SD25G_LANE_LANE_1C_LN_CFG_DFE_PD         BIT(1)
+#define SD25G_LANE_LANE_1C_LN_CFG_DFE_PD_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1C_LN_CFG_DFE_PD, x)
+#define SD25G_LANE_LANE_1C_LN_CFG_DFE_PD_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1C_LN_CFG_DFE_PD, x)
+
+#define SD25G_LANE_LANE_1C_LN_CFG_DFEDMX_PD      BIT(2)
+#define SD25G_LANE_LANE_1C_LN_CFG_DFEDMX_PD_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1C_LN_CFG_DFEDMX_PD, x)
+#define SD25G_LANE_LANE_1C_LN_CFG_DFEDMX_PD_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1C_LN_CFG_DFEDMX_PD, x)
+
+#define SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0  GENMASK(7, 4)
+#define SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0, x)
+#define SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1C_LN_CFG_EQC_FORCE_3_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_1D */
+#define SD25G_LANE_LANE_1D(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 116, 0, 1, 4)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR  BIT(0)
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_ISCAN_EXT_OVR, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_HOLD     BIT(1)
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_HOLD_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_ISCAN_HOLD, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_HOLD_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_ISCAN_HOLD, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_RSTN     BIT(2)
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_RSTN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_ISCAN_RSTN, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_ISCAN_RSTN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_ISCAN_RSTN, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_AGC_ADPT_BYP   BIT(3)
+#define SD25G_LANE_LANE_1D_LN_CFG_AGC_ADPT_BYP_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_AGC_ADPT_BYP, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_AGC_ADPT_BYP_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_AGC_ADPT_BYP, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_PHID_1T        BIT(4)
+#define SD25G_LANE_LANE_1D_LN_CFG_PHID_1T_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_PHID_1T, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_PHID_1T_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_PHID_1T, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN      BIT(5)
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_PI_DFE_EN, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_EXT_OVR     BIT(6)
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_EXT_OVR_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_PI_EXT_OVR, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_EXT_OVR_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_PI_EXT_OVR, x)
+
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD        BIT(7)
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD, x)
+#define SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1D_LN_CFG_PI_HOLD, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_1E */
+#define SD25G_LANE_LANE_1E(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 120, 0, 1, 4)
+
+#define SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0   GENMASK(1, 0)
+#define SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0, x)
+#define SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1E_LN_CFG_PI_STEPS_1_0, x)
+
+#define SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN        BIT(4)
+#define SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN, x)
+#define SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1E_LN_CFG_RXLB_EN, x)
+
+#define SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN   BIT(5)
+#define SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN, x)
+#define SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1E_LN_CFG_SUM_SETCM_EN, x)
+
+#define SD25G_LANE_LANE_1E_LN_CFG_R_OFFSET_DIR   BIT(6)
+#define SD25G_LANE_LANE_1E_LN_CFG_R_OFFSET_DIR_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1E_LN_CFG_R_OFFSET_DIR, x)
+#define SD25G_LANE_LANE_1E_LN_CFG_R_OFFSET_DIR_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1E_LN_CFG_R_OFFSET_DIR, x)
+
+#define SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD     BIT(7)
+#define SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD, x)
+#define SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_1E_LN_CFG_PMAD_CK_PD, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_21 */
+#define SD25G_LANE_LANE_21(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 132, 0, 1, 4)
+
+#define SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0 GENMASK(4, 0)
+#define SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0, x)
+#define SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_21_LN_CFG_VGA_CTRL_BYP_4_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_22 */
+#define SD25G_LANE_LANE_22(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 136, 0, 1, 4)
+
+#define SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0  GENMASK(3, 0)
+#define SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0, x)
+#define SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_22_LN_CFG_EQR_FORCE_3_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_25 */
+#define SD25G_LANE_LANE_25(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 148, 0, 1, 4)
+
+#define SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0 GENMASK(6, 0)
+#define SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0, x)
+#define SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_25_LN_CFG_INIT_POS_ISCAN_6_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_26 */
+#define SD25G_LANE_LANE_26(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 152, 0, 1, 4)
+
+#define SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0 GENMASK(6, 0)
+#define SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0, x)
+#define SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_26_LN_CFG_INIT_POS_IPI_6_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_28 */
+#define SD25G_LANE_LANE_28(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 160, 0, 1, 4)
+
+#define SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN  BIT(0)
+#define SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN, x)
+#define SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_28_LN_CFG_ISCAN_MODE_EN, x)
+
+#define SD25G_LANE_LANE_28_LN_CFG_RX_SSC_LH      BIT(1)
+#define SD25G_LANE_LANE_28_LN_CFG_RX_SSC_LH_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_28_LN_CFG_RX_SSC_LH, x)
+#define SD25G_LANE_LANE_28_LN_CFG_RX_SSC_LH_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_28_LN_CFG_RX_SSC_LH, x)
+
+#define SD25G_LANE_LANE_28_LN_CFG_FIGMERIT_SEL   BIT(2)
+#define SD25G_LANE_LANE_28_LN_CFG_FIGMERIT_SEL_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_28_LN_CFG_FIGMERIT_SEL, x)
+#define SD25G_LANE_LANE_28_LN_CFG_FIGMERIT_SEL_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_28_LN_CFG_FIGMERIT_SEL, x)
+
+#define SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0 GENMASK(6, 4)
+#define SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0, x)
+#define SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_28_LN_CFG_RX_SUBRATE_2_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_2B */
+#define SD25G_LANE_LANE_2B(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 172, 0, 1, 4)
+
+#define SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0      GENMASK(3, 0)
+#define SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0, x)
+#define SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2B_LN_CFG_PI_BW_3_0, x)
+
+#define SD25G_LANE_LANE_2B_LN_CFG_RSTN_DMUX_SUBR BIT(4)
+#define SD25G_LANE_LANE_2B_LN_CFG_RSTN_DMUX_SUBR_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2B_LN_CFG_RSTN_DMUX_SUBR, x)
+#define SD25G_LANE_LANE_2B_LN_CFG_RSTN_DMUX_SUBR_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2B_LN_CFG_RSTN_DMUX_SUBR, x)
+
+#define SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU    BIT(5)
+#define SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU, x)
+#define SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2B_LN_CFG_RSTN_TXDUPU, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_2C */
+#define SD25G_LANE_LANE_2C(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 176, 0, 1, 4)
+
+#define SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0 GENMASK(2, 0)
+#define SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0, x)
+#define SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2C_LN_CFG_TX_SUBRATE_2_0, x)
+
+#define SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER   BIT(4)
+#define SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER, x)
+#define SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2C_LN_CFG_DIS_2NDORDER, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_2D */
+#define SD25G_LANE_LANE_2D(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 180, 0, 1, 4)
+
+#define SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0   GENMASK(2, 0)
+#define SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0, x)
+#define SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2D_LN_CFG_ALOS_THR_2_0, x)
+
+#define SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0 GENMASK(6, 4)
+#define SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0, x)
+#define SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2D_LN_CFG_SAT_CNTSEL_2_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_2E */
+#define SD25G_LANE_LANE_2E(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 184, 0, 1, 4)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN  BIT(0)
+#define SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_EN_FAST_ISCAN, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ         BIT(1)
+#define SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_DIS_SQ, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_PD_SQ          BIT(2)
+#define SD25G_LANE_LANE_2E_LN_CFG_PD_SQ_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_PD_SQ, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_PD_SQ_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_PD_SQ, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_DIS_ALOS       BIT(3)
+#define SD25G_LANE_LANE_2E_LN_CFG_DIS_ALOS_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_DIS_ALOS, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_DIS_ALOS_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_DIS_ALOS, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_RESETN_AGC     BIT(4)
+#define SD25G_LANE_LANE_2E_LN_CFG_RESETN_AGC_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_RESETN_AGC, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_RESETN_AGC_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_RESETN_AGC, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG    BIT(5)
+#define SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_RSTN_DFEDIG, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_PI_RSTN        BIT(6)
+#define SD25G_LANE_LANE_2E_LN_CFG_PI_RSTN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_PI_RSTN, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_PI_RSTN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_PI_RSTN, x)
+
+#define SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN      BIT(7)
+#define SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN, x)
+#define SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_2E_LN_CFG_CTLE_RSTN, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_40 */
+#define SD25G_LANE_LANE_40(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 256, 0, 1, 4)
+
+#define SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE   BIT(0)
+#define SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE, x)
+#define SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_40_LN_R_TX_BIT_REVERSE, x)
+
+#define SD25G_LANE_LANE_40_LN_R_TX_POL_INV       BIT(1)
+#define SD25G_LANE_LANE_40_LN_R_TX_POL_INV_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_40_LN_R_TX_POL_INV, x)
+#define SD25G_LANE_LANE_40_LN_R_TX_POL_INV_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_40_LN_R_TX_POL_INV, x)
+
+#define SD25G_LANE_LANE_40_LN_R_RX_BIT_REVERSE   BIT(2)
+#define SD25G_LANE_LANE_40_LN_R_RX_BIT_REVERSE_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_40_LN_R_RX_BIT_REVERSE, x)
+#define SD25G_LANE_LANE_40_LN_R_RX_BIT_REVERSE_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_40_LN_R_RX_BIT_REVERSE, x)
+
+#define SD25G_LANE_LANE_40_LN_R_RX_POL_INV       BIT(3)
+#define SD25G_LANE_LANE_40_LN_R_RX_POL_INV_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_40_LN_R_RX_POL_INV, x)
+#define SD25G_LANE_LANE_40_LN_R_RX_POL_INV_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_40_LN_R_RX_POL_INV, x)
+
+#define SD25G_LANE_LANE_40_LN_R_CDR_RSTN         BIT(4)
+#define SD25G_LANE_LANE_40_LN_R_CDR_RSTN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_40_LN_R_CDR_RSTN, x)
+#define SD25G_LANE_LANE_40_LN_R_CDR_RSTN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_40_LN_R_CDR_RSTN, x)
+
+#define SD25G_LANE_LANE_40_LN_R_DFE_RSTN         BIT(5)
+#define SD25G_LANE_LANE_40_LN_R_DFE_RSTN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_40_LN_R_DFE_RSTN, x)
+#define SD25G_LANE_LANE_40_LN_R_DFE_RSTN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_40_LN_R_DFE_RSTN, x)
+
+#define SD25G_LANE_LANE_40_LN_R_CTLE_RSTN        BIT(6)
+#define SD25G_LANE_LANE_40_LN_R_CTLE_RSTN_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_40_LN_R_CTLE_RSTN, x)
+#define SD25G_LANE_LANE_40_LN_R_CTLE_RSTN_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_40_LN_R_CTLE_RSTN, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_42 */
+#define SD25G_LANE_LANE_42(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 264, 0, 1, 4)
+
+#define SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0 GENMASK(7, 0)
+#define SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0, x)
+#define SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_42_LN_CFG_TX_RESERVE_7_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_43 */
+#define SD25G_LANE_LANE_43(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 268, 0, 1, 4)
+
+#define SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8 GENMASK(7, 0)
+#define SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8, x)
+#define SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_43_LN_CFG_TX_RESERVE_15_8, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_44 */
+#define SD25G_LANE_LANE_44(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 272, 0, 1, 4)
+
+#define SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0 GENMASK(7, 0)
+#define SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0, x)
+#define SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_44_LN_CFG_RX_RESERVE_7_0, x)
+
+/*      SD25G_TARGET:LANE_GRP_0:LANE_45 */
+#define SD25G_LANE_LANE_45(t)     __REG(TARGET_SD25G_LANE, t, 8, 1024, 0, 1, 768, 276, 0, 1, 4)
+
+#define SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8 GENMASK(7, 0)
+#define SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8, x)
+#define SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_45_LN_CFG_RX_RESERVE_15_8, x)
+
+/*      SD25G_TARGET:LANE_GRP_1:LANE_DE */
+#define SD25G_LANE_LANE_DE(t)     __REG(TARGET_SD25G_LANE, t, 8, 1792, 0, 1, 128, 120, 0, 1, 4)
+
+#define SD25G_LANE_LANE_DE_LN_LOL_UDL            BIT(0)
+#define SD25G_LANE_LANE_DE_LN_LOL_UDL_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_DE_LN_LOL_UDL, x)
+#define SD25G_LANE_LANE_DE_LN_LOL_UDL_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_DE_LN_LOL_UDL, x)
+
+#define SD25G_LANE_LANE_DE_LN_LOL                BIT(1)
+#define SD25G_LANE_LANE_DE_LN_LOL_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_DE_LN_LOL, x)
+#define SD25G_LANE_LANE_DE_LN_LOL_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_DE_LN_LOL, x)
+
+#define SD25G_LANE_LANE_DE_LN_PMA2PCS_RXEI_FILTERED BIT(2)
+#define SD25G_LANE_LANE_DE_LN_PMA2PCS_RXEI_FILTERED_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_DE_LN_PMA2PCS_RXEI_FILTERED, x)
+#define SD25G_LANE_LANE_DE_LN_PMA2PCS_RXEI_FILTERED_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_DE_LN_PMA2PCS_RXEI_FILTERED, x)
+
+#define SD25G_LANE_LANE_DE_LN_PMA_RXEI           BIT(3)
+#define SD25G_LANE_LANE_DE_LN_PMA_RXEI_SET(x)\
+       FIELD_PREP(SD25G_LANE_LANE_DE_LN_PMA_RXEI, x)
+#define SD25G_LANE_LANE_DE_LN_PMA_RXEI_GET(x)\
+       FIELD_GET(SD25G_LANE_LANE_DE_LN_PMA_RXEI, x)
+
+/*      SD10G_LANE_TARGET:LANE_GRP_8:LANE_DF */
+#define SD6G_LANE_LANE_DF(t)      __REG(TARGET_SD6G_LANE, t, 13, 832, 0, 1, 84, 60, 0, 1, 4)
+
+#define SD6G_LANE_LANE_DF_LOL_UDL                BIT(0)
+#define SD6G_LANE_LANE_DF_LOL_UDL_SET(x)\
+       FIELD_PREP(SD6G_LANE_LANE_DF_LOL_UDL, x)
+#define SD6G_LANE_LANE_DF_LOL_UDL_GET(x)\
+       FIELD_GET(SD6G_LANE_LANE_DF_LOL_UDL, x)
+
+#define SD6G_LANE_LANE_DF_LOL                    BIT(1)
+#define SD6G_LANE_LANE_DF_LOL_SET(x)\
+       FIELD_PREP(SD6G_LANE_LANE_DF_LOL, x)
+#define SD6G_LANE_LANE_DF_LOL_GET(x)\
+       FIELD_GET(SD6G_LANE_LANE_DF_LOL, x)
+
+#define SD6G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED  BIT(2)
+#define SD6G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED_SET(x)\
+       FIELD_PREP(SD6G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED, x)
+#define SD6G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED_GET(x)\
+       FIELD_GET(SD6G_LANE_LANE_DF_PMA2PCS_RXEI_FILTERED, x)
+
+#define SD6G_LANE_LANE_DF_SQUELCH                BIT(3)
+#define SD6G_LANE_LANE_DF_SQUELCH_SET(x)\
+       FIELD_PREP(SD6G_LANE_LANE_DF_SQUELCH, x)
+#define SD6G_LANE_LANE_DF_SQUELCH_GET(x)\
+       FIELD_GET(SD6G_LANE_LANE_DF_SQUELCH, x)
+
+/*      SD10G_CMU_TARGET:CMU_GRP_0:CMU_00 */
+#define SD_CMU_CMU_00(t)          __REG(TARGET_SD_CMU, t, 14, 0, 0, 1, 20, 0, 0, 1, 4)
+
+#define SD_CMU_CMU_00_R_HWT_SIMULATION_MODE      BIT(0)
+#define SD_CMU_CMU_00_R_HWT_SIMULATION_MODE_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_00_R_HWT_SIMULATION_MODE, x)
+#define SD_CMU_CMU_00_R_HWT_SIMULATION_MODE_GET(x)\
+       FIELD_GET(SD_CMU_CMU_00_R_HWT_SIMULATION_MODE, x)
+
+#define SD_CMU_CMU_00_CFG_PLL_LOL_SET            BIT(1)
+#define SD_CMU_CMU_00_CFG_PLL_LOL_SET_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_00_CFG_PLL_LOL_SET, x)
+#define SD_CMU_CMU_00_CFG_PLL_LOL_SET_GET(x)\
+       FIELD_GET(SD_CMU_CMU_00_CFG_PLL_LOL_SET, x)
+
+#define SD_CMU_CMU_00_CFG_PLL_LOS_SET            BIT(2)
+#define SD_CMU_CMU_00_CFG_PLL_LOS_SET_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_00_CFG_PLL_LOS_SET, x)
+#define SD_CMU_CMU_00_CFG_PLL_LOS_SET_GET(x)\
+       FIELD_GET(SD_CMU_CMU_00_CFG_PLL_LOS_SET, x)
+
+#define SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0         GENMASK(5, 4)
+#define SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0, x)
+#define SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0_GET(x)\
+       FIELD_GET(SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0, x)
+
+/*      SD10G_CMU_TARGET:CMU_GRP_1:CMU_05 */
+#define SD_CMU_CMU_05(t)          __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 0, 0, 1, 4)
+
+#define SD_CMU_CMU_05_CFG_REFCK_TERM_EN          BIT(0)
+#define SD_CMU_CMU_05_CFG_REFCK_TERM_EN_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_05_CFG_REFCK_TERM_EN, x)
+#define SD_CMU_CMU_05_CFG_REFCK_TERM_EN_GET(x)\
+       FIELD_GET(SD_CMU_CMU_05_CFG_REFCK_TERM_EN, x)
+
+#define SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0        GENMASK(5, 4)
+#define SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0, x)
+#define SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_GET(x)\
+       FIELD_GET(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0, x)
+
+/*      SD10G_CMU_TARGET:CMU_GRP_1:CMU_09 */
+#define SD_CMU_CMU_09(t)          __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 16, 0, 1, 4)
+
+#define SD_CMU_CMU_09_CFG_EN_TX_CK_UP            BIT(0)
+#define SD_CMU_CMU_09_CFG_EN_TX_CK_UP_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_09_CFG_EN_TX_CK_UP, x)
+#define SD_CMU_CMU_09_CFG_EN_TX_CK_UP_GET(x)\
+       FIELD_GET(SD_CMU_CMU_09_CFG_EN_TX_CK_UP, x)
+
+#define SD_CMU_CMU_09_CFG_EN_TX_CK_DN            BIT(1)
+#define SD_CMU_CMU_09_CFG_EN_TX_CK_DN_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_09_CFG_EN_TX_CK_DN, x)
+#define SD_CMU_CMU_09_CFG_EN_TX_CK_DN_GET(x)\
+       FIELD_GET(SD_CMU_CMU_09_CFG_EN_TX_CK_DN, x)
+
+#define SD_CMU_CMU_09_CFG_SW_8G                  BIT(4)
+#define SD_CMU_CMU_09_CFG_SW_8G_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_09_CFG_SW_8G, x)
+#define SD_CMU_CMU_09_CFG_SW_8G_GET(x)\
+       FIELD_GET(SD_CMU_CMU_09_CFG_SW_8G, x)
+
+#define SD_CMU_CMU_09_CFG_SW_10G                 BIT(5)
+#define SD_CMU_CMU_09_CFG_SW_10G_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_09_CFG_SW_10G, x)
+#define SD_CMU_CMU_09_CFG_SW_10G_GET(x)\
+       FIELD_GET(SD_CMU_CMU_09_CFG_SW_10G, x)
+
+/*      SD10G_CMU_TARGET:CMU_GRP_1:CMU_0D */
+#define SD_CMU_CMU_0D(t)          __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 32, 0, 1, 4)
+
+#define SD_CMU_CMU_0D_CFG_PD_DIV64               BIT(0)
+#define SD_CMU_CMU_0D_CFG_PD_DIV64_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_0D_CFG_PD_DIV64, x)
+#define SD_CMU_CMU_0D_CFG_PD_DIV64_GET(x)\
+       FIELD_GET(SD_CMU_CMU_0D_CFG_PD_DIV64, x)
+
+#define SD_CMU_CMU_0D_CFG_PD_DIV66               BIT(1)
+#define SD_CMU_CMU_0D_CFG_PD_DIV66_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_0D_CFG_PD_DIV66, x)
+#define SD_CMU_CMU_0D_CFG_PD_DIV66_GET(x)\
+       FIELD_GET(SD_CMU_CMU_0D_CFG_PD_DIV66, x)
+
+#define SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD           BIT(2)
+#define SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD, x)
+#define SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD_GET(x)\
+       FIELD_GET(SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD, x)
+
+#define SD_CMU_CMU_0D_CFG_JC_BYP                 BIT(3)
+#define SD_CMU_CMU_0D_CFG_JC_BYP_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_0D_CFG_JC_BYP, x)
+#define SD_CMU_CMU_0D_CFG_JC_BYP_GET(x)\
+       FIELD_GET(SD_CMU_CMU_0D_CFG_JC_BYP, x)
+
+#define SD_CMU_CMU_0D_CFG_REFCK_PD               BIT(4)
+#define SD_CMU_CMU_0D_CFG_REFCK_PD_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_0D_CFG_REFCK_PD, x)
+#define SD_CMU_CMU_0D_CFG_REFCK_PD_GET(x)\
+       FIELD_GET(SD_CMU_CMU_0D_CFG_REFCK_PD, x)
+
+/*      SD10G_CMU_TARGET:CMU_GRP_3:CMU_1B */
+#define SD_CMU_CMU_1B(t)          __REG(TARGET_SD_CMU, t, 14, 104, 0, 1, 20, 4, 0, 1, 4)
+
+#define SD_CMU_CMU_1B_CFG_RESERVE_7_0            GENMASK(7, 0)
+#define SD_CMU_CMU_1B_CFG_RESERVE_7_0_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_1B_CFG_RESERVE_7_0, x)
+#define SD_CMU_CMU_1B_CFG_RESERVE_7_0_GET(x)\
+       FIELD_GET(SD_CMU_CMU_1B_CFG_RESERVE_7_0, x)
+
+/*      SD10G_CMU_TARGET:CMU_GRP_4:CMU_1F */
+#define SD_CMU_CMU_1F(t)          __REG(TARGET_SD_CMU, t, 14, 124, 0, 1, 68, 0, 0, 1, 4)
+
+#define SD_CMU_CMU_1F_CFG_BIAS_DN_EN             BIT(0)
+#define SD_CMU_CMU_1F_CFG_BIAS_DN_EN_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_1F_CFG_BIAS_DN_EN, x)
+#define SD_CMU_CMU_1F_CFG_BIAS_DN_EN_GET(x)\
+       FIELD_GET(SD_CMU_CMU_1F_CFG_BIAS_DN_EN, x)
+
+#define SD_CMU_CMU_1F_CFG_BIAS_UP_EN             BIT(1)
+#define SD_CMU_CMU_1F_CFG_BIAS_UP_EN_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_1F_CFG_BIAS_UP_EN, x)
+#define SD_CMU_CMU_1F_CFG_BIAS_UP_EN_GET(x)\
+       FIELD_GET(SD_CMU_CMU_1F_CFG_BIAS_UP_EN, x)
+
+#define SD_CMU_CMU_1F_CFG_IC2IP_N                BIT(2)
+#define SD_CMU_CMU_1F_CFG_IC2IP_N_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_1F_CFG_IC2IP_N, x)
+#define SD_CMU_CMU_1F_CFG_IC2IP_N_GET(x)\
+       FIELD_GET(SD_CMU_CMU_1F_CFG_IC2IP_N, x)
+
+#define SD_CMU_CMU_1F_CFG_VTUNE_SEL              BIT(3)
+#define SD_CMU_CMU_1F_CFG_VTUNE_SEL_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_1F_CFG_VTUNE_SEL, x)
+#define SD_CMU_CMU_1F_CFG_VTUNE_SEL_GET(x)\
+       FIELD_GET(SD_CMU_CMU_1F_CFG_VTUNE_SEL, x)
+
+/*      SD10G_CMU_TARGET:CMU_GRP_5:CMU_30 */
+#define SD_CMU_CMU_30(t)          __REG(TARGET_SD_CMU, t, 14, 192, 0, 1, 72, 0, 0, 1, 4)
+
+#define SD_CMU_CMU_30_R_PLL_DLOL_EN              BIT(0)
+#define SD_CMU_CMU_30_R_PLL_DLOL_EN_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_30_R_PLL_DLOL_EN, x)
+#define SD_CMU_CMU_30_R_PLL_DLOL_EN_GET(x)\
+       FIELD_GET(SD_CMU_CMU_30_R_PLL_DLOL_EN, x)
+
+/*      SD10G_CMU_TARGET:CMU_GRP_6:CMU_44 */
+#define SD_CMU_CMU_44(t)          __REG(TARGET_SD_CMU, t, 14, 264, 0, 1, 632, 8, 0, 1, 4)
+
+#define SD_CMU_CMU_44_R_PLL_RSTN                 BIT(0)
+#define SD_CMU_CMU_44_R_PLL_RSTN_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_44_R_PLL_RSTN, x)
+#define SD_CMU_CMU_44_R_PLL_RSTN_GET(x)\
+       FIELD_GET(SD_CMU_CMU_44_R_PLL_RSTN, x)
+
+#define SD_CMU_CMU_44_R_CK_RESETB                BIT(1)
+#define SD_CMU_CMU_44_R_CK_RESETB_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_44_R_CK_RESETB, x)
+#define SD_CMU_CMU_44_R_CK_RESETB_GET(x)\
+       FIELD_GET(SD_CMU_CMU_44_R_CK_RESETB, x)
+
+/*      SD10G_CMU_TARGET:CMU_GRP_6:CMU_45 */
+#define SD_CMU_CMU_45(t)          __REG(TARGET_SD_CMU, t, 14, 264, 0, 1, 632, 12, 0, 1, 4)
+
+#define SD_CMU_CMU_45_R_EN_RATECHG_CTRL          BIT(0)
+#define SD_CMU_CMU_45_R_EN_RATECHG_CTRL_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_45_R_EN_RATECHG_CTRL, x)
+#define SD_CMU_CMU_45_R_EN_RATECHG_CTRL_GET(x)\
+       FIELD_GET(SD_CMU_CMU_45_R_EN_RATECHG_CTRL, x)
+
+#define SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT      BIT(1)
+#define SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT, x)
+#define SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT_GET(x)\
+       FIELD_GET(SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT, x)
+
+#define SD_CMU_CMU_45_RESERVED                   BIT(2)
+#define SD_CMU_CMU_45_RESERVED_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_45_RESERVED, x)
+#define SD_CMU_CMU_45_RESERVED_GET(x)\
+       FIELD_GET(SD_CMU_CMU_45_RESERVED, x)
+
+#define SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT    BIT(3)
+#define SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT, x)
+#define SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT_GET(x)\
+       FIELD_GET(SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT, x)
+
+#define SD_CMU_CMU_45_RESERVED_2                 BIT(4)
+#define SD_CMU_CMU_45_RESERVED_2_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_45_RESERVED_2, x)
+#define SD_CMU_CMU_45_RESERVED_2_GET(x)\
+       FIELD_GET(SD_CMU_CMU_45_RESERVED_2, x)
+
+#define SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT     BIT(5)
+#define SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT, x)
+#define SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT_GET(x)\
+       FIELD_GET(SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT, x)
+
+#define SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT         BIT(6)
+#define SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT, x)
+#define SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT_GET(x)\
+       FIELD_GET(SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT, x)
+
+#define SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN     BIT(7)
+#define SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN, x)
+#define SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN_GET(x)\
+       FIELD_GET(SD_CMU_CMU_45_R_AUTO_RST_TREE_PD_MAN, x)
+
+/*      SD10G_CMU_TARGET:CMU_GRP_6:CMU_47 */
+#define SD_CMU_CMU_47(t)          __REG(TARGET_SD_CMU, t, 14, 264, 0, 1, 632, 20, 0, 1, 4)
+
+#define SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0      GENMASK(4, 0)
+#define SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0, x)
+#define SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_GET(x)\
+       FIELD_GET(SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0, x)
+
+/*      SD10G_CMU_TARGET:CMU_GRP_7:CMU_E0 */
+#define SD_CMU_CMU_E0(t)          __REG(TARGET_SD_CMU, t, 14, 896, 0, 1, 8, 0, 0, 1, 4)
+
+#define SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0         GENMASK(3, 0)
+#define SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0, x)
+#define SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0_GET(x)\
+       FIELD_GET(SD_CMU_CMU_E0_READ_VCO_CTUNE_3_0, x)
+
+#define SD_CMU_CMU_E0_PLL_LOL_UDL                BIT(4)
+#define SD_CMU_CMU_E0_PLL_LOL_UDL_SET(x)\
+       FIELD_PREP(SD_CMU_CMU_E0_PLL_LOL_UDL, x)
+#define SD_CMU_CMU_E0_PLL_LOL_UDL_GET(x)\
+       FIELD_GET(SD_CMU_CMU_E0_PLL_LOL_UDL, x)
+
+/*      SD_CMU_TARGET:SD_CMU_CFG:SD_CMU_CFG */
+#define SD_CMU_CFG_SD_CMU_CFG(t)  __REG(TARGET_SD_CMU_CFG, t, 14, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define SD_CMU_CFG_SD_CMU_CFG_CMU_RST            BIT(0)
+#define SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(x)\
+       FIELD_PREP(SD_CMU_CFG_SD_CMU_CFG_CMU_RST, x)
+#define SD_CMU_CFG_SD_CMU_CFG_CMU_RST_GET(x)\
+       FIELD_GET(SD_CMU_CFG_SD_CMU_CFG_CMU_RST, x)
+
+#define SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST        BIT(1)
+#define SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(x)\
+       FIELD_PREP(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST, x)
+#define SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_GET(x)\
+       FIELD_GET(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST, x)
+
+/*      SD_LANE_TARGET:SD_RESET:SD_SER_RST */
+#define SD_LANE_SD_SER_RST(t)     __REG(TARGET_SD_LANE, t, 25, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define SD_LANE_SD_SER_RST_SER_RST               BIT(0)
+#define SD_LANE_SD_SER_RST_SER_RST_SET(x)\
+       FIELD_PREP(SD_LANE_SD_SER_RST_SER_RST, x)
+#define SD_LANE_SD_SER_RST_SER_RST_GET(x)\
+       FIELD_GET(SD_LANE_SD_SER_RST_SER_RST, x)
+
+/*      SD_LANE_TARGET:SD_RESET:SD_DES_RST */
+#define SD_LANE_SD_DES_RST(t)     __REG(TARGET_SD_LANE, t, 25, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define SD_LANE_SD_DES_RST_DES_RST               BIT(0)
+#define SD_LANE_SD_DES_RST_DES_RST_SET(x)\
+       FIELD_PREP(SD_LANE_SD_DES_RST_DES_RST, x)
+#define SD_LANE_SD_DES_RST_DES_RST_GET(x)\
+       FIELD_GET(SD_LANE_SD_DES_RST_DES_RST, x)
+
+/*      SD_LANE_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG */
+#define SD_LANE_SD_LANE_CFG(t)    __REG(TARGET_SD_LANE, t, 25, 8, 0, 1, 8, 0, 0, 1, 4)
+
+#define SD_LANE_SD_LANE_CFG_MACRO_RST            BIT(0)
+#define SD_LANE_SD_LANE_CFG_MACRO_RST_SET(x)\
+       FIELD_PREP(SD_LANE_SD_LANE_CFG_MACRO_RST, x)
+#define SD_LANE_SD_LANE_CFG_MACRO_RST_GET(x)\
+       FIELD_GET(SD_LANE_SD_LANE_CFG_MACRO_RST, x)
+
+#define SD_LANE_SD_LANE_CFG_EXT_CFG_RST          BIT(1)
+#define SD_LANE_SD_LANE_CFG_EXT_CFG_RST_SET(x)\
+       FIELD_PREP(SD_LANE_SD_LANE_CFG_EXT_CFG_RST, x)
+#define SD_LANE_SD_LANE_CFG_EXT_CFG_RST_GET(x)\
+       FIELD_GET(SD_LANE_SD_LANE_CFG_EXT_CFG_RST, x)
+
+#define SD_LANE_SD_LANE_CFG_TX_REF_SEL           GENMASK(5, 4)
+#define SD_LANE_SD_LANE_CFG_TX_REF_SEL_SET(x)\
+       FIELD_PREP(SD_LANE_SD_LANE_CFG_TX_REF_SEL, x)
+#define SD_LANE_SD_LANE_CFG_TX_REF_SEL_GET(x)\
+       FIELD_GET(SD_LANE_SD_LANE_CFG_TX_REF_SEL, x)
+
+#define SD_LANE_SD_LANE_CFG_RX_REF_SEL           GENMASK(7, 6)
+#define SD_LANE_SD_LANE_CFG_RX_REF_SEL_SET(x)\
+       FIELD_PREP(SD_LANE_SD_LANE_CFG_RX_REF_SEL, x)
+#define SD_LANE_SD_LANE_CFG_RX_REF_SEL_GET(x)\
+       FIELD_GET(SD_LANE_SD_LANE_CFG_RX_REF_SEL, x)
+
+#define SD_LANE_SD_LANE_CFG_LANE_RST             BIT(8)
+#define SD_LANE_SD_LANE_CFG_LANE_RST_SET(x)\
+       FIELD_PREP(SD_LANE_SD_LANE_CFG_LANE_RST, x)
+#define SD_LANE_SD_LANE_CFG_LANE_RST_GET(x)\
+       FIELD_GET(SD_LANE_SD_LANE_CFG_LANE_RST, x)
+
+#define SD_LANE_SD_LANE_CFG_LANE_TX_RST          BIT(9)
+#define SD_LANE_SD_LANE_CFG_LANE_TX_RST_SET(x)\
+       FIELD_PREP(SD_LANE_SD_LANE_CFG_LANE_TX_RST, x)
+#define SD_LANE_SD_LANE_CFG_LANE_TX_RST_GET(x)\
+       FIELD_GET(SD_LANE_SD_LANE_CFG_LANE_TX_RST, x)
+
+#define SD_LANE_SD_LANE_CFG_LANE_RX_RST          BIT(10)
+#define SD_LANE_SD_LANE_CFG_LANE_RX_RST_SET(x)\
+       FIELD_PREP(SD_LANE_SD_LANE_CFG_LANE_RX_RST, x)
+#define SD_LANE_SD_LANE_CFG_LANE_RX_RST_GET(x)\
+       FIELD_GET(SD_LANE_SD_LANE_CFG_LANE_RX_RST, x)
+
+/*      SD_LANE_TARGET:SD_LANE_CFG_STAT:SD_LANE_STAT */
+#define SD_LANE_SD_LANE_STAT(t)   __REG(TARGET_SD_LANE, t, 25, 8, 0, 1, 8, 4, 0, 1, 4)
+
+#define SD_LANE_SD_LANE_STAT_PMA_RST_DONE        BIT(0)
+#define SD_LANE_SD_LANE_STAT_PMA_RST_DONE_SET(x)\
+       FIELD_PREP(SD_LANE_SD_LANE_STAT_PMA_RST_DONE, x)
+#define SD_LANE_SD_LANE_STAT_PMA_RST_DONE_GET(x)\
+       FIELD_GET(SD_LANE_SD_LANE_STAT_PMA_RST_DONE, x)
+
+#define SD_LANE_SD_LANE_STAT_DFE_RST_DONE        BIT(1)
+#define SD_LANE_SD_LANE_STAT_DFE_RST_DONE_SET(x)\
+       FIELD_PREP(SD_LANE_SD_LANE_STAT_DFE_RST_DONE, x)
+#define SD_LANE_SD_LANE_STAT_DFE_RST_DONE_GET(x)\
+       FIELD_GET(SD_LANE_SD_LANE_STAT_DFE_RST_DONE, x)
+
+#define SD_LANE_SD_LANE_STAT_DBG_OBS             GENMASK(31, 16)
+#define SD_LANE_SD_LANE_STAT_DBG_OBS_SET(x)\
+       FIELD_PREP(SD_LANE_SD_LANE_STAT_DBG_OBS, x)
+#define SD_LANE_SD_LANE_STAT_DBG_OBS_GET(x)\
+       FIELD_GET(SD_LANE_SD_LANE_STAT_DBG_OBS, x)
+
+/*      SD_LANE_TARGET:CFG_STAT_FX100:MISC */
+#define SD_LANE_MISC(t)           __REG(TARGET_SD_LANE, t, 25, 56, 0, 1, 56, 0, 0, 1, 4)
+
+#define SD_LANE_MISC_SD_125_RST_DIS              BIT(0)
+#define SD_LANE_MISC_SD_125_RST_DIS_SET(x)\
+       FIELD_PREP(SD_LANE_MISC_SD_125_RST_DIS, x)
+#define SD_LANE_MISC_SD_125_RST_DIS_GET(x)\
+       FIELD_GET(SD_LANE_MISC_SD_125_RST_DIS, x)
+
+#define SD_LANE_MISC_RX_ENA                      BIT(1)
+#define SD_LANE_MISC_RX_ENA_SET(x)\
+       FIELD_PREP(SD_LANE_MISC_RX_ENA, x)
+#define SD_LANE_MISC_RX_ENA_GET(x)\
+       FIELD_GET(SD_LANE_MISC_RX_ENA, x)
+
+#define SD_LANE_MISC_MUX_ENA                     BIT(2)
+#define SD_LANE_MISC_MUX_ENA_SET(x)\
+       FIELD_PREP(SD_LANE_MISC_MUX_ENA, x)
+#define SD_LANE_MISC_MUX_ENA_GET(x)\
+       FIELD_GET(SD_LANE_MISC_MUX_ENA, x)
+
+#define SD_LANE_MISC_CORE_CLK_FREQ               GENMASK(5, 4)
+#define SD_LANE_MISC_CORE_CLK_FREQ_SET(x)\
+       FIELD_PREP(SD_LANE_MISC_CORE_CLK_FREQ, x)
+#define SD_LANE_MISC_CORE_CLK_FREQ_GET(x)\
+       FIELD_GET(SD_LANE_MISC_CORE_CLK_FREQ, x)
+
+/*      SD_LANE_TARGET:CFG_STAT_FX100:M_STAT_MISC */
+#define SD_LANE_M_STAT_MISC(t)    __REG(TARGET_SD_LANE, t, 25, 56, 0, 1, 56, 36, 0, 1, 4)
+
+#define SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM GENMASK(21, 0)
+#define SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM_SET(x)\
+       FIELD_PREP(SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM, x)
+#define SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM_GET(x)\
+       FIELD_GET(SD_LANE_M_STAT_MISC_M_RIS_EDGE_PTR_ADJ_SUM, x)
+
+#define SD_LANE_M_STAT_MISC_M_LOCK_CNT           GENMASK(31, 24)
+#define SD_LANE_M_STAT_MISC_M_LOCK_CNT_SET(x)\
+       FIELD_PREP(SD_LANE_M_STAT_MISC_M_LOCK_CNT, x)
+#define SD_LANE_M_STAT_MISC_M_LOCK_CNT_GET(x)\
+       FIELD_GET(SD_LANE_M_STAT_MISC_M_LOCK_CNT, x)
+
+/*      SD25G_CFG_TARGET:SD_RESET:SD_SER_RST */
+#define SD_LANE_25G_SD_SER_RST(t) __REG(TARGET_SD_LANE_25G, t, 8, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define SD_LANE_25G_SD_SER_RST_SER_RST           BIT(0)
+#define SD_LANE_25G_SD_SER_RST_SER_RST_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_SER_RST_SER_RST, x)
+#define SD_LANE_25G_SD_SER_RST_SER_RST_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_SER_RST_SER_RST, x)
+
+/*      SD25G_CFG_TARGET:SD_RESET:SD_DES_RST */
+#define SD_LANE_25G_SD_DES_RST(t) __REG(TARGET_SD_LANE_25G, t, 8, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define SD_LANE_25G_SD_DES_RST_DES_RST           BIT(0)
+#define SD_LANE_25G_SD_DES_RST_DES_RST_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_DES_RST_DES_RST, x)
+#define SD_LANE_25G_SD_DES_RST_DES_RST_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_DES_RST_DES_RST, x)
+
+/*      SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG */
+#define SD_LANE_25G_SD_LANE_CFG(t) __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 0, 0, 1, 4)
+
+#define SD_LANE_25G_SD_LANE_CFG_MACRO_RST        BIT(0)
+#define SD_LANE_25G_SD_LANE_CFG_MACRO_RST_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_MACRO_RST, x)
+#define SD_LANE_25G_SD_LANE_CFG_MACRO_RST_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_MACRO_RST, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST      BIT(1)
+#define SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST, x)
+#define SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_HWT_MULTI_LANE_MODE BIT(4)
+#define SD_LANE_25G_SD_LANE_CFG_HWT_MULTI_LANE_MODE_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_HWT_MULTI_LANE_MODE, x)
+#define SD_LANE_25G_SD_LANE_CFG_HWT_MULTI_LANE_MODE_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_HWT_MULTI_LANE_MODE, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_PHYMODE  GENMASK(7, 5)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_PHYMODE_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_PHYMODE, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_PHYMODE_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_PHYMODE, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_LANE_RST         BIT(8)
+#define SD_LANE_25G_SD_LANE_CFG_LANE_RST_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_LANE_RST, x)
+#define SD_LANE_25G_SD_LANE_CFG_LANE_RST_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_LANE_RST, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_ADV       BIT(9)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_ADV_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_EN_ADV, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_ADV_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_EN_ADV, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_MAIN      BIT(10)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_MAIN_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_EN_MAIN, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_MAIN_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_EN_MAIN, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_DLY       BIT(11)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_DLY_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_EN_DLY, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_DLY_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_EN_DLY, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_ADV      GENMASK(15, 12)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_ADV_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_TAP_ADV, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_ADV_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_TAP_ADV, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_MAIN     BIT(16)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_MAIN_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_TAP_MAIN, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_MAIN_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_TAP_MAIN, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_DLY      GENMASK(21, 17)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_DLY_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_TAP_DLY, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_TAP_DLY_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_TAP_DLY, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_ISCAN_EN     BIT(22)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_ISCAN_EN_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_ISCAN_EN, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_ISCAN_EN_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_ISCAN_EN, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_FAST_ISCAN BIT(23)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_FAST_ISCAN_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS_EN_FAST_ISCAN, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS_EN_FAST_ISCAN_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS_EN_FAST_ISCAN, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXSWING  BIT(24)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXSWING_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXSWING, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXSWING_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXSWING, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXEI     BIT(25)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXEI_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXEI, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXEI_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXEI, x)
+
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN GENMASK(28, 26)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN, x)
+#define SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG_PCS2PMA_TXMARGIN, x)
+
+/*      SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_CFG2 */
+#define SD_LANE_25G_SD_LANE_CFG2(t) __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 4, 0, 1, 4)
+
+#define SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL  GENMASK(2, 0)
+#define SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_DATA_WIDTH_SEL, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_PMA_TXCK_SEL    GENMASK(5, 3)
+#define SD_LANE_25G_SD_LANE_CFG2_PMA_TXCK_SEL_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_PMA_TXCK_SEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_PMA_TXCK_SEL_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_PMA_TXCK_SEL, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_PMA_RXDIV_SEL   GENMASK(8, 6)
+#define SD_LANE_25G_SD_LANE_CFG2_PMA_RXDIV_SEL_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_PMA_RXDIV_SEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_PMA_RXDIV_SEL_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_PMA_RXDIV_SEL, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_PCS2PMA_TX_SPEED GENMASK(10, 9)
+#define SD_LANE_25G_SD_LANE_CFG2_PCS2PMA_TX_SPEED_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_PCS2PMA_TX_SPEED, x)
+#define SD_LANE_25G_SD_LANE_CFG2_PCS2PMA_TX_SPEED_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_PCS2PMA_TX_SPEED, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_TXFIFO_CK_DIV   GENMASK(13, 11)
+#define SD_LANE_25G_SD_LANE_CFG2_TXFIFO_CK_DIV_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_TXFIFO_CK_DIV, x)
+#define SD_LANE_25G_SD_LANE_CFG2_TXFIFO_CK_DIV_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_TXFIFO_CK_DIV, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_RXFIFO_CK_DIV   GENMASK(16, 14)
+#define SD_LANE_25G_SD_LANE_CFG2_RXFIFO_CK_DIV_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_RXFIFO_CK_DIV, x)
+#define SD_LANE_25G_SD_LANE_CFG2_RXFIFO_CK_DIV_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_RXFIFO_CK_DIV, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_VCO_DIV_SEL GENMASK(19, 17)
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_VCO_DIV_SEL_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_HWT_VCO_DIV_SEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_VCO_DIV_SEL_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_HWT_VCO_DIV_SEL, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_CFG_SEL_DIV GENMASK(23, 20)
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_CFG_SEL_DIV_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_HWT_CFG_SEL_DIV, x)
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_CFG_SEL_DIV_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_HWT_CFG_SEL_DIV, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_PRE_DIVSEL  GENMASK(25, 24)
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_PRE_DIVSEL_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_HWT_PRE_DIVSEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_HWT_PRE_DIVSEL_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_HWT_PRE_DIVSEL, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_TXRATE_SEL      GENMASK(28, 26)
+#define SD_LANE_25G_SD_LANE_CFG2_TXRATE_SEL_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_TXRATE_SEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_TXRATE_SEL_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_TXRATE_SEL, x)
+
+#define SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL      GENMASK(31, 29)
+#define SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL, x)
+#define SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_CFG2_RXRATE_SEL, x)
+
+/*      SD25G_CFG_TARGET:SD_LANE_CFG_STAT:SD_LANE_STAT */
+#define SD_LANE_25G_SD_LANE_STAT(t) __REG(TARGET_SD_LANE_25G, t, 8, 8, 0, 1, 12, 8, 0, 1, 4)
+
+#define SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE    BIT(0)
+#define SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE, x)
+#define SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_STAT_PMA_RST_DONE, x)
+
+#define SD_LANE_25G_SD_LANE_STAT_LANE_RST_DONE   BIT(1)
+#define SD_LANE_25G_SD_LANE_STAT_LANE_RST_DONE_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_STAT_LANE_RST_DONE, x)
+#define SD_LANE_25G_SD_LANE_STAT_LANE_RST_DONE_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_STAT_LANE_RST_DONE, x)
+
+#define SD_LANE_25G_SD_LANE_STAT_DBG_OBS         GENMASK(31, 16)
+#define SD_LANE_25G_SD_LANE_STAT_DBG_OBS_SET(x)\
+       FIELD_PREP(SD_LANE_25G_SD_LANE_STAT_DBG_OBS, x)
+#define SD_LANE_25G_SD_LANE_STAT_DBG_OBS_GET(x)\
+       FIELD_GET(SD_LANE_25G_SD_LANE_STAT_DBG_OBS, x)
+
+#endif /* _SPARX5_SERDES_REGS_H_ */
index 71cb1082632626646014fdb060c70b6d20f2f72b..ccb575b137778151feae9c51c7c18b353c85c2e8 100644 (file)
@@ -373,6 +373,36 @@ int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode)
 }
 EXPORT_SYMBOL_GPL(phy_set_mode_ext);
 
+int phy_set_media(struct phy *phy, enum phy_media media)
+{
+       int ret;
+
+       if (!phy || !phy->ops->set_media)
+               return 0;
+
+       mutex_lock(&phy->mutex);
+       ret = phy->ops->set_media(phy, media);
+       mutex_unlock(&phy->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(phy_set_media);
+
+int phy_set_speed(struct phy *phy, int speed)
+{
+       int ret;
+
+       if (!phy || !phy->ops->set_speed)
+               return 0;
+
+       mutex_lock(&phy->mutex);
+       ret = phy->ops->set_speed(phy, speed);
+       mutex_unlock(&phy->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(phy_set_speed);
+
 int phy_reset(struct phy *phy)
 {
        int ret;
index 9061ece7ff6ab1e42aee9ee13528ee3dabf35a2c..bfff0c8c9130389e4c472d22a978548aaf7672f3 100644 (file)
@@ -276,8 +276,8 @@ static int qcom_ipq806x_usb_hs_phy_init(struct phy *phy)
        val = HSUSB_CTRL_DPSEHV_CLAMP | HSUSB_CTRL_DMSEHV_CLAMP |
                HSUSB_CTRL_RETENABLEN  | HSUSB_CTRL_COMMONONN |
                HSUSB_CTRL_OTGSESSVLD_CLAMP | HSUSB_CTRL_ID_HV_CLAMP |
-               HSUSB_CTRL_DPSEHV_CLAMP | HSUSB_CTRL_UTMI_OTG_VBUS_VALID |
-               HSUSB_CTRL_UTMI_CLK_EN | HSUSB_CTRL_CLAMP_EN | 0x70;
+               HSUSB_CTRL_UTMI_OTG_VBUS_VALID | HSUSB_CTRL_UTMI_CLK_EN |
+               HSUSB_CTRL_CLAMP_EN | 0x70;
 
        /* use core clock if external reference is not present */
        if (!phy_dwc3->xo_clk)
index 9cdebe7f26cb1d3a66134e2065e32979c7dc30f9..7877f70cf86fa09811307a6433eb0ad50db32e33 100644 (file)
@@ -1840,6 +1840,86 @@ static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_pcs_tbl[] = {
        QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
 };
 
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_SVS_MODE_CLK_SEL, 0x05),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x3b),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYS_CLK_CTRL, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x0c),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x06),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x30),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_CONFIG, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_BG_TIMER, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE0, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_CTRL, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN, 0x17),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORE_CLK_EN, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_rbr[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x05),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x6f),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x08),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x03),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x0e),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr2[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x8c),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x1f),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1c),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl_hbr3[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x69),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x80),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x07),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x2f),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x2a),
+       QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x08),
+};
+
+static const struct qmp_phy_init_tbl qmp_v4_dp_tx_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_VMODE_CTRL1, 0x40),
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_INTERFACE_SELECT, 0x3b),
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_CLKBUF_ENABLE, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_RESET_TSYNC_EN, 0x03),
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_INTERFACE_MODE, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x11),
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_BAND, 0x4),
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_POL_INV, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_DRV_LVL, 0x2a),
+       QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_EMP_POST1_LVL, 0x20),
+};
+
 static const struct qmp_phy_init_tbl sm8250_qmp_pcie_serdes_tbl[] = {
        QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08),
        QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34),
@@ -2268,6 +2348,8 @@ static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_pcs_tbl[] = {
        QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
 };
 
+struct qmp_phy;
+
 /* struct qmp_phy_cfg - per-PHY initialization config */
 struct qmp_phy_cfg {
        /* phy-type - PCIE/UFS/USB */
@@ -2307,6 +2389,12 @@ struct qmp_phy_cfg {
        const struct qmp_phy_init_tbl *serdes_tbl_hbr3;
        int serdes_tbl_hbr3_num;
 
+       /* DP PHY callbacks */
+       int (*configure_dp_phy)(struct qmp_phy *qphy);
+       void (*configure_dp_tx)(struct qmp_phy *qphy);
+       int (*calibrate_dp_phy)(struct qmp_phy *qphy);
+       void (*dp_aux_init)(struct qmp_phy *qphy);
+
        /* clock ids to be requested */
        const char * const *clk_list;
        int num_clks;
@@ -2423,6 +2511,16 @@ struct qcom_qmp {
        struct reset_control *ufs_reset;
 };
 
+static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy);
+static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy);
+static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy);
+static int qcom_qmp_v3_dp_phy_calibrate(struct qmp_phy *qphy);
+
+static void qcom_qmp_v4_phy_dp_aux_init(struct qmp_phy *qphy);
+static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy);
+static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy);
+static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy);
+
 static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
 {
        u32 reg;
@@ -2871,6 +2969,11 @@ static const struct qmp_phy_cfg sc7180_dpphy_cfg = {
 
        .has_phy_dp_com_ctrl    = true,
        .is_dual_lane_phy       = true,
+
+       .dp_aux_init = qcom_qmp_v3_phy_dp_aux_init,
+       .configure_dp_tx = qcom_qmp_v3_phy_configure_dp_tx,
+       .configure_dp_phy = qcom_qmp_v3_phy_configure_dp_phy,
+       .calibrate_dp_phy = qcom_qmp_v3_dp_phy_calibrate,
 };
 
 static const struct qmp_phy_combo_cfg sc7180_usb3dpphy_cfg = {
@@ -3123,6 +3226,46 @@ static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
        .pwrdn_delay_max        = POWER_DOWN_DELAY_US_MAX,
 };
 
+static const struct qmp_phy_cfg sm8250_dpphy_cfg = {
+       .type                   = PHY_TYPE_DP,
+       .nlanes                 = 1,
+
+       .serdes_tbl             = qmp_v4_dp_serdes_tbl,
+       .serdes_tbl_num         = ARRAY_SIZE(qmp_v4_dp_serdes_tbl),
+       .tx_tbl                 = qmp_v4_dp_tx_tbl,
+       .tx_tbl_num             = ARRAY_SIZE(qmp_v4_dp_tx_tbl),
+
+       .serdes_tbl_rbr         = qmp_v4_dp_serdes_tbl_rbr,
+       .serdes_tbl_rbr_num     = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr),
+       .serdes_tbl_hbr         = qmp_v4_dp_serdes_tbl_hbr,
+       .serdes_tbl_hbr_num     = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr),
+       .serdes_tbl_hbr2        = qmp_v4_dp_serdes_tbl_hbr2,
+       .serdes_tbl_hbr2_num    = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr2),
+       .serdes_tbl_hbr3        = qmp_v4_dp_serdes_tbl_hbr3,
+       .serdes_tbl_hbr3_num    = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3),
+
+       .clk_list               = qmp_v4_phy_clk_l,
+       .num_clks               = ARRAY_SIZE(qmp_v4_phy_clk_l),
+       .reset_list             = msm8996_usb3phy_reset_l,
+       .num_resets             = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+       .vreg_list              = qmp_phy_vreg_l,
+       .num_vregs              = ARRAY_SIZE(qmp_phy_vreg_l),
+       .regs                   = qmp_v4_usb3phy_regs_layout,
+
+       .has_phy_dp_com_ctrl    = true,
+       .is_dual_lane_phy       = true,
+
+       .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init,
+       .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx,
+       .configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy,
+       .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate,
+};
+
+static const struct qmp_phy_combo_cfg sm8250_usb3dpphy_cfg = {
+       .usb_cfg                = &sm8250_usb3phy_cfg,
+       .dp_cfg                 = &sm8250_dpphy_cfg,
+};
+
 static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
        .type                   = PHY_TYPE_USB3,
        .nlanes                 = 1,
@@ -3332,24 +3475,24 @@ static int qcom_qmp_phy_serdes_init(struct qmp_phy *qphy)
        return 0;
 }
 
-static void qcom_qmp_phy_dp_aux_init(struct qmp_phy *qphy)
+static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy)
 {
        writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
               DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
-              qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+              qphy->pcs + QSERDES_DP_PHY_PD_CTL);
 
        /* Turn on BIAS current for PHY/PLL */
        writel(QSERDES_V3_COM_BIAS_EN | QSERDES_V3_COM_BIAS_EN_MUX |
               QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL,
               qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
 
-       writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+       writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_DP_PHY_PD_CTL);
 
        writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
               DP_PHY_PD_CTL_LANE_0_1_PWRDN |
               DP_PHY_PD_CTL_LANE_2_3_PWRDN | DP_PHY_PD_CTL_PLL_PWRDN |
               DP_PHY_PD_CTL_DP_CLAMP_EN,
-              qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+              qphy->pcs + QSERDES_DP_PHY_PD_CTL);
 
        writel(QSERDES_V3_COM_BIAS_EN |
               QSERDES_V3_COM_BIAS_EN_MUX | QSERDES_V3_COM_CLKBUF_R_EN |
@@ -3357,16 +3500,16 @@ static void qcom_qmp_phy_dp_aux_init(struct qmp_phy *qphy)
               QSERDES_V3_COM_CLKBUF_RX_DRIVE_L,
               qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
 
-       writel(0x00, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG0);
-       writel(0x13, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG1);
-       writel(0x24, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG2);
-       writel(0x00, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG3);
-       writel(0x0a, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG4);
-       writel(0x26, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG5);
-       writel(0x0a, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG6);
-       writel(0x03, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG7);
-       writel(0xbb, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG8);
-       writel(0x03, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG9);
+       writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG0);
+       writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+       writel(0x24, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
+       writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG3);
+       writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG4);
+       writel(0x26, qphy->pcs + QSERDES_DP_PHY_AUX_CFG5);
+       writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG6);
+       writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG7);
+       writel(0xbb, qphy->pcs + QSERDES_DP_PHY_AUX_CFG8);
+       writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG9);
        qphy->dp_aux_cfg = 0;
 
        writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
@@ -3375,6 +3518,20 @@ static void qcom_qmp_phy_dp_aux_init(struct qmp_phy *qphy)
               qphy->pcs + QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK);
 }
 
+static const u8 qmp_dp_v3_pre_emphasis_hbr3_hbr2[4][4] = {
+       { 0x00, 0x0c, 0x15, 0x1a },
+       { 0x02, 0x0e, 0x16, 0xff },
+       { 0x02, 0x11, 0xff, 0xff },
+       { 0x04, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v3_voltage_swing_hbr3_hbr2[4][4] = {
+       { 0x02, 0x12, 0x16, 0x1a },
+       { 0x09, 0x19, 0x1f, 0xff },
+       { 0x10, 0x1f, 0xff, 0xff },
+       { 0x1f, 0xff, 0xff, 0xff }
+};
+
 static const u8 qmp_dp_v3_pre_emphasis_hbr_rbr[4][4] = {
        { 0x00, 0x0c, 0x14, 0x19 },
        { 0x00, 0x0b, 0x12, 0xff },
@@ -3389,11 +3546,11 @@ static const u8 qmp_dp_v3_voltage_swing_hbr_rbr[4][4] = {
        { 0x1f, 0xff, 0xff, 0xff }
 };
 
-static void qcom_qmp_phy_configure_dp_tx(struct qmp_phy *qphy)
+static int qcom_qmp_phy_configure_dp_swing(struct qmp_phy *qphy,
+               unsigned int drv_lvl_reg, unsigned int emp_post_reg)
 {
        const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
        unsigned int v_level = 0, p_level = 0;
-       u32 bias_en, drvr_en;
        u8 voltage_swing_cfg, pre_emphasis_cfg;
        int i;
 
@@ -3402,56 +3559,58 @@ static void qcom_qmp_phy_configure_dp_tx(struct qmp_phy *qphy)
                p_level = max(p_level, dp_opts->pre[i]);
        }
 
-       if (dp_opts->lanes == 1) {
-               bias_en = 0x3e;
-               drvr_en = 0x13;
+       if (dp_opts->link_rate <= 2700) {
+               voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr_rbr[v_level][p_level];
+               pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr_rbr[v_level][p_level];
        } else {
-               bias_en = 0x3f;
-               drvr_en = 0x10;
+               voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr3_hbr2[v_level][p_level];
+               pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr3_hbr2[v_level][p_level];
        }
 
-       voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr_rbr[v_level][p_level];
-       pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr_rbr[v_level][p_level];
-
        /* TODO: Move check to config check */
        if (voltage_swing_cfg == 0xFF && pre_emphasis_cfg == 0xFF)
-               return;
+               return -EINVAL;
 
        /* Enable MUX to use Cursor values from these registers */
        voltage_swing_cfg |= DP_PHY_TXn_TX_DRV_LVL_MUX_EN;
        pre_emphasis_cfg |= DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN;
 
-       writel(voltage_swing_cfg, qphy->tx + QSERDES_V3_TX_TX_DRV_LVL);
-       writel(pre_emphasis_cfg, qphy->tx + QSERDES_V3_TX_TX_EMP_POST1_LVL);
-       writel(voltage_swing_cfg, qphy->tx2 + QSERDES_V3_TX_TX_DRV_LVL);
-       writel(pre_emphasis_cfg, qphy->tx2 + QSERDES_V3_TX_TX_EMP_POST1_LVL);
+       writel(voltage_swing_cfg, qphy->tx + drv_lvl_reg);
+       writel(pre_emphasis_cfg, qphy->tx + emp_post_reg);
+       writel(voltage_swing_cfg, qphy->tx2 + drv_lvl_reg);
+       writel(pre_emphasis_cfg, qphy->tx2 + emp_post_reg);
 
-       writel(drvr_en, qphy->tx + QSERDES_V3_TX_HIGHZ_DRVR_EN);
-       writel(bias_en, qphy->tx + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
-       writel(drvr_en, qphy->tx2 + QSERDES_V3_TX_HIGHZ_DRVR_EN);
-       writel(bias_en, qphy->tx2 + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
+       return 0;
 }
 
-static int qcom_qmp_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
+static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy)
 {
-       const struct phy_configure_opts_dp *dp_opts = &opts->dp;
-       struct qmp_phy *qphy = phy_get_drvdata(phy);
+       const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+       u32 bias_en, drvr_en;
 
-       memcpy(&qphy->dp_opts, dp_opts, sizeof(*dp_opts));
-       if (qphy->dp_opts.set_voltages) {
-               qcom_qmp_phy_configure_dp_tx(qphy);
-               qphy->dp_opts.set_voltages = 0;
+       if (qcom_qmp_phy_configure_dp_swing(qphy,
+                               QSERDES_V3_TX_TX_DRV_LVL,
+                               QSERDES_V3_TX_TX_EMP_POST1_LVL) < 0)
+               return;
+
+       if (dp_opts->lanes == 1) {
+               bias_en = 0x3e;
+               drvr_en = 0x13;
+       } else {
+               bias_en = 0x3f;
+               drvr_en = 0x10;
        }
 
-       return 0;
+       writel(drvr_en, qphy->tx + QSERDES_V3_TX_HIGHZ_DRVR_EN);
+       writel(bias_en, qphy->tx + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
+       writel(drvr_en, qphy->tx2 + QSERDES_V3_TX_HIGHZ_DRVR_EN);
+       writel(bias_en, qphy->tx2 + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
 }
 
-static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
+static bool qcom_qmp_phy_configure_dp_mode(struct qmp_phy *qphy)
 {
-       const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks;
-       const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
-       u32 val, phy_vco_div, status;
-       unsigned long pixel_freq;
+       u32 val;
+       bool reverse = false;
 
        val = DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
              DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN;
@@ -3471,9 +3630,22 @@ static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
         *      writel(0x4c, qphy->pcs + QSERDES_V3_DP_PHY_MODE);
         */
        val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN;
-       writel(val, qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+       writel(val, qphy->pcs + QSERDES_DP_PHY_PD_CTL);
+
+       writel(0x5c, qphy->pcs + QSERDES_DP_PHY_MODE);
+
+       return reverse;
+}
+
+static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy)
+{
+       const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks;
+       const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+       u32 phy_vco_div, status;
+       unsigned long pixel_freq;
+
+       qcom_qmp_phy_configure_dp_mode(qphy);
 
-       writel(0x5c, qphy->pcs + QSERDES_V3_DP_PHY_MODE);
        writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL);
        writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL);
 
@@ -3503,11 +3675,11 @@ static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
        clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000);
        clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq);
 
-       writel(0x04, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG2);
-       writel(0x01, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
-       writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
-       writel(0x01, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
-       writel(0x09, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+       writel(0x04, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
+       writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
+       writel(0x05, qphy->pcs + QSERDES_DP_PHY_CFG);
+       writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
+       writel(0x09, qphy->pcs + QSERDES_DP_PHY_CFG);
 
        writel(0x20, qphy->serdes + QSERDES_V3_COM_RESETSM_CNTRL);
 
@@ -3518,7 +3690,7 @@ static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
                        10000))
                return -ETIMEDOUT;
 
-       writel(0x19, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+       writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
 
        if (readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS,
                        status,
@@ -3527,9 +3699,9 @@ static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
                        10000))
                return -ETIMEDOUT;
 
-       writel(0x18, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+       writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG);
        udelay(2000);
-       writel(0x19, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+       writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
 
        return readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS,
                        status,
@@ -3542,9 +3714,8 @@ static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
  * We need to calibrate the aux setting here as many times
  * as the caller tries
  */
-static int qcom_qmp_dp_phy_calibrate(struct phy *phy)
+static int qcom_qmp_v3_dp_phy_calibrate(struct qmp_phy *qphy)
 {
-       struct qmp_phy *qphy = phy_get_drvdata(phy);
        static const u8 cfg1_settings[] = { 0x13, 0x23, 0x1d };
        u8 val;
 
@@ -3552,7 +3723,231 @@ static int qcom_qmp_dp_phy_calibrate(struct phy *phy)
        qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings);
        val = cfg1_settings[qphy->dp_aux_cfg];
 
-       writel(val, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG1);
+       writel(val, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+
+       return 0;
+}
+
+static void qcom_qmp_v4_phy_dp_aux_init(struct qmp_phy *qphy)
+{
+       writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_PSR_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+              DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+              qphy->pcs + QSERDES_DP_PHY_PD_CTL);
+
+       /* Turn on BIAS current for PHY/PLL */
+       writel(0x17, qphy->serdes + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN);
+
+       writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG0);
+       writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+       writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
+       writel(0x00, qphy->pcs + QSERDES_DP_PHY_AUX_CFG3);
+       writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG4);
+       writel(0x26, qphy->pcs + QSERDES_DP_PHY_AUX_CFG5);
+       writel(0x0a, qphy->pcs + QSERDES_DP_PHY_AUX_CFG6);
+       writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG7);
+       writel(0xb7, qphy->pcs + QSERDES_DP_PHY_AUX_CFG8);
+       writel(0x03, qphy->pcs + QSERDES_DP_PHY_AUX_CFG9);
+       qphy->dp_aux_cfg = 0;
+
+       writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
+              PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK |
+              PHY_AUX_REQ_ERR_MASK,
+              qphy->pcs + QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK);
+}
+
+static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy)
+{
+       /* Program default values before writing proper values */
+       writel(0x27, qphy->tx + QSERDES_V4_TX_TX_DRV_LVL);
+       writel(0x27, qphy->tx2 + QSERDES_V4_TX_TX_DRV_LVL);
+
+       writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+       writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+
+       qcom_qmp_phy_configure_dp_swing(qphy,
+                       QSERDES_V4_TX_TX_DRV_LVL,
+                       QSERDES_V4_TX_TX_EMP_POST1_LVL);
+}
+
+static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy)
+{
+       const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks;
+       const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+       u32 phy_vco_div, status;
+       unsigned long pixel_freq;
+       u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
+       bool reverse;
+
+       writel(0x0f, qphy->pcs + QSERDES_V4_DP_PHY_CFG_1);
+
+       reverse = qcom_qmp_phy_configure_dp_mode(qphy);
+
+       writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+       writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
+
+       writel(0x05, qphy->pcs + QSERDES_V4_DP_PHY_TX0_TX1_LANE_CTL);
+       writel(0x05, qphy->pcs + QSERDES_V4_DP_PHY_TX2_TX3_LANE_CTL);
+
+       switch (dp_opts->link_rate) {
+       case 1620:
+               phy_vco_div = 0x1;
+               pixel_freq = 1620000000UL / 2;
+               break;
+       case 2700:
+               phy_vco_div = 0x1;
+               pixel_freq = 2700000000UL / 2;
+               break;
+       case 5400:
+               phy_vco_div = 0x2;
+               pixel_freq = 5400000000UL / 4;
+               break;
+       case 8100:
+               phy_vco_div = 0x0;
+               pixel_freq = 8100000000UL / 6;
+               break;
+       default:
+               /* Other link rates aren't supported */
+               return -EINVAL;
+       }
+       writel(phy_vco_div, qphy->pcs + QSERDES_V4_DP_PHY_VCO_DIV);
+
+       clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000);
+       clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq);
+
+       writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
+       writel(0x05, qphy->pcs + QSERDES_DP_PHY_CFG);
+       writel(0x01, qphy->pcs + QSERDES_DP_PHY_CFG);
+       writel(0x09, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+       writel(0x20, qphy->serdes + QSERDES_V4_COM_RESETSM_CNTRL);
+
+       if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_C_READY_STATUS,
+                       status,
+                       ((status & BIT(0)) > 0),
+                       500,
+                       10000))
+               return -ETIMEDOUT;
+
+       if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_CMN_STATUS,
+                       status,
+                       ((status & BIT(0)) > 0),
+                       500,
+                       10000))
+               return -ETIMEDOUT;
+
+       if (readl_poll_timeout(qphy->serdes + QSERDES_V4_COM_CMN_STATUS,
+                       status,
+                       ((status & BIT(1)) > 0),
+                       500,
+                       10000))
+               return -ETIMEDOUT;
+
+       writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+       if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
+                       status,
+                       ((status & BIT(0)) > 0),
+                       500,
+                       10000))
+               return -ETIMEDOUT;
+
+       if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
+                       status,
+                       ((status & BIT(1)) > 0),
+                       500,
+                       10000))
+               return -ETIMEDOUT;
+
+       /*
+        * At least for 7nm DP PHY this has to be done after enabling link
+        * clock.
+        */
+
+       if (dp_opts->lanes == 1) {
+               bias0_en = reverse ? 0x3e : 0x15;
+               bias1_en = reverse ? 0x15 : 0x3e;
+               drvr0_en = reverse ? 0x13 : 0x10;
+               drvr1_en = reverse ? 0x10 : 0x13;
+       } else if (dp_opts->lanes == 2) {
+               bias0_en = reverse ? 0x3f : 0x15;
+               bias1_en = reverse ? 0x15 : 0x3f;
+               drvr0_en = 0x10;
+               drvr1_en = 0x10;
+       } else {
+               bias0_en = 0x3f;
+               bias1_en = 0x3f;
+               drvr0_en = 0x10;
+               drvr1_en = 0x10;
+       }
+
+       writel(drvr0_en, qphy->tx + QSERDES_V4_TX_HIGHZ_DRVR_EN);
+       writel(bias0_en, qphy->tx + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
+       writel(drvr1_en, qphy->tx2 + QSERDES_V4_TX_HIGHZ_DRVR_EN);
+       writel(bias1_en, qphy->tx2 + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
+
+       writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG);
+       udelay(2000);
+       writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+       if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
+                       status,
+                       ((status & BIT(1)) > 0),
+                       500,
+                       10000))
+               return -ETIMEDOUT;
+
+       writel(0x0a, qphy->tx + QSERDES_V4_TX_TX_POL_INV);
+       writel(0x0a, qphy->tx2 + QSERDES_V4_TX_TX_POL_INV);
+
+       writel(0x27, qphy->tx + QSERDES_V4_TX_TX_DRV_LVL);
+       writel(0x27, qphy->tx2 + QSERDES_V4_TX_TX_DRV_LVL);
+
+       writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+       writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+
+       return 0;
+}
+
+/*
+ * We need to calibrate the aux setting here as many times
+ * as the caller tries
+ */
+static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy)
+{
+       static const u8 cfg1_settings[] = { 0x20, 0x13, 0x23, 0x1d };
+       u8 val;
+
+       qphy->dp_aux_cfg++;
+       qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings);
+       val = cfg1_settings[qphy->dp_aux_cfg];
+
+       writel(val, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
+
+       return 0;
+}
+
+static int qcom_qmp_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+       const struct phy_configure_opts_dp *dp_opts = &opts->dp;
+       struct qmp_phy *qphy = phy_get_drvdata(phy);
+       const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+       memcpy(&qphy->dp_opts, dp_opts, sizeof(*dp_opts));
+       if (qphy->dp_opts.set_voltages) {
+               cfg->configure_dp_tx(qphy);
+               qphy->dp_opts.set_voltages = 0;
+       }
+
+       return 0;
+}
+
+static int qcom_qmp_dp_phy_calibrate(struct phy *phy)
+{
+       struct qmp_phy *qphy = phy_get_drvdata(phy);
+       const struct qmp_phy_cfg *cfg = qphy->cfg;
+
+       if (cfg->calibrate_dp_phy)
+               return cfg->calibrate_dp_phy(qphy);
 
        return 0;
 }
@@ -3729,7 +4124,7 @@ static int qcom_qmp_phy_init(struct phy *phy)
                return ret;
 
        if (cfg->type == PHY_TYPE_DP)
-               qcom_qmp_phy_dp_aux_init(qphy);
+               cfg->dp_aux_init(qphy);
 
        return 0;
 }
@@ -3783,7 +4178,7 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
 
        /* Configure special DP tx tunings */
        if (cfg->type == PHY_TYPE_DP)
-               qcom_qmp_phy_configure_dp_tx(qphy);
+               cfg->configure_dp_tx(qphy);
 
        qcom_qmp_phy_configure_lane(rx, cfg->regs,
                                    cfg->rx_tbl, cfg->rx_tbl_num, 1);
@@ -3802,7 +4197,7 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
 
        /* Configure link rate, swing, etc. */
        if (cfg->type == PHY_TYPE_DP) {
-               qcom_qmp_phy_configure_dp_phy(qphy);
+               cfg->configure_dp_phy(qphy);
        } else {
                qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
                if (cfg->pcs_tbl_sec)
@@ -3874,7 +4269,7 @@ static int qcom_qmp_phy_power_off(struct phy *phy)
 
        if (cfg->type == PHY_TYPE_DP) {
                /* Assert DP PHY power down */
-               writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+               writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_DP_PHY_PD_CTL);
        } else {
                /* PHY reset */
                if (!cfg->no_pcs_sw_reset)
@@ -4577,6 +4972,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
        }, {
                .compatible = "qcom,sm8250-qmp-usb3-phy",
                .data = &sm8250_usb3phy_cfg,
+       }, {
+               .compatible = "qcom,sm8250-qmp-usb3-dp-phy",
+               /* It's a combo phy */
        }, {
                .compatible = "qcom,sm8250-qmp-usb3-uni-phy",
                .data = &sm8250_usb3_uniphy_cfg,
@@ -4611,6 +5009,10 @@ static const struct of_device_id qcom_qmp_combo_phy_of_match_table[] = {
                .compatible = "qcom,sc7180-qmp-usb3-dp-phy",
                .data = &sc7180_usb3dpphy_cfg,
        },
+       {
+               .compatible = "qcom,sm8250-qmp-usb3-dp-phy",
+               .data = &sm8250_usb3dpphy_cfg,
+       },
        { }
 };
 
index 71ce3aa174ae69fba4dc24ea9eb05cca004083fd..67bd2dd0d8c594d8ff23e2a73507e72ea1aa44e8 100644 (file)
 #define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4                0x5c
 #define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5                0x60
 
-/* Only for QMP V3 PHY - DP PHY registers */
-#define QSERDES_V3_DP_PHY_REVISION_ID0                 0x000
-#define QSERDES_V3_DP_PHY_REVISION_ID1                 0x004
-#define QSERDES_V3_DP_PHY_REVISION_ID2                 0x008
-#define QSERDES_V3_DP_PHY_REVISION_ID3                 0x00c
-#define QSERDES_V3_DP_PHY_CFG                          0x010
-#define QSERDES_V3_DP_PHY_PD_CTL                       0x018
+/* QMP PHY - DP PHY registers */
+#define QSERDES_DP_PHY_REVISION_ID0                    0x000
+#define QSERDES_DP_PHY_REVISION_ID1                    0x004
+#define QSERDES_DP_PHY_REVISION_ID2                    0x008
+#define QSERDES_DP_PHY_REVISION_ID3                    0x00c
+#define QSERDES_DP_PHY_CFG                             0x010
+#define QSERDES_DP_PHY_PD_CTL                          0x018
 # define DP_PHY_PD_CTL_PWRDN                           0x001
 # define DP_PHY_PD_CTL_PSR_PWRDN                       0x002
 # define DP_PHY_PD_CTL_AUX_PWRDN                       0x004
 # define DP_PHY_PD_CTL_LANE_2_3_PWRDN                  0x010
 # define DP_PHY_PD_CTL_PLL_PWRDN                       0x020
 # define DP_PHY_PD_CTL_DP_CLAMP_EN                     0x040
-#define QSERDES_V3_DP_PHY_MODE                         0x01c
-#define QSERDES_V3_DP_PHY_AUX_CFG0                     0x020
-#define QSERDES_V3_DP_PHY_AUX_CFG1                     0x024
-#define QSERDES_V3_DP_PHY_AUX_CFG2                     0x028
-#define QSERDES_V3_DP_PHY_AUX_CFG3                     0x02c
-#define QSERDES_V3_DP_PHY_AUX_CFG4                     0x030
-#define QSERDES_V3_DP_PHY_AUX_CFG5                     0x034
-#define QSERDES_V3_DP_PHY_AUX_CFG6                     0x038
-#define QSERDES_V3_DP_PHY_AUX_CFG7                     0x03c
-#define QSERDES_V3_DP_PHY_AUX_CFG8                     0x040
-#define QSERDES_V3_DP_PHY_AUX_CFG9                     0x044
+#define QSERDES_DP_PHY_MODE                            0x01c
+#define QSERDES_DP_PHY_AUX_CFG0                                0x020
+#define QSERDES_DP_PHY_AUX_CFG1                                0x024
+#define QSERDES_DP_PHY_AUX_CFG2                                0x028
+#define QSERDES_DP_PHY_AUX_CFG3                                0x02c
+#define QSERDES_DP_PHY_AUX_CFG4                                0x030
+#define QSERDES_DP_PHY_AUX_CFG5                                0x034
+#define QSERDES_DP_PHY_AUX_CFG6                                0x038
+#define QSERDES_DP_PHY_AUX_CFG7                                0x03c
+#define QSERDES_DP_PHY_AUX_CFG8                                0x040
+#define QSERDES_DP_PHY_AUX_CFG9                                0x044
 
+/* Only for QMP V3 PHY - DP PHY registers */
 #define QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK           0x048
 # define PHY_AUX_STOP_ERR_MASK                         0x01
 # define PHY_AUX_DEC_ERR_MASK                          0x02
 #define QSERDES_V3_DP_PHY_STATUS                       0x0c0
 
 /* Only for QMP V4 PHY - QSERDES COM registers */
+#define QSERDES_V4_COM_BG_TIMER                                0x00c
 #define QSERDES_V4_COM_SSC_EN_CENTER                   0x010
 #define QSERDES_V4_COM_SSC_PER1                                0x01c
 #define QSERDES_V4_COM_SSC_PER2                                0x020
 #define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0            0x028
 #define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1            0x030
 #define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1            0x034
+#define QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN             0x044
 #define QSERDES_V4_COM_CLK_ENABLE1                     0x048
+#define QSERDES_V4_COM_SYS_CLK_CTRL                    0x04c
 #define QSERDES_V4_COM_SYSCLK_BUF_ENABLE               0x050
 #define QSERDES_V4_COM_PLL_IVCO                                0x058
 #define QSERDES_V4_COM_CMN_IPTRIM                      0x060
 #define QSERDES_V4_COM_PLL_CCTRL_MODE0                 0x084
 #define QSERDES_V4_COM_PLL_CCTRL_MODE1                 0x088
 #define QSERDES_V4_COM_SYSCLK_EN_SEL                   0x094
+#define QSERDES_V4_COM_RESETSM_CNTRL                   0x09c
 #define QSERDES_V4_COM_LOCK_CMP_EN                     0x0a4
 #define QSERDES_V4_COM_LOCK_CMP1_MODE0                 0x0ac
 #define QSERDES_V4_COM_LOCK_CMP2_MODE0                 0x0b0
 #define QSERDES_V4_COM_DIV_FRAC_START1_MODE1           0x0d8
 #define QSERDES_V4_COM_DIV_FRAC_START2_MODE1           0x0dc
 #define QSERDES_V4_COM_DIV_FRAC_START3_MODE1           0x0e0
+#define QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0           0x0ec
+#define QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0           0x0f0
+#define QSERDES_V4_COM_VCO_TUNE_CTRL                   0x108
 #define QSERDES_V4_COM_VCO_TUNE_MAP                    0x10c
 #define QSERDES_V4_COM_VCO_TUNE1_MODE0                 0x110
 #define QSERDES_V4_COM_VCO_TUNE2_MODE0                 0x114
 #define QSERDES_V4_COM_VCO_TUNE1_MODE1                 0x118
 #define QSERDES_V4_COM_VCO_TUNE2_MODE1                 0x11c
 #define QSERDES_V4_COM_VCO_TUNE_INITVAL2               0x124
+#define QSERDES_V4_COM_CMN_STATUS                      0x140
 #define QSERDES_V4_COM_CLK_SELECT                      0x154
 #define QSERDES_V4_COM_HSCLK_SEL                       0x158
 #define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL             0x15c
+#define QSERDES_V4_COM_CORECLK_DIV_MODE0               0x168
 #define QSERDES_V4_COM_CORECLK_DIV_MODE1               0x16c
+#define QSERDES_V4_COM_CORE_CLK_EN                     0x174
+#define QSERDES_V4_COM_C_READY_STATUS                  0x178
+#define QSERDES_V4_COM_CMN_CONFIG                      0x17c
 #define QSERDES_V4_COM_SVS_MODE_CLK_SEL                        0x184
 #define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0      0x1ac
 #define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0      0x1b0
 #define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1      0x1b8
 
 /* Only for QMP V4 PHY - TX registers */
+#define QSERDES_V4_TX_CLKBUF_ENABLE                    0x08
+#define QSERDES_V4_TX_TX_EMP_POST1_LVL                 0x0c
+#define QSERDES_V4_TX_TX_DRV_LVL                       0x14
+#define QSERDES_V4_TX_RESET_TSYNC_EN                   0x1c
+#define QSERDES_V4_TX_PRE_STALL_LDO_BOOST_EN           0x20
+#define QSERDES_V4_TX_TX_BAND                          0x24
+#define QSERDES_V4_TX_INTERFACE_SELECT                 0x2c
 #define QSERDES_V4_TX_RES_CODE_LANE_TX                 0x34
 #define QSERDES_V4_TX_RES_CODE_LANE_RX                 0x38
 #define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX          0x3c
 #define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX          0x40
+#define QSERDES_V4_TX_TRANSCEIVER_BIAS_EN              0x54
+#define QSERDES_V4_TX_HIGHZ_DRVR_EN                    0x58
+#define QSERDES_V4_TX_TX_POL_INV                       0x5c
+#define QSERDES_V4_TX_PARRATE_REC_DETECT_IDLE_EN       0x60
 #define QSERDES_V4_TX_LANE_MODE_1                      0x84
 #define QSERDES_V4_TX_LANE_MODE_2                      0x88
 #define QSERDES_V4_TX_RCV_DETECT_LVL_2                 0x9c
+#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN                 0xb8
+#define QSERDES_V4_TX_TX_INTERFACE_MODE                        0xbc
 #define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1       0xd8
 #define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1       0xdC
 #define QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1       0xe0
 #define QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1       0xe4
-#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN                 0xb8
-#define QSERDES_V4_TX_PI_QEC_CTRL              0x104
+#define QSERDES_V4_TX_VMODE_CTRL1                      0xe8
+#define QSERDES_V4_TX_PI_QEC_CTRL                      0x104
 
 /* Only for QMP V4 PHY - RX registers */
 #define QSERDES_V4_RX_UCDR_FO_GAIN                     0x008
 #define QSERDES_V4_RX_DCC_CTRL1                                0x1bc
 #define QSERDES_V4_RX_VTH_CODE                         0x1c4
 
+/* Only for QMP V4 PHY - DP PHY registers */
+#define QSERDES_V4_DP_PHY_CFG_1                                0x014
+#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK           0x054
+#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_CLEAR          0x058
+#define QSERDES_V4_DP_PHY_VCO_DIV                      0x070
+#define QSERDES_V4_DP_PHY_TX0_TX1_LANE_CTL             0x078
+#define QSERDES_V4_DP_PHY_TX2_TX3_LANE_CTL             0x09c
+#define QSERDES_V4_DP_PHY_SPARE0                       0x0c8
+#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_STATUS         0x0d8
+#define QSERDES_V4_DP_PHY_STATUS                       0x0dc
+
 /* Only for QMP V4 PHY - UFS PCS registers */
 #define QPHY_V4_PCS_UFS_PHY_START                              0x000
 #define QPHY_V4_PCS_UFS_POWER_DOWN_CONTROL                     0x004
index 327df1a99f77325a1f229cc1638402f5e8bbe5d6..5c6c17673396175493c0a1f2c19b409b6136c2b8 100644 (file)
@@ -56,6 +56,7 @@ static int qcom_usb_hs_phy_set_mode(struct phy *phy,
                        fallthrough;
                case PHY_MODE_USB_DEVICE:
                        val |= ULPI_INT_SESS_VALID;
+                       break;
                default:
                        break;
                }
index 9a610b414b1fb36f901765134d69e26ad969ead0..753cb5bab9308f71b9ea41cff320f610d42d7995 100644 (file)
@@ -62,7 +62,7 @@
 
 #define RG_PE1_FRC_MSTCKDIV                    BIT(5)
 
-#define XTAL_MASK                              GENMASK(7, 6)
+#define XTAL_MASK                              GENMASK(8, 6)
 
 #define MAX_PHYS       2
 
@@ -319,9 +319,9 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev)
                return PTR_ERR(phy->regmap);
 
        phy->phy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops);
-       if (IS_ERR(phy)) {
+       if (IS_ERR(phy->phy)) {
                dev_err(dev, "failed to create phy\n");
-               return PTR_ERR(phy);
+               return PTR_ERR(phy->phy);
        }
 
        phy_set_drvdata(phy->phy, phy);
index 70a31251b202bc5b4e6b6cbf98ae69179ec96df0..d2bbdc96a1672ae1386e365803776bae54e84960 100644 (file)
@@ -1180,6 +1180,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
                        dev_err(dev, "failed to create phy: %pOFn\n",
                                child_np);
                        pm_runtime_disable(dev);
+                       of_node_put(child_np);
                        return PTR_ERR(phy);
                }
 
index b32f44ff90337500f045a2aed711681ff5ee099b..3fc3d0781fb8accdd5cd15d8e6f627b940cb4388 100644 (file)
@@ -36,6 +36,7 @@ config PHY_STIH407_USB
 config PHY_STM32_USBPHYC
        tristate "STMicroelectronics STM32 USB HS PHY Controller driver"
        depends on ARCH_STM32 || COMPILE_TEST
+       depends on COMMON_CLK
        select GENERIC_PHY
        help
          Enable this to support the High-Speed USB transceivers that are part
index d08fbb180e4323c998fdd75eb1430a130f1ddc7a..c184f4e3458447946ab4a9adbdced8f271890f1a 100644 (file)
@@ -7,6 +7,7 @@
  */
 #include <linux/bitfield.h>
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/iopoll.h>
 #include <linux/kernel.h>
@@ -70,6 +71,7 @@ struct stm32_usbphyc {
        struct regulator *vdda1v1;
        struct regulator *vdda1v8;
        atomic_t n_pll_cons;
+       struct clk_hw clk48_hw;
        int switch_setup;
 };
 
@@ -295,6 +297,61 @@ static const struct phy_ops stm32_usbphyc_phy_ops = {
        .owner = THIS_MODULE,
 };
 
+static int stm32_usbphyc_clk48_prepare(struct clk_hw *hw)
+{
+       struct stm32_usbphyc *usbphyc = container_of(hw, struct stm32_usbphyc, clk48_hw);
+
+       return stm32_usbphyc_pll_enable(usbphyc);
+}
+
+static void stm32_usbphyc_clk48_unprepare(struct clk_hw *hw)
+{
+       struct stm32_usbphyc *usbphyc = container_of(hw, struct stm32_usbphyc, clk48_hw);
+
+       stm32_usbphyc_pll_disable(usbphyc);
+}
+
+static unsigned long stm32_usbphyc_clk48_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+       return 48000000;
+}
+
+static const struct clk_ops usbphyc_clk48_ops = {
+       .prepare = stm32_usbphyc_clk48_prepare,
+       .unprepare = stm32_usbphyc_clk48_unprepare,
+       .recalc_rate = stm32_usbphyc_clk48_recalc_rate,
+};
+
+static void stm32_usbphyc_clk48_unregister(void *data)
+{
+       struct stm32_usbphyc *usbphyc = data;
+
+       of_clk_del_provider(usbphyc->dev->of_node);
+       clk_hw_unregister(&usbphyc->clk48_hw);
+}
+
+static int stm32_usbphyc_clk48_register(struct stm32_usbphyc *usbphyc)
+{
+       struct device_node *node = usbphyc->dev->of_node;
+       struct clk_init_data init = { };
+       int ret = 0;
+
+       init.name = "ck_usbo_48m";
+       init.ops = &usbphyc_clk48_ops;
+
+       usbphyc->clk48_hw.init = &init;
+
+       ret = clk_hw_register(usbphyc->dev, &usbphyc->clk48_hw);
+       if (ret)
+               return ret;
+
+       ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &usbphyc->clk48_hw);
+       if (ret)
+               clk_hw_unregister(&usbphyc->clk48_hw);
+
+       return ret;
+}
+
 static void stm32_usbphyc_switch_setup(struct stm32_usbphyc *usbphyc,
                                       u32 utmi_switch)
 {
@@ -473,6 +530,12 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
                goto clk_disable;
        }
 
+       ret = stm32_usbphyc_clk48_register(usbphyc);
+       if (ret) {
+               dev_err(dev, "failed to register ck_usbo_48m clock: %d\n", ret);
+               goto clk_disable;
+       }
+
        version = readl_relaxed(usbphyc->base + STM32_USBPHYC_VERSION);
        dev_info(dev, "registered rev:%lu.%lu\n",
                 FIELD_GET(MAJREV, version), FIELD_GET(MINREV, version));
@@ -497,6 +560,8 @@ static int stm32_usbphyc_remove(struct platform_device *pdev)
                if (usbphyc->phys[port]->active)
                        stm32_usbphyc_phy_exit(usbphyc->phys[port]->phy);
 
+       stm32_usbphyc_clk48_unregister(usbphyc);
+
        clk_disable_unprepare(usbphyc->clk);
 
        return 0;
index c9cfafe89cbf1ccf22bddad08b810a7259ad98fc..9eb6d37c907ead08f83b1b92bf14e6a75219bed5 100644 (file)
@@ -7,6 +7,8 @@
  */
 
 #include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-ti.h>
+#include <linux/slab.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/gpio.h>
 #define WIZ_SERDES_RST         0x40c
 #define WIZ_SERDES_TYPEC       0x410
 #define WIZ_LANECTL(n)         (0x480 + (0x40 * (n)))
+#define WIZ_LANEDIV(n)         (0x484 + (0x40 * (n)))
+
+#define WIZ_MAX_INPUT_CLOCKS   4
+/* To include mux clocks, divider clocks and gate clocks */
+#define WIZ_MAX_OUTPUT_CLOCKS  32
 
 #define WIZ_MAX_LANES          4
 #define WIZ_MUX_NUM_CLOCKS     3
@@ -52,8 +59,16 @@ enum wiz_refclk_div_sel {
        CMN_REFCLK1_DIG_DIV,
 };
 
+enum wiz_clock_input {
+       WIZ_CORE_REFCLK,
+       WIZ_EXT_REFCLK,
+       WIZ_CORE_REFCLK1,
+       WIZ_EXT_REFCLK1,
+};
+
 static const struct reg_field por_en = REG_FIELD(WIZ_SERDES_CTRL, 31, 31);
 static const struct reg_field phy_reset_n = REG_FIELD(WIZ_SERDES_RST, 31, 31);
+static const struct reg_field phy_en_refclk = REG_FIELD(WIZ_SERDES_RST, 30, 30);
 static const struct reg_field pll1_refclk_mux_sel =
                                        REG_FIELD(WIZ_SERDES_RST, 29, 29);
 static const struct reg_field pll0_refclk_mux_sel =
@@ -70,6 +85,12 @@ static const struct reg_field pma_cmn_refclk_dig_div =
                                        REG_FIELD(WIZ_SERDES_TOP_CTRL, 26, 27);
 static const struct reg_field pma_cmn_refclk1_dig_div =
                                        REG_FIELD(WIZ_SERDES_TOP_CTRL, 24, 25);
+static const char * const output_clk_names[] = {
+       [TI_WIZ_PLL0_REFCLK] = "pll0-refclk",
+       [TI_WIZ_PLL1_REFCLK] = "pll1-refclk",
+       [TI_WIZ_REFCLK_DIG] = "refclk-dig",
+       [TI_WIZ_PHY_EN_REFCLK] = "phy-en-refclk",
+};
 
 static const struct reg_field p_enable[WIZ_MAX_LANES] = {
        REG_FIELD(WIZ_LANECTL(0), 30, 31),
@@ -101,13 +122,34 @@ static const struct reg_field p_standard_mode[WIZ_MAX_LANES] = {
        REG_FIELD(WIZ_LANECTL(3), 24, 25),
 };
 
+static const struct reg_field p0_fullrt_div[WIZ_MAX_LANES] = {
+       REG_FIELD(WIZ_LANECTL(0), 22, 23),
+       REG_FIELD(WIZ_LANECTL(1), 22, 23),
+       REG_FIELD(WIZ_LANECTL(2), 22, 23),
+       REG_FIELD(WIZ_LANECTL(3), 22, 23),
+};
+
+static const struct reg_field p_mac_div_sel0[WIZ_MAX_LANES] = {
+       REG_FIELD(WIZ_LANEDIV(0), 16, 22),
+       REG_FIELD(WIZ_LANEDIV(1), 16, 22),
+       REG_FIELD(WIZ_LANEDIV(2), 16, 22),
+       REG_FIELD(WIZ_LANEDIV(3), 16, 22),
+};
+
+static const struct reg_field p_mac_div_sel1[WIZ_MAX_LANES] = {
+       REG_FIELD(WIZ_LANEDIV(0), 0, 8),
+       REG_FIELD(WIZ_LANEDIV(1), 0, 8),
+       REG_FIELD(WIZ_LANEDIV(2), 0, 8),
+       REG_FIELD(WIZ_LANEDIV(3), 0, 8),
+};
+
 static const struct reg_field typec_ln10_swap =
                                        REG_FIELD(WIZ_SERDES_TYPEC, 30, 30);
 
 struct wiz_clk_mux {
        struct clk_hw           hw;
        struct regmap_field     *field;
-       u32                     *table;
+       const u32               *table;
        struct clk_init_data    clk_data;
 };
 
@@ -123,18 +165,26 @@ struct wiz_clk_divider {
 #define to_wiz_clk_div(_hw) container_of(_hw, struct wiz_clk_divider, hw)
 
 struct wiz_clk_mux_sel {
-       struct regmap_field     *field;
-       u32                     table[4];
+       u32                     table[WIZ_MAX_INPUT_CLOCKS];
        const char              *node_name;
+       u32                     num_parents;
+       u32                     parents[WIZ_MAX_INPUT_CLOCKS];
 };
 
 struct wiz_clk_div_sel {
-       struct regmap_field     *field;
-       const struct clk_div_table      *table;
+       const struct clk_div_table *table;
        const char              *node_name;
 };
 
-static struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
+struct wiz_phy_en_refclk {
+       struct clk_hw           hw;
+       struct regmap_field     *phy_en_refclk;
+       struct clk_init_data    clk_data;
+};
+
+#define to_wiz_phy_en_refclk(_hw) container_of(_hw, struct wiz_phy_en_refclk, hw)
+
+static const struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
        {
                /*
                 * Mux value to be configured for each of the input clocks
@@ -153,20 +203,26 @@ static struct wiz_clk_mux_sel clk_mux_sel_16g[] = {
        },
 };
 
-static struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
+static const struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
        {
                /*
                 * Mux value to be configured for each of the input clocks
                 * in the order populated in device tree
                 */
+               .num_parents = 2,
+               .parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK },
                .table = { 1, 0 },
                .node_name = "pll0-refclk",
        },
        {
+               .num_parents = 2,
+               .parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK },
                .table = { 1, 0 },
                .node_name = "pll1-refclk",
        },
        {
+               .num_parents = 2,
+               .parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK },
                .table = { 1, 0 },
                .node_name = "refclk-dig",
        },
@@ -179,7 +235,7 @@ static const struct clk_div_table clk_div_table[] = {
        { .val = 3, .div = 8, },
 };
 
-static struct wiz_clk_div_sel clk_div_sel[] = {
+static const struct wiz_clk_div_sel clk_div_sel[] = {
        {
                .table = clk_div_table,
                .node_name = "cmn-refclk-dig-div",
@@ -193,6 +249,7 @@ static struct wiz_clk_div_sel clk_div_sel[] = {
 enum wiz_type {
        J721E_WIZ_16G,
        J721E_WIZ_10G,
+       AM64_WIZ_10G,
 };
 
 #define WIZ_TYPEC_DIR_DEBOUNCE_MIN     100     /* ms */
@@ -201,19 +258,25 @@ enum wiz_type {
 struct wiz {
        struct regmap           *regmap;
        enum wiz_type           type;
-       struct wiz_clk_mux_sel  *clk_mux_sel;
-       struct wiz_clk_div_sel  *clk_div_sel;
+       const struct wiz_clk_mux_sel *clk_mux_sel;
+       const struct wiz_clk_div_sel *clk_div_sel;
        unsigned int            clk_div_sel_num;
        struct regmap_field     *por_en;
        struct regmap_field     *phy_reset_n;
+       struct regmap_field     *phy_en_refclk;
        struct regmap_field     *p_enable[WIZ_MAX_LANES];
        struct regmap_field     *p_align[WIZ_MAX_LANES];
        struct regmap_field     *p_raw_auto_start[WIZ_MAX_LANES];
        struct regmap_field     *p_standard_mode[WIZ_MAX_LANES];
+       struct regmap_field     *p_mac_div_sel0[WIZ_MAX_LANES];
+       struct regmap_field     *p_mac_div_sel1[WIZ_MAX_LANES];
+       struct regmap_field     *p0_fullrt_div[WIZ_MAX_LANES];
        struct regmap_field     *pma_cmn_refclk_int_mode;
        struct regmap_field     *pma_cmn_refclk_mode;
        struct regmap_field     *pma_cmn_refclk_dig_div;
        struct regmap_field     *pma_cmn_refclk1_dig_div;
+       struct regmap_field     *mux_sel_field[WIZ_MUX_NUM_CLOCKS];
+       struct regmap_field     *div_sel_field[WIZ_DIV_NUM_CLOCKS_16G];
        struct regmap_field     *typec_ln10_swap;
 
        struct device           *dev;
@@ -223,6 +286,9 @@ struct wiz {
        struct gpio_desc        *gpio_typec_dir;
        int                     typec_dir_delay;
        u32 lane_phy_type[WIZ_MAX_LANES];
+       struct clk              *input_clks[WIZ_MAX_INPUT_CLOCKS];
+       struct clk              *output_clks[WIZ_MAX_OUTPUT_CLOCKS];
+       struct clk_onecell_data clk_data;
 };
 
 static int wiz_reset(struct wiz *wiz)
@@ -242,6 +308,27 @@ static int wiz_reset(struct wiz *wiz)
        return 0;
 }
 
+static int wiz_p_mac_div_sel(struct wiz *wiz)
+{
+       u32 num_lanes = wiz->num_lanes;
+       int ret;
+       int i;
+
+       for (i = 0; i < num_lanes; i++) {
+               if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII) {
+                       ret = regmap_field_write(wiz->p_mac_div_sel0[i], 1);
+                       if (ret)
+                               return ret;
+
+                       ret = regmap_field_write(wiz->p_mac_div_sel1[i], 2);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
 static int wiz_mode_select(struct wiz *wiz)
 {
        u32 num_lanes = wiz->num_lanes;
@@ -252,8 +339,10 @@ static int wiz_mode_select(struct wiz *wiz)
        for (i = 0; i < num_lanes; i++) {
                if (wiz->lane_phy_type[i] == PHY_TYPE_DP)
                        mode = LANE_MODE_GEN1;
+               else if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII)
+                       mode = LANE_MODE_GEN2;
                else
-                       mode = LANE_MODE_GEN4;
+                       continue;
 
                ret = regmap_field_write(wiz->p_standard_mode[i], mode);
                if (ret)
@@ -299,6 +388,12 @@ static int wiz_init(struct wiz *wiz)
                return ret;
        }
 
+       ret = wiz_p_mac_div_sel(wiz);
+       if (ret) {
+               dev_err(dev, "Configuring P0 MAC DIV SEL failed\n");
+               return ret;
+       }
+
        ret = wiz_init_raw_interface(wiz, true);
        if (ret) {
                dev_err(dev, "WIZ interface initialization failed\n");
@@ -310,8 +405,6 @@ static int wiz_init(struct wiz *wiz)
 
 static int wiz_regfield_init(struct wiz *wiz)
 {
-       struct wiz_clk_mux_sel *clk_mux_sel;
-       struct wiz_clk_div_sel *clk_div_sel;
        struct regmap *regmap = wiz->regmap;
        int num_lanes = wiz->num_lanes;
        struct device *dev = wiz->dev;
@@ -344,54 +437,49 @@ static int wiz_regfield_init(struct wiz *wiz)
                return PTR_ERR(wiz->pma_cmn_refclk_mode);
        }
 
-       clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK_DIG_DIV];
-       clk_div_sel->field = devm_regmap_field_alloc(dev, regmap,
-                                                    pma_cmn_refclk_dig_div);
-       if (IS_ERR(clk_div_sel->field)) {
+       wiz->div_sel_field[CMN_REFCLK_DIG_DIV] =
+               devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_dig_div);
+       if (IS_ERR(wiz->div_sel_field[CMN_REFCLK_DIG_DIV])) {
                dev_err(dev, "PMA_CMN_REFCLK_DIG_DIV reg field init failed\n");
-               return PTR_ERR(clk_div_sel->field);
+               return PTR_ERR(wiz->div_sel_field[CMN_REFCLK_DIG_DIV]);
        }
 
        if (wiz->type == J721E_WIZ_16G) {
-               clk_div_sel = &wiz->clk_div_sel[CMN_REFCLK1_DIG_DIV];
-               clk_div_sel->field =
+               wiz->div_sel_field[CMN_REFCLK1_DIG_DIV] =
                        devm_regmap_field_alloc(dev, regmap,
                                                pma_cmn_refclk1_dig_div);
-               if (IS_ERR(clk_div_sel->field)) {
+               if (IS_ERR(wiz->div_sel_field[CMN_REFCLK1_DIG_DIV])) {
                        dev_err(dev, "PMA_CMN_REFCLK1_DIG_DIV reg field init failed\n");
-                       return PTR_ERR(clk_div_sel->field);
+                       return PTR_ERR(wiz->div_sel_field[CMN_REFCLK1_DIG_DIV]);
                }
        }
 
-       clk_mux_sel = &wiz->clk_mux_sel[PLL0_REFCLK];
-       clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
-                                                    pll0_refclk_mux_sel);
-       if (IS_ERR(clk_mux_sel->field)) {
+       wiz->mux_sel_field[PLL0_REFCLK] =
+               devm_regmap_field_alloc(dev, regmap, pll0_refclk_mux_sel);
+       if (IS_ERR(wiz->mux_sel_field[PLL0_REFCLK])) {
                dev_err(dev, "PLL0_REFCLK_SEL reg field init failed\n");
-               return PTR_ERR(clk_mux_sel->field);
+               return PTR_ERR(wiz->mux_sel_field[PLL0_REFCLK]);
        }
 
-       clk_mux_sel = &wiz->clk_mux_sel[PLL1_REFCLK];
-       clk_mux_sel->field = devm_regmap_field_alloc(dev, regmap,
-                                                    pll1_refclk_mux_sel);
-       if (IS_ERR(clk_mux_sel->field)) {
+       wiz->mux_sel_field[PLL1_REFCLK] =
+               devm_regmap_field_alloc(dev, regmap, pll1_refclk_mux_sel);
+       if (IS_ERR(wiz->mux_sel_field[PLL1_REFCLK])) {
                dev_err(dev, "PLL1_REFCLK_SEL reg field init failed\n");
-               return PTR_ERR(clk_mux_sel->field);
+               return PTR_ERR(wiz->mux_sel_field[PLL1_REFCLK]);
        }
 
-       clk_mux_sel = &wiz->clk_mux_sel[REFCLK_DIG];
-       if (wiz->type == J721E_WIZ_10G)
-               clk_mux_sel->field =
+       if (wiz->type == J721E_WIZ_10G || wiz->type == AM64_WIZ_10G)
+               wiz->mux_sel_field[REFCLK_DIG] =
                        devm_regmap_field_alloc(dev, regmap,
                                                refclk_dig_sel_10g);
        else
-               clk_mux_sel->field =
+               wiz->mux_sel_field[REFCLK_DIG] =
                        devm_regmap_field_alloc(dev, regmap,
                                                refclk_dig_sel_16g);
 
-       if (IS_ERR(clk_mux_sel->field)) {
+       if (IS_ERR(wiz->mux_sel_field[REFCLK_DIG])) {
                dev_err(dev, "REFCLK_DIG_SEL reg field init failed\n");
-               return PTR_ERR(clk_mux_sel->field);
+               return PTR_ERR(wiz->mux_sel_field[REFCLK_DIG]);
        }
 
        for (i = 0; i < num_lanes; i++) {
@@ -424,6 +512,28 @@ static int wiz_regfield_init(struct wiz *wiz)
                                i);
                        return PTR_ERR(wiz->p_standard_mode[i]);
                }
+
+               wiz->p0_fullrt_div[i] = devm_regmap_field_alloc(dev, regmap, p0_fullrt_div[i]);
+               if (IS_ERR(wiz->p0_fullrt_div[i])) {
+                       dev_err(dev, "P%d_FULLRT_DIV reg field init failed\n", i);
+                       return PTR_ERR(wiz->p0_fullrt_div[i]);
+               }
+
+               wiz->p_mac_div_sel0[i] =
+                 devm_regmap_field_alloc(dev, regmap, p_mac_div_sel0[i]);
+               if (IS_ERR(wiz->p_mac_div_sel0[i])) {
+                       dev_err(dev, "P%d_MAC_DIV_SEL0 reg field init fail\n",
+                               i);
+                       return PTR_ERR(wiz->p_mac_div_sel0[i]);
+               }
+
+               wiz->p_mac_div_sel1[i] =
+                 devm_regmap_field_alloc(dev, regmap, p_mac_div_sel1[i]);
+               if (IS_ERR(wiz->p_mac_div_sel1[i])) {
+                       dev_err(dev, "P%d_MAC_DIV_SEL1 reg field init fail\n",
+                               i);
+                       return PTR_ERR(wiz->p_mac_div_sel1[i]);
+               }
        }
 
        wiz->typec_ln10_swap = devm_regmap_field_alloc(dev, regmap,
@@ -433,6 +543,76 @@ static int wiz_regfield_init(struct wiz *wiz)
                return PTR_ERR(wiz->typec_ln10_swap);
        }
 
+       wiz->phy_en_refclk = devm_regmap_field_alloc(dev, regmap, phy_en_refclk);
+       if (IS_ERR(wiz->phy_en_refclk)) {
+               dev_err(dev, "PHY_EN_REFCLK reg field init failed\n");
+               return PTR_ERR(wiz->phy_en_refclk);
+       }
+
+       return 0;
+}
+
+static int wiz_phy_en_refclk_enable(struct clk_hw *hw)
+{
+       struct wiz_phy_en_refclk *wiz_phy_en_refclk = to_wiz_phy_en_refclk(hw);
+       struct regmap_field *phy_en_refclk = wiz_phy_en_refclk->phy_en_refclk;
+
+       regmap_field_write(phy_en_refclk, 1);
+
+       return 0;
+}
+
+static void wiz_phy_en_refclk_disable(struct clk_hw *hw)
+{
+       struct wiz_phy_en_refclk *wiz_phy_en_refclk = to_wiz_phy_en_refclk(hw);
+       struct regmap_field *phy_en_refclk = wiz_phy_en_refclk->phy_en_refclk;
+
+       regmap_field_write(phy_en_refclk, 0);
+}
+
+static int wiz_phy_en_refclk_is_enabled(struct clk_hw *hw)
+{
+       struct wiz_phy_en_refclk *wiz_phy_en_refclk = to_wiz_phy_en_refclk(hw);
+       struct regmap_field *phy_en_refclk = wiz_phy_en_refclk->phy_en_refclk;
+       int val;
+
+       regmap_field_read(phy_en_refclk, &val);
+
+       return !!val;
+}
+
+static const struct clk_ops wiz_phy_en_refclk_ops = {
+       .enable = wiz_phy_en_refclk_enable,
+       .disable = wiz_phy_en_refclk_disable,
+       .is_enabled = wiz_phy_en_refclk_is_enabled,
+};
+
+static int wiz_phy_en_refclk_register(struct wiz *wiz)
+{
+       struct wiz_phy_en_refclk *wiz_phy_en_refclk;
+       struct device *dev = wiz->dev;
+       struct clk_init_data *init;
+       struct clk *clk;
+
+       wiz_phy_en_refclk = devm_kzalloc(dev, sizeof(*wiz_phy_en_refclk), GFP_KERNEL);
+       if (!wiz_phy_en_refclk)
+               return -ENOMEM;
+
+       init = &wiz_phy_en_refclk->clk_data;
+
+       init->ops = &wiz_phy_en_refclk_ops;
+       init->flags = 0;
+       init->name = output_clk_names[TI_WIZ_PHY_EN_REFCLK];
+
+       wiz_phy_en_refclk->phy_en_refclk = wiz->phy_en_refclk;
+       wiz_phy_en_refclk->hw.init = init;
+
+       clk = devm_clk_register(dev, &wiz_phy_en_refclk->hw);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       wiz->output_clks[TI_WIZ_PHY_EN_REFCLK] = clk;
+
        return 0;
 }
 
@@ -443,7 +623,7 @@ static u8 wiz_clk_mux_get_parent(struct clk_hw *hw)
        unsigned int val;
 
        regmap_field_read(field, &val);
-       return clk_mux_val_to_index(hw, mux->table, 0, val);
+       return clk_mux_val_to_index(hw, (u32 *)mux->table, 0, val);
 }
 
 static int wiz_clk_mux_set_parent(struct clk_hw *hw, u8 index)
@@ -461,8 +641,69 @@ static const struct clk_ops wiz_clk_mux_ops = {
        .get_parent = wiz_clk_mux_get_parent,
 };
 
-static int wiz_mux_clk_register(struct wiz *wiz, struct device_node *node,
-                               struct regmap_field *field, u32 *table)
+static int wiz_mux_clk_register(struct wiz *wiz, struct regmap_field *field,
+                               const struct wiz_clk_mux_sel *mux_sel, int clk_index)
+{
+       struct device *dev = wiz->dev;
+       struct clk_init_data *init;
+       const char **parent_names;
+       unsigned int num_parents;
+       struct wiz_clk_mux *mux;
+       char clk_name[100];
+       struct clk *clk;
+       int ret = 0, i;
+
+       mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+       if (!mux)
+               return -ENOMEM;
+
+       num_parents = mux_sel->num_parents;
+
+       parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+       if (!parent_names)
+               return -ENOMEM;
+
+       for (i = 0; i < num_parents; i++) {
+               clk = wiz->input_clks[mux_sel->parents[i]];
+               if (IS_ERR_OR_NULL(clk)) {
+                       dev_err(dev, "Failed to get parent clk for %s\n",
+                               output_clk_names[clk_index]);
+                       ret = -EINVAL;
+                       goto err;
+               }
+               parent_names[i] = __clk_get_name(clk);
+       }
+
+       snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev), output_clk_names[clk_index]);
+
+       init = &mux->clk_data;
+
+       init->ops = &wiz_clk_mux_ops;
+       init->flags = CLK_SET_RATE_NO_REPARENT;
+       init->parent_names = parent_names;
+       init->num_parents = num_parents;
+       init->name = clk_name;
+
+       mux->field = field;
+       mux->table = mux_sel->table;
+       mux->hw.init = init;
+
+       clk = devm_clk_register(dev, &mux->hw);
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               goto err;
+       }
+
+       wiz->output_clks[clk_index] = clk;
+
+err:
+       kfree(parent_names);
+
+       return ret;
+}
+
+static int wiz_mux_of_clk_register(struct wiz *wiz, struct device_node *node,
+                                  struct regmap_field *field, const u32 *table)
 {
        struct device *dev = wiz->dev;
        struct clk_init_data *init;
@@ -606,20 +847,70 @@ static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node,
 
 static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
 {
-       struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+       const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+       struct device *dev = wiz->dev;
        struct device_node *clk_node;
        int i;
 
+       if (wiz->type == AM64_WIZ_10G) {
+               of_clk_del_provider(dev->of_node);
+               return;
+       }
+
        for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
                clk_node = of_get_child_by_name(node, clk_mux_sel[i].node_name);
                of_clk_del_provider(clk_node);
                of_node_put(clk_node);
        }
+
+       for (i = 0; i < wiz->clk_div_sel_num; i++) {
+               clk_node = of_get_child_by_name(node, clk_div_sel[i].node_name);
+               of_clk_del_provider(clk_node);
+               of_node_put(clk_node);
+       }
+
+       of_clk_del_provider(wiz->dev->of_node);
+}
+
+static int wiz_clock_register(struct wiz *wiz)
+{
+       const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+       struct device *dev = wiz->dev;
+       struct device_node *node = dev->of_node;
+       int clk_index;
+       int ret;
+       int i;
+
+       if (wiz->type != AM64_WIZ_10G)
+               return 0;
+
+       clk_index = TI_WIZ_PLL0_REFCLK;
+       for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++, clk_index++) {
+               ret = wiz_mux_clk_register(wiz, wiz->mux_sel_field[i], &clk_mux_sel[i], clk_index);
+               if (ret) {
+                       dev_err(dev, "Failed to register clk: %s\n", output_clk_names[clk_index]);
+                       return ret;
+               }
+       }
+
+       ret = wiz_phy_en_refclk_register(wiz);
+       if (ret) {
+               dev_err(dev, "Failed to add phy-en-refclk\n");
+               return ret;
+       }
+
+       wiz->clk_data.clks = wiz->output_clks;
+       wiz->clk_data.clk_num = WIZ_MAX_OUTPUT_CLOCKS;
+       ret = of_clk_add_provider(node, of_clk_src_onecell_get, &wiz->clk_data);
+       if (ret)
+               dev_err(dev, "Failed to add clock provider: %s\n", node->name);
+
+       return ret;
 }
 
 static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
 {
-       struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
+       const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel;
        struct device *dev = wiz->dev;
        struct device_node *clk_node;
        const char *node_name;
@@ -634,6 +925,7 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
                ret = PTR_ERR(clk);
                return ret;
        }
+       wiz->input_clks[WIZ_CORE_REFCLK] = clk;
 
        rate = clk_get_rate(clk);
        if (rate >= 100000000)
@@ -647,6 +939,7 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
                ret = PTR_ERR(clk);
                return ret;
        }
+       wiz->input_clks[WIZ_EXT_REFCLK] = clk;
 
        rate = clk_get_rate(clk);
        if (rate >= 100000000)
@@ -654,6 +947,13 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
        else
                regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2);
 
+       if (wiz->type == AM64_WIZ_10G) {
+               ret = wiz_clock_register(wiz);
+               if (ret)
+                       dev_err(dev, "Failed to register wiz clocks\n");
+               return ret;
+       }
+
        for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
                node_name = clk_mux_sel[i].node_name;
                clk_node = of_get_child_by_name(node, node_name);
@@ -663,8 +963,8 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
                        goto err;
                }
 
-               ret = wiz_mux_clk_register(wiz, clk_node, clk_mux_sel[i].field,
-                                          clk_mux_sel[i].table);
+               ret = wiz_mux_of_clk_register(wiz, clk_node, wiz->mux_sel_field[i],
+                                             clk_mux_sel[i].table);
                if (ret) {
                        dev_err(dev, "Failed to register %s clock\n",
                                node_name);
@@ -684,7 +984,7 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
                        goto err;
                }
 
-               ret = wiz_div_clk_register(wiz, clk_node, clk_div_sel[i].field,
+               ret = wiz_div_clk_register(wiz, clk_node, wiz->div_sel_field[i],
                                           clk_div_sel[i].table);
                if (ret) {
                        dev_err(dev, "Failed to register %s clock\n",
@@ -719,6 +1019,17 @@ static int wiz_phy_reset_assert(struct reset_controller_dev *rcdev,
        return ret;
 }
 
+static int wiz_phy_fullrt_div(struct wiz *wiz, int lane)
+{
+       if (wiz->type != AM64_WIZ_10G)
+               return 0;
+
+       if (wiz->lane_phy_type[lane] == PHY_TYPE_PCIE)
+               return regmap_field_write(wiz->p0_fullrt_div[lane], 0x1);
+
+       return 0;
+}
+
 static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
                                  unsigned long id)
 {
@@ -742,6 +1053,10 @@ static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
                return ret;
        }
 
+       ret = wiz_phy_fullrt_div(wiz, id - 1);
+       if (ret)
+               return ret;
+
        if (wiz->lane_phy_type[id - 1] == PHY_TYPE_DP)
                ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE);
        else
@@ -769,6 +1084,9 @@ static const struct of_device_id wiz_id_table[] = {
        {
                .compatible = "ti,j721e-wiz-10g", .data = (void *)J721E_WIZ_10G
        },
+       {
+               .compatible = "ti,am64-wiz-10g", .data = (void *)AM64_WIZ_10G
+       },
        {}
 };
 MODULE_DEVICE_TABLE(of, wiz_id_table);
@@ -787,8 +1105,13 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
                u32 reg, num_lanes = 1, phy_type = PHY_NONE;
                int ret, i;
 
+               if (!(of_node_name_eq(subnode, "phy") ||
+                     of_node_name_eq(subnode, "link")))
+                       continue;
+
                ret = of_property_read_u32(subnode, "reg", &reg);
                if (ret) {
+                       of_node_put(subnode);
                        dev_err(dev,
                                "%s: Reading \"reg\" from \"%s\" failed: %d\n",
                                __func__, subnode->name, ret);
@@ -813,13 +1136,14 @@ static int wiz_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node;
        struct platform_device *serdes_pdev;
+       bool already_configured = false;
        struct device_node *child_node;
        struct regmap *regmap;
        struct resource res;
        void __iomem *base;
        struct wiz *wiz;
+       int ret, val, i;
        u32 num_lanes;
-       int ret;
 
        wiz = devm_kzalloc(dev, sizeof(*wiz), GFP_KERNEL);
        if (!wiz)
@@ -900,14 +1224,14 @@ static int wiz_probe(struct platform_device *pdev)
        wiz->dev = dev;
        wiz->regmap = regmap;
        wiz->num_lanes = num_lanes;
-       if (wiz->type == J721E_WIZ_10G)
+       if (wiz->type == J721E_WIZ_10G || wiz->type == AM64_WIZ_10G)
                wiz->clk_mux_sel = clk_mux_sel_10g;
        else
                wiz->clk_mux_sel = clk_mux_sel_16g;
 
        wiz->clk_div_sel = clk_div_sel;
 
-       if (wiz->type == J721E_WIZ_10G)
+       if (wiz->type == J721E_WIZ_10G || wiz->type == AM64_WIZ_10G)
                wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G;
        else
                wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_16G;
@@ -947,27 +1271,34 @@ static int wiz_probe(struct platform_device *pdev)
                goto err_get_sync;
        }
 
+       for (i = 0; i < wiz->num_lanes; i++) {
+               regmap_field_read(wiz->p_enable[i], &val);
+               if (val & (P_ENABLE | P_ENABLE_FORCE)) {
+                       already_configured = true;
+                       break;
+               }
+       }
+
+       if (!already_configured) {
+               ret = wiz_init(wiz);
+               if (ret) {
+                       dev_err(dev, "WIZ initialization failed\n");
+                       goto err_wiz_init;
+               }
+       }
+
        serdes_pdev = of_platform_device_create(child_node, NULL, dev);
        if (!serdes_pdev) {
                dev_WARN(dev, "Unable to create SERDES platform device\n");
                ret = -ENOMEM;
-               goto err_pdev_create;
-       }
-       wiz->serdes_pdev = serdes_pdev;
-
-       ret = wiz_init(wiz);
-       if (ret) {
-               dev_err(dev, "WIZ initialization failed\n");
                goto err_wiz_init;
        }
+       wiz->serdes_pdev = serdes_pdev;
 
        of_node_put(child_node);
        return 0;
 
 err_wiz_init:
-       of_platform_device_destroy(&serdes_pdev->dev, NULL);
-
-err_pdev_create:
        wiz_clock_cleanup(wiz, node);
 
 err_get_sync:
index d8d0cc11d187e98c31e023ca60da8969bea9d98b..a63213f5972a7e591061bf6dce32d05644528ba2 100644 (file)
@@ -7,15 +7,16 @@
  * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
  */
 #include <linux/module.h>
+#include <linux/bitfield.h>
 #include <linux/ulpi/driver.h>
 #include <linux/ulpi/regs.h>
 #include <linux/gpio/consumer.h>
 #include <linux/phy/ulpi_phy.h>
 
 #define TUSB1210_VENDOR_SPECIFIC2              0x80
-#define TUSB1210_VENDOR_SPECIFIC2_IHSTX_SHIFT  0
-#define TUSB1210_VENDOR_SPECIFIC2_ZHSDRV_SHIFT 4
-#define TUSB1210_VENDOR_SPECIFIC2_DP_SHIFT     6
+#define TUSB1210_VENDOR_SPECIFIC2_IHSTX_MASK   GENMASK(3, 0)
+#define TUSB1210_VENDOR_SPECIFIC2_ZHSDRV_MASK  GENMASK(5, 4)
+#define TUSB1210_VENDOR_SPECIFIC2_DP_MASK      BIT(6)
 
 struct tusb1210 {
        struct ulpi *ulpi;
@@ -118,22 +119,22 @@ static int tusb1210_probe(struct ulpi *ulpi)
         * diagram optimization and DP/DM swap.
         */
 
+       reg = ulpi_read(ulpi, TUSB1210_VENDOR_SPECIFIC2);
+
        /* High speed output drive strength configuration */
-       device_property_read_u8(&ulpi->dev, "ihstx", &val);
-       reg = val << TUSB1210_VENDOR_SPECIFIC2_IHSTX_SHIFT;
+       if (!device_property_read_u8(&ulpi->dev, "ihstx", &val))
+               u8p_replace_bits(&reg, val, (u8)TUSB1210_VENDOR_SPECIFIC2_IHSTX_MASK);
 
        /* High speed output impedance configuration */
-       device_property_read_u8(&ulpi->dev, "zhsdrv", &val);
-       reg |= val << TUSB1210_VENDOR_SPECIFIC2_ZHSDRV_SHIFT;
+       if (!device_property_read_u8(&ulpi->dev, "zhsdrv", &val))
+               u8p_replace_bits(&reg, val, (u8)TUSB1210_VENDOR_SPECIFIC2_ZHSDRV_MASK);
 
        /* DP/DM swap control */
-       device_property_read_u8(&ulpi->dev, "datapolarity", &val);
-       reg |= val << TUSB1210_VENDOR_SPECIFIC2_DP_SHIFT;
+       if (!device_property_read_u8(&ulpi->dev, "datapolarity", &val))
+               u8p_replace_bits(&reg, val, (u8)TUSB1210_VENDOR_SPECIFIC2_DP_MASK);
 
-       if (reg) {
-               ulpi_write(ulpi, TUSB1210_VENDOR_SPECIFIC2, reg);
-               tusb->vendor_specific2 = reg;
-       }
+       ulpi_write(ulpi, TUSB1210_VENDOR_SPECIFIC2, reg);
+       tusb->vendor_specific2 = reg;
 
        tusb->phy = ulpi_phy_create(ulpi, &phy_ops);
        if (IS_ERR(tusb->phy))
index 9887f908f5401a11e274159ab82b3c7272070709..812e5409d35958e75cf06a19efc1eec71124739a 100644 (file)
@@ -779,7 +779,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
 
        usb_remove_phy(&twl->phy);
        pm_runtime_get_sync(twl->dev);
-       cancel_delayed_work(&twl->id_workaround_work);
+       cancel_delayed_work_sync(&twl->id_workaround_work);
        device_remove_file(twl->dev, &dev_attr_vbus);
 
        /* set transceiver mode to power on defaults */
index 2b65f84a5f8937d7622953e7441ee2f29b78e075..35652152ce5d0062a381bf7c0a7b03b5837f2975 100644 (file)
@@ -208,6 +208,7 @@ struct xpsgtr_phy {
  * @gtr_mutex: mutex for locking
  * @phys: PHY lanes
  * @refclk_sscs: spread spectrum settings for the reference clocks
+ * @clk: reference clocks
  * @tx_term_fix: fix for GT issue
  * @saved_icm_cfg0: stored value of ICM CFG0 register
  * @saved_icm_cfg1: stored value of ICM CFG1 register
@@ -219,6 +220,7 @@ struct xpsgtr_dev {
        struct mutex gtr_mutex; /* mutex for locking */
        struct xpsgtr_phy phys[NUM_LANES];
        const struct xpsgtr_ssc *refclk_sscs[NUM_LANES];
+       struct clk *clk[NUM_LANES];
        bool tx_term_fix;
        unsigned int saved_icm_cfg0;
        unsigned int saved_icm_cfg1;
@@ -818,11 +820,15 @@ static struct phy *xpsgtr_xlate(struct device *dev,
 static int __maybe_unused xpsgtr_suspend(struct device *dev)
 {
        struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+       unsigned int i;
 
        /* Save the snapshot ICM_CFG registers. */
        gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
        gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
 
+       for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
+               clk_disable_unprepare(gtr_dev->clk[i]);
+
        return 0;
 }
 
@@ -832,6 +838,13 @@ static int __maybe_unused xpsgtr_resume(struct device *dev)
        unsigned int icm_cfg0, icm_cfg1;
        unsigned int i;
        bool skip_phy_init;
+       int err;
+
+       for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++) {
+               err = clk_prepare_enable(gtr_dev->clk[i]);
+               if (err)
+                       goto err_clk_put;
+       }
 
        icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
        icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
@@ -852,6 +865,12 @@ static int __maybe_unused xpsgtr_resume(struct device *dev)
                gtr_dev->phys[i].skip_phy_init = skip_phy_init;
 
        return 0;
+
+err_clk_put:
+       while (i--)
+               clk_disable_unprepare(gtr_dev->clk[i]);
+
+       return err;
 }
 
 static const struct dev_pm_ops xpsgtr_pm_ops = {
@@ -865,6 +884,7 @@ static const struct dev_pm_ops xpsgtr_pm_ops = {
 static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
 {
        unsigned int refclk;
+       int ret;
 
        for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->refclk_sscs); ++refclk) {
                unsigned long rate;
@@ -874,14 +894,22 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
 
                snprintf(name, sizeof(name), "ref%u", refclk);
                clk = devm_clk_get_optional(gtr_dev->dev, name);
-               if (IS_ERR(clk))
-                       return dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
-                                            "Failed to get reference clock %u\n",
-                                            refclk);
+               if (IS_ERR(clk)) {
+                       ret = dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
+                                           "Failed to get reference clock %u\n",
+                                           refclk);
+                       goto err_clk_put;
+               }
 
                if (!clk)
                        continue;
 
+               ret = clk_prepare_enable(clk);
+               if (ret)
+                       goto err_clk_put;
+
+               gtr_dev->clk[refclk] = clk;
+
                /*
                 * Get the spread spectrum (SSC) settings for the reference
                 * clock rate.
@@ -899,11 +927,18 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
                        dev_err(gtr_dev->dev,
                                "Invalid rate %lu for reference clock %u\n",
                                rate, refclk);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto err_clk_put;
                }
        }
 
        return 0;
+
+err_clk_put:
+       while (refclk--)
+               clk_disable_unprepare(gtr_dev->clk[refclk]);
+
+       return ret;
 }
 
 static int xpsgtr_probe(struct platform_device *pdev)
@@ -912,6 +947,7 @@ static int xpsgtr_probe(struct platform_device *pdev)
        struct xpsgtr_dev *gtr_dev;
        struct phy_provider *provider;
        unsigned int port;
+       unsigned int i;
        int ret;
 
        gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
@@ -951,7 +987,8 @@ static int xpsgtr_probe(struct platform_device *pdev)
                phy = devm_phy_create(&pdev->dev, np, &xpsgtr_phyops);
                if (IS_ERR(phy)) {
                        dev_err(&pdev->dev, "failed to create PHY\n");
-                       return PTR_ERR(phy);
+                       ret = PTR_ERR(phy);
+                       goto err_clk_put;
                }
 
                gtr_phy->phy = phy;
@@ -962,9 +999,16 @@ static int xpsgtr_probe(struct platform_device *pdev)
        provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
        if (IS_ERR(provider)) {
                dev_err(&pdev->dev, "registering provider failed\n");
-               return PTR_ERR(provider);
+               ret = PTR_ERR(provider);
+               goto err_clk_put;
        }
        return 0;
+
+err_clk_put:
+       for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
+               clk_disable_unprepare(gtr_dev->clk[i]);
+
+       return ret;
 }
 
 static const struct of_device_id xpsgtr_of_match[] = {
index 7d3370289938cb52d36d66da4b0492f02d36d92c..6e6825d17a1d149eef539b2e0c53f4df1a9125ca 100644 (file)
@@ -1604,8 +1604,8 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
        unsigned i, pin;
 #ifdef CONFIG_GPIOLIB
        struct pinctrl_gpio_range *range;
-       unsigned int gpio_num;
        struct gpio_chip *chip;
+       int gpio_num;
 #endif
 
        seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
@@ -1625,7 +1625,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
                seq_printf(s, "pin %d (%s) ", pin, desc->name);
 
 #ifdef CONFIG_GPIOLIB
-               gpio_num = 0;
+               gpio_num = -1;
                list_for_each_entry(range, &pctldev->gpio_ranges, node) {
                        if ((pin >= range->pin_base) &&
                            (pin < (range->pin_base + range->npins))) {
@@ -1633,10 +1633,12 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
                                break;
                        }
                }
-               chip = gpio_to_chip(gpio_num);
-               if (chip && chip->gpiodev && chip->gpiodev->base)
-                       seq_printf(s, "%u:%s ", gpio_num -
-                               chip->gpiodev->base, chip->label);
+               if (gpio_num >= 0)
+                       chip = gpio_to_chip(gpio_num);
+               else
+                       chip = NULL;
+               if (chip)
+                       seq_printf(s, "%u:%s ", gpio_num - chip->gpiodev->base, chip->label);
                else
                        seq_puts(s, "0:? ");
 #endif
index 7fdf4257df1ed1f1e53165359c3ce75996782845..ad4b446d588e653d539e90d6338f72667a847db3 100644 (file)
@@ -299,9 +299,9 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
 static const struct intel_community lbg_communities[] = {
        LBG_COMMUNITY(0, 0, 71),
        LBG_COMMUNITY(1, 72, 132),
-       LBG_COMMUNITY(3, 133, 144),
-       LBG_COMMUNITY(4, 145, 180),
-       LBG_COMMUNITY(5, 181, 246),
+       LBG_COMMUNITY(3, 133, 143),
+       LBG_COMMUNITY(4, 144, 178),
+       LBG_COMMUNITY(5, 179, 246),
 };
 
 static const struct intel_pinctrl_soc_data lbg_soc_data = {
index 5d21c6adf1ab6de9647a25d3fb17452d5509b2c8..1c7a288b59a5c52e072b72ae5e615388aa18d6be 100644 (file)
@@ -208,7 +208,7 @@ static ssize_t secure_boot_fuse_state_show(struct device *dev,
         * 0011 = version 1, 0111 = version 2, 1111 = version 3). Upper 4 bits
         * are a thermometer code indicating key programming has completed for
         * key n (same encodings as the start bits). This allows for detection
-        * of an interruption in the progamming process which has left the key
+        * of an interruption in the programming process which has left the key
         * partially programmed (and thus invalid). The process is to burn the
         * eFuse for the new key start bit, burn the key eFuses, then burn the
         * eFuse for the new key complete bit.
index b013445147dd5692e688a8808ab01a533b00bc4a..a9db2f32658f2bef0f255e60a5db91b1ddf6f1d2 100644 (file)
@@ -683,13 +683,13 @@ static int mlxreg_hotplug_probe(struct platform_device *pdev)
 
        err = devm_request_irq(&pdev->dev, priv->irq,
                               mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
-                              | IRQF_SHARED, "mlxreg-hotplug", priv);
+                              | IRQF_SHARED | IRQF_NO_AUTOEN,
+                              "mlxreg-hotplug", priv);
        if (err) {
                dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
                return err;
        }
 
-       disable_irq(priv->irq);
        spin_lock_init(&priv->lock);
        INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
        dev_set_drvdata(&pdev->dev, priv);
index 0847b2dc97bf5a0a141d0deea1612de9af3335d3..3105f651614ffca224523690626a5b398c45a532 100644 (file)
@@ -77,6 +77,53 @@ config SURFACE_AGGREGATOR_CDEV
          The provided interface is intended for debugging and development only,
          and should not be used otherwise.
 
+config SURFACE_AGGREGATOR_REGISTRY
+       tristate "Surface System Aggregator Module Device Registry"
+       depends on SURFACE_AGGREGATOR
+       depends on SURFACE_AGGREGATOR_BUS
+       help
+         Device-registry and device-hubs for Surface System Aggregator Module
+         (SSAM) devices.
+
+         Provides a module and driver which act as a device-registry for SSAM
+         client devices that cannot be detected automatically, e.g. via ACPI.
+         Such devices are instead provided via this registry and attached via
+         device hubs, also provided in this module.
+
+         Devices provided via this registry are:
+         - Platform profile (performance-/cooling-mode) device (5th- and later
+           generations).
+         - Battery/AC devices (7th-generation).
+         - HID input devices (7th-generation).
+
+         Select M (recommended) or Y here if you want support for the above
+         mentioned devices on the corresponding Surface models. Without this
+         module, the respective devices will not be instantiated and thus any
+         functionality provided by them will be missing, even when drivers for
+         these devices are present. In other words, this module only provides
+         the respective client devices. Drivers for these devices still need to
+         be selected via the other options.
+
+config SURFACE_DTX
+       tristate "Surface DTX (Detachment System) Driver"
+       depends on SURFACE_AGGREGATOR
+       depends on INPUT
+       help
+         Driver for the Surface Book clipboard detachment system (DTX).
+
+         On the Surface Book series devices, the display part containing the
+         CPU (called the clipboard) can be detached from the base (containing a
+         battery, the keyboard, and, optionally, a discrete GPU) by (if
+         necessary) unlocking and opening the latch connecting both parts.
+
+         This driver provides a user-space interface that can influence the
+         behavior of this process, which includes the option to abort it in
+         case the base is still in use or speed it up in case it is not.
+
+         Note that this module can be built without support for the Surface
+         Aggregator Bus (i.e. CONFIG_SURFACE_AGGREGATOR_BUS=n). In that case,
+         some devices, specifically the Surface Book 3, will not be supported.
+
 config SURFACE_GPE
        tristate "Surface GPE/Lid Support Driver"
        depends on DMI
@@ -105,6 +152,28 @@ config SURFACE_HOTPLUG
          Select M or Y here, if you want to (fully) support hot-plugging of
          dGPU devices on the Surface Book 2 and/or 3 during D3cold.
 
+config SURFACE_PLATFORM_PROFILE
+       tristate "Surface Platform Profile Driver"
+       depends on SURFACE_AGGREGATOR_REGISTRY
+       select ACPI_PLATFORM_PROFILE
+       help
+         Provides support for the ACPI platform profile on 5th- and later
+         generation Microsoft Surface devices.
+
+         More specifically, this driver provides ACPI platform profile support
+         on Microsoft Surface devices with a Surface System Aggregator Module
+         (SSAM) connected via the Surface Serial Hub (SSH / SAM-over-SSH). In
+         other words, this driver provides platform profile support on the
+         Surface Pro 5, Surface Book 2, Surface Laptop, Surface Laptop Go and
+         later. On those devices, the platform profile can significantly
+         influence cooling behavior, e.g. setting it to 'quiet' (default) or
+         'low-power' can significantly limit performance of the discrete GPU on
+         Surface Books, while in turn leading to lower power consumption and/or
+         less fan noise.
+
+         Select M or Y here, if you want to include ACPI platform profile
+         support on the above mentioned devices.
+
 config SURFACE_PRO3_BUTTON
        tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
        depends on INPUT
index 990424c5f0c93b69846f79c9f0461f7c0f338352..32889482de55b9e578a02632d95cff0680721e6d 100644 (file)
@@ -10,6 +10,9 @@ obj-$(CONFIG_SURFACE_3_POWER_OPREGION)        += surface3_power.o
 obj-$(CONFIG_SURFACE_ACPI_NOTIFY)      += surface_acpi_notify.o
 obj-$(CONFIG_SURFACE_AGGREGATOR)       += aggregator/
 obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV)  += surface_aggregator_cdev.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o
+obj-$(CONFIG_SURFACE_DTX)              += surface_dtx.o
 obj-$(CONFIG_SURFACE_GPE)              += surface_gpe.o
 obj-$(CONFIG_SURFACE_HOTPLUG)          += surface_hotplug.o
+obj-$(CONFIG_SURFACE_PLATFORM_PROFILE) += surface_platform_profile.o
 obj-$(CONFIG_SURFACE_PRO3_BUTTON)      += surfacepro3_button.o
index 5bcb59ed579dbab50c448682b3695d6480f1ca43..69e86cd599d38dce783d91b061ab5d3d6dbdf10b 100644 (file)
@@ -1040,7 +1040,7 @@ static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
        union acpi_object *obj;
        u64 val;
 
-       if (!(funcs & BIT(func)))
+       if (!(funcs & BIT_ULL(func)))
                return 0; /* Not supported, leave *ret at its default value */
 
        obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
@@ -1750,35 +1750,35 @@ EXPORT_SYMBOL_GPL(ssam_request_sync_with_buffer);
 
 /* -- Internal SAM requests. ------------------------------------------------ */
 
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
        .target_category = SSAM_SSH_TC_SAM,
        .target_id       = 0x01,
        .command_id      = 0x13,
        .instance_id     = 0x00,
 });
 
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
        .target_category = SSAM_SSH_TC_SAM,
        .target_id       = 0x01,
        .command_id      = 0x15,
        .instance_id     = 0x00,
 });
 
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
        .target_category = SSAM_SSH_TC_SAM,
        .target_id       = 0x01,
        .command_id      = 0x16,
        .instance_id     = 0x00,
 });
 
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
        .target_category = SSAM_SSH_TC_SAM,
        .target_id       = 0x01,
        .command_id      = 0x33,
        .instance_id     = 0x00,
 });
 
-static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
        .target_category = SSAM_SSH_TC_SAM,
        .target_id       = 0x01,
        .command_id      = 0x34,
@@ -2483,7 +2483,8 @@ int ssam_irq_setup(struct ssam_controller *ctrl)
         * interrupt, and let the SAM resume callback during the controller
         * resume process clear it.
         */
-       const int irqf = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING;
+       const int irqf = IRQF_SHARED | IRQF_ONESHOT |
+                        IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
 
        gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
        if (IS_ERR(gpiod))
@@ -2501,7 +2502,6 @@ int ssam_irq_setup(struct ssam_controller *ctrl)
                return status;
 
        ctrl->irq.num = irq;
-       disable_irq(ctrl->irq.num);
        return 0;
 }
 
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
new file mode 100644 (file)
index 0000000..685d37a
--- /dev/null
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module (SSAM) client device registry.
+ *
+ * Registry for non-platform/non-ACPI SSAM client devices, i.e. devices that
+ * cannot be auto-detected. Provides device-hubs and performs instantiation
+ * for these devices.
+ *
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+
+/* -- Device registry. ------------------------------------------------------ */
+
+/*
+ * SSAM device names follow the SSAM module alias, meaning they are prefixed
+ * with 'ssam:', followed by domain, category, target ID, instance ID, and
+ * function, each encoded as two-digit hexadecimal, separated by ':'. In other
+ * words, it follows the scheme
+ *
+ *      ssam:dd:cc:tt:ii:ff
+ *
+ * Where, 'dd', 'cc', 'tt', 'ii', and 'ff' are the two-digit hexadecimal
+ * values mentioned above, respectively.
+ */
+
+/* Root node. */
+static const struct software_node ssam_node_root = {
+       .name = "ssam_platform_hub",
+};
+
+/* Base device hub (devices attached to Surface Book 3 base). */
+static const struct software_node ssam_node_hub_base = {
+       .name = "ssam:00:00:02:00:00",
+       .parent = &ssam_node_root,
+};
+
+/* AC adapter. */
+static const struct software_node ssam_node_bat_ac = {
+       .name = "ssam:01:02:01:01:01",
+       .parent = &ssam_node_root,
+};
+
+/* Primary battery. */
+static const struct software_node ssam_node_bat_main = {
+       .name = "ssam:01:02:01:01:00",
+       .parent = &ssam_node_root,
+};
+
+/* Secondary battery (Surface Book 3). */
+static const struct software_node ssam_node_bat_sb3base = {
+       .name = "ssam:01:02:02:01:00",
+       .parent = &ssam_node_hub_base,
+};
+
+/* Platform profile / performance-mode device. */
+static const struct software_node ssam_node_tmp_pprof = {
+       .name = "ssam:01:03:01:00:01",
+       .parent = &ssam_node_root,
+};
+
+/* DTX / detachment-system device (Surface Book 3). */
+static const struct software_node ssam_node_bas_dtx = {
+       .name = "ssam:01:11:01:00:00",
+       .parent = &ssam_node_root,
+};
+
+/* HID keyboard. */
+static const struct software_node ssam_node_hid_main_keyboard = {
+       .name = "ssam:01:15:02:01:00",
+       .parent = &ssam_node_root,
+};
+
+/* HID touchpad. */
+static const struct software_node ssam_node_hid_main_touchpad = {
+       .name = "ssam:01:15:02:03:00",
+       .parent = &ssam_node_root,
+};
+
+/* HID device instance 5 (unknown HID device). */
+static const struct software_node ssam_node_hid_main_iid5 = {
+       .name = "ssam:01:15:02:05:00",
+       .parent = &ssam_node_root,
+};
+
+/* HID keyboard (base hub). */
+static const struct software_node ssam_node_hid_base_keyboard = {
+       .name = "ssam:01:15:02:01:00",
+       .parent = &ssam_node_hub_base,
+};
+
+/* HID touchpad (base hub). */
+static const struct software_node ssam_node_hid_base_touchpad = {
+       .name = "ssam:01:15:02:03:00",
+       .parent = &ssam_node_hub_base,
+};
+
+/* HID device instance 5 (unknown HID device, base hub). */
+static const struct software_node ssam_node_hid_base_iid5 = {
+       .name = "ssam:01:15:02:05:00",
+       .parent = &ssam_node_hub_base,
+};
+
+/* HID device instance 6 (unknown HID device, base hub). */
+static const struct software_node ssam_node_hid_base_iid6 = {
+       .name = "ssam:01:15:02:06:00",
+       .parent = &ssam_node_hub_base,
+};
+
+/* Devices for Surface Book 2. */
+static const struct software_node *ssam_node_group_sb2[] = {
+       &ssam_node_root,
+       &ssam_node_tmp_pprof,
+       NULL,
+};
+
+/* Devices for Surface Book 3. */
+static const struct software_node *ssam_node_group_sb3[] = {
+       &ssam_node_root,
+       &ssam_node_hub_base,
+       &ssam_node_bat_ac,
+       &ssam_node_bat_main,
+       &ssam_node_bat_sb3base,
+       &ssam_node_tmp_pprof,
+       &ssam_node_bas_dtx,
+       &ssam_node_hid_base_keyboard,
+       &ssam_node_hid_base_touchpad,
+       &ssam_node_hid_base_iid5,
+       &ssam_node_hid_base_iid6,
+       NULL,
+};
+
+/* Devices for Surface Laptop 1. */
+static const struct software_node *ssam_node_group_sl1[] = {
+       &ssam_node_root,
+       &ssam_node_tmp_pprof,
+       NULL,
+};
+
+/* Devices for Surface Laptop 2. */
+static const struct software_node *ssam_node_group_sl2[] = {
+       &ssam_node_root,
+       &ssam_node_tmp_pprof,
+       NULL,
+};
+
+/* Devices for Surface Laptop 3. */
+static const struct software_node *ssam_node_group_sl3[] = {
+       &ssam_node_root,
+       &ssam_node_bat_ac,
+       &ssam_node_bat_main,
+       &ssam_node_tmp_pprof,
+       &ssam_node_hid_main_keyboard,
+       &ssam_node_hid_main_touchpad,
+       &ssam_node_hid_main_iid5,
+       NULL,
+};
+
+/* Devices for Surface Laptop Go. */
+static const struct software_node *ssam_node_group_slg1[] = {
+       &ssam_node_root,
+       &ssam_node_bat_ac,
+       &ssam_node_bat_main,
+       &ssam_node_tmp_pprof,
+       NULL,
+};
+
+/* Devices for Surface Pro 5. */
+static const struct software_node *ssam_node_group_sp5[] = {
+       &ssam_node_root,
+       &ssam_node_tmp_pprof,
+       NULL,
+};
+
+/* Devices for Surface Pro 6. */
+static const struct software_node *ssam_node_group_sp6[] = {
+       &ssam_node_root,
+       &ssam_node_tmp_pprof,
+       NULL,
+};
+
+/* Devices for Surface Pro 7 and Surface Pro 7+. */
+static const struct software_node *ssam_node_group_sp7[] = {
+       &ssam_node_root,
+       &ssam_node_bat_ac,
+       &ssam_node_bat_main,
+       &ssam_node_tmp_pprof,
+       NULL,
+};
+
+
+/* -- Device registry helper functions. ------------------------------------- */
+
+/*
+ * ssam_uid_from_string() - Parse a SSAM device UID from its name string.
+ * @str: Device name in "ssam:dd:cc:tt:ii:ff" format (five two-digit
+ *       hexadecimal fields, see registry comment above).
+ * @uid: Output buffer; filled only on success.
+ *
+ * Return: 0 on success, -EINVAL if @str does not yield all five fields.
+ */
+static int ssam_uid_from_string(const char *str, struct ssam_device_uid *uid)
+{
+       u8 d, tc, tid, iid, fn;
+       int n;
+
+       n = sscanf(str, "ssam:%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
+       if (n != 5)
+               return -EINVAL;
+
+       uid->domain = d;
+       uid->category = tc;
+       uid->target = tid;
+       uid->instance = iid;
+       uid->function = fn;
+
+       return 0;
+}
+
+/*
+ * device_for_each_child() callback: remove @dev if it is a SSAM client
+ * device; silently skip any other child device type.
+ */
+static int ssam_hub_remove_devices_fn(struct device *dev, void *data)
+{
+       if (!is_ssam_device(dev))
+               return 0;
+
+       ssam_device_remove(to_ssam_device(dev));
+       return 0;
+}
+
+/*
+ * Remove all SSAM client devices registered under @parent, in reverse
+ * registration order (children torn down before earlier siblings).
+ */
+static void ssam_hub_remove_devices(struct device *parent)
+{
+       device_for_each_child_reverse(parent, NULL, ssam_hub_remove_devices_fn);
+}
+
+/*
+ * ssam_hub_add_device() - Instantiate a single SSAM client device from a
+ * firmware node.
+ * @parent: Device the new client device is registered under.
+ * @ctrl:   SSAM controller the client device communicates through.
+ * @node:   Firmware node whose name encodes the device UID.
+ *
+ * Return: 0 on success, -EINVAL if the node name is not a valid SSAM UID
+ * (i.e. the node does not describe a SSAM device), -ENOMEM on allocation
+ * failure, or the error from ssam_device_add().
+ */
+static int ssam_hub_add_device(struct device *parent, struct ssam_controller *ctrl,
+                              struct fwnode_handle *node)
+{
+       struct ssam_device_uid uid;
+       struct ssam_device *sdev;
+       int status;
+
+       status = ssam_uid_from_string(fwnode_get_name(node), &uid);
+       if (status)
+               return status;
+
+       sdev = ssam_device_alloc(ctrl, uid);
+       if (!sdev)
+               return -ENOMEM;
+
+       sdev->dev.parent = parent;
+       sdev->dev.fwnode = node;
+
+       /* On failure, drop the reference taken by ssam_device_alloc(). */
+       status = ssam_device_add(sdev);
+       if (status)
+               ssam_device_put(sdev);
+
+       return status;
+}
+
+/*
+ * ssam_hub_add_devices() - Instantiate SSAM client devices for all children
+ * of @node.
+ * @parent: Device the new client devices are registered under.
+ * @ctrl:   SSAM controller the client devices communicate through.
+ * @node:   Firmware node whose children describe the devices to add.
+ *
+ * Non-SSAM child nodes (signalled by -EINVAL from ssam_hub_add_device())
+ * are skipped. Any other error removes all devices added so far.
+ *
+ * Return: 0 on success, otherwise the first fatal error encountered.
+ */
+static int ssam_hub_add_devices(struct device *parent, struct ssam_controller *ctrl,
+                               struct fwnode_handle *node)
+{
+       struct fwnode_handle *child;
+       int status;
+
+       fwnode_for_each_child_node(node, child) {
+               /*
+                * Try to add the device specified in the firmware node. If
+                * this fails with -EINVAL, the node does not specify any SSAM
+                * device, so ignore it and continue with the next one.
+                */
+
+               status = ssam_hub_add_device(parent, ctrl, child);
+               if (status && status != -EINVAL)
+                       goto err;
+       }
+
+       return 0;
+err:
+       ssam_hub_remove_devices(parent);
+       return status;
+}
+
+
+/* -- SSAM base-hub driver. ------------------------------------------------- */
+
+/*
+ * Some devices (especially battery) may need a bit of time to be fully usable
+ * after being (re-)connected. This delay has been determined via
+ * experimentation.
+ */
+#define SSAM_BASE_UPDATE_CONNECT_DELAY         msecs_to_jiffies(2500)
+
+enum ssam_base_hub_state {
+       SSAM_BASE_HUB_UNINITIALIZED,
+       SSAM_BASE_HUB_CONNECTED,
+       SSAM_BASE_HUB_DISCONNECTED,
+};
+
+struct ssam_base_hub {
+       struct ssam_device *sdev;
+
+       enum ssam_base_hub_state state;
+       struct delayed_work update_work;
+
+       struct ssam_event_notifier notif;
+};
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
+       .target_category = SSAM_SSH_TC_BAS,
+       .target_id       = 0x01,
+       .command_id      = 0x0d,
+       .instance_id     = 0x00,
+});
+
+#define SSAM_BAS_OPMODE_TABLET         0x00
+#define SSAM_EVENT_BAS_CID_CONNECTION  0x0c
+
+/*
+ * ssam_base_hub_query_state() - Query whether the base is currently attached.
+ * @hub:   The base-hub instance.
+ * @state: Set to CONNECTED or DISCONNECTED on success; untouched on failure.
+ *
+ * Queries the EC operation mode; any mode other than "tablet" means a base
+ * is attached. Retries transient failures via ssam_retry().
+ *
+ * Return: 0 on success, negative errno if the query failed.
+ */
+static int ssam_base_hub_query_state(struct ssam_base_hub *hub, enum ssam_base_hub_state *state)
+{
+       u8 opmode;
+       int status;
+
+       status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode);
+       if (status < 0) {
+               dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status);
+               return status;
+       }
+
+       if (opmode != SSAM_BAS_OPMODE_TABLET)
+               *state = SSAM_BASE_HUB_CONNECTED;
+       else
+               *state = SSAM_BASE_HUB_DISCONNECTED;
+
+       return 0;
+}
+
+/*
+ * sysfs 'state' attribute: emits "1" when the hub considers the base
+ * connected, "0" otherwise (including the uninitialized state).
+ */
+static ssize_t ssam_base_hub_state_show(struct device *dev, struct device_attribute *attr,
+                                       char *buf)
+{
+       struct ssam_base_hub *hub = dev_get_drvdata(dev);
+       bool connected = hub->state == SSAM_BASE_HUB_CONNECTED;
+
+       return sysfs_emit(buf, "%d\n", connected);
+}
+
+static struct device_attribute ssam_base_hub_attr_state =
+       __ATTR(state, 0444, ssam_base_hub_state_show, NULL);
+
+static struct attribute *ssam_base_hub_attrs[] = {
+       &ssam_base_hub_attr_state.attr,
+       NULL,
+};
+
+static const struct attribute_group ssam_base_hub_group = {
+       .attrs = ssam_base_hub_attrs,
+};
+
+/*
+ * Deferred state-update work: re-query the base attachment state and, if it
+ * changed, add (on connect) or remove (on disconnect) the base's client
+ * devices. Runs from schedule_delayed_work() callers (event notifier,
+ * resume, probe).
+ */
+static void ssam_base_hub_update_workfn(struct work_struct *work)
+{
+       struct ssam_base_hub *hub = container_of(work, struct ssam_base_hub, update_work.work);
+       struct fwnode_handle *node = dev_fwnode(&hub->sdev->dev);
+       enum ssam_base_hub_state state;
+       int status = 0;
+
+       status = ssam_base_hub_query_state(hub, &state);
+       if (status)
+               return;
+
+       /* No transition: nothing to do. */
+       if (hub->state == state)
+               return;
+       hub->state = state;
+
+       if (hub->state == SSAM_BASE_HUB_CONNECTED)
+               status = ssam_hub_add_devices(&hub->sdev->dev, hub->sdev->ctrl, node);
+       else
+               ssam_hub_remove_devices(&hub->sdev->dev);
+
+       if (status)
+               dev_err(&hub->sdev->dev, "failed to update base-hub devices: %d\n", status);
+}
+
+/*
+ * SSAM event notifier: on a base connection-change event, schedule the
+ * deferred state update. The first payload byte is nonzero on connect
+ * (update delayed to let the base/EC settle) and zero on disconnect
+ * (update immediate).
+ */
+static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+       struct ssam_base_hub *hub = container_of(nf, struct ssam_base_hub, notif);
+       unsigned long delay;
+
+       if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
+               return 0;
+
+       if (event->length < 1) {
+               dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
+               return 0;
+       }
+
+       /*
+        * Delay update when the base is being connected to give devices/EC
+        * some time to set up.
+        */
+       delay = event->data[0] ? SSAM_BASE_UPDATE_CONNECT_DELAY : 0;
+
+       schedule_delayed_work(&hub->update_work, delay);
+
+       /*
+        * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
+        * consumed by the detachment system driver. We're just a (more or less)
+        * silent observer.
+        */
+       return 0;
+}
+
+/*
+ * PM resume: the base may have been (dis-)connected while suspended, so
+ * schedule an immediate state update.
+ */
+static int __maybe_unused ssam_base_hub_resume(struct device *dev)
+{
+       struct ssam_base_hub *hub = dev_get_drvdata(dev);
+
+       schedule_delayed_work(&hub->update_work, 0);
+       return 0;
+}
+static SIMPLE_DEV_PM_OPS(ssam_base_hub_pm_ops, NULL, ssam_base_hub_resume);
+
+/*
+ * ssam_base_hub_probe() - Set up the base-hub device: register the base
+ * connection-change notifier, expose the 'state' sysfs attribute, and
+ * schedule an initial state update to instantiate the base's client devices.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int ssam_base_hub_probe(struct ssam_device *sdev)
+{
+       struct ssam_base_hub *hub;
+       int status;
+
+       hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
+       if (!hub)
+               return -ENOMEM;
+
+       hub->sdev = sdev;
+       hub->state = SSAM_BASE_HUB_UNINITIALIZED;
+
+       hub->notif.base.priority = INT_MAX;  /* This notifier should run first. */
+       hub->notif.base.fn = ssam_base_hub_notif;
+       hub->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
+       hub->notif.event.id.target_category = SSAM_SSH_TC_BAS;
+       hub->notif.event.id.instance = 0;
+       hub->notif.event.mask = SSAM_EVENT_MASK_NONE;
+       hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+       INIT_DELAYED_WORK(&hub->update_work, ssam_base_hub_update_workfn);
+
+       ssam_device_set_drvdata(sdev, hub);
+
+       status = ssam_notifier_register(sdev->ctrl, &hub->notif);
+       if (status)
+               return status;
+
+       status = sysfs_create_group(&sdev->dev.kobj, &ssam_base_hub_group);
+       if (status)
+               goto err;
+
+       /* Kick off the initial state query / device instantiation. */
+       schedule_delayed_work(&hub->update_work, 0);
+       return 0;
+
+err:
+       ssam_notifier_unregister(sdev->ctrl, &hub->notif);
+       cancel_delayed_work_sync(&hub->update_work);
+       ssam_hub_remove_devices(&sdev->dev);
+       return status;
+}
+
+static void ssam_base_hub_remove(struct ssam_device *sdev)
+{
+       struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
+
+       sysfs_remove_group(&sdev->dev.kobj, &ssam_base_hub_group);
+
+       ssam_notifier_unregister(sdev->ctrl, &hub->notif);
+       cancel_delayed_work_sync(&hub->update_work);
+       ssam_hub_remove_devices(&sdev->dev);
+}
+
+static const struct ssam_device_id ssam_base_hub_match[] = {
+       { SSAM_VDEV(HUB, 0x02, SSAM_ANY_IID, 0x00) },
+       { },
+};
+
+static struct ssam_device_driver ssam_base_hub_driver = {
+       .probe = ssam_base_hub_probe,
+       .remove = ssam_base_hub_remove,
+       .match_table = ssam_base_hub_match,
+       .driver = {
+               .name = "surface_aggregator_base_hub",
+               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+               .pm = &ssam_base_hub_pm_ops,
+       },
+};
+
+
+/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
+
+static const struct acpi_device_id ssam_platform_hub_match[] = {
+       /* Surface Pro 4, 5, and 6 (OMBR < 0x10) */
+       { "MSHW0081", (unsigned long)ssam_node_group_sp5 },
+
+       /* Surface Pro 6 (OMBR >= 0x10) */
+       { "MSHW0111", (unsigned long)ssam_node_group_sp6 },
+
+       /* Surface Pro 7 */
+       { "MSHW0116", (unsigned long)ssam_node_group_sp7 },
+
+       /* Surface Pro 7+ */
+       { "MSHW0119", (unsigned long)ssam_node_group_sp7 },
+
+       /* Surface Book 2 */
+       { "MSHW0107", (unsigned long)ssam_node_group_sb2 },
+
+       /* Surface Book 3 */
+       { "MSHW0117", (unsigned long)ssam_node_group_sb3 },
+
+       /* Surface Laptop 1 */
+       { "MSHW0086", (unsigned long)ssam_node_group_sl1 },
+
+       /* Surface Laptop 2 */
+       { "MSHW0112", (unsigned long)ssam_node_group_sl2 },
+
+       /* Surface Laptop 3 (13", Intel) */
+       { "MSHW0114", (unsigned long)ssam_node_group_sl3 },
+
+       /* Surface Laptop 3 (15", AMD) */
+       { "MSHW0110", (unsigned long)ssam_node_group_sl3 },
+
+       /* Surface Laptop Go 1 */
+       { "MSHW0118", (unsigned long)ssam_node_group_slg1 },
+
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_match);
+
+/*
+ * ssam_platform_hub_probe() - Instantiate the SSAM client devices declared
+ * in the per-model software-node group matched via ACPI.
+ *
+ * Registers the node group, attaches its root as the platform device's
+ * secondary fwnode, and adds one SSAM client device per child node.
+ *
+ * Return: 0 on success, -ENODEV without match data, -EPROBE_DEFER while the
+ * controller is not yet available, or a registration error.
+ */
+static int ssam_platform_hub_probe(struct platform_device *pdev)
+{
+       const struct software_node **nodes;
+       struct ssam_controller *ctrl;
+       struct fwnode_handle *root;
+       int status;
+
+       nodes = (const struct software_node **)acpi_device_get_match_data(&pdev->dev);
+       if (!nodes)
+               return -ENODEV;
+
+       /*
+        * As we're adding the SSAM client devices as children under this device
+        * and not the SSAM controller, we need to add a device link to the
+        * controller to ensure that we remove all of our devices before the
+        * controller is removed. This also guarantees proper ordering for
+        * suspend/resume of the devices on this hub.
+        */
+       ctrl = ssam_client_bind(&pdev->dev);
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+       status = software_node_register_node_group(nodes);
+       if (status)
+               return status;
+
+       root = software_node_fwnode(&ssam_node_root);
+       if (!root) {
+               software_node_unregister_node_group(nodes);
+               return -ENOENT;
+       }
+
+       set_secondary_fwnode(&pdev->dev, root);
+
+       status = ssam_hub_add_devices(&pdev->dev, ctrl, root);
+       if (status) {
+               set_secondary_fwnode(&pdev->dev, NULL);
+               software_node_unregister_node_group(nodes);
+       }
+
+       /*
+        * NOTE(review): drvdata is set even on the error path; harmless since
+        * remove() is not called when probe fails, but worth confirming.
+        */
+       platform_set_drvdata(pdev, nodes);
+       return status;
+}
+
+/*
+ * Tear down in reverse order of probe: remove client devices, detach the
+ * secondary fwnode, then unregister the software-node group.
+ */
+static int ssam_platform_hub_remove(struct platform_device *pdev)
+{
+       const struct software_node **nodes = platform_get_drvdata(pdev);
+
+       ssam_hub_remove_devices(&pdev->dev);
+       set_secondary_fwnode(&pdev->dev, NULL);
+       software_node_unregister_node_group(nodes);
+       return 0;
+}
+
+static struct platform_driver ssam_platform_hub_driver = {
+       .probe = ssam_platform_hub_probe,
+       .remove = ssam_platform_hub_remove,
+       .driver = {
+               .name = "surface_aggregator_platform_hub",
+               .acpi_match_table = ssam_platform_hub_match,
+               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+       },
+};
+
+
+/* -- Module initialization. ------------------------------------------------ */
+
+/*
+ * Module init: register the platform (meta) hub driver first, then the SSAM
+ * base-hub driver; unwind the former if the latter fails.
+ */
+static int __init ssam_device_hub_init(void)
+{
+       int status;
+
+       status = platform_driver_register(&ssam_platform_hub_driver);
+       if (status)
+               return status;
+
+       status = ssam_device_driver_register(&ssam_base_hub_driver);
+       if (status)
+               platform_driver_unregister(&ssam_platform_hub_driver);
+
+       return status;
+}
+module_init(ssam_device_hub_init);
+
+/* Module exit: unregister the drivers in reverse order of registration. */
+static void __exit ssam_device_hub_exit(void)
+{
+       ssam_device_driver_unregister(&ssam_base_hub_driver);
+       platform_driver_unregister(&ssam_platform_hub_driver);
+}
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
new file mode 100644 (file)
index 0000000..63ce587
--- /dev/null
@@ -0,0 +1,1289 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface Book (gen. 2 and later) detachment system (DTX) driver.
+ *
+ * Provides a user-space interface to properly handle clipboard/tablet
+ * (containing screen and processor) detachment from the base of the device
+ * (containing the keyboard and optionally a discrete GPU). Allows to
+ * acknowledge (to speed things up), abort (e.g. in case the dGPU is still in
+ * use), or request detachment via user-space.
+ *
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/fs.h>
+#include <linux/input.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+#include <linux/surface_aggregator/dtx.h>
+
+
+/* -- SSAM interface. ------------------------------------------------------- */
+
+enum sam_event_cid_bas {
+       SAM_EVENT_CID_DTX_CONNECTION                    = 0x0c,
+       SAM_EVENT_CID_DTX_REQUEST                       = 0x0e,
+       SAM_EVENT_CID_DTX_CANCEL                        = 0x0f,
+       SAM_EVENT_CID_DTX_LATCH_STATUS                  = 0x11,
+};
+
+enum ssam_bas_base_state {
+       SSAM_BAS_BASE_STATE_DETACH_SUCCESS              = 0x00,
+       SSAM_BAS_BASE_STATE_ATTACHED                    = 0x01,
+       SSAM_BAS_BASE_STATE_NOT_FEASIBLE                = 0x02,
+};
+
+enum ssam_bas_latch_status {
+       SSAM_BAS_LATCH_STATUS_CLOSED                    = 0x00,
+       SSAM_BAS_LATCH_STATUS_OPENED                    = 0x01,
+       SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN            = 0x02,
+       SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN     = 0x03,
+       SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE           = 0x04,
+};
+
+enum ssam_bas_cancel_reason {
+       SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE             = 0x00,  /* Low battery. */
+       SSAM_BAS_CANCEL_REASON_TIMEOUT                  = 0x02,
+       SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN           = 0x03,
+       SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN    = 0x04,
+       SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE          = 0x05,
+};
+
+struct ssam_bas_base_info {
+       u8 state;
+       u8 base_id;
+} __packed;
+
+static_assert(sizeof(struct ssam_bas_base_info) == 2);
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
+       .target_category = SSAM_SSH_TC_BAS,
+       .target_id       = 0x01,
+       .command_id      = 0x06,
+       .instance_id     = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
+       .target_category = SSAM_SSH_TC_BAS,
+       .target_id       = 0x01,
+       .command_id      = 0x07,
+       .instance_id     = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
+       .target_category = SSAM_SSH_TC_BAS,
+       .target_id       = 0x01,
+       .command_id      = 0x08,
+       .instance_id     = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
+       .target_category = SSAM_SSH_TC_BAS,
+       .target_id       = 0x01,
+       .command_id      = 0x09,
+       .instance_id     = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
+       .target_category = SSAM_SSH_TC_BAS,
+       .target_id       = 0x01,
+       .command_id      = 0x0a,
+       .instance_id     = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
+       .target_category = SSAM_SSH_TC_BAS,
+       .target_id       = 0x01,
+       .command_id      = 0x0b,
+       .instance_id     = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
+       .target_category = SSAM_SSH_TC_BAS,
+       .target_id       = 0x01,
+       .command_id      = 0x0c,
+       .instance_id     = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
+       .target_category = SSAM_SSH_TC_BAS,
+       .target_id       = 0x01,
+       .command_id      = 0x0d,
+       .instance_id     = 0x00,
+});
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
+       .target_category = SSAM_SSH_TC_BAS,
+       .target_id       = 0x01,
+       .command_id      = 0x11,
+       .instance_id     = 0x00,
+});
+
+
+/* -- Main structures. ------------------------------------------------------ */
+
+enum sdtx_device_state {
+       SDTX_DEVICE_SHUTDOWN_BIT    = BIT(0),
+       SDTX_DEVICE_DIRTY_BASE_BIT  = BIT(1),
+       SDTX_DEVICE_DIRTY_MODE_BIT  = BIT(2),
+       SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3),
+};
+
+struct sdtx_device {
+       struct kref kref;
+       struct rw_semaphore lock;         /* Guards device and controller reference. */
+
+       struct device *dev;
+       struct ssam_controller *ctrl;
+       unsigned long flags;
+
+       struct miscdevice mdev;
+       wait_queue_head_t waitq;
+       struct mutex write_lock;          /* Guards order of events/notifications. */
+       struct rw_semaphore client_lock;  /* Guards client list.                   */
+       struct list_head client_list;
+
+       struct delayed_work state_work;
+       struct {
+               struct ssam_bas_base_info base;
+               u8 device_mode;
+               u8 latch_status;
+       } state;
+
+       struct delayed_work mode_work;
+       struct input_dev *mode_switch;
+
+       struct ssam_event_notifier notif;
+};
+
+enum sdtx_client_state {
+       SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0),
+};
+
+struct sdtx_client {
+       struct sdtx_device *ddev;
+       struct list_head node;
+       unsigned long flags;
+
+       struct fasync_struct *fasync;
+
+       struct mutex read_lock;           /* Guards FIFO buffer read access. */
+       DECLARE_KFIFO(buffer, u8, 512);
+};
+
+/* kref release callback: free the device state once the last ref is gone. */
+static void __sdtx_device_release(struct kref *kref)
+{
+       struct sdtx_device *ddev = container_of(kref, struct sdtx_device, kref);
+
+       mutex_destroy(&ddev->write_lock);
+       kfree(ddev);
+}
+
+/* Take a reference on @ddev (NULL-safe); returns @ddev for chaining. */
+static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev)
+{
+       if (ddev)
+               kref_get(&ddev->kref);
+
+       return ddev;
+}
+
+/* Drop a reference on @ddev (NULL-safe); frees it when the count hits zero. */
+static void sdtx_device_put(struct sdtx_device *ddev)
+{
+       if (ddev)
+               kref_put(&ddev->kref, __sdtx_device_release);
+}
+
+
+/* -- Firmware value translations. ------------------------------------------ */
+
+/*
+ * Translate a firmware base-state value into the user-space (UAPI) SDTX
+ * constant. Unknown values are logged and wrapped via SDTX_UNKNOWN() so
+ * user-space can still distinguish them.
+ */
+static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state)
+{
+       switch (state) {
+       case SSAM_BAS_BASE_STATE_ATTACHED:
+               return SDTX_BASE_ATTACHED;
+
+       case SSAM_BAS_BASE_STATE_DETACH_SUCCESS:
+               return SDTX_BASE_DETACHED;
+
+       case SSAM_BAS_BASE_STATE_NOT_FEASIBLE:
+               return SDTX_DETACH_NOT_FEASIBLE;
+
+       default:
+               dev_err(ddev->dev, "unknown base state: %#04x\n", state);
+               return SDTX_UNKNOWN(state);
+       }
+}
+
+/*
+ * Translate a firmware latch-status value into the user-space (UAPI) SDTX
+ * constant; unknown values are logged and wrapped via SDTX_UNKNOWN().
+ */
+static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status)
+{
+       switch (status) {
+       case SSAM_BAS_LATCH_STATUS_CLOSED:
+               return SDTX_LATCH_CLOSED;
+
+       case SSAM_BAS_LATCH_STATUS_OPENED:
+               return SDTX_LATCH_OPENED;
+
+       case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN:
+               return SDTX_ERR_FAILED_TO_OPEN;
+
+       case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN:
+               return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
+
+       case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE:
+               return SDTX_ERR_FAILED_TO_CLOSE;
+
+       default:
+               dev_err(ddev->dev, "unknown latch status: %#04x\n", status);
+               return SDTX_UNKNOWN(status);
+       }
+}
+
+/*
+ * Translate a firmware detach-cancel reason into the user-space (UAPI) SDTX
+ * constant; unknown values are logged and wrapped via SDTX_UNKNOWN().
+ */
+static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason)
+{
+       switch (reason) {
+       case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE:
+               return SDTX_DETACH_NOT_FEASIBLE;
+
+       case SSAM_BAS_CANCEL_REASON_TIMEOUT:
+               return SDTX_DETACH_TIMEDOUT;
+
+       case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN:
+               return SDTX_ERR_FAILED_TO_OPEN;
+
+       case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN:
+               return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
+
+       case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE:
+               return SDTX_ERR_FAILED_TO_CLOSE;
+
+       default:
+               dev_err(ddev->dev, "unknown cancel reason: %#04x\n", reason);
+               return SDTX_UNKNOWN(reason);
+       }
+}
+
+
+/* -- IOCTLs. --------------------------------------------------------------- */
+
+/*
+ * SDTX_IOCTL_GET_BASE_INFO: query the raw base info from the EC, translate
+ * it to UAPI values, and copy it to user-space. Caller must hold ddev->lock
+ * (read) — asserted below.
+ *
+ * Return: 0 on success, negative errno on query failure or -EFAULT on a
+ * failed copy to user-space.
+ */
+static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev,
+                                   struct sdtx_base_info __user *buf)
+{
+       struct ssam_bas_base_info raw;
+       struct sdtx_base_info info;
+       int status;
+
+       lockdep_assert_held_read(&ddev->lock);
+
+       status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &raw);
+       if (status < 0)
+               return status;
+
+       info.state = sdtx_translate_base_state(ddev, raw.state);
+       info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id);
+
+       if (copy_to_user(buf, &info, sizeof(info)))
+               return -EFAULT;
+
+       return 0;
+}
+
+/*
+ * SDTX_IOCTL_GET_DEVICE_MODE: query the current device mode and copy it to
+ * user-space. The raw firmware value is passed through untranslated —
+ * presumably the mode values already match the UAPI encoding; verify against
+ * the dtx UAPI header. Caller must hold ddev->lock (read).
+ */
+static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf)
+{
+       u8 mode;
+       int status;
+
+       lockdep_assert_held_read(&ddev->lock);
+
+       status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
+       if (status < 0)
+               return status;
+
+       return put_user(mode, buf);
+}
+
+/*
+ * SDTX_IOCTL_GET_LATCH_STATUS: query the latch status, translate it to the
+ * UAPI encoding, and copy it to user-space. Caller must hold ddev->lock
+ * (read).
+ */
+static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf)
+{
+       u8 latch;
+       int status;
+
+       lockdep_assert_held_read(&ddev->lock);
+
+       status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
+       if (status < 0)
+               return status;
+
+       return put_user(sdtx_translate_latch_status(ddev, latch), buf);
+}
+
+/*
+ * Ioctl dispatcher. Caller (surface_dtx_ioctl()) must hold ddev->lock
+ * (read) and have verified the device is not shut down. The EVENTS_*
+ * commands only toggle a per-client flag; the LATCH_* commands forward
+ * directly to the EC; the GET_* commands query and copy out state.
+ *
+ * Return: 0 or a positive result on success, -EINVAL for unknown commands,
+ * negative errno otherwise.
+ */
+static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd, unsigned long arg)
+{
+       struct sdtx_device *ddev = client->ddev;
+
+       lockdep_assert_held_read(&ddev->lock);
+
+       switch (cmd) {
+       case SDTX_IOCTL_EVENTS_ENABLE:
+               set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
+               return 0;
+
+       case SDTX_IOCTL_EVENTS_DISABLE:
+               clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
+               return 0;
+
+       case SDTX_IOCTL_LATCH_LOCK:
+               return ssam_retry(ssam_bas_latch_lock, ddev->ctrl);
+
+       case SDTX_IOCTL_LATCH_UNLOCK:
+               return ssam_retry(ssam_bas_latch_unlock, ddev->ctrl);
+
+       case SDTX_IOCTL_LATCH_REQUEST:
+               return ssam_retry(ssam_bas_latch_request, ddev->ctrl);
+
+       case SDTX_IOCTL_LATCH_CONFIRM:
+               return ssam_retry(ssam_bas_latch_confirm, ddev->ctrl);
+
+       case SDTX_IOCTL_LATCH_HEARTBEAT:
+               return ssam_retry(ssam_bas_latch_heartbeat, ddev->ctrl);
+
+       case SDTX_IOCTL_LATCH_CANCEL:
+               return ssam_retry(ssam_bas_latch_cancel, ddev->ctrl);
+
+       case SDTX_IOCTL_GET_BASE_INFO:
+               return sdtx_ioctl_get_base_info(ddev, (struct sdtx_base_info __user *)arg);
+
+       case SDTX_IOCTL_GET_DEVICE_MODE:
+               return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg);
+
+       case SDTX_IOCTL_GET_LATCH_STATUS:
+               return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg);
+
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * Ioctl entry point: take the device lock (killable, so a stuck EC cannot
+ * wedge unkillable tasks), reject commands after shutdown, then dispatch.
+ */
+static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct sdtx_client *client = file->private_data;
+       long status;
+
+       if (down_read_killable(&client->ddev->lock))
+               return -ERESTARTSYS;
+
+       if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
+               up_read(&client->ddev->lock);
+               return -ENODEV;
+       }
+
+       status = __surface_dtx_ioctl(client, cmd, arg);
+
+       up_read(&client->ddev->lock);
+       return status;
+}
+
+
+/* -- File operations. ------------------------------------------------------ */
+
+/*
+ * Misc-device open: allocate a per-open client, take a device reference,
+ * and attach the client to the device's client list. Fails with -ENODEV if
+ * the device has already been shut down. file->private_data initially
+ * points at the miscdevice (set by misc core) and is replaced with the
+ * client here.
+ */
+static int surface_dtx_open(struct inode *inode, struct file *file)
+{
+       struct sdtx_device *ddev = container_of(file->private_data, struct sdtx_device, mdev);
+       struct sdtx_client *client;
+
+       /* Initialize client. */
+       client = kzalloc(sizeof(*client), GFP_KERNEL);
+       if (!client)
+               return -ENOMEM;
+
+       client->ddev = sdtx_device_get(ddev);
+
+       INIT_LIST_HEAD(&client->node);
+
+       mutex_init(&client->read_lock);
+       INIT_KFIFO(client->buffer);
+
+       file->private_data = client;
+
+       /* Attach client. */
+       down_write(&ddev->client_lock);
+
+       /*
+        * Do not add a new client if the device has been shut down. Note that
+        * it's enough to hold the client_lock here as, during shutdown, we
+        * only acquire that lock and remove clients after marking the device
+        * as shut down.
+        */
+       if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
+               up_write(&ddev->client_lock);
+               sdtx_device_put(client->ddev);
+               kfree(client);
+               return -ENODEV;
+       }
+
+       list_add_tail(&client->node, &ddev->client_list);
+       up_write(&ddev->client_lock);
+
+       stream_open(inode, file);
+       return 0;
+}
+
+/*
+ * surface_dtx_release() - Release callback for the DTX misc-device.
+ *
+ * Detaches the client from the device's client list, drops the device
+ * reference taken in surface_dtx_open(), and frees the client state.
+ */
+static int surface_dtx_release(struct inode *inode, struct file *file)
+{
+       struct sdtx_client *client = file->private_data;
+       struct sdtx_device *ddev = client->ddev;
+
+       /* Remove the client from the device's client list. */
+       down_write(&ddev->client_lock);
+       list_del(&client->node);
+       up_write(&ddev->client_lock);
+
+       /* Drop the device reference and release the client's resources. */
+       sdtx_device_put(ddev);
+       mutex_destroy(&client->read_lock);
+       kfree(client);
+
+       return 0;
+}
+
+/*
+ * surface_dtx_read() - Read events from this client's event FIFO.
+ *
+ * Blocks (unless O_NONBLOCK is set) until at least one event is available
+ * or the device is shut down. The device lock is dropped while sleeping so
+ * that shutdown can proceed; it is re-acquired and the shutdown flag
+ * re-checked afterwards.
+ */
+static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
+{
+       struct sdtx_client *client = file->private_data;
+       struct sdtx_device *ddev = client->ddev;
+       unsigned int copied;
+       int status = 0;
+
+       if (down_read_killable(&ddev->lock))
+               return -ERESTARTSYS;
+
+       /* Make sure we're not shut down. */
+       if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
+               up_read(&ddev->lock);
+               return -ENODEV;
+       }
+
+       do {
+               /* Check availability, wait if necessary. */
+               if (kfifo_is_empty(&client->buffer)) {
+                       /* Drop the lock while sleeping so shutdown can proceed. */
+                       up_read(&ddev->lock);
+
+                       if (file->f_flags & O_NONBLOCK)
+                               return -EAGAIN;
+
+                       status = wait_event_interruptible(ddev->waitq,
+                                                         !kfifo_is_empty(&client->buffer) ||
+                                                         test_bit(SDTX_DEVICE_SHUTDOWN_BIT,
+                                                                  &ddev->flags));
+                       if (status < 0)
+                               return status;
+
+                       if (down_read_killable(&ddev->lock))
+                               return -ERESTARTSYS;
+
+                       /* Need to check that we're not shut down again. */
+                       if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
+                               up_read(&ddev->lock);
+                               return -ENODEV;
+                       }
+               }
+
+               /* Try to read from FIFO. */
+               if (mutex_lock_interruptible(&client->read_lock)) {
+                       up_read(&ddev->lock);
+                       return -ERESTARTSYS;
+               }
+
+               status = kfifo_to_user(&client->buffer, buf, count, &copied);
+               mutex_unlock(&client->read_lock);
+
+               if (status < 0) {
+                       up_read(&ddev->lock);
+                       return status;
+               }
+
+               /* We might not have gotten anything, check this here. */
+               if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
+                       up_read(&ddev->lock);
+                       return -EAGAIN;
+               }
+       } while (copied == 0);
+
+       up_read(&ddev->lock);
+       return copied;
+}
+
+/*
+ * surface_dtx_poll() - Poll callback for the DTX misc-device.
+ *
+ * Reports EPOLLIN | EPOLLRDNORM while the client's event FIFO is
+ * non-empty, and EPOLLHUP | EPOLLERR once the device has been shut down.
+ *
+ * Note: A poll function must return an (unsigned) __poll_t event mask and
+ * may not return negative error codes. The previous code returned
+ * -ERESTARTSYS on lock-acquisition failure, which is not a valid __poll_t
+ * value; signal EPOLLERR instead.
+ */
+static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
+{
+       struct sdtx_client *client = file->private_data;
+       __poll_t events = 0;
+
+       if (down_read_killable(&client->ddev->lock))
+               return EPOLLERR;
+
+       if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
+               up_read(&client->ddev->lock);
+               return EPOLLHUP | EPOLLERR;
+       }
+
+       poll_wait(file, &client->ddev->waitq, pt);
+
+       if (!kfifo_is_empty(&client->buffer))
+               events |= EPOLLIN | EPOLLRDNORM;
+
+       up_read(&client->ddev->lock);
+       return events;
+}
+
+/* Register or unregister this client for SIGIO event notification. */
+static int surface_dtx_fasync(int fd, struct file *file, int on)
+{
+       struct sdtx_client *c = file->private_data;
+
+       return fasync_helper(fd, file, on, &c->fasync);
+}
+
+/* File operations for the "surface/dtx" misc-device. */
+static const struct file_operations surface_dtx_fops = {
+       .owner          = THIS_MODULE,
+       .open           = surface_dtx_open,
+       .release        = surface_dtx_release,
+       .read           = surface_dtx_read,
+       .poll           = surface_dtx_poll,
+       .fasync         = surface_dtx_fasync,
+       .unlocked_ioctl = surface_dtx_ioctl,
+       .compat_ioctl   = surface_dtx_ioctl,
+       .llseek         = no_llseek,
+};
+
+
+/* -- Event handling/forwarding. -------------------------------------------- */
+
+/*
+ * The device operation mode is not immediately updated on the EC when the
+ * base has been connected, i.e. querying the device mode inside the
+ * connection event callback yields an outdated value. Thus, we can only
+ * determine the new tablet-mode switch and device mode values after some
+ * time.
+ *
+ * These delays have been chosen by experimenting. We first delay on connect
+ * events, then check and validate the device mode against the base state and
+ * if invalid delay again by the "recheck" delay.
+ */
+#define SDTX_DEVICE_MODE_DELAY_CONNECT msecs_to_jiffies(100)
+#define SDTX_DEVICE_MODE_DELAY_RECHECK msecs_to_jiffies(100)
+
+/* Event carrying a single u16 value (device mode, latch status, cancel reason). */
+struct sdtx_status_event {
+       struct sdtx_event e;
+       __u16 v;
+} __packed;
+
+/* Event carrying base connection state and base ID. */
+struct sdtx_base_info_event {
+       struct sdtx_event e;
+       struct sdtx_base_info v;
+} __packed;
+
+/* Union over all event layouts pushed to clients by this driver. */
+union sdtx_generic_event {
+       struct sdtx_event common;
+       struct sdtx_status_event status;
+       struct sdtx_base_info_event base;
+};
+
+static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay);
+
+/*
+ * sdtx_push_event() - Forward an event to all attached clients.
+ *
+ * Copies the event (header plus payload) into the FIFO of every client
+ * that has enabled event delivery and wakes up readers/async waiters.
+ * Must be executed with ddev->write_lock held.
+ */
+static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt)
+{
+       /* Total size to copy: event header plus variable-length payload. */
+       const size_t len = sizeof(struct sdtx_event) + evt->length;
+       struct sdtx_client *client;
+
+       lockdep_assert_held(&ddev->write_lock);
+
+       down_read(&ddev->client_lock);
+       list_for_each_entry(client, &ddev->client_list, node) {
+               /* Skip clients that have not enabled event delivery. */
+               if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags))
+                       continue;
+
+               /* On FIFO overrun the event is dropped for this client. */
+               if (likely(kfifo_avail(&client->buffer) >= len))
+                       kfifo_in(&client->buffer, (const u8 *)evt, len);
+               else
+                       dev_warn(ddev->dev, "event buffer overrun\n");
+
+               kill_fasync(&client->fasync, SIGIO, POLL_IN);
+       }
+       up_read(&ddev->client_lock);
+
+       /* Wake readers blocked in surface_dtx_read(). */
+       wake_up_interruptible(&ddev->waitq);
+}
+
+/*
+ * sdtx_notifier() - SSAM event handler for DTX (BAS) events.
+ *
+ * Validates the payload length for each known event type, translates the
+ * EC event into the user-space event format, updates the cached device
+ * state, and forwards the event to all attached clients. Unknown events
+ * and events with unexpected payload sizes are not handled (return 0).
+ */
+static u32 sdtx_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
+{
+       struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif);
+       union sdtx_generic_event event;
+       size_t len;
+
+       /* Validate event payload length. */
+       switch (in->command_id) {
+       case SAM_EVENT_CID_DTX_CONNECTION:
+               len = 2 * sizeof(u8);
+               break;
+
+       case SAM_EVENT_CID_DTX_REQUEST:
+               len = 0;
+               break;
+
+       case SAM_EVENT_CID_DTX_CANCEL:
+               len = sizeof(u8);
+               break;
+
+       case SAM_EVENT_CID_DTX_LATCH_STATUS:
+               len = sizeof(u8);
+               break;
+
+       default:
+               return 0;
+       }
+
+       if (in->length != len) {
+               dev_err(ddev->dev,
+                       "unexpected payload size for event %#04x: got %u, expected %zu\n",
+                       in->command_id, in->length, len);
+               return 0;
+       }
+
+       mutex_lock(&ddev->write_lock);
+
+       /* Translate event. */
+       switch (in->command_id) {
+       case SAM_EVENT_CID_DTX_CONNECTION:
+               /* Cached base state is now up to date (see sdtx_device_state_workfn()). */
+               clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
+
+               /* If state has not changed: do not send new event. */
+               if (ddev->state.base.state == in->data[0] &&
+                   ddev->state.base.base_id == in->data[1])
+                       goto out;
+
+               ddev->state.base.state = in->data[0];
+               ddev->state.base.base_id = in->data[1];
+
+               event.base.e.length = sizeof(struct sdtx_base_info);
+               event.base.e.code = SDTX_EVENT_BASE_CONNECTION;
+               event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]);
+               event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]);
+               break;
+
+       case SAM_EVENT_CID_DTX_REQUEST:
+               /* Detach request: no payload. */
+               event.common.code = SDTX_EVENT_REQUEST;
+               event.common.length = 0;
+               break;
+
+       case SAM_EVENT_CID_DTX_CANCEL:
+               event.status.e.length = sizeof(u16);
+               event.status.e.code = SDTX_EVENT_CANCEL;
+               event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]);
+               break;
+
+       case SAM_EVENT_CID_DTX_LATCH_STATUS:
+               /* Cached latch status is now up to date. */
+               clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
+
+               /* If state has not changed: do not send new event. */
+               if (ddev->state.latch_status == in->data[0])
+                       goto out;
+
+               ddev->state.latch_status = in->data[0];
+
+               event.status.e.length = sizeof(u16);
+               event.status.e.code = SDTX_EVENT_LATCH_STATUS;
+               event.status.v = sdtx_translate_latch_status(ddev, in->data[0]);
+               break;
+       }
+
+       sdtx_push_event(ddev, &event.common);
+
+       /* Update device mode on base connection change. */
+       if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) {
+               unsigned long delay;
+
+               /* Delay on connect (data[0] != 0); see comment on the delay macros. */
+               delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0;
+               sdtx_update_device_mode(ddev, delay);
+       }
+
+out:
+       mutex_unlock(&ddev->write_lock);
+       return SSAM_NOTIF_HANDLED;
+}
+
+
+/* -- State update functions. ----------------------------------------------- */
+
+/*
+ * sdtx_device_mode_invalid() - Check device mode against base state.
+ *
+ * Returns true if the reported device mode is inconsistent with the given
+ * base state: tablet mode while the base is attached, or non-tablet mode
+ * after a successful detach.
+ */
+static bool sdtx_device_mode_invalid(u8 mode, u8 base_state)
+{
+       bool tablet = mode == SDTX_DEVICE_MODE_TABLET;
+
+       if (base_state == SSAM_BAS_BASE_STATE_ATTACHED)
+               return tablet;
+
+       if (base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS)
+               return !tablet;
+
+       return false;
+}
+
+/*
+ * sdtx_device_mode_workfn() - Delayed work querying the device mode.
+ *
+ * Queries the current operation mode and base state from the EC, validates
+ * the mode against the base state (retrying later if inconsistent), and on
+ * change emits a device-mode event plus the SW_TABLET_MODE input event.
+ */
+static void sdtx_device_mode_workfn(struct work_struct *work)
+{
+       struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work);
+       struct sdtx_status_event event;
+       struct ssam_bas_base_info base;
+       int status, tablet;
+       u8 mode;
+
+       /* Get operation mode. */
+       status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
+       if (status) {
+               dev_err(ddev->dev, "failed to get device mode: %d\n", status);
+               return;
+       }
+
+       /* Get base info. */
+       status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
+       if (status) {
+               dev_err(ddev->dev, "failed to get base info: %d\n", status);
+               return;
+       }
+
+       /*
+        * In some cases (specifically when attaching the base), the device
+        * mode isn't updated right away. Thus we check if the device mode
+        * makes sense for the given base state and try again later if it
+        * doesn't.
+        */
+       if (sdtx_device_mode_invalid(mode, base.state)) {
+               dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
+               sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
+               return;
+       }
+
+       mutex_lock(&ddev->write_lock);
+       /* Cached device mode is now up to date (see sdtx_device_state_workfn()). */
+       clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
+
+       /* Avoid sending duplicate device-mode events. */
+       if (ddev->state.device_mode == mode) {
+               mutex_unlock(&ddev->write_lock);
+               return;
+       }
+
+       ddev->state.device_mode = mode;
+
+       event.e.length = sizeof(u16);
+       event.e.code = SDTX_EVENT_DEVICE_MODE;
+       event.v = mode;
+
+       sdtx_push_event(ddev, &event.e);
+
+       /* Send SW_TABLET_MODE event. */
+       tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
+       input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
+       input_sync(ddev->mode_switch);
+
+       mutex_unlock(&ddev->write_lock);
+}
+
+/* Schedule a (possibly delayed) device-mode query and event update. */
+static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay)
+{
+       schedule_delayed_work(&ddev->mode_work, delay);
+}
+
+/*
+ * __sdtx_device_state_update_base() - Update cached base state.
+ *
+ * Commits the given base info to the cached state and, if it differs from
+ * the previous value, pushes a base-connection event to all clients.
+ * Must be executed with ddev->write_lock held.
+ */
+static void __sdtx_device_state_update_base(struct sdtx_device *ddev,
+                                           struct ssam_bas_base_info info)
+{
+       struct sdtx_base_info_event event;
+
+       lockdep_assert_held(&ddev->write_lock);
+
+       /* Nothing to do if neither state nor base ID changed. */
+       if (ddev->state.base.state == info.state &&
+           ddev->state.base.base_id == info.base_id)
+               return;
+
+       ddev->state.base = info;
+
+       event.v.state = sdtx_translate_base_state(ddev, info.state);
+       event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id);
+       event.e.code = SDTX_EVENT_BASE_CONNECTION;
+       event.e.length = sizeof(event.v);
+
+       sdtx_push_event(ddev, &event.e);
+}
+
+/*
+ * __sdtx_device_state_update_mode() - Update cached device mode.
+ *
+ * Validates the mode against the (already updated) base state, and on
+ * change pushes a device-mode event and reports SW_TABLET_MODE.
+ * Must be executed with ddev->write_lock held.
+ */
+static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode)
+{
+       struct sdtx_status_event event;
+       int tablet;
+
+       /*
+        * Note: This function must be called after updating the base state
+        * via __sdtx_device_state_update_base(), as we rely on the updated
+        * base state value in the validity check below.
+        */
+
+       lockdep_assert_held(&ddev->write_lock);
+
+       /* Inconsistent mode: re-query later instead of publishing bad state. */
+       if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) {
+               dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
+               sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
+               return;
+       }
+
+       /* Prevent duplicate events. */
+       if (ddev->state.device_mode == mode)
+               return;
+
+       ddev->state.device_mode = mode;
+
+       /* Send event. */
+       event.e.length = sizeof(u16);
+       event.e.code = SDTX_EVENT_DEVICE_MODE;
+       event.v = mode;
+
+       sdtx_push_event(ddev, &event.e);
+
+       /* Send SW_TABLET_MODE event. */
+       tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
+       input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
+       input_sync(ddev->mode_switch);
+}
+
+/*
+ * __sdtx_device_state_update_latch() - Update cached latch status.
+ *
+ * Commits the given latch status to the cached state and, if it changed,
+ * pushes a latch-status event to all clients.
+ * Must be executed with ddev->write_lock held.
+ */
+static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status)
+{
+       struct sdtx_status_event event;
+
+       lockdep_assert_held(&ddev->write_lock);
+
+       /* Prevent duplicate events. */
+       if (ddev->state.latch_status == status)
+               return;
+
+       ddev->state.latch_status = status;
+
+       /*
+        * This is a latch-status event with a single u16 payload. The
+        * previous code mistakenly used the base-connection event code and
+        * sizeof(struct sdtx_base_info) as payload length here, mislabeling
+        * the event and overstating its length to user space.
+        */
+       event.e.length = sizeof(u16);
+       event.e.code = SDTX_EVENT_LATCH_STATUS;
+       event.v = sdtx_translate_latch_status(ddev, status);
+
+       sdtx_push_event(ddev, &event.e);
+}
+
+/*
+ * sdtx_device_state_workfn() - Delayed work re-querying the full EC state.
+ *
+ * Marks all cached state as dirty, queries base state, device mode, and
+ * latch status from the EC, and publishes each value only if no newer
+ * event cleared the corresponding dirty bit in the meantime.
+ */
+static void sdtx_device_state_workfn(struct work_struct *work)
+{
+       struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work);
+       struct ssam_bas_base_info base;
+       u8 mode, latch;
+       int status;
+
+       /* Mark everything as dirty. */
+       set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
+       set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
+       set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
+
+       /*
+        * Ensure that the state gets marked as dirty before continuing to
+        * query it. Necessary to ensure that clear_bit() calls in
+        * sdtx_notifier() and sdtx_device_mode_workfn() actually clear these
+        * bits if an event is received while updating the state here.
+        */
+       smp_mb__after_atomic();
+
+       status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
+       if (status) {
+               dev_err(ddev->dev, "failed to get base state: %d\n", status);
+               return;
+       }
+
+       status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
+       if (status) {
+               dev_err(ddev->dev, "failed to get device mode: %d\n", status);
+               return;
+       }
+
+       status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
+       if (status) {
+               dev_err(ddev->dev, "failed to get latch status: %d\n", status);
+               return;
+       }
+
+       mutex_lock(&ddev->write_lock);
+
+       /*
+        * If the respective dirty-bit has been cleared, an event has been
+        * received, updating this state. The queried state may thus be out of
+        * date. At this point, we can safely assume that the state provided
+        * by the event is either up to date, or we're about to receive
+        * another event updating it.
+        */
+
+       if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags))
+               __sdtx_device_state_update_base(ddev, base);
+
+       /* Mode must be updated after base state; see __sdtx_device_state_update_mode(). */
+       if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags))
+               __sdtx_device_state_update_mode(ddev, mode);
+
+       if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags))
+               __sdtx_device_state_update_latch(ddev, latch);
+
+       mutex_unlock(&ddev->write_lock);
+}
+
+/* Schedule a (possibly delayed) full EC state query and event update. */
+static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay)
+{
+       schedule_delayed_work(&ddev->state_work, delay);
+}
+
+
+/* -- Common device initialization. ----------------------------------------- */
+
+/*
+ * sdtx_device_init() - Initialize a DTX device instance.
+ * @ddev: Device instance to initialize (allocated by the caller).
+ * @dev:  Parent device, used for logging and as input-device parent.
+ * @ctrl: SSAM controller used for EC communication.
+ *
+ * Queries the initial EC state, sets up the tablet-mode switch input
+ * device, registers the event notifier and the misc-device. Returns zero
+ * on success or a negative errno on failure.
+ */
+static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev,
+                           struct ssam_controller *ctrl)
+{
+       int status, tablet_mode;
+
+       /* Basic initialization. */
+       kref_init(&ddev->kref);
+       init_rwsem(&ddev->lock);
+       ddev->dev = dev;
+       ddev->ctrl = ctrl;
+
+       ddev->mdev.minor = MISC_DYNAMIC_MINOR;
+       ddev->mdev.name = "surface_dtx";
+       ddev->mdev.nodename = "surface/dtx";
+       ddev->mdev.fops = &surface_dtx_fops;
+
+       ddev->notif.base.priority = 1;
+       ddev->notif.base.fn = sdtx_notifier;
+       ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
+       ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
+       ddev->notif.event.id.instance = 0;
+       ddev->notif.event.mask = SSAM_EVENT_MASK_NONE;
+       ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+       init_waitqueue_head(&ddev->waitq);
+       mutex_init(&ddev->write_lock);
+       init_rwsem(&ddev->client_lock);
+       INIT_LIST_HEAD(&ddev->client_list);
+
+       INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn);
+       INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn);
+
+       /*
+        * Get current device state. We want to guarantee that events are only
+        * sent when state actually changes. Thus we cannot use special
+        * "uninitialized" values, as that would cause problems when manually
+        * querying the state in surface_dtx_pm_complete(). I.e. we would not
+        * be able to detect state changes there if no change event has been
+        * received between driver initialization and first device suspension.
+        *
+        * Note that we also need to do this before registering the event
+        * notifier, as that may access the state values.
+        */
+       status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &ddev->state.base);
+       if (status)
+               return status;
+
+       status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &ddev->state.device_mode);
+       if (status)
+               return status;
+
+       status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &ddev->state.latch_status);
+       if (status)
+               return status;
+
+       /* Set up tablet mode switch. */
+       ddev->mode_switch = input_allocate_device();
+       if (!ddev->mode_switch)
+               return -ENOMEM;
+
+       ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch";
+       ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0";
+       ddev->mode_switch->id.bustype = BUS_HOST;
+       ddev->mode_switch->dev.parent = ddev->dev;
+
+       tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP);
+       input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE);
+       input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode);
+
+       status = input_register_device(ddev->mode_switch);
+       if (status) {
+               input_free_device(ddev->mode_switch);
+               return status;
+       }
+
+       /* Set up event notifier. */
+       status = ssam_notifier_register(ddev->ctrl, &ddev->notif);
+       if (status)
+               goto err_notif;
+
+       /* Register miscdevice. */
+       status = misc_register(&ddev->mdev);
+       if (status)
+               goto err_mdev;
+
+       /*
+        * Update device state in case it has changed between getting the
+        * initial mode and registering the event notifier.
+        */
+       sdtx_update_device_state(ddev, 0);
+       return 0;
+
+       /*
+        * Error-path ordering: if misc_register() fails, the notifier has
+        * already been registered and may have scheduled mode_work, so both
+        * must be torn down before unregistering the input device. If
+        * ssam_notifier_register() itself fails, only the input device needs
+        * to be unregistered. The previous code had these two cleanup blocks
+        * swapped, leaking the registered notifier (and any pending
+        * mode_work) when misc_register() failed.
+        */
+err_mdev:
+       ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
+       cancel_delayed_work_sync(&ddev->mode_work);
+err_notif:
+       input_unregister_device(ddev->mode_switch);
+       return status;
+}
+
+/*
+ * sdtx_device_create() - Allocate and initialize a DTX device instance.
+ *
+ * Returns the new device on success or an ERR_PTR() on failure. On init
+ * failure, the reference taken by allocation is dropped again, which
+ * frees the device.
+ */
+static struct sdtx_device *sdtx_device_create(struct device *dev, struct ssam_controller *ctrl)
+{
+       struct sdtx_device *ddev;
+       int status;
+
+       ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
+       if (!ddev)
+               return ERR_PTR(-ENOMEM);
+
+       status = sdtx_device_init(ddev, dev, ctrl);
+       if (!status)
+               return ddev;
+
+       sdtx_device_put(ddev);
+       return ERR_PTR(status);
+}
+
+/*
+ * sdtx_device_destroy() - Shut down and release a DTX device instance.
+ *
+ * Ordering is important here: the shutdown flag is set first so that no
+ * new clients or operations can start, then event sources and work items
+ * are stopped, clients are woken, and only afterwards are the device and
+ * controller references cleared and the misc-device removed.
+ */
+static void sdtx_device_destroy(struct sdtx_device *ddev)
+{
+       struct sdtx_client *client;
+
+       /*
+        * Mark device as shut-down. Prevent new clients from being added and
+        * new operations from being executed.
+        */
+       set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags);
+
+       /* Disable notifiers, prevent new events from arriving. */
+       ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
+
+       /* Stop mode_work, prevent access to mode_switch. */
+       cancel_delayed_work_sync(&ddev->mode_work);
+
+       /* Stop state_work. */
+       cancel_delayed_work_sync(&ddev->state_work);
+
+       /* With mode_work canceled, we can unregister the mode_switch. */
+       input_unregister_device(ddev->mode_switch);
+
+       /* Wake up async clients. */
+       down_write(&ddev->client_lock);
+       list_for_each_entry(client, &ddev->client_list, node) {
+               kill_fasync(&client->fasync, SIGIO, POLL_HUP);
+       }
+       up_write(&ddev->client_lock);
+
+       /* Wake up blocking clients. */
+       wake_up_interruptible(&ddev->waitq);
+
+       /*
+        * Wait for clients to finish their current operation. After this, the
+        * controller and device references are guaranteed to be no longer in
+        * use.
+        */
+       down_write(&ddev->lock);
+       ddev->dev = NULL;
+       ddev->ctrl = NULL;
+       up_write(&ddev->lock);
+
+       /* Finally remove the misc-device. */
+       misc_deregister(&ddev->mdev);
+
+       /*
+        * We're now guaranteed that sdtx_device_open() won't be called any
+        * more, so we can now drop our reference.
+        */
+       sdtx_device_put(ddev);
+}
+
+
+/* -- PM ops. --------------------------------------------------------------- */
+
+#ifdef CONFIG_PM_SLEEP
+
+/* Called at the end of system resume (PM "complete" phase). */
+static void surface_dtx_pm_complete(struct device *dev)
+{
+       struct sdtx_device *ddev = dev_get_drvdata(dev);
+
+       /*
+        * Normally, the EC will store events while suspended (i.e. in
+        * display-off state) and release them when resumed (i.e. transitioned
+        * to display-on state). During hibernation, however, the EC will be
+        * shut down and does not store events. Furthermore, events might be
+        * dropped during prolonged suspension (it is currently unknown how
+        * big this event buffer is and how it behaves on overruns).
+        *
+        * To prevent any problems, we update the device state here. We do
+        * this delayed to ensure that any events sent by the EC directly
+        * after resuming will be handled first. The delay below has been
+        * chosen (experimentally), so that there should be ample time for
+        * these events to be handled, before we check and, if necessary,
+        * update the state.
+        */
+       sdtx_update_device_state(ddev, msecs_to_jiffies(1000));
+}
+
+static const struct dev_pm_ops surface_dtx_pm_ops = {
+       .complete = surface_dtx_pm_complete,
+};
+
+#else /* CONFIG_PM_SLEEP */
+
+/* Without sleep support, no state refresh on resume is required. */
+static const struct dev_pm_ops surface_dtx_pm_ops = {};
+
+#endif /* CONFIG_PM_SLEEP */
+
+
+/* -- Platform driver. ------------------------------------------------------ */
+
+/*
+ * surface_dtx_platform_probe() - Probe callback for the platform device.
+ *
+ * Binds to the SSAM controller (deferring probe while it is unavailable)
+ * and creates the DTX device instance.
+ */
+static int surface_dtx_platform_probe(struct platform_device *pdev)
+{
+       struct ssam_controller *ctrl;
+       struct sdtx_device *ddev;
+       long err;
+
+       /* Link to EC. */
+       ctrl = ssam_client_bind(&pdev->dev);
+       if (IS_ERR(ctrl)) {
+               err = PTR_ERR(ctrl);
+               return err == -ENODEV ? -EPROBE_DEFER : (int)err;
+       }
+
+       ddev = sdtx_device_create(&pdev->dev, ctrl);
+       if (IS_ERR(ddev))
+               return PTR_ERR(ddev);
+
+       platform_set_drvdata(pdev, ddev);
+       return 0;
+}
+
+/* Tear down the DTX device created in surface_dtx_platform_probe(). */
+static int surface_dtx_platform_remove(struct platform_device *pdev)
+{
+       sdtx_device_destroy(platform_get_drvdata(pdev));
+       return 0;
+}
+
+/* ACPI match table: MSHW0133 identifies the Surface detachment-system device. */
+static const struct acpi_device_id surface_dtx_acpi_match[] = {
+       { "MSHW0133", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match);
+
+static struct platform_driver surface_dtx_platform_driver = {
+       .probe = surface_dtx_platform_probe,
+       .remove = surface_dtx_platform_remove,
+       .driver = {
+               .name = "surface_dtx_pltf",
+               .acpi_match_table = surface_dtx_acpi_match,
+               .pm = &surface_dtx_pm_ops,
+               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+       },
+};
+
+
+/* -- SSAM device driver. --------------------------------------------------- */
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
+/* Probe callback for the SSAM-bus variant of the DTX device. */
+static int surface_dtx_ssam_probe(struct ssam_device *sdev)
+{
+       struct sdtx_device *ddev;
+
+       ddev = sdtx_device_create(&sdev->dev, sdev->ctrl);
+       if (IS_ERR(ddev))
+               return PTR_ERR(ddev);
+
+       ssam_device_set_drvdata(sdev, ddev);
+       return 0;
+}
+
+/* Tear down the DTX device created in surface_dtx_ssam_probe(). */
+static void surface_dtx_ssam_remove(struct ssam_device *sdev)
+{
+       sdtx_device_destroy(ssam_device_get_drvdata(sdev));
+}
+
+static const struct ssam_device_id surface_dtx_ssam_match[] = {
+       { SSAM_SDEV(BAS, 0x01, 0x00, 0x00) },
+       { },
+};
+MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match);
+
+static struct ssam_device_driver surface_dtx_ssam_driver = {
+       .probe = surface_dtx_ssam_probe,
+       .remove = surface_dtx_ssam_remove,
+       .match_table = surface_dtx_ssam_match,
+       .driver = {
+               .name = "surface_dtx",
+               .pm = &surface_dtx_pm_ops,
+               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+       },
+};
+
+static int ssam_dtx_driver_register(void)
+{
+       return ssam_device_driver_register(&surface_dtx_ssam_driver);
+}
+
+static void ssam_dtx_driver_unregister(void)
+{
+       ssam_device_driver_unregister(&surface_dtx_ssam_driver);
+}
+
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+/* Without the SSAM bus, registration is a no-op and always succeeds. */
+static int ssam_dtx_driver_register(void)
+{
+       return 0;
+}
+
+static void ssam_dtx_driver_unregister(void)
+{
+}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+
+/* -- Module setup. --------------------------------------------------------- */
+
+/*
+ * surface_dtx_init() - Module init: register both driver variants.
+ *
+ * Registers the SSAM-bus driver first, then the platform driver; if the
+ * latter fails, the SSAM registration is rolled back.
+ */
+static int __init surface_dtx_init(void)
+{
+       int err;
+
+       err = ssam_dtx_driver_register();
+       if (err)
+               return err;
+
+       err = platform_driver_register(&surface_dtx_platform_driver);
+       if (!err)
+               return 0;
+
+       /* Roll back the SSAM driver registration on failure. */
+       ssam_dtx_driver_unregister();
+       return err;
+}
+module_init(surface_dtx_init);
+
+/* Module exit: unregister drivers in reverse order of registration. */
+static void __exit surface_dtx_exit(void)
+{
+       platform_driver_unregister(&surface_dtx_platform_driver);
+       ssam_dtx_driver_unregister();
+}
+module_exit(surface_dtx_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
new file mode 100644 (file)
index 0000000..6373d3b
--- /dev/null
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface Platform Profile / Performance Mode driver for Surface System
+ * Aggregator Module (thermal subsystem).
+ *
+ * Copyright (C) 2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_profile.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/device.h>
+
+/* Performance profiles as reported/accepted by the EC thermal (TMP) subsystem. */
+enum ssam_tmp_profile {
+       SSAM_TMP_PROFILE_NORMAL             = 1,
+       SSAM_TMP_PROFILE_BATTERY_SAVER      = 2,
+       SSAM_TMP_PROFILE_BETTER_PERFORMANCE = 3,
+       SSAM_TMP_PROFILE_BEST_PERFORMANCE   = 4,
+};
+
+/* Wire format of the EC's profile-query response (little-endian, packed). */
+struct ssam_tmp_profile_info {
+       __le32 profile;
+       __le16 unknown1;        /* NOTE(review): meaning unknown, currently ignored */
+       __le16 unknown2;        /* NOTE(review): meaning unknown, currently ignored */
+} __packed;
+
+/* Per-device driver state: SSAM device plus its platform-profile handler. */
+struct ssam_tmp_profile_device {
+       struct ssam_device *sdev;
+       struct platform_profile_handler handler;
+};
+
+/* Synchronous SSAM request: read current profile (TMP subsystem, command 0x02). */
+SSAM_DEFINE_SYNC_REQUEST_CL_R(__ssam_tmp_profile_get, struct ssam_tmp_profile_info, {
+       .target_category = SSAM_SSH_TC_TMP,
+       .command_id      = 0x02,
+});
+
+/* Synchronous SSAM request: write new profile (TMP subsystem, command 0x03). */
+SSAM_DEFINE_SYNC_REQUEST_CL_W(__ssam_tmp_profile_set, __le32, {
+       .target_category = SSAM_SSH_TC_TMP,
+       .command_id      = 0x03,
+});
+
+/* Query the current thermal profile from the EC; 0 on success, -errno on failure. */
+static int ssam_tmp_profile_get(struct ssam_device *sdev, enum ssam_tmp_profile *p)
+{
+       struct ssam_tmp_profile_info info;
+       int status;
+
+       status = ssam_retry(__ssam_tmp_profile_get, sdev, &info);
+       if (status < 0)
+               return status;
+
+       *p = le32_to_cpu(info.profile);
+       return 0;
+}
+
+/* Write a new thermal profile to the EC; 0 on success, -errno on failure. */
+static int ssam_tmp_profile_set(struct ssam_device *sdev, enum ssam_tmp_profile p)
+{
+       __le32 profile_le = cpu_to_le32(p);
+
+       return ssam_retry(__ssam_tmp_profile_set, sdev, &profile_le);
+}
+
+/*
+ * Translate an EC profile value into the generic platform-profile option.
+ * Returns the (non-negative) option on success, -EINVAL for unknown values.
+ */
+static int convert_ssam_to_profile(struct ssam_device *sdev, enum ssam_tmp_profile p)
+{
+       switch (p) {
+       case SSAM_TMP_PROFILE_NORMAL:
+               return PLATFORM_PROFILE_BALANCED;
+
+       case SSAM_TMP_PROFILE_BATTERY_SAVER:
+               return PLATFORM_PROFILE_LOW_POWER;
+
+       case SSAM_TMP_PROFILE_BETTER_PERFORMANCE:
+               return PLATFORM_PROFILE_BALANCED_PERFORMANCE;
+
+       case SSAM_TMP_PROFILE_BEST_PERFORMANCE:
+               return PLATFORM_PROFILE_PERFORMANCE;
+
+       default:
+               dev_err(&sdev->dev, "invalid performance profile: %d", p);
+               return -EINVAL;
+       }
+}
+
+/*
+ * Translate a generic platform-profile option into the EC profile value.
+ * Returns the (positive) EC value on success, -EOPNOTSUPP for options not
+ * advertised in the handler's choices bitmap.
+ */
+static int convert_profile_to_ssam(struct ssam_device *sdev, enum platform_profile_option p)
+{
+       switch (p) {
+       case PLATFORM_PROFILE_LOW_POWER:
+               return SSAM_TMP_PROFILE_BATTERY_SAVER;
+
+       case PLATFORM_PROFILE_BALANCED:
+               return SSAM_TMP_PROFILE_NORMAL;
+
+       case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
+               return SSAM_TMP_PROFILE_BETTER_PERFORMANCE;
+
+       case PLATFORM_PROFILE_PERFORMANCE:
+               return SSAM_TMP_PROFILE_BEST_PERFORMANCE;
+
+       default:
+               /* This should have already been caught by platform_profile_store(). */
+               WARN(true, "unsupported platform profile");
+               return -EOPNOTSUPP;
+       }
+}
+
+/* platform_profile_handler .profile_get: read profile from EC and translate it. */
+static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
+                                    enum platform_profile_option *profile)
+{
+       struct ssam_tmp_profile_device *tpd;
+       enum ssam_tmp_profile tp;
+       int status;
+
+       tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
+
+       status = ssam_tmp_profile_get(tpd->sdev, &tp);
+       if (status)
+               return status;
+
+       /* Non-negative return is the translated platform-profile option. */
+       status = convert_ssam_to_profile(tpd->sdev, tp);
+       if (status < 0)
+               return status;
+
+       *profile = status;
+       return 0;
+}
+
+/* platform_profile_handler .profile_set: translate option and write it to EC. */
+static int ssam_platform_profile_set(struct platform_profile_handler *pprof,
+                                    enum platform_profile_option profile)
+{
+       struct ssam_tmp_profile_device *tpd;
+       int tp;
+
+       tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
+
+       tp = convert_profile_to_ssam(tpd->sdev, profile);
+       if (tp < 0)
+               return tp;
+
+       return ssam_tmp_profile_set(tpd->sdev, tp);
+}
+
+/*
+ * Probe: allocate per-device state and register a platform-profile handler
+ * backed by the SSAM thermal subsystem. Returns 0 on success, -errno on
+ * failure.
+ */
+static int surface_platform_profile_probe(struct ssam_device *sdev)
+{
+       struct ssam_tmp_profile_device *tpd;
+
+       tpd = devm_kzalloc(&sdev->dev, sizeof(*tpd), GFP_KERNEL);
+       if (!tpd)
+               return -ENOMEM;
+
+       tpd->sdev = sdev;
+
+       tpd->handler.profile_get = ssam_platform_profile_get;
+       tpd->handler.profile_set = ssam_platform_profile_set;
+
+       /* Advertise all four profiles supported by the EC (enum ssam_tmp_profile). */
+       set_bit(PLATFORM_PROFILE_LOW_POWER, tpd->handler.choices);
+       set_bit(PLATFORM_PROFILE_BALANCED, tpd->handler.choices);
+       set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
+       set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
+
+       /*
+        * Propagate registration failures (e.g. -EEXIST when another handler
+        * is already registered) instead of silently ignoring them, so probe
+        * does not report success while the sysfs interface is absent.
+        */
+       return platform_profile_register(&tpd->handler);
+}
+
+/* Remove: unregister the (single, global) platform-profile handler. */
+static void surface_platform_profile_remove(struct ssam_device *sdev)
+{
+       platform_profile_remove();
+}
+
+/* Match the thermal (TMP) subsystem, target 0x01, instance 0x01 of the SAM EC. */
+static const struct ssam_device_id ssam_platform_profile_match[] = {
+       { SSAM_SDEV(TMP, 0x01, 0x00, 0x01) },
+       { },
+};
+MODULE_DEVICE_TABLE(ssam, ssam_platform_profile_match);
+
+static struct ssam_device_driver surface_platform_profile = {
+       .probe = surface_platform_profile_probe,
+       .remove = surface_platform_profile_remove,
+       .match_table = ssam_platform_profile_match,
+       .driver = {
+               .name = "surface_platform_profile",
+               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+       },
+};
+module_ssam_device_driver(surface_platform_profile);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Platform Profile Support for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
index d8afed5db94c59f69f58e63e24e5976e9fae2ee7..242fb690dcaf7aff6998cbc22c3e753679fe6a84 100644 (file)
@@ -40,8 +40,6 @@ static const guid_t MSHW0040_DSM_UUID =
 #define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN                0xc2
 #define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN      0xc3
 
-ACPI_MODULE_NAME("surface pro 3 button");
-
 MODULE_AUTHOR("Chen Yu");
 MODULE_DESCRIPTION("Surface Pro3 Button Driver");
 MODULE_LICENSE("GPL v2");
index 461ec61530ebf2a12883899f4734d85da62c42a8..2714f7c3843e3d3ea34117b95a2cab422bd95076 100644 (file)
@@ -123,6 +123,17 @@ config XIAOMI_WMI
          To compile this driver as a module, choose M here: the module will
          be called xiaomi-wmi.
 
+config GIGABYTE_WMI
+       tristate "Gigabyte WMI temperature driver"
+       depends on ACPI_WMI
+       depends on HWMON
+       help
+         Say Y here if you want to support WMI-based temperature reporting on
+         Gigabyte mainboards.
+
+         To compile this driver as a module, choose M here: the module will
+         be called gigabyte-wmi.
+
 config ACERHDF
        tristate "Acer Aspire One temperature and fan driver"
        depends on ACPI && THERMAL
@@ -193,6 +204,17 @@ config AMD_PMC
          If you choose to compile this driver as a module the module will be
          called amd-pmc.
 
+config ADV_SWBUTTON
+       tristate "Advantech ACPI Software Button Driver"
+       depends on ACPI && INPUT
+       help
+         Say Y here to enable support for Advantech software defined
+         button feature. More information can be found at
+         <http://www.advantech.com.tw/products/>
+
+         To compile this driver as a module, choose M here. The module will
+         be called adv_swbutton.
+
 config APPLE_GMUX
        tristate "Apple Gmux Driver"
        depends on ACPI && PCI
@@ -410,6 +432,7 @@ config HP_WMI
        depends on INPUT
        depends on RFKILL || RFKILL = n
        select INPUT_SPARSEKMAP
+       select ACPI_PLATFORM_PROFILE
        help
         Say Y here if you want to support WMI-based hotkeys on HP laptops and
         to read data from WMI such as docking or ambient light sensor state.
@@ -1171,6 +1194,7 @@ config INTEL_MRFLD_PWRBTN
 config INTEL_PMC_CORE
        tristate "Intel PMC Core driver"
        depends on PCI
+       depends on ACPI
        help
          The Intel Platform Controller Hub for Intel Core SoCs provides access
          to Power Management Controller registers via various interfaces. This
@@ -1192,7 +1216,7 @@ config INTEL_PMT_CLASS
        tristate
        help
          The Intel Platform Monitoring Technology (PMT) class driver provides
-         the basic sysfs interface and file hierarchy uses by PMT devices.
+         the basic sysfs interface and file hierarchy used by PMT devices.
 
          For more information, see:
          <file:Documentation/ABI/testing/sysfs-class-intel_pmt>
index 60d554073749be314ec79f1fd5fb863c7036dd9f..dcc8cdb95b4dde8dfcb191fd4c32364731ce2a14 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_INTEL_WMI_THUNDERBOLT)   += intel-wmi-thunderbolt.o
 obj-$(CONFIG_MXM_WMI)                  += mxm-wmi.o
 obj-$(CONFIG_PEAQ_WMI)                 += peaq-wmi.o
 obj-$(CONFIG_XIAOMI_WMI)               += xiaomi-wmi.o
+obj-$(CONFIG_GIGABYTE_WMI)             += gigabyte-wmi.o
 
 # Acer
 obj-$(CONFIG_ACERHDF)          += acerhdf.o
@@ -24,6 +25,9 @@ obj-$(CONFIG_ACER_WMI)                += acer-wmi.o
 # AMD
 obj-$(CONFIG_AMD_PMC)          += amd-pmc.o
 
+# Advantech
+obj-$(CONFIG_ADV_SWBUTTON)     += adv_swbutton.o
+
 # Apple
 obj-$(CONFIG_APPLE_GMUX)       += apple-gmux.o
 
diff --git a/drivers/platform/x86/adv_swbutton.c b/drivers/platform/x86/adv_swbutton.c
new file mode 100644 (file)
index 0000000..38693b7
--- /dev/null
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  adv_swbutton.c - Software Button Interface Driver.
+ *
+ *  (C) Copyright 2020 Advantech Corporation, Inc
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+
+#define ACPI_BUTTON_HID_SWBTN               "AHC0310"
+
+#define ACPI_BUTTON_NOTIFY_SWBTN_RELEASE    0x86
+#define ACPI_BUTTON_NOTIFY_SWBTN_PRESSED    0x85
+
+/* Per-device state: input device plus its phys path string. */
+struct adv_swbutton {
+       struct input_dev *input;
+       char phys[32];
+};
+
+/*-------------------------------------------------------------------------
+ *                               Driver Interface
+ *--------------------------------------------------------------------------
+ */
+/*
+ * ACPI notify handler: translate the button press/release notifications
+ * (0x85/0x86) into KEY_PROG1 input events; other events are only logged.
+ */
+static void adv_swbutton_notify(acpi_handle handle, u32 event, void *context)
+{
+       struct platform_device *device = context;
+       struct adv_swbutton *button = dev_get_drvdata(&device->dev);
+
+       switch (event) {
+       case ACPI_BUTTON_NOTIFY_SWBTN_RELEASE:
+               input_report_key(button->input, KEY_PROG1, 0);
+               input_sync(button->input);
+               break;
+       case ACPI_BUTTON_NOTIFY_SWBTN_PRESSED:
+               input_report_key(button->input, KEY_PROG1, 1);
+               input_sync(button->input);
+               break;
+       default:
+               dev_dbg(&device->dev, "Unsupported event [0x%x]\n", event);
+       }
+}
+
+/*
+ * Probe: allocate state, register the input device and install the ACPI
+ * notify handler. Returns 0 on success, -errno on failure. Allocations are
+ * devm-managed; the notify handler is removed in adv_swbutton_remove().
+ */
+static int adv_swbutton_probe(struct platform_device *device)
+{
+       struct adv_swbutton *button;
+       struct input_dev *input;
+       acpi_handle handle = ACPI_HANDLE(&device->dev);
+       acpi_status status;
+       int error;
+
+       button = devm_kzalloc(&device->dev, sizeof(*button), GFP_KERNEL);
+       if (!button)
+               return -ENOMEM;
+
+       dev_set_drvdata(&device->dev, button);
+
+       input = devm_input_allocate_device(&device->dev);
+       if (!input)
+               return -ENOMEM;
+
+       button->input = input;
+       snprintf(button->phys, sizeof(button->phys), "%s/button/input0", ACPI_BUTTON_HID_SWBTN);
+
+       input->name = "Advantech Software Button";
+       input->phys = button->phys;
+       input->id.bustype = BUS_HOST;
+       input->dev.parent = &device->dev;
+       set_bit(EV_REP, input->evbit);
+       input_set_capability(input, EV_KEY, KEY_PROG1);
+
+       error = input_register_device(input);
+       if (error)
+               return error;
+
+       device_init_wakeup(&device->dev, true);
+
+       status = acpi_install_notify_handler(handle,
+                                            ACPI_DEVICE_NOTIFY,
+                                            adv_swbutton_notify,
+                                            device);
+       if (ACPI_FAILURE(status)) {
+               dev_err(&device->dev, "Error installing notify handler\n");
+               /* Undo the wakeup enable; nothing else needs manual cleanup. */
+               device_init_wakeup(&device->dev, false);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/* Remove: detach the ACPI notify handler installed in probe. */
+static int adv_swbutton_remove(struct platform_device *device)
+{
+       acpi_handle handle = ACPI_HANDLE(&device->dev);
+
+       acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY,
+                                  adv_swbutton_notify);
+
+       return 0;
+}
+
+/* ACPI HID match table for the Advantech software button (AHC0310). */
+static const struct acpi_device_id button_device_ids[] = {
+       {ACPI_BUTTON_HID_SWBTN, 0},
+       {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, button_device_ids);
+
+static struct platform_driver adv_swbutton_driver = {
+       .driver = {
+               .name = "adv_swbutton",
+               .acpi_match_table = button_device_ids,
+       },
+       .probe = adv_swbutton_probe,
+       .remove = adv_swbutton_remove,
+};
+module_platform_driver(adv_swbutton_driver);
+
+MODULE_AUTHOR("Andrea Ho");
+MODULE_DESCRIPTION("Advantech ACPI SW Button Driver");
+MODULE_LICENSE("GPL v2");
index bfea656e910c25b840af234a5007305d89c63b2a..4d2d32bfbe2a6da7bbff43e3e4047fea3b715c64 100644 (file)
@@ -1569,7 +1569,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
                                    struct attribute *attr,
                                    int idx)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct asus_laptop *asus = dev_get_drvdata(dev);
        acpi_handle handle = asus->handle;
        bool supported;
index 9ca15f72434303000a85929fc4e1e82a27ce659d..ebaeb7bb80f5c486b0e8d69bd8a260102635f9d8 100644 (file)
@@ -47,6 +47,9 @@ MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>, "
 MODULE_DESCRIPTION("Asus Generic WMI Driver");
 MODULE_LICENSE("GPL");
 
+static bool fnlock_default = true;
+module_param(fnlock_default, bool, 0444);
+
 #define to_asus_wmi_driver(pdrv)                                       \
        (container_of((pdrv), struct asus_wmi_driver, platform_driver))
 
@@ -2673,7 +2676,7 @@ static int asus_wmi_add(struct platform_device *pdev)
                err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
 
        if (asus_wmi_has_fnlock_key(asus)) {
-               asus->fnlock_locked = true;
+               asus->fnlock_locked = fnlock_default;
                asus_wmi_fnlock_update(asus);
        }
 
index 3e03e8d3a07fa6da6f98619d058a798108d9100b..9309ab5792cbc3710ac9370ca0d3a8ec2ebb316a 100644 (file)
@@ -956,7 +956,7 @@ static int cmpc_ipml_add(struct acpi_device *acpi)
        /*
         * If RFKILL is disabled, rfkill_alloc will return ERR_PTR(-ENODEV).
         * This is OK, however, since all other uses of the device will not
-        * derefence it.
+        * dereference it.
         */
        if (ipml->rf) {
                retval = rfkill_register(ipml->rf);
index 5bb2859c828584973c8dae961540a368858e4991..f212482555298ba31d34d35c406068a56465b10b 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Alienware AlienFX control
  *
- * Copyright (C) 2014 Dell Inc <mario_limonciello@dell.com>
+ * Copyright (C) 2014 Dell Inc <Dell.Client.Kernel@dell.com>
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -26,7 +26,7 @@
 #define WMAX_METHOD_DEEP_SLEEP_CONTROL 0x0B
 #define WMAX_METHOD_DEEP_SLEEP_STATUS  0x0C
 
-MODULE_AUTHOR("Mario Limonciello <mario_limonciello@dell.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
 MODULE_DESCRIPTION("Alienware special feature control");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("wmi:" LEGACY_CONTROL_GUID);
index 3a1dbf199441385ec4066883bff10a7adb0d857a..fc086b66f70b377374dcd71627f2efb0769c8e1d 100644 (file)
@@ -647,6 +647,6 @@ module_exit(dell_smbios_exit);
 MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
 MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
 MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
 MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS");
 MODULE_LICENSE("GPL");
index 27a298b7c541b9c8c01f43dadbc8cb59be35acbb..a1753485159ca6164206665fbb7d1e53a08eb442 100644 (file)
@@ -205,7 +205,7 @@ fail_register:
        return ret;
 }
 
-static int dell_smbios_wmi_remove(struct wmi_device *wdev)
+static void dell_smbios_wmi_remove(struct wmi_device *wdev)
 {
        struct wmi_smbios_priv *priv = dev_get_drvdata(&wdev->dev);
        int count;
@@ -218,7 +218,6 @@ static int dell_smbios_wmi_remove(struct wmi_device *wdev)
        count = get_order(priv->req_buf_size);
        free_pages((unsigned long)priv->buf, count);
        mutex_unlock(&call_mutex);
-       return 0;
 }
 
 static const struct wmi_device_id dell_smbios_wmi_id_table[] = {
index a068900ae8a1ada7ac9c118e13a9efe78396d8fc..c2a1802027190186cc364991a100515266a925a1 100644 (file)
@@ -174,14 +174,13 @@ out:
        return ret;
 }
 
-static int dell_wmi_descriptor_remove(struct wmi_device *wdev)
+static void dell_wmi_descriptor_remove(struct wmi_device *wdev)
 {
        struct descriptor_priv *priv = dev_get_drvdata(&wdev->dev);
 
        mutex_lock(&list_mutex);
        list_del(&priv->list);
        mutex_unlock(&list_mutex);
-       return 0;
 }
 
 static const struct wmi_device_id dell_wmi_descriptor_id_table[] = {
@@ -201,6 +200,6 @@ static struct wmi_driver dell_wmi_descriptor_driver = {
 module_wmi_driver(dell_wmi_descriptor_driver);
 
 MODULE_DEVICE_TABLE(wmi, dell_wmi_descriptor_id_table);
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
 MODULE_DESCRIPTION("Dell WMI descriptor driver");
 MODULE_LICENSE("GPL");
index f95d8ddace5a705d825b2336c00ca54e281e30cf..c2dd2de6bc2034226d5b62c04e47d1d74506bc9a 100644 (file)
@@ -152,12 +152,11 @@ static int bios_attr_set_interface_probe(struct wmi_device *wdev, const void *co
        return 0;
 }
 
-static int bios_attr_set_interface_remove(struct wmi_device *wdev)
+static void bios_attr_set_interface_remove(struct wmi_device *wdev)
 {
        mutex_lock(&wmi_priv.mutex);
        wmi_priv.bios_attr_wdev = NULL;
        mutex_unlock(&wmi_priv.mutex);
-       return 0;
 }
 
 static const struct wmi_device_id bios_attr_set_interface_id_table[] = {
index 5780b4d94759b24342886ef2c66ef5916f15f0e5..339a082d6c18d1251f4551d3a969a39fd0d1cd14 100644 (file)
@@ -119,12 +119,11 @@ static int bios_attr_pass_interface_probe(struct wmi_device *wdev, const void *c
        return 0;
 }
 
-static int bios_attr_pass_interface_remove(struct wmi_device *wdev)
+static void bios_attr_pass_interface_remove(struct wmi_device *wdev)
 {
        mutex_lock(&wmi_priv.mutex);
        wmi_priv.password_attr_wdev = NULL;
        mutex_unlock(&wmi_priv.mutex);
-       return 0;
 }
 
 static const struct wmi_device_id bios_attr_pass_interface_id_table[] = {
index 7410ccae650c2ccfd9040f5179ea1970fb4cd7a8..c8d276d78e920ae2cc5b181ebba20e8e5d61a3b4 100644 (file)
@@ -399,6 +399,7 @@ static int init_bios_attributes(int attr_type, const char *guid)
        union acpi_object *obj = NULL;
        union acpi_object *elements;
        struct kset *tmp_set;
+       int min_elements;
 
        /* instance_id needs to be reset for each type GUID
         * also, instance IDs are unique within GUID but not across
@@ -409,14 +410,38 @@ static int init_bios_attributes(int attr_type, const char *guid)
        retval = alloc_attributes_data(attr_type);
        if (retval)
                return retval;
+
+       switch (attr_type) {
+       case ENUM:      min_elements = 8;       break;
+       case INT:       min_elements = 9;       break;
+       case STR:       min_elements = 8;       break;
+       case PO:        min_elements = 4;       break;
+       default:
+               pr_err("Error: Unknown attr_type: %d\n", attr_type);
+               return -EINVAL;
+       }
+
        /* need to use specific instance_id and guid combination to get right data */
        obj = get_wmiobj_pointer(instance_id, guid);
-       if (!obj || obj->type != ACPI_TYPE_PACKAGE)
+       if (!obj)
                return -ENODEV;
-       elements = obj->package.elements;
 
        mutex_lock(&wmi_priv.mutex);
-       while (elements) {
+       while (obj) {
+               if (obj->type != ACPI_TYPE_PACKAGE) {
+                       pr_err("Error: Expected ACPI-package type, got: %d\n", obj->type);
+                       retval = -EIO;
+                       goto err_attr_init;
+               }
+
+               if (obj->package.count < min_elements) {
+                       pr_err("Error: ACPI-package does not have enough elements: %d < %d\n",
+                              obj->package.count, min_elements);
+                       goto nextobj;
+               }
+
+               elements = obj->package.elements;
+
                /* sanity checking */
                if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
                        pr_debug("incorrect element type\n");
@@ -481,7 +506,6 @@ nextobj:
                kfree(obj);
                instance_id++;
                obj = get_wmiobj_pointer(instance_id, guid);
-               elements = obj ? obj->package.elements : NULL;
        }
 
        mutex_unlock(&wmi_priv.mutex);
@@ -604,7 +628,7 @@ static void __exit sysman_exit(void)
 module_init(sysman_init);
 module_exit(sysman_exit);
 
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
 MODULE_AUTHOR("Prasanth Ksr <prasanth.ksr@dell.com>");
 MODULE_AUTHOR("Divya Bharathi <divya.bharathi@dell.com>");
 MODULE_DESCRIPTION("Dell platform setting control interface");
index bbdb3e8608927d795c729b6f31995cb19981b297..5e1b7f897df58025be0731e1c7f5644d516fe59d 100644 (file)
@@ -714,10 +714,9 @@ static int dell_wmi_probe(struct wmi_device *wdev, const void *context)
        return dell_wmi_input_setup(wdev);
 }
 
-static int dell_wmi_remove(struct wmi_device *wdev)
+static void dell_wmi_remove(struct wmi_device *wdev)
 {
        dell_wmi_input_destroy(wdev);
-       return 0;
 }
 static const struct wmi_device_id dell_wmi_id_table[] = {
        { .guid_string = DELL_EVENT_GUID },
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
new file mode 100644 (file)
index 0000000..13d5743
--- /dev/null
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *  Copyright (C) 2021 Thomas Weißschuh <thomas@weissschuh.net>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+
+#define GIGABYTE_WMI_GUID      "DEADBEEF-2001-0000-00A0-C90629100000"
+#define NUM_TEMPERATURE_SENSORS        6
+
+static bool force_load;
+module_param(force_load, bool, 0444);
+MODULE_PARM_DESC(force_load, "Force loading on unknown platform");
+
+static u8 usable_sensors_mask;
+
+/* WMI method IDs exposed by the Gigabyte firmware interface. */
+enum gigabyte_wmi_commandtype {
+       GIGABYTE_WMI_BUILD_DATE_QUERY       =   0x1,
+       GIGABYTE_WMI_MAINBOARD_TYPE_QUERY   =   0x2,
+       GIGABYTE_WMI_FIRMWARE_VERSION_QUERY =   0x4,
+       GIGABYTE_WMI_MAINBOARD_NAME_QUERY   =   0x5,
+       GIGABYTE_WMI_TEMPERATURE_QUERY      = 0x125,
+};
+
+/* Single-u32 input buffer passed to the WMI methods. */
+struct gigabyte_wmi_args {
+       u32 arg1;
+};
+
+/*
+ * Evaluate a WMI method with the given command and arguments; the result is
+ * returned through @out. Returns 0 on success, -EIO on ACPI failure.
+ */
+static int gigabyte_wmi_perform_query(struct wmi_device *wdev,
+                                     enum gigabyte_wmi_commandtype command,
+                                     struct gigabyte_wmi_args *args, struct acpi_buffer *out)
+{
+       const struct acpi_buffer in = {
+               .length = sizeof(*args),
+               .pointer = args,
+       };
+
+       acpi_status ret = wmidev_evaluate_method(wdev, 0x0, command, &in, out);
+
+       if (ACPI_FAILURE(ret))
+               return -EIO;
+
+       return 0;
+}
+
+/*
+ * Perform a query and extract a single integer result into @res.
+ * Returns 0 on success, -EIO if the result is missing or not an integer.
+ */
+static int gigabyte_wmi_query_integer(struct wmi_device *wdev,
+                                     enum gigabyte_wmi_commandtype command,
+                                     struct gigabyte_wmi_args *args, u64 *res)
+{
+       union acpi_object *obj;
+       struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+       int ret;
+
+       ret = gigabyte_wmi_perform_query(wdev, command, args, &result);
+       if (ret)
+               return ret;
+       obj = result.pointer;
+       if (obj && obj->type == ACPI_TYPE_INTEGER)
+               *res = obj->integer.value;
+       else
+               ret = -EIO;
+       kfree(result.pointer);
+       return ret;
+}
+
+/*
+ * Read temperature sensor @sensor in millidegrees Celsius into @res.
+ * Returns 0 on success, -ENODEV if the sensor reports 0 (unpopulated),
+ * or a negative errno from the WMI query.
+ */
+static int gigabyte_wmi_temperature(struct wmi_device *wdev, u8 sensor, long *res)
+{
+       struct gigabyte_wmi_args args = {
+               .arg1 = sensor,
+       };
+       u64 temp;
+       int ret;        /* was acpi_status: query returns a signed errno, not an acpi_status */
+
+       ret = gigabyte_wmi_query_integer(wdev, GIGABYTE_WMI_TEMPERATURE_QUERY, &args, &temp);
+       if (ret == 0) {
+               if (temp == 0)
+                       return -ENODEV;
+               *res = (s8)temp * 1000; // value is a signed 8-bit integer
+       }
+       return ret;
+}
+
+/* hwmon .read: only temp channels are registered, so forward directly. */
+static int gigabyte_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+                                  u32 attr, int channel, long *val)
+{
+       struct wmi_device *wdev = dev_get_drvdata(dev);
+
+       return gigabyte_wmi_temperature(wdev, channel, val);
+}
+
+/* hwmon .is_visible: expose (read-only) only the sensors probed as usable. */
+static umode_t gigabyte_wmi_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
+                                            u32 attr, int channel)
+{
+       return usable_sensors_mask & BIT(channel) ? 0444  : 0;
+}
+
+/* Six temperature input channels (NUM_TEMPERATURE_SENSORS). */
+static const struct hwmon_channel_info *gigabyte_wmi_hwmon_info[] = {
+       HWMON_CHANNEL_INFO(temp,
+                          HWMON_T_INPUT,
+                          HWMON_T_INPUT,
+                          HWMON_T_INPUT,
+                          HWMON_T_INPUT,
+                          HWMON_T_INPUT,
+                          HWMON_T_INPUT),
+       NULL
+};
+
+static const struct hwmon_ops gigabyte_wmi_hwmon_ops = {
+       .read = gigabyte_wmi_hwmon_read,
+       .is_visible = gigabyte_wmi_hwmon_is_visible,
+};
+
+static const struct hwmon_chip_info gigabyte_wmi_hwmon_chip_info = {
+       .ops = &gigabyte_wmi_hwmon_ops,
+       .info = gigabyte_wmi_hwmon_info,
+};
+
+/* Probe each sensor once; return a bitmask of those that read successfully. */
+static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
+{
+       int i;
+       long temp;
+       u8 r = 0;
+
+       for (i = 0; i < NUM_TEMPERATURE_SENSORS; i++) {
+               if (!gigabyte_wmi_temperature(wdev, i, &temp))
+                       r |= BIT(i);
+       }
+       return r;
+}
+
+/* DMI allow-list of boards verified to report sane values (see force_load). */
+static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+       { .matches = {
+               DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+               DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550 GAMING X V2"),
+       }},
+       { .matches = {
+               DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+               DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M AORUS PRO-P"),
+       }},
+       { .matches = {
+               DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+               DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M DS3H"),
+       }},
+       { .matches = {
+               DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+               DMI_EXACT_MATCH(DMI_BOARD_NAME, "Z390 I AORUS PRO WIFI-CF"),
+       }},
+       { .matches = {
+               DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+               DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 AORUS ELITE"),
+       }},
+       { .matches = {
+               DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+               DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 I AORUS PRO WIFI"),
+       }},
+       { }
+};
+
+/*
+ * Probe: verify the platform (or honour force_load), detect usable sensors,
+ * and register a devm-managed hwmon device. Returns 0 or -errno.
+ */
+static int gigabyte_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+       struct device *hwmon_dev;
+
+       if (!dmi_check_system(gigabyte_wmi_known_working_platforms)) {
+               if (!force_load)
+                       return -ENODEV;
+               /* Log messages must be newline-terminated. */
+               dev_warn(&wdev->dev, "Forcing load on unknown platform\n");
+       }
+
+       usable_sensors_mask = gigabyte_wmi_detect_sensor_usability(wdev);
+       if (!usable_sensors_mask) {
+               dev_info(&wdev->dev, "No temperature sensors usable\n");
+               return -ENODEV;
+       }
+
+       hwmon_dev = devm_hwmon_device_register_with_info(&wdev->dev, "gigabyte_wmi", wdev,
+                                                        &gigabyte_wmi_hwmon_chip_info, NULL);
+
+       return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+/* Match the Gigabyte WMI GUID; no driver-private context needed. */
+static const struct wmi_device_id gigabyte_wmi_id_table[] = {
+       { GIGABYTE_WMI_GUID, NULL },
+       { }
+};
+
+static struct wmi_driver gigabyte_wmi_driver = {
+       .driver = {
+               .name = "gigabyte-wmi",
+       },
+       .id_table = gigabyte_wmi_id_table,
+       .probe = gigabyte_wmi_probe,
+};
+module_wmi_driver(gigabyte_wmi_driver);
+
+MODULE_DEVICE_TABLE(wmi, gigabyte_wmi_id_table);
+MODULE_AUTHOR("Thomas Weißschuh <thomas@weissschuh.net>");
+MODULE_DESCRIPTION("Gigabyte WMI temperature driver");
+MODULE_LICENSE("GPL");
index 5b516e4c2bfbe2ac53aa68a7bbf99365d1c391a4..7a20f68ae20611f74d6621da1a89601d838cde7d 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/devm-helpers.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -124,7 +125,7 @@ static void gpd_pocket_fan_force_update(struct gpd_pocket_fan_data *fan)
 static int gpd_pocket_fan_probe(struct platform_device *pdev)
 {
        struct gpd_pocket_fan_data *fan;
-       int i;
+       int i, ret;
 
        for (i = 0; i < ARRAY_SIZE(temp_limits); i++) {
                if (temp_limits[i] < 20000 || temp_limits[i] > 90000) {
@@ -152,7 +153,10 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        fan->dev = &pdev->dev;
-       INIT_DELAYED_WORK(&fan->work, gpd_pocket_fan_worker);
+       ret = devm_delayed_work_autocancel(&pdev->dev, &fan->work,
+                                          gpd_pocket_fan_worker);
+       if (ret)
+               return ret;
 
        /* Note this returns a "weak" reference which we don't need to free */
        fan->dts0 = thermal_zone_get_zone_by_name("soc_dts0");
@@ -177,14 +181,6 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int gpd_pocket_fan_remove(struct platform_device *pdev)
-{
-       struct gpd_pocket_fan_data *fan = platform_get_drvdata(pdev);
-
-       cancel_delayed_work_sync(&fan->work);
-       return 0;
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int gpd_pocket_fan_suspend(struct device *dev)
 {
@@ -215,7 +211,6 @@ MODULE_DEVICE_TABLE(acpi, gpd_pocket_fan_acpi_match);
 
 static struct platform_driver gpd_pocket_fan_driver = {
        .probe  = gpd_pocket_fan_probe,
-       .remove = gpd_pocket_fan_remove,
        .driver = {
                .name                   = "gpd_pocket_fan",
                .acpi_match_table       = gpd_pocket_fan_acpi_match,
index e94e59283ecb9f5f601f005f5359d35c60522be1..027a1467d009fddb7d1ee06031c7dfad53a9bbdd 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/input.h>
 #include <linux/input/sparse-keymap.h>
 #include <linux/platform_device.h>
+#include <linux/platform_profile.h>
 #include <linux/acpi.h>
 #include <linux/rfkill.h>
 #include <linux/string.h>
@@ -85,7 +86,7 @@ enum hp_wmi_commandtype {
        HPWMI_FEATURE2_QUERY            = 0x0d,
        HPWMI_WIRELESS2_QUERY           = 0x1b,
        HPWMI_POSTCODEERROR_QUERY       = 0x2a,
-       HPWMI_THERMAL_POLICY_QUERY      = 0x4c,
+       HPWMI_THERMAL_PROFILE_QUERY     = 0x4c,
 };
 
 enum hp_wmi_command {
@@ -119,6 +120,12 @@ enum hp_wireless2_bits {
        HPWMI_POWER_FW_OR_HW    = HPWMI_POWER_BIOS | HPWMI_POWER_HARD,
 };
 
+enum hp_thermal_profile {
+       HP_THERMAL_PROFILE_PERFORMANCE  = 0x00,
+       HP_THERMAL_PROFILE_DEFAULT              = 0x01,
+       HP_THERMAL_PROFILE_COOL                 = 0x02
+};
+
 #define IS_HWBLOCKED(x) ((x & HPWMI_POWER_FW_OR_HW) != HPWMI_POWER_FW_OR_HW)
 #define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT)
 
@@ -159,6 +166,8 @@ static const struct key_entry hp_wmi_keymap[] = {
 
 static struct input_dev *hp_wmi_input_dev;
 static struct platform_device *hp_wmi_platform_dev;
+static struct platform_profile_handler platform_profile_handler;
+static bool platform_profile_support;
 
 static struct rfkill *wifi_rfkill;
 static struct rfkill *bluetooth_rfkill;
@@ -869,23 +878,98 @@ fail:
        return err;
 }
 
-static int thermal_policy_setup(struct platform_device *device)
+static int thermal_profile_get(void)
+{
+       return hp_wmi_read_int(HPWMI_THERMAL_PROFILE_QUERY);
+}
+
+static int thermal_profile_set(int thermal_profile)
+{
+       return hp_wmi_perform_query(HPWMI_THERMAL_PROFILE_QUERY, HPWMI_WRITE, &thermal_profile,
+                                                          sizeof(thermal_profile), 0);
+}
+
+static int platform_profile_get(struct platform_profile_handler *pprof,
+                               enum platform_profile_option *profile)
+{
+       int tp;
+
+       tp = thermal_profile_get();
+       if (tp < 0)
+               return tp;
+
+       switch (tp) {
+       case HP_THERMAL_PROFILE_PERFORMANCE:
+               *profile =  PLATFORM_PROFILE_PERFORMANCE;
+               break;
+       case HP_THERMAL_PROFILE_DEFAULT:
+               *profile =  PLATFORM_PROFILE_BALANCED;
+               break;
+       case HP_THERMAL_PROFILE_COOL:
+               *profile =  PLATFORM_PROFILE_COOL;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int platform_profile_set(struct platform_profile_handler *pprof,
+                               enum platform_profile_option profile)
 {
        int err, tp;
 
-       tp = hp_wmi_read_int(HPWMI_THERMAL_POLICY_QUERY);
+       switch (profile) {
+       case PLATFORM_PROFILE_PERFORMANCE:
+               tp =  HP_THERMAL_PROFILE_PERFORMANCE;
+               break;
+       case PLATFORM_PROFILE_BALANCED:
+               tp =  HP_THERMAL_PROFILE_DEFAULT;
+               break;
+       case PLATFORM_PROFILE_COOL:
+               tp =  HP_THERMAL_PROFILE_COOL;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       err = thermal_profile_set(tp);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static int thermal_profile_setup(void)
+{
+       int err, tp;
+
+       tp = thermal_profile_get();
        if (tp < 0)
                return tp;
 
        /*
-        * call thermal policy write command to ensure that the firmware correctly
+        * call thermal profile write command to ensure that the firmware correctly
         * sets the OEM variables for the DPTF
         */
-       err = hp_wmi_perform_query(HPWMI_THERMAL_POLICY_QUERY, HPWMI_WRITE, &tp,
-                                                          sizeof(tp), 0);
+       err = thermal_profile_set(tp);
        if (err)
                return err;
 
+       platform_profile_handler.profile_get = platform_profile_get,
+       platform_profile_handler.profile_set = platform_profile_set,
+
+       set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
+       set_bit(PLATFORM_PROFILE_BALANCED, platform_profile_handler.choices);
+       set_bit(PLATFORM_PROFILE_PERFORMANCE, platform_profile_handler.choices);
+
+       err = platform_profile_register(&platform_profile_handler);
+       if (err)
+               return err;
+
+       platform_profile_support = true;
+
        return 0;
 }
 
@@ -900,7 +984,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
        if (hp_wmi_rfkill_setup(device))
                hp_wmi_rfkill2_setup(device);
 
-       thermal_policy_setup(device);
+       thermal_profile_setup();
 
        return 0;
 }
@@ -927,6 +1011,9 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
                rfkill_destroy(wwan_rfkill);
        }
 
+       if (platform_profile_support)
+               platform_profile_remove();
+
        return 0;
 }
 
index 57cc92891a57083b227146718ef7e2726f0d4a21..078648a9201b3c08e19ad4302e542a24a22feabe 100644 (file)
@@ -483,11 +483,16 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
                        goto wakeup;
 
                /*
-                * Switch events will wake the device and report the new switch
-                * position to the input subsystem.
+                * Some devices send (duplicate) tablet-mode events when moved
+                * around even though the mode has not changed; and they do this
+                * even when suspended.
+                * Update the switch state in case it changed and then return
+                * without waking up to avoid spurious wakeups.
                 */
-               if (priv->switches && (event == 0xcc || event == 0xcd))
-                       goto wakeup;
+               if (event == 0xcc || event == 0xcd) {
+                       report_tablet_mode_event(priv->switches, event);
+                       return;
+               }
 
                /* Wake up on 5-button array events only. */
                if (event == 0xc0 || !priv->array)
@@ -501,9 +506,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
 wakeup:
                pm_wakeup_hard_event(&device->dev);
 
-               if (report_tablet_mode_event(priv->switches, event))
-                       return;
-
                return;
        }
 
index 3fdf4cbec9ad2d7803ea85fdce689d482f602168..888a764efad1ac1b42527c4e457c1262e836c6a7 100644 (file)
@@ -63,9 +63,6 @@ static const struct key_entry intel_vbtn_switchmap[] = {
        { KE_END }
 };
 
-#define KEYMAP_LEN \
-       (ARRAY_SIZE(intel_vbtn_keymap) + ARRAY_SIZE(intel_vbtn_switchmap) + 1)
-
 struct intel_vbtn_priv {
        struct input_dev *buttons_dev;
        struct input_dev *switches_dev;
index ea87fa0786e8e05ec56501ea3fe30d5672346388..3c86e0108a24727ece455c08cab0200ea3f1928b 100644 (file)
@@ -117,10 +117,9 @@ static int intel_wmi_sbl_fw_update_probe(struct wmi_device *wdev,
        return 0;
 }
 
-static int intel_wmi_sbl_fw_update_remove(struct wmi_device *wdev)
+static void intel_wmi_sbl_fw_update_remove(struct wmi_device *wdev)
 {
        dev_info(&wdev->dev, "Slim Bootloader signaling driver removed\n");
-       return 0;
 }
 
 static const struct wmi_device_id intel_wmi_sbl_id_table[] = {
index 974c22a7ff61ee4c124f4255bfc053bb892afb0a..4ae87060d18b4c277de34609515f027ea116914d 100644 (file)
@@ -66,11 +66,10 @@ static int intel_wmi_thunderbolt_probe(struct wmi_device *wdev,
        return ret;
 }
 
-static int intel_wmi_thunderbolt_remove(struct wmi_device *wdev)
+static void intel_wmi_thunderbolt_remove(struct wmi_device *wdev)
 {
        sysfs_remove_group(&wdev->dev.kobj, &tbt_attribute_group);
        kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE);
-       return 0;
 }
 
 static const struct wmi_device_id intel_wmi_thunderbolt_id_table[] = {
index 0df2e82dd24924bd6d92ff74f2434b4eb1f30d64..9606a994af22324905a7521c2dc584b8da4fc40c 100644 (file)
@@ -58,7 +58,7 @@ static int chtdc_ti_pwrbtn_probe(struct platform_device *pdev)
 
        err = devm_request_threaded_irq(dev, irq, NULL,
                                        chtdc_ti_pwrbtn_interrupt,
-                                       0, KBUILD_MODNAME, input);
+                                       IRQF_ONESHOT, KBUILD_MODNAME, input);
        if (err)
                return err;
 
index b5888aeb4bcff9a79fc7cd913d7f033b183f3257..b0e486a6bdfb0ec72533e227a48d1ac7bb52c066 100644 (file)
@@ -23,7 +23,9 @@
 #include <linux/slab.h>
 #include <linux/suspend.h>
 #include <linux/uaccess.h>
+#include <linux/uuid.h>
 
+#include <acpi/acpi_bus.h>
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
 #include <asm/msr.h>
@@ -31,7 +33,8 @@
 
 #include "intel_pmc_core.h"
 
-static struct pmc_dev pmc;
+#define ACPI_S0IX_DSM_UUID             "57a6512e-3979-4e9d-9708-ff13b2508972"
+#define ACPI_GET_LOW_MODE_REGISTERS    1
 
 /* PKGC MSRs are common across Intel Core SoCs */
 static const struct pmc_bit_map msr_map[] = {
@@ -380,6 +383,8 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
         * a list of core SoCs using this.
         */
        {"WIGIG",               ICL_PMC_LTR_WIGIG},
+       {"THC0",                TGL_PMC_LTR_THC0},
+       {"THC1",                TGL_PMC_LTR_THC1},
        /* Below two cannot be used for LTR_IGNORE */
        {"CURRENT_PLATFORM",    CNP_PMC_LTR_CUR_PLT},
        {"AGGREGATED_SYSTEM",   CNP_PMC_LTR_CUR_ASLT},
@@ -401,6 +406,7 @@ static const struct pmc_reg_map cnp_reg_map = {
        .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
        .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
        .ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
+       .etr3_offset = ETR3_OFFSET,
 };
 
 static const struct pmc_reg_map icl_reg_map = {
@@ -418,6 +424,7 @@ static const struct pmc_reg_map icl_reg_map = {
        .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
        .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
        .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
+       .etr3_offset = ETR3_OFFSET,
 };
 
 static const struct pmc_bit_map tgl_clocksource_status_map[] = {
@@ -579,14 +586,65 @@ static const struct pmc_reg_map tgl_reg_map = {
        .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
        .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
        .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
-       .lpm_modes = tgl_lpm_modes,
+       .lpm_num_maps = TGL_LPM_NUM_MAPS,
+       .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+       .lpm_sts_latch_en_offset = TGL_LPM_STS_LATCH_EN_OFFSET,
        .lpm_en_offset = TGL_LPM_EN_OFFSET,
+       .lpm_priority_offset = TGL_LPM_PRI_OFFSET,
        .lpm_residency_offset = TGL_LPM_RESIDENCY_OFFSET,
        .lpm_sts = tgl_lpm_maps,
        .lpm_status_offset = TGL_LPM_STATUS_OFFSET,
        .lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET,
+       .etr3_offset = ETR3_OFFSET,
 };
 
+static void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev)
+{
+       struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
+       const int num_maps = pmcdev->map->lpm_num_maps;
+       u32 lpm_size = LPM_MAX_NUM_MODES * num_maps * 4;
+       union acpi_object *out_obj;
+       struct acpi_device *adev;
+       guid_t s0ix_dsm_guid;
+       u32 *lpm_req_regs, *addr;
+
+       adev = ACPI_COMPANION(&pdev->dev);
+       if (!adev)
+               return;
+
+       guid_parse(ACPI_S0IX_DSM_UUID, &s0ix_dsm_guid);
+
+       out_obj = acpi_evaluate_dsm(adev->handle, &s0ix_dsm_guid, 0,
+                                   ACPI_GET_LOW_MODE_REGISTERS, NULL);
+       if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
+               u32 size = out_obj->buffer.length;
+
+               if (size != lpm_size) {
+                       acpi_handle_debug(adev->handle,
+                               "_DSM returned unexpected buffer size, have %u, expect %u\n",
+                               size, lpm_size);
+                       goto free_acpi_obj;
+               }
+       } else {
+               acpi_handle_debug(adev->handle,
+                                 "_DSM function 0 evaluation failed\n");
+               goto free_acpi_obj;
+       }
+
+       addr = (u32 *)out_obj->buffer.pointer;
+
+       lpm_req_regs = devm_kzalloc(&pdev->dev, lpm_size * sizeof(u32),
+                                    GFP_KERNEL);
+       if (!lpm_req_regs)
+               goto free_acpi_obj;
+
+       memcpy(lpm_req_regs, addr, lpm_size);
+       pmcdev->lpm_req_regs = lpm_req_regs;
+
+free_acpi_obj:
+       ACPI_FREE(out_obj);
+}
+
 static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
 {
        return readl(pmcdev->regbase + reg_offset);
@@ -603,6 +661,115 @@ static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value)
        return (u64)value * pmcdev->map->slp_s0_res_counter_step;
 }
 
+static int set_etr3(struct pmc_dev *pmcdev)
+{
+       const struct pmc_reg_map *map = pmcdev->map;
+       u32 reg;
+       int err;
+
+       if (!map->etr3_offset)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&pmcdev->lock);
+
+       /* check if CF9 is locked */
+       reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+       if (reg & ETR3_CF9LOCK) {
+               err = -EACCES;
+               goto out_unlock;
+       }
+
+       /* write CF9 global reset bit */
+       reg |= ETR3_CF9GR;
+       pmc_core_reg_write(pmcdev, map->etr3_offset, reg);
+
+       reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+       if (!(reg & ETR3_CF9GR)) {
+               err = -EIO;
+               goto out_unlock;
+       }
+
+       err = 0;
+
+out_unlock:
+       mutex_unlock(&pmcdev->lock);
+       return err;
+}
+static umode_t etr3_is_visible(struct kobject *kobj,
+                               struct attribute *attr,
+                               int idx)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+       const struct pmc_reg_map *map = pmcdev->map;
+       u32 reg;
+
+       mutex_lock(&pmcdev->lock);
+       reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+       mutex_unlock(&pmcdev->lock);
+
+       return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
+}
+
+static ssize_t etr3_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+       const struct pmc_reg_map *map = pmcdev->map;
+       u32 reg;
+
+       if (!map->etr3_offset)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&pmcdev->lock);
+
+       reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
+       reg &= ETR3_CF9GR | ETR3_CF9LOCK;
+
+       mutex_unlock(&pmcdev->lock);
+
+       return sysfs_emit(buf, "0x%08x", reg);
+}
+
+static ssize_t etr3_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t len)
+{
+       struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+       int err;
+       u32 reg;
+
+       err = kstrtouint(buf, 16, &reg);
+       if (err)
+               return err;
+
+       /* allow only CF9 writes */
+       if (reg != ETR3_CF9GR)
+               return -EINVAL;
+
+       err = set_etr3(pmcdev);
+       if (err)
+               return err;
+
+       return len;
+}
+static DEVICE_ATTR_RW(etr3);
+
+static struct attribute *pmc_attrs[] = {
+       &dev_attr_etr3.attr,
+       NULL
+};
+
+static const struct attribute_group pmc_attr_group = {
+       .attrs = pmc_attrs,
+       .is_visible = etr3_is_visible,
+};
+
+static const struct attribute_group *pmc_dev_groups[] = {
+       &pmc_attr_group,
+       NULL
+};
+
 static int pmc_core_dev_state_get(void *data, u64 *val)
 {
        struct pmc_dev *pmcdev = data;
@@ -617,9 +784,8 @@ static int pmc_core_dev_state_get(void *data, u64 *val)
 
 DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
 
-static int pmc_core_check_read_lock_bit(void)
+static int pmc_core_check_read_lock_bit(struct pmc_dev *pmcdev)
 {
-       struct pmc_dev *pmcdev = &pmc;
        u32 value;
 
        value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset);
@@ -744,28 +910,26 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
 DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
 
 /* This function should return link status, 0 means ready */
-static int pmc_core_mtpmc_link_status(void)
+static int pmc_core_mtpmc_link_status(struct pmc_dev *pmcdev)
 {
-       struct pmc_dev *pmcdev = &pmc;
        u32 value;
 
        value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
        return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
 }
 
-static int pmc_core_send_msg(u32 *addr_xram)
+static int pmc_core_send_msg(struct pmc_dev *pmcdev, u32 *addr_xram)
 {
-       struct pmc_dev *pmcdev = &pmc;
        u32 dest;
        int timeout;
 
        for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
-               if (pmc_core_mtpmc_link_status() == 0)
+               if (pmc_core_mtpmc_link_status(pmcdev) == 0)
                        break;
                msleep(5);
        }
 
-       if (timeout <= 0 && pmc_core_mtpmc_link_status())
+       if (timeout <= 0 && pmc_core_mtpmc_link_status(pmcdev))
                return -EBUSY;
 
        dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
@@ -791,7 +955,7 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
 
        mutex_lock(&pmcdev->lock);
 
-       if (pmc_core_send_msg(&mphy_core_reg_low) != 0) {
+       if (pmc_core_send_msg(pmcdev, &mphy_core_reg_low) != 0) {
                err = -EBUSY;
                goto out_unlock;
        }
@@ -799,7 +963,7 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
        msleep(10);
        val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
 
-       if (pmc_core_send_msg(&mphy_core_reg_high) != 0) {
+       if (pmc_core_send_msg(pmcdev, &mphy_core_reg_high) != 0) {
                err = -EBUSY;
                goto out_unlock;
        }
@@ -842,7 +1006,7 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
        mphy_common_reg  = (SPT_PMC_MPHY_COM_STS_0 << 16);
        mutex_lock(&pmcdev->lock);
 
-       if (pmc_core_send_msg(&mphy_common_reg) != 0) {
+       if (pmc_core_send_msg(pmcdev, &mphy_common_reg) != 0) {
                err = -EBUSY;
                goto out_unlock;
        }
@@ -863,9 +1027,8 @@ out_unlock:
 }
 DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
 
-static int pmc_core_send_ltr_ignore(u32 value)
+static int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value)
 {
-       struct pmc_dev *pmcdev = &pmc;
        const struct pmc_reg_map *map = pmcdev->map;
        u32 reg;
        int err = 0;
@@ -891,6 +1054,8 @@ static ssize_t pmc_core_ltr_ignore_write(struct file *file,
                                         const char __user *userbuf,
                                         size_t count, loff_t *ppos)
 {
+       struct seq_file *s = file->private_data;
+       struct pmc_dev *pmcdev = s->private;
        u32 buf_size, value;
        int err;
 
@@ -900,7 +1065,7 @@ static ssize_t pmc_core_ltr_ignore_write(struct file *file,
        if (err)
                return err;
 
-       err = pmc_core_send_ltr_ignore(value);
+       err = pmc_core_send_ltr_ignore(pmcdev, value);
 
        return err == 0 ? count : err;
 }
@@ -1029,21 +1194,26 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
 }
 DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
 
+static inline u64 adjust_lpm_residency(struct pmc_dev *pmcdev, u32 offset,
+                                      const int lpm_adj_x2)
+{
+       u64 lpm_res = pmc_core_reg_read(pmcdev, offset);
+
+       return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
+}
+
 static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
 {
        struct pmc_dev *pmcdev = s->private;
-       const char **lpm_modes = pmcdev->map->lpm_modes;
+       const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
        u32 offset = pmcdev->map->lpm_residency_offset;
-       u32 lpm_en;
-       int index;
+       int i, mode;
 
-       lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset);
-       seq_printf(s, "status substate residency\n");
-       for (index = 0; lpm_modes[index]; index++) {
-               seq_printf(s, "%7s %7s %-15u\n",
-                          BIT(index) & lpm_en ? "Enabled" : " ",
-                          lpm_modes[index], pmc_core_reg_read(pmcdev, offset));
-               offset += 4;
+       seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");
+
+       pmc_for_each_mode(i, mode, pmcdev) {
+               seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
+                          adjust_lpm_residency(pmcdev, offset + (4 * mode), lpm_adj_x2));
        }
 
        return 0;
@@ -1074,6 +1244,190 @@ static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
 }
 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
 
+static void pmc_core_substate_req_header_show(struct seq_file *s)
+{
+       struct pmc_dev *pmcdev = s->private;
+       int i, mode;
+
+       seq_printf(s, "%30s |", "Element");
+       pmc_for_each_mode(i, mode, pmcdev)
+               seq_printf(s, " %9s |", pmc_lpm_modes[mode]);
+
+       seq_printf(s, " %9s |\n", "Status");
+}
+
+static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
+{
+       struct pmc_dev *pmcdev = s->private;
+       const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
+       const struct pmc_bit_map *map;
+       const int num_maps = pmcdev->map->lpm_num_maps;
+       u32 sts_offset = pmcdev->map->lpm_status_offset;
+       u32 *lpm_req_regs = pmcdev->lpm_req_regs;
+       int mp;
+
+       /* Display the header */
+       pmc_core_substate_req_header_show(s);
+
+       /* Loop over maps */
+       for (mp = 0; mp < num_maps; mp++) {
+               u32 req_mask = 0;
+               u32 lpm_status;
+               int mode, idx, i, len = 32;
+
+               /*
+                * Capture the requirements and create a mask so that we only
+                * show an element if it's required for at least one of the
+                * enabled low power modes
+                */
+               pmc_for_each_mode(idx, mode, pmcdev)
+                       req_mask |= lpm_req_regs[mp + (mode * num_maps)];
+
+               /* Get the last latched status for this map */
+               lpm_status = pmc_core_reg_read(pmcdev, sts_offset + (mp * 4));
+
+               /*  Loop over elements in this map */
+               map = maps[mp];
+               for (i = 0; map[i].name && i < len; i++) {
+                       u32 bit_mask = map[i].bit_mask;
+
+                       if (!(bit_mask & req_mask))
+                               /*
+                                * Not required for any enabled states
+                                * so don't display
+                                */
+                               continue;
+
+                       /* Display the element name in the first column */
+                       seq_printf(s, "%30s |", map[i].name);
+
+                       /* Loop over the enabled states and display if required */
+                       pmc_for_each_mode(idx, mode, pmcdev) {
+                               if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask)
+                                       seq_printf(s, " %9s |",
+                                                  "Required");
+                               else
+                                       seq_printf(s, " %9s |", " ");
+                       }
+
+                       /* In Status column, show the last captured state of this agent */
+                       if (lpm_status & bit_mask)
+                               seq_printf(s, " %9s |", "Yes");
+                       else
+                               seq_printf(s, " %9s |", " ");
+
+                       seq_puts(s, "\n");
+               }
+       }
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
+
+static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
+{
+       struct pmc_dev *pmcdev = s->private;
+       bool c10;
+       u32 reg;
+       int idx, mode;
+
+       reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
+       if (reg & LPM_STS_LATCH_MODE) {
+               seq_puts(s, "c10");
+               c10 = false;
+       } else {
+               seq_puts(s, "[c10]");
+               c10 = true;
+       }
+
+       pmc_for_each_mode(idx, mode, pmcdev) {
+               if ((BIT(mode) & reg) && !c10)
+                       seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
+               else
+                       seq_printf(s, " %s", pmc_lpm_modes[mode]);
+       }
+
+       seq_puts(s, " clear\n");
+
+       return 0;
+}
+
+static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
+                                            const char __user *userbuf,
+                                            size_t count, loff_t *ppos)
+{
+       struct seq_file *s = file->private_data;
+       struct pmc_dev *pmcdev = s->private;
+       bool clear = false, c10 = false;
+       unsigned char buf[8];
+       int idx, m, mode;
+       u32 reg;
+
+       if (count > sizeof(buf) - 1)
+               return -EINVAL;
+       if (copy_from_user(buf, userbuf, count))
+               return -EFAULT;
+       buf[count] = '\0';
+
+       /*
+        * Allowed strings are:
+        *      Any enabled substate, e.g. 'S0i2.0'
+        *      'c10'
+        *      'clear'
+        */
+       mode = sysfs_match_string(pmc_lpm_modes, buf);
+
+       /* Check string matches enabled mode */
+       pmc_for_each_mode(idx, m, pmcdev)
+               if (mode == m)
+                       break;
+
+       if (mode != m || mode < 0) {
+               if (sysfs_streq(buf, "clear"))
+                       clear = true;
+               else if (sysfs_streq(buf, "c10"))
+                       c10 = true;
+               else
+                       return -EINVAL;
+       }
+
+       if (clear) {
+               mutex_lock(&pmcdev->lock);
+
+               reg = pmc_core_reg_read(pmcdev, pmcdev->map->etr3_offset);
+               reg |= ETR3_CLEAR_LPM_EVENTS;
+               pmc_core_reg_write(pmcdev, pmcdev->map->etr3_offset, reg);
+
+               mutex_unlock(&pmcdev->lock);
+
+               return count;
+       }
+
+       if (c10) {
+               mutex_lock(&pmcdev->lock);
+
+               reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset);
+               reg &= ~LPM_STS_LATCH_MODE;
+               pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);
+
+               mutex_unlock(&pmcdev->lock);
+
+               return count;
+       }
+
+       /*
+        * For LPM mode latching we set the latch enable bit and selected mode
+        * and clear everything else.
+        */
+       reg = LPM_STS_LATCH_MODE | BIT(mode);
+       mutex_lock(&pmcdev->lock);
+       pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg);
+       mutex_unlock(&pmcdev->lock);
+
+       return count;
+}
+DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);
+
 static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
 {
        struct pmc_dev *pmcdev = s->private;
@@ -1095,6 +1449,45 @@ static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
 }
 DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
 
+static void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
+{
+       u8 lpm_priority[LPM_MAX_NUM_MODES];
+       u32 lpm_en;
+       int mode, i, p;
+
+       /* Use LPM Maps to indicate support for substates */
+       if (!pmcdev->map->lpm_num_maps)
+               return;
+
+       lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset);
+       pmcdev->num_lpm_modes = hweight32(lpm_en);
+
+       /* Each byte contains information for 2 modes (7:4 and 3:0) */
+       for (mode = 0; mode < LPM_MAX_NUM_MODES; mode += 2) {
+               u8 priority = pmc_core_reg_read_byte(pmcdev,
+                               pmcdev->map->lpm_priority_offset + (mode / 2));
+               int pri0 = GENMASK(3, 0) & priority;
+               int pri1 = (GENMASK(7, 4) & priority) >> 4;
+
+               lpm_priority[pri0] = mode;
+               lpm_priority[pri1] = mode + 1;
+       }
+
+       /*
+        * Loop though all modes from lowest to highest priority,
+        * and capture all enabled modes in order
+        */
+       i = 0;
+       for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
+               int mode = lpm_priority[p];
+
+               if (!(BIT(mode) & lpm_en))
+                       continue;
+
+               pmcdev->lpm_en_modes[i++] = mode;
+       }
+}
+
 static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
 {
        debugfs_remove_recursive(pmcdev->dbgfs_dir);
@@ -1153,6 +1546,15 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
                debugfs_create_file("substate_live_status_registers", 0444,
                                    pmcdev->dbgfs_dir, pmcdev,
                                    &pmc_core_substate_l_sts_regs_fops);
+               debugfs_create_file("lpm_latch_mode", 0644,
+                                   pmcdev->dbgfs_dir, pmcdev,
+                                   &pmc_core_lpm_latch_mode_fops);
+       }
+
+       if (pmcdev->lpm_req_regs) {
+               debugfs_create_file("substate_requirements", 0444,
+                                   pmcdev->dbgfs_dir, pmcdev,
+                                   &pmc_core_substate_req_regs_fops);
        }
 }
 
@@ -1171,6 +1573,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,        &tgl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,      &icl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,          &tgl_reg_map),
+       X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         &tgl_reg_map),
        {}
 };
 
@@ -1186,9 +1589,15 @@ static const struct pci_device_id pmc_pci_ids[] = {
  * the platform BIOS enforces 24Mhz crystal to shutdown
  * before PMC can assert SLP_S0#.
  */
+static bool xtal_ignore;
 static int quirk_xtal_ignore(const struct dmi_system_id *id)
 {
-       struct pmc_dev *pmcdev = &pmc;
+       xtal_ignore = true;
+       return 0;
+}
+
+static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
+{
        u32 value;
 
        value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
@@ -1197,7 +1606,6 @@ static int quirk_xtal_ignore(const struct dmi_system_id *id)
        /* Low Voltage Mode Enable */
        value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
        pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
-       return 0;
 }
 
 static const struct dmi_system_id pmc_core_dmi_table[]  = {
@@ -1212,16 +1620,30 @@ static const struct dmi_system_id pmc_core_dmi_table[]  = {
        {}
 };
 
+static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
+{
+       dmi_check_system(pmc_core_dmi_table);
+
+       if (xtal_ignore)
+               pmc_core_xtal_ignore(pmcdev);
+}
+
 static int pmc_core_probe(struct platform_device *pdev)
 {
        static bool device_initialized;
-       struct pmc_dev *pmcdev = &pmc;
+       struct pmc_dev *pmcdev;
        const struct x86_cpu_id *cpu_id;
        u64 slp_s0_addr;
 
        if (device_initialized)
                return -ENODEV;
 
+       pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
+       if (!pmcdev)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, pmcdev);
+
        cpu_id = x86_match_cpu(intel_pmc_core_ids);
        if (!cpu_id)
                return -ENODEV;
@@ -1251,9 +1673,13 @@ static int pmc_core_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        mutex_init(&pmcdev->lock);
-       platform_set_drvdata(pdev, pmcdev);
-       pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
-       dmi_check_system(pmc_core_dmi_table);
+
+       pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(pmcdev);
+       pmc_core_get_low_power_modes(pmcdev);
+       pmc_core_do_dmi_quirks(pmcdev);
+
+       if (pmcdev->map == &tgl_reg_map)
+               pmc_core_get_tgl_lpm_reqs(pdev);
 
        /*
         * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
@@ -1261,7 +1687,7 @@ static int pmc_core_probe(struct platform_device *pdev)
         */
        if (pmcdev->map == &tgl_reg_map) {
                dev_dbg(&pdev->dev, "ignoring GBE LTR\n");
-               pmc_core_send_ltr_ignore(3);
+               pmc_core_send_ltr_ignore(pmcdev, 3);
        }
 
        pmc_core_dbgfs_register(pmcdev);
@@ -1384,6 +1810,7 @@ static struct platform_driver pmc_core_driver = {
                .name = "intel_pmc_core",
                .acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
                .pm = &pmc_core_pm_ops,
+               .dev_groups = pmc_dev_groups,
        },
        .probe = pmc_core_probe,
        .remove = pmc_core_remove,
index f33cd2c3483594fcfcae074e63d97905df7677df..e8dae9c6c45fbdc8fae202c4eae47956bc3a6e9c 100644 (file)
@@ -187,20 +187,38 @@ enum ppfear_regs {
 #define ICL_PMC_LTR_WIGIG                      0x1BFC
 #define ICL_PMC_SLP_S0_RES_COUNTER_STEP                0x64
 
-#define TGL_NUM_IP_IGN_ALLOWED                 22
+#define LPM_MAX_NUM_MODES                      8
+#define GET_X2_COUNTER(v)                      ((v) >> 1)
+#define LPM_STS_LATCH_MODE                     BIT(31)
+
 #define TGL_PMC_SLP_S0_RES_COUNTER_STEP                0x7A
+#define TGL_PMC_LTR_THC0                       0x1C04
+#define TGL_PMC_LTR_THC1                       0x1C08
+#define TGL_NUM_IP_IGN_ALLOWED                 23
+#define TGL_PMC_LPM_RES_COUNTER_STEP_X2                61      /* 30.5us * 2 */
 
 /*
  * Tigerlake Power Management Controller register offsets
  */
+#define TGL_LPM_STS_LATCH_EN_OFFSET            0x1C34
 #define TGL_LPM_EN_OFFSET                      0x1C78
 #define TGL_LPM_RESIDENCY_OFFSET               0x1C80
 
 /* Tigerlake Low Power Mode debug registers */
 #define TGL_LPM_STATUS_OFFSET                  0x1C3C
 #define TGL_LPM_LIVE_STATUS_OFFSET             0x1C5C
+#define TGL_LPM_PRI_OFFSET                     0x1C7C
+#define TGL_LPM_NUM_MAPS                       6
+
+/* Extended Test Mode Register 3 (CNL and later) */
+#define ETR3_OFFSET                            0x1048
+#define ETR3_CF9GR                             BIT(20)
+#define ETR3_CF9LOCK                           BIT(31)
+
+/* Extended Test Mode Register LPM bits (TGL and later) */
+#define ETR3_CLEAR_LPM_EVENTS                  BIT(28)
 
-const char *tgl_lpm_modes[] = {
+const char *pmc_lpm_modes[] = {
        "S0i2.0",
        "S0i2.1",
        "S0i2.2",
@@ -258,11 +276,15 @@ struct pmc_reg_map {
        const u32 ltr_ignore_max;
        const u32 pm_vric1_offset;
        /* Low Power Mode registers */
-       const char **lpm_modes;
+       const int lpm_num_maps;
+       const int lpm_res_counter_step_x2;
+       const u32 lpm_sts_latch_en_offset;
        const u32 lpm_en_offset;
+       const u32 lpm_priority_offset;
        const u32 lpm_residency_offset;
        const u32 lpm_status_offset;
        const u32 lpm_live_status_offset;
+       const u32 etr3_offset;
 };
 
 /**
@@ -278,6 +300,9 @@ struct pmc_reg_map {
  * @check_counters:    On resume, check if counters are getting incremented
  * @pc10_counter:      PC10 residency counter
  * @s0ix_counter:      S0ix residency (step adjusted)
+ * @num_lpm_modes:     Count of enabled modes
+ * @lpm_en_modes:      Array of enabled modes from lowest to highest priority
+ * @lpm_req_regs:      List of substate requirements
  *
  * pmc_dev contains info about power management controller device.
  */
@@ -292,6 +317,28 @@ struct pmc_dev {
        bool check_counters; /* Check for counter increments on resume */
        u64 pc10_counter;
        u64 s0ix_counter;
+       int num_lpm_modes;
+       int lpm_en_modes[LPM_MAX_NUM_MODES];
+       u32 *lpm_req_regs;
 };
 
+#define pmc_for_each_mode(i, mode, pmcdev)             \
+       for (i = 0, mode = pmcdev->lpm_en_modes[i];     \
+            i < pmcdev->num_lpm_modes;                 \
+            i++, mode = pmcdev->lpm_en_modes[i])
+
+#define DEFINE_PMC_CORE_ATTR_WRITE(__name)                             \
+static int __name ## _open(struct inode *inode, struct file *file)     \
+{                                                                      \
+       return single_open(file, __name ## _show, inode->i_private);    \
+}                                                                      \
+                                                                       \
+static const struct file_operations __name ## _fops = {                        \
+       .owner          = THIS_MODULE,                                  \
+       .open           = __name ## _open,                              \
+       .read           = seq_read,                                     \
+       .write          = __name ## _write,                             \
+       .release        = single_release,                               \
+}
+
 #endif /* PMC_CORE_H */
index ee2b3bbeb83da51ab7330dbf790c77663c39b47c..c86ff15b1ed522be06f2c670a2f62c3bef70aa7d 100644 (file)
 #define PMT_XA_MAX             INT_MAX
 #define PMT_XA_LIMIT           XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
 
+/*
+ * Early implementations of PMT on client platforms have some
+ * differences from the server platforms (which use the Out Of Band
+ * Management Services Module OOBMSM). This list tracks those
+ * platforms as needed to handle those differences. Newer client
+ * platforms are expected to be fully compatible with server.
+ */
+static const struct pci_device_id pmt_telem_early_client_pci_ids[] = {
+       { PCI_VDEVICE(INTEL, 0x467d) }, /* ADL */
+       { PCI_VDEVICE(INTEL, 0x490e) }, /* DG1 */
+       { PCI_VDEVICE(INTEL, 0x9a0d) }, /* TGL */
+       { }
+};
+
+bool intel_pmt_is_early_client_hw(struct device *dev)
+{
+       struct pci_dev *parent = to_pci_dev(dev->parent);
+
+       return !!pci_match_id(pmt_telem_early_client_pci_ids, parent);
+}
+EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw);
+
 /*
  * sysfs
  */
@@ -147,6 +169,30 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
                 * base address = end of discovery region + base offset
                 */
                entry->base_addr = disc_res->end + 1 + header->base_offset;
+
+               /*
+                * Some hardware uses a different calculation for the base address
+                * when access_type == ACCESS_LOCAL. On these systems
+                * ACCESS_LOCAL refers to an address in the same BAR as the
+                * header but at a fixed offset. But as the header address was
+                * supplied to the driver, we don't know which BAR it was in.
+                * So search for the BAR whose range includes the header address.
+                */
+               if (intel_pmt_is_early_client_hw(dev)) {
+                       int i;
+
+                       entry->base_addr = 0;
+                       for (i = 0; i < 6; i++)
+                               if (disc_res->start >= pci_resource_start(pci_dev, i) &&
+                                  (disc_res->start <= pci_resource_end(pci_dev, i))) {
+                                       entry->base_addr = pci_resource_start(pci_dev, i) +
+                                                          header->base_offset;
+                                       break;
+                               }
+                       if (!entry->base_addr)
+                               return -EINVAL;
+               }
+
                break;
        case ACCESS_BARID:
                /*
index de8f8139ba3116ed9cbb0dc430fa7e071d107d62..1337019c2873eb37b3abda073700228437bb0b36 100644 (file)
@@ -44,6 +44,7 @@ struct intel_pmt_namespace {
                                 struct device *dev);
 };
 
+bool intel_pmt_is_early_client_hw(struct device *dev);
 int intel_pmt_dev_create(struct intel_pmt_entry *entry,
                         struct intel_pmt_namespace *ns,
                         struct platform_device *pdev, int idx);
index f8a87614efa433ceac24bce3b43d72f99bf503d4..9b95ef0504576fcd998b2c567d819961daf37f07 100644 (file)
@@ -34,26 +34,6 @@ struct pmt_telem_priv {
        struct intel_pmt_entry          entry[];
 };
 
-/*
- * Early implementations of PMT on client platforms have some
- * differences from the server platforms (which use the Out Of Band
- * Management Services Module OOBMSM). This list tracks those
- * platforms as needed to handle those differences. Newer client
- * platforms are expected to be fully compatible with server.
- */
-static const struct pci_device_id pmt_telem_early_client_pci_ids[] = {
-       { PCI_VDEVICE(INTEL, 0x9a0d) }, /* TGL */
-       { PCI_VDEVICE(INTEL, 0x467d) }, /* ADL */
-       { }
-};
-
-static bool intel_pmt_is_early_client_hw(struct device *dev)
-{
-       struct pci_dev *parent = to_pci_dev(dev->parent);
-
-       return !!pci_match_id(pmt_telem_early_client_pci_ids, parent);
-}
-
 static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry,
                                      struct device *dev)
 {
index a2a2d923e60cbf1347cee9b5785feb3cabb5d524..df1fc6c719f323d0e021208bbe147f4ed19748d3 100644 (file)
 #define PUNIT_MAILBOX_BUSY_BIT         31
 
 /*
- * The average time to complete some commands is about 40us. The current
- * count is enough to satisfy 40us. But when the firmware is very busy, this
- * causes timeout occasionally.  So increase to deal with some worst case
- * scenarios. Most of the command still complete in few us.
+ * The average time to complete mailbox commands is less than 40us. Most of
+ * the commands complete in few micro seconds. But the same firmware handles
+ * requests from all power management features.
+ * We can create a scenario where we flood the firmware with requests then
+ * the mailbox response can be delayed for 100s of micro seconds. So define
+ * two timeouts. One for average case and one for long.
+ * If the firmware is taking more than average, just call cond_resched().
  */
-#define OS_MAILBOX_RETRY_COUNT         100
+#define OS_MAILBOX_TIMEOUT_AVG_US      40
+#define OS_MAILBOX_TIMEOUT_MAX_US      1000
 
 struct isst_if_device {
        struct mutex mutex;
@@ -35,11 +39,13 @@ struct isst_if_device {
 static int isst_if_mbox_cmd(struct pci_dev *pdev,
                            struct isst_if_mbox_cmd *mbox_cmd)
 {
-       u32 retries, data;
+       s64 tm_delta = 0;
+       ktime_t tm;
+       u32 data;
        int ret;
 
        /* Poll for rb bit == 0 */
-       retries = OS_MAILBOX_RETRY_COUNT;
+       tm = ktime_get();
        do {
                ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
                                            &data);
@@ -48,11 +54,14 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
 
                if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
                        ret = -EBUSY;
+                       tm_delta = ktime_us_delta(ktime_get(), tm);
+                       if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
+                               cond_resched();
                        continue;
                }
                ret = 0;
                break;
-       } while (--retries);
+       } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
 
        if (ret)
                return ret;
@@ -74,7 +83,8 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
                return ret;
 
        /* Poll for rb bit == 0 */
-       retries = OS_MAILBOX_RETRY_COUNT;
+       tm_delta = 0;
+       tm = ktime_get();
        do {
                ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
                                            &data);
@@ -83,6 +93,9 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
 
                if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
                        ret = -EBUSY;
+                       tm_delta = ktime_us_delta(ktime_get(), tm);
+                       if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
+                               cond_resched();
                        continue;
                }
 
@@ -96,7 +109,7 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
                mbox_cmd->resp_data = data;
                ret = 0;
                break;
-       } while (--retries);
+       } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
 
        return ret;
 }
index dd900a76d8de5b3846407c88eb565a5eb2b2a3e3..20145b539335b79ee8070ab4cff69215eb05627f 100644 (file)
@@ -678,7 +678,7 @@ static int __init acpi_init(void)
 
        result = acpi_bus_register_driver(&acpi_driver);
        if (result < 0) {
-               ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error registering driver\n"));
+               pr_debug("Error registering driver\n");
                return -ENODEV;
        }
 
index 6388c3c705a6615d28b799dbad5a0e0fd0923b9b..d4f444401496e869c61bb42db051aa96d8590849 100644 (file)
@@ -973,7 +973,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
        pcc->mute = pcc->sinf[SINF_MUTE];
        pcc->ac_brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
        pcc->dc_brightness = pcc->sinf[SINF_DC_CUR_BRIGHT];
-       result = pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
+       pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
 
        /* add sysfs attributes */
        result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group);
index ca684ed760d147c0cc82753b28d8632b843fa889..a9d2a4b98e57022ec5deadb0ecea5f63c4c33823 100644 (file)
@@ -393,34 +393,10 @@ static const struct dmi_system_id critclk_systems[] = {
        },
        {
                /* pmc_plt_clk* - are used for ethernet controllers */
-               .ident = "Beckhoff CB3163",
+               .ident = "Beckhoff Baytrail",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
-                       DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
-               },
-       },
-       {
-               /* pmc_plt_clk* - are used for ethernet controllers */
-               .ident = "Beckhoff CB4063",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
-                       DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
-               },
-       },
-       {
-               /* pmc_plt_clk* - are used for ethernet controllers */
-               .ident = "Beckhoff CB6263",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
-                       DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
-               },
-       },
-       {
-               /* pmc_plt_clk* - are used for ethernet controllers */
-               .ident = "Beckhoff CB6363",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
-                       DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
                },
        },
        {
index 0d9e2ddbf904d2c93499dc0da3086d7b4c1a0455..dd60c9397d3521aacb36b03b25100abed8b8f689 100644 (file)
@@ -175,6 +175,12 @@ enum tpacpi_hkey_event_t {
                                                     or port replicator */
        TP_HKEY_EV_HOTPLUG_UNDOCK       = 0x4011, /* undocked from hotplug
                                                     dock or port replicator */
+       /*
+        * Thinkpad X1 Tablet series devices emit 0x4012 and 0x4013
+        * when keyboard cover is attached, detached or folded onto the back
+        */
+       TP_HKEY_EV_KBD_COVER_ATTACH     = 0x4012, /* keyboard cover attached */
+       TP_HKEY_EV_KBD_COVER_DETACH     = 0x4013, /* keyboard cover detached or folded back */
 
        /* User-interface events */
        TP_HKEY_EV_LID_CLOSE            = 0x5001, /* laptop lid closed */
@@ -3991,6 +3997,23 @@ static bool hotkey_notify_dockevent(const u32 hkey,
                pr_info("undocked from hotplug port replicator\n");
                return true;
 
+       /*
+        * Deliberately ignore attaching and detaching the keyboard cover to avoid
+        * duplicates from intel-vbtn, which already emits SW_TABLET_MODE events
+        * to userspace.
+        *
+        * Please refer to the following thread for more information and a preliminary
+        * implementation using the GTOP ("Get Tablet OPtions") interface that could be
+        * extended to other attachment options of the ThinkPad X1 Tablet series, such as
+        * the Pico cartridge dock module:
+        * https://lore.kernel.org/platform-driver-x86/38cb8265-1e30-d547-9e12-b4ae290be737@a-kobel.de/
+        */
+       case TP_HKEY_EV_KBD_COVER_ATTACH:
+       case TP_HKEY_EV_KBD_COVER_DETACH:
+               *send_acpi_ev = false;
+               *ignore_acpi_ev = true;
+               return true;
+
        default:
                return false;
        }
@@ -4088,7 +4111,7 @@ static bool hotkey_notify_6xxx(const u32 hkey,
                return true;
 
        case TP_HKEY_EV_KEY_FN_ESC:
-               /* Get the media key status to foce the status LED to update */
+               /* Get the media key status to force the status LED to update */
                acpi_evalf(hkey_handle, NULL, "GMKS", "v");
                *send_acpi_ev = false;
                *ignore_acpi_ev = true;
@@ -6260,6 +6283,7 @@ enum thermal_access_mode {
 enum { /* TPACPI_THERMAL_TPEC_* */
        TP_EC_THERMAL_TMP0 = 0x78,      /* ACPI EC regs TMP 0..7 */
        TP_EC_THERMAL_TMP8 = 0xC0,      /* ACPI EC regs TMP 8..15 */
+       TP_EC_FUNCREV      = 0xEF,      /* ACPI EC Functional revision */
        TP_EC_THERMAL_TMP_NA = -128,    /* ACPI EC sensor not available */
 
        TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
@@ -6272,6 +6296,8 @@ struct ibm_thermal_sensors_struct {
 };
 
 static enum thermal_access_mode thermal_read_mode;
+static const struct attribute_group *thermal_attr_group;
+static bool thermal_use_labels;
 
 /* idx is zero-based */
 static int thermal_get_sensor(int idx, s32 *value)
@@ -6454,11 +6480,33 @@ static const struct attribute_group thermal_temp_input8_group = {
 #undef THERMAL_SENSOR_ATTR_TEMP
 #undef THERMAL_ATTRS
 
+static ssize_t temp1_label_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return sysfs_emit(buf, "CPU\n");
+}
+static DEVICE_ATTR_RO(temp1_label);
+
+static ssize_t temp2_label_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return sysfs_emit(buf, "GPU\n");
+}
+static DEVICE_ATTR_RO(temp2_label);
+
+static struct attribute *temp_label_attributes[] = {
+       &dev_attr_temp1_label.attr,
+       &dev_attr_temp2_label.attr,
+       NULL
+};
+
+static const struct attribute_group temp_label_attr_group = {
+       .attrs = temp_label_attributes,
+};
+
 /* --------------------------------------------------------------------- */
 
 static int __init thermal_init(struct ibm_init_struct *iibm)
 {
-       u8 t, ta1, ta2;
+       u8 t, ta1, ta2, ver = 0;
        int i;
        int acpi_tmp7;
        int res;
@@ -6473,7 +6521,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
                 * 0x78-0x7F, 0xC0-0xC7.  Registers return 0x00 for
                 * non-implemented, thermal sensors return 0x80 when
                 * not available
+                * The above rule is unfortunately flawed. This has been seen with
+                * 0xC2 (power supply ID) causing thermal control problems.
+                * The EC version can be determined by offset 0xEF and at least for
+                * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7
+                * are not thermal registers.
                 */
+               if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
+                       pr_warn("Thinkpad ACPI EC unable to access EC version\n");
 
                ta1 = ta2 = 0;
                for (i = 0; i < 8; i++) {
@@ -6483,11 +6538,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
                                ta1 = 0;
                                break;
                        }
-                       if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
-                               ta2 |= t;
-                       } else {
-                               ta1 = 0;
-                               break;
+                       if (ver < 3) {
+                               if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
+                                       ta2 |= t;
+                               } else {
+                                       ta1 = 0;
+                                       break;
+                               }
                        }
                }
                if (ta1 == 0) {
@@ -6500,9 +6557,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
                                thermal_read_mode = TPACPI_THERMAL_NONE;
                        }
                } else {
-                       thermal_read_mode =
-                           (ta2 != 0) ?
-                           TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
+                       if (ver >= 3) {
+                               thermal_read_mode = TPACPI_THERMAL_TPEC_8;
+                               thermal_use_labels = true;
+                       } else {
+                               thermal_read_mode =
+                                       (ta2 != 0) ?
+                                       TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
+                       }
                }
        } else if (acpi_tmp7) {
                if (tpacpi_is_ibm() &&
@@ -6524,44 +6586,40 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
 
        switch (thermal_read_mode) {
        case TPACPI_THERMAL_TPEC_16:
-               res = sysfs_create_group(&tpacpi_hwmon->kobj,
-                               &thermal_temp_input16_group);
-               if (res)
-                       return res;
+               thermal_attr_group = &thermal_temp_input16_group;
                break;
        case TPACPI_THERMAL_TPEC_8:
        case TPACPI_THERMAL_ACPI_TMP07:
        case TPACPI_THERMAL_ACPI_UPDT:
-               res = sysfs_create_group(&tpacpi_hwmon->kobj,
-                               &thermal_temp_input8_group);
-               if (res)
-                       return res;
+               thermal_attr_group = &thermal_temp_input8_group;
                break;
        case TPACPI_THERMAL_NONE:
        default:
                return 1;
        }
 
+       res = sysfs_create_group(&tpacpi_hwmon->kobj, thermal_attr_group);
+       if (res)
+               return res;
+
+       if (thermal_use_labels) {
+               res = sysfs_create_group(&tpacpi_hwmon->kobj, &temp_label_attr_group);
+               if (res) {
+                       sysfs_remove_group(&tpacpi_hwmon->kobj, thermal_attr_group);
+                       return res;
+               }
+       }
+
        return 0;
 }
 
 static void thermal_exit(void)
 {
-       switch (thermal_read_mode) {
-       case TPACPI_THERMAL_TPEC_16:
-               sysfs_remove_group(&tpacpi_hwmon->kobj,
-                                  &thermal_temp_input16_group);
-               break;
-       case TPACPI_THERMAL_TPEC_8:
-       case TPACPI_THERMAL_ACPI_TMP07:
-       case TPACPI_THERMAL_ACPI_UPDT:
-               sysfs_remove_group(&tpacpi_hwmon->kobj,
-                                  &thermal_temp_input8_group);
-               break;
-       case TPACPI_THERMAL_NONE:
-       default:
-               break;
-       }
+       if (thermal_attr_group)
+               sysfs_remove_group(&tpacpi_hwmon->kobj, thermal_attr_group);
+
+       if (thermal_use_labels)
+               sysfs_remove_group(&tpacpi_hwmon->kobj, &temp_label_attr_group);
 }
 
 static int thermal_read(struct seq_file *m)
@@ -10050,6 +10108,7 @@ static struct ibm_struct proxsensor_driver_data = {
  */
 
 #define DYTC_CMD_SET          1 /* To enable/disable IC function mode */
+#define DYTC_CMD_MMC_GET      8 /* To get current MMC function and mode */
 #define DYTC_CMD_RESET    0x1ff /* To reset back to default */
 
 #define DYTC_GET_FUNCTION_BIT 8  /* Bits  8-11 - function setting */
@@ -10066,6 +10125,10 @@ static struct ibm_struct proxsensor_driver_data = {
 #define DYTC_MODE_PERFORM     2  /* High power mode aka performance */
 #define DYTC_MODE_LOWPOWER    3  /* Low power mode */
 #define DYTC_MODE_BALANCE   0xF  /* Default mode aka balanced */
+#define DYTC_MODE_MMC_BALANCE 0  /* Default mode from MMC_GET, aka balanced */
+
+#define DYTC_ERR_MASK       0xF  /* Bits 0-3 in cmd result are the error result */
+#define DYTC_ERR_SUCCESS      1  /* CMD completed successfully */
 
 #define DYTC_SET_COMMAND(function, mode, on) \
        (DYTC_CMD_SET | (function) << DYTC_SET_FUNCTION_BIT | \
@@ -10080,6 +10143,7 @@ static bool dytc_profile_available;
 static enum platform_profile_option dytc_current_profile;
 static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
 static DEFINE_MUTEX(dytc_mutex);
+static bool dytc_mmc_get_available;
 
 static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile)
 {
@@ -10088,6 +10152,7 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p
                *profile = PLATFORM_PROFILE_LOW_POWER;
                break;
        case DYTC_MODE_BALANCE:
+       case DYTC_MODE_MMC_BALANCE:
                *profile =  PLATFORM_PROFILE_BALANCED;
                break;
        case DYTC_MODE_PERFORM:
@@ -10165,7 +10230,6 @@ static int dytc_cql_command(int command, int *output)
                if (err)
                        return err;
        }
-
        return cmd_err;
 }
 
@@ -10222,7 +10286,10 @@ static void dytc_profile_refresh(void)
        int perfmode;
 
        mutex_lock(&dytc_mutex);
-       err = dytc_cql_command(DYTC_CMD_GET, &output);
+       if (dytc_mmc_get_available)
+               err = dytc_command(DYTC_CMD_MMC_GET, &output);
+       else
+               err = dytc_cql_command(DYTC_CMD_GET, &output);
        mutex_unlock(&dytc_mutex);
        if (err)
                return;
@@ -10271,6 +10338,16 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
        if (dytc_version >= 5) {
                dbg_printk(TPACPI_DBG_INIT,
                                "DYTC version %d: thermal mode available\n", dytc_version);
+               /*
+                * Check if MMC_GET functionality available
+                * Version >= 6 and return success from MMC_GET command
+                */
+               dytc_mmc_get_available = false;
+               if (dytc_version >= 6) {
+                       err = dytc_command(DYTC_CMD_MMC_GET, &output);
+                       if (!err && ((output & DYTC_ERR_MASK) == DYTC_ERR_SUCCESS))
+                               dytc_mmc_get_available = true;
+               }
                /* Create platform_profile structure and register */
                err = platform_profile_register(&dytc_profile);
                /*
@@ -10473,6 +10550,111 @@ static struct ibm_struct kbdlang_driver_data = {
        .exit = kbdlang_exit,
 };
 
+/*************************************************************************
+ * DPRC(Dynamic Power Reduction Control) subdriver, for the Lenovo WWAN
+ * and WLAN feature.
+ */
+#define DPRC_GET_WWAN_ANTENNA_TYPE      0x40000
+#define DPRC_WWAN_ANTENNA_TYPE_A_BIT    BIT(4)
+#define DPRC_WWAN_ANTENNA_TYPE_B_BIT    BIT(8)
+static bool has_antennatype;
+static int wwan_antennatype;
+
+static int dprc_command(int command, int *output)
+{
+       acpi_handle dprc_handle;
+
+       if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "DPRC", &dprc_handle))) {
+               /* Platform doesn't support DPRC */
+               return -ENODEV;
+       }
+
+       if (!acpi_evalf(dprc_handle, output, NULL, "dd", command))
+               return -EIO;
+
+       /*
+        * METHOD_ERR gets returned on devices where few commands are not supported
+        * for example command to get WWAN Antenna type command is not supported on
+        * some devices.
+        */
+       if (*output & METHOD_ERR)
+               return -ENODEV;
+
+       return 0;
+}
+
+static int get_wwan_antenna(int *wwan_antennatype)
+{
+       int output, err;
+
+       /* Get current Antenna type */
+       err = dprc_command(DPRC_GET_WWAN_ANTENNA_TYPE, &output);
+       if (err)
+               return err;
+
+       if (output & DPRC_WWAN_ANTENNA_TYPE_A_BIT)
+               *wwan_antennatype = 1;
+       else if (output & DPRC_WWAN_ANTENNA_TYPE_B_BIT)
+               *wwan_antennatype = 2;
+       else
+               return -ENODEV;
+
+       return 0;
+}
+
+/* sysfs wwan antenna type entry */
+static ssize_t wwan_antenna_type_show(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       switch (wwan_antennatype) {
+       case 1:
+               return sysfs_emit(buf, "type a\n");
+       case 2:
+               return sysfs_emit(buf, "type b\n");
+       default:
+               return -ENODATA;
+       }
+}
+static DEVICE_ATTR_RO(wwan_antenna_type);
+
+static int tpacpi_dprc_init(struct ibm_init_struct *iibm)
+{
+       int wwanantenna_err, err;
+
+       wwanantenna_err = get_wwan_antenna(&wwan_antennatype);
+       /*
+        * If support isn't available (ENODEV) then quit, but don't
+        * return an error.
+        */
+       if (wwanantenna_err == -ENODEV)
+               return 0;
+
+       /* if there was an error return it */
+       if (wwanantenna_err && (wwanantenna_err != -ENODEV))
+               return wwanantenna_err;
+       else if (!wwanantenna_err)
+               has_antennatype = true;
+
+       if (has_antennatype) {
+               err = sysfs_create_file(&tpacpi_pdev->dev.kobj, &dev_attr_wwan_antenna_type.attr);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+static void dprc_exit(void)
+{
+       if (has_antennatype)
+               sysfs_remove_file(&tpacpi_pdev->dev.kobj, &dev_attr_wwan_antenna_type.attr);
+}
+
+static struct ibm_struct dprc_driver_data = {
+       .name = "dprc",
+       .exit = dprc_exit,
+};
+
 /****************************************************************************
  ****************************************************************************
  *
@@ -10977,6 +11159,10 @@ static struct ibm_init_struct ibms_init[] __initdata = {
                .init = tpacpi_kbdlang_init,
                .data = &kbdlang_driver_data,
        },
+       {
+               .init = tpacpi_dprc_init,
+               .data = &dprc_driver_data,
+       },
 };
 
 static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
index c44a6e8dceb8c0a2f1f28e273c8e12e0ca2e7873..90fe4f8f3c2c796b8257bf3733d6b65c84d4c955 100644 (file)
@@ -715,6 +715,32 @@ static const struct ts_dmi_data techbite_arc_11_6_data = {
        .properties     = techbite_arc_11_6_props,
 };
 
+static const struct property_entry teclast_tbook11_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-tbook11.fw"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
+       { }
+};
+
+static const struct ts_dmi_data teclast_tbook11_data = {
+       .embedded_fw = {
+               .name   = "silead/gsl3692-teclast-tbook11.fw",
+               .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+               .length = 43560,
+               .sha256 = { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
+                           0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
+                           0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
+                           0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
+       },
+       .acpi_name      = "MSSL1680:00",
+       .properties     = teclast_tbook11_props,
+};
+
 static const struct property_entry teclast_x3_plus_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -1243,6 +1269,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "G8316_272B"),
                },
        },
+       {
+               /* Teclast Tbook 11 */
+               .driver_data = (void *)&teclast_tbook11_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TbooK 11"),
+                       DMI_MATCH(DMI_PRODUCT_SKU, "E5A6_A1"),
+               },
+       },
        {
                /* Teclast X3 Plus */
                .driver_data = (void *)&teclast_x3_plus_data,
@@ -1355,7 +1390,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
 
        if (has_acpi_companion(dev) &&
            !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
-               error = device_add_properties(dev, ts_data->properties);
+               error = device_create_managed_software_node(dev, ts_data->properties, NULL);
                if (error)
                        dev_err(dev, "failed to add properties: %d\n", error);
        }
index 66b434d6307f759d985b77639d9ffea56f39efcc..80137afb97535364abb1820e312bff4da408d33a 100644 (file)
@@ -86,13 +86,12 @@ static int wmi_bmof_probe(struct wmi_device *wdev, const void *context)
        return ret;
 }
 
-static int wmi_bmof_remove(struct wmi_device *wdev)
+static void wmi_bmof_remove(struct wmi_device *wdev)
 {
        struct bmof_priv *priv = dev_get_drvdata(&wdev->dev);
 
        sysfs_remove_bin_file(&wdev->dev.kobj, &priv->bmof_bin_attr);
        kfree(priv->bmofdata);
-       return 0;
 }
 
 static const struct wmi_device_id wmi_bmof_id_table[] = {
index c669676ea8e8a6448e089161bd631f9e1b753e4c..62e0d56a3332b294f0955ff20c5bd04b2fd38291 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/fs.h>
 #include <uapi/linux/wmi.h>
 
-ACPI_MODULE_NAME("wmi");
 MODULE_AUTHOR("Carlos Corbacho");
 MODULE_DESCRIPTION("ACPI-WMI Mapping Driver");
 MODULE_LICENSE("GPL");
@@ -986,7 +985,6 @@ static int wmi_dev_remove(struct device *dev)
        struct wmi_block *wblock = dev_to_wblock(dev);
        struct wmi_driver *wdriver =
                container_of(dev->driver, struct wmi_driver, driver);
-       int ret = 0;
 
        if (wdriver->filter_callback) {
                misc_deregister(&wblock->char_dev);
@@ -995,12 +993,12 @@ static int wmi_dev_remove(struct device *dev)
        }
 
        if (wdriver->remove)
-               ret = wdriver->remove(dev_to_wdev(dev));
+               wdriver->remove(dev_to_wdev(dev));
 
        if (ACPI_FAILURE(wmi_method_enable(wblock, 0)))
                dev_warn(dev, "failed to disable device\n");
 
-       return ret;
+       return 0;
 }
 
 static struct class wmi_bus_class = {
index 8337c99d2ce25bab372cb82095fdb6a574eed74d..97440462aa258a4d5c52dd650895a75681828574 100644 (file)
@@ -26,8 +26,6 @@
 #define XO15_EBOOK_HID                 "XO15EBK"
 #define XO15_EBOOK_DEVICE_NAME         "EBook Switch"
 
-ACPI_MODULE_NAME(MODULE_NAME);
-
 MODULE_DESCRIPTION("OLPC XO-1.5 ebook switch driver");
 MODULE_LICENSE("GPL");
 
@@ -66,8 +64,8 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event)
                ebook_send_state(device);
                break;
        default:
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "Unsupported event [0x%x]\n", event));
+               acpi_handle_debug(device->handle,
+                                 "Unsupported event [0x%x]\n", event);
                break;
        }
 }
index 8933ae26c3d6934c07df37c09f5e1d706f3f20b3..e954970b50e698a35a2783558380b827a2a7077e 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/bitops.h>
 #include <linux/device.h>
+#include <linux/devm-helpers.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
@@ -593,7 +594,11 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
        power->axp20x_id = axp_data->axp20x_id;
        power->regmap = axp20x->regmap;
        power->num_irqs = axp_data->num_irq_names;
-       INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
+
+       ret = devm_delayed_work_autocancel(&pdev->dev, &power->vbus_detect,
+                                          axp20x_usb_power_poll_vbus);
+       if (ret)
+               return ret;
 
        if (power->axp20x_id == AXP202_ID) {
                /* Enable vbus valid checking */
@@ -652,15 +657,6 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int axp20x_usb_power_remove(struct platform_device *pdev)
-{
-       struct axp20x_usb_power *power = platform_get_drvdata(pdev);
-
-       cancel_delayed_work_sync(&power->vbus_detect);
-
-       return 0;
-}
-
 static const struct of_device_id axp20x_usb_power_match[] = {
        {
                .compatible = "x-powers,axp202-usb-power-supply",
@@ -680,7 +676,6 @@ MODULE_DEVICE_TABLE(of, axp20x_usb_power_match);
 
 static struct platform_driver axp20x_usb_power_driver = {
        .probe = axp20x_usb_power_probe,
-       .remove = axp20x_usb_power_remove,
        .driver = {
                .name           = DRVNAME,
                .of_match_table = axp20x_usb_power_match,
index ab2f4bf8f603ffa947458be3b209ae66be22f757..b5d619db79f6be6ccb5978c682c07358d5c16e82 100644 (file)
@@ -17,6 +17,7 @@
  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  */
 
+#include <linux/devm-helpers.h>
 #include <linux/err.h>
 #include <linux/i2c.h>
 #include <linux/init.h>
@@ -473,7 +474,11 @@ static int bq24735_charger_probe(struct i2c_client *client,
                if (!charger->poll_interval)
                        return 0;
 
-               INIT_DELAYED_WORK(&charger->poll, bq24735_poll);
+               ret = devm_delayed_work_autocancel(&client->dev, &charger->poll,
+                                                  bq24735_poll);
+               if (ret)
+                       return ret;
+
                schedule_delayed_work(&charger->poll,
                                      msecs_to_jiffies(charger->poll_interval));
        }
@@ -481,16 +486,6 @@ static int bq24735_charger_probe(struct i2c_client *client,
        return 0;
 }
 
-static int bq24735_charger_remove(struct i2c_client *client)
-{
-       struct bq24735 *charger = i2c_get_clientdata(client);
-
-       if (charger->poll_interval)
-               cancel_delayed_work_sync(&charger->poll);
-
-       return 0;
-}
-
 static const struct i2c_device_id bq24735_charger_id[] = {
        { "bq24735-charger", 0 },
        {}
@@ -509,7 +504,6 @@ static struct i2c_driver bq24735_charger_driver = {
                .of_match_table = bq24735_match_ids,
        },
        .probe = bq24735_charger_probe,
-       .remove = bq24735_charger_remove,
        .id_table = bq24735_charger_id,
 };
 
index 10cd617516ec26168270aff95af4524aec3014e7..09f3e78af4e01fe2bdf9e2d828a7c01659e87b19 100644 (file)
@@ -8,6 +8,7 @@
  * Author: Auryn Verwegen
  * Author: Mike Looijmans
  */
+#include <linux/devm-helpers.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
@@ -445,15 +446,6 @@ static enum power_supply_property ltc294x_properties[] = {
        POWER_SUPPLY_PROP_CURRENT_NOW,
 };
 
-static int ltc294x_i2c_remove(struct i2c_client *client)
-{
-       struct ltc294x_info *info = i2c_get_clientdata(client);
-
-       cancel_delayed_work_sync(&info->work);
-       power_supply_unregister(info->supply);
-       return 0;
-}
-
 static int ltc294x_i2c_probe(struct i2c_client *client,
        const struct i2c_device_id *id)
 {
@@ -547,7 +539,10 @@ static int ltc294x_i2c_probe(struct i2c_client *client,
 
        psy_cfg.drv_data = info;
 
-       INIT_DELAYED_WORK(&info->work, ltc294x_work);
+       ret = devm_delayed_work_autocancel(&client->dev, &info->work,
+                                          ltc294x_work);
+       if (ret)
+               return ret;
 
        ret = ltc294x_reset(info, prescaler_exp);
        if (ret < 0) {
@@ -555,8 +550,8 @@ static int ltc294x_i2c_probe(struct i2c_client *client,
                return ret;
        }
 
-       info->supply = power_supply_register(&client->dev, &info->supply_desc,
-                                            &psy_cfg);
+       info->supply = devm_power_supply_register(&client->dev,
+                                                 &info->supply_desc, &psy_cfg);
        if (IS_ERR(info->supply)) {
                dev_err(&client->dev, "failed to register ltc2941\n");
                return PTR_ERR(info->supply);
@@ -655,7 +650,6 @@ static struct i2c_driver ltc294x_driver = {
                .pm     = LTC294X_PM_OPS,
        },
        .probe          = ltc294x_i2c_probe,
-       .remove         = ltc294x_i2c_remove,
        .shutdown       = ltc294x_i2c_shutdown,
        .id_table       = ltc294x_i2c_id,
 };
index b6a538ebb378fcde62a42b9d76ac78555af8450f..70ea404b2a36767ecfc313e0df5af4231f61bf81 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/bits.h>
 #include <linux/delay.h>
+#include <linux/devm-helpers.h>
 #include <linux/err.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
@@ -1165,7 +1166,10 @@ skip_gpio:
                }
        }
 
-       INIT_DELAYED_WORK(&chip->work, sbs_delayed_work);
+       rc = devm_delayed_work_autocancel(&client->dev, &chip->work,
+                                         sbs_delayed_work);
+       if (rc)
+               return rc;
 
        chip->power_supply = devm_power_supply_register(&client->dev, sbs_desc,
                                                   &psy_cfg);
@@ -1185,15 +1189,6 @@ exit_psupply:
        return rc;
 }
 
-static int sbs_remove(struct i2c_client *client)
-{
-       struct sbs_info *chip = i2c_get_clientdata(client);
-
-       cancel_delayed_work_sync(&chip->work);
-
-       return 0;
-}
-
 #if defined CONFIG_PM_SLEEP
 
 static int sbs_suspend(struct device *dev)
@@ -1248,7 +1243,6 @@ MODULE_DEVICE_TABLE(of, sbs_dt_ids);
 
 static struct i2c_driver sbs_battery_driver = {
        .probe_new      = sbs_probe,
-       .remove         = sbs_remove,
        .alert          = sbs_alert,
        .id_table       = sbs_id,
        .driver = {
index e0de1df2ede079f4cb81c9f06d1ecb6228a659cd..35799e6401c992960c78e4deb4a325a2d49e9449 100644 (file)
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/pps_kernel.h>
-#include <linux/pps-gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/list.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/property.h>
 #include <linux/timer.h>
 #include <linux/jiffies.h>
 
@@ -100,51 +99,42 @@ static void pps_gpio_echo_timer_callback(struct timer_list *t)
        gpiod_set_value(info->echo_pin, 0);
 }
 
-static int pps_gpio_setup(struct platform_device *pdev)
+static int pps_gpio_setup(struct device *dev)
 {
-       struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
-       struct device_node *np = pdev->dev.of_node;
+       struct pps_gpio_device_data *data = dev_get_drvdata(dev);
        int ret;
        u32 value;
 
-       data->gpio_pin = devm_gpiod_get(&pdev->dev,
-               NULL,   /* request "gpios" */
-               GPIOD_IN);
-       if (IS_ERR(data->gpio_pin)) {
-               dev_err(&pdev->dev,
-                       "failed to request PPS GPIO\n");
-               return PTR_ERR(data->gpio_pin);
+       data->gpio_pin = devm_gpiod_get(dev, NULL, GPIOD_IN);
+       if (IS_ERR(data->gpio_pin))
+               return dev_err_probe(dev, PTR_ERR(data->gpio_pin),
+                                    "failed to request PPS GPIO\n");
+
+       data->assert_falling_edge =
+               device_property_read_bool(dev, "assert-falling-edge");
+
+       data->echo_pin = devm_gpiod_get_optional(dev, "echo", GPIOD_OUT_LOW);
+       if (IS_ERR(data->echo_pin))
+               return dev_err_probe(dev, PTR_ERR(data->echo_pin),
+                                    "failed to request ECHO GPIO\n");
+
+       if (!data->echo_pin)
+               return 0;
+
+       ret = device_property_read_u32(dev, "echo-active-ms", &value);
+       if (ret) {
+               dev_err(dev, "failed to get echo-active-ms from FW\n");
+               return ret;
        }
 
-       data->echo_pin = devm_gpiod_get_optional(&pdev->dev,
-                       "echo",
-                       GPIOD_OUT_LOW);
-       if (data->echo_pin) {
-               if (IS_ERR(data->echo_pin)) {
-                       dev_err(&pdev->dev, "failed to request ECHO GPIO\n");
-                       return PTR_ERR(data->echo_pin);
-               }
-
-               ret = of_property_read_u32(np,
-                       "echo-active-ms",
-                       &value);
-               if (ret) {
-                       dev_err(&pdev->dev,
-                               "failed to get echo-active-ms from OF\n");
-                       return ret;
-               }
-               data->echo_active_ms = value;
-               /* sanity check on echo_active_ms */
-               if (!data->echo_active_ms || data->echo_active_ms > 999) {
-                       dev_err(&pdev->dev,
-                               "echo-active-ms: %u - bad value from OF\n",
-                               data->echo_active_ms);
-                       return -EINVAL;
-               }
+       /* sanity check on echo_active_ms */
+       if (!value || value > 999) {
+               dev_err(dev, "echo-active-ms: %u - bad value from FW\n", value);
+               return -EINVAL;
        }
 
-       if (of_property_read_bool(np, "assert-falling-edge"))
-               data->assert_falling_edge = true;
+       data->echo_active_ms = value;
+
        return 0;
 }
 
@@ -165,34 +155,26 @@ get_irqf_trigger_flags(const struct pps_gpio_device_data *data)
 static int pps_gpio_probe(struct platform_device *pdev)
 {
        struct pps_gpio_device_data *data;
+       struct device *dev = &pdev->dev;
        int ret;
        int pps_default_params;
-       const struct pps_gpio_platform_data *pdata = pdev->dev.platform_data;
 
        /* allocate space for device info */
-       data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
-       platform_set_drvdata(pdev, data);
+
+       dev_set_drvdata(dev, data);
 
        /* GPIO setup */
-       if (pdata) {
-               data->gpio_pin = pdata->gpio_pin;
-               data->echo_pin = pdata->echo_pin;
-
-               data->assert_falling_edge = pdata->assert_falling_edge;
-               data->capture_clear = pdata->capture_clear;
-               data->echo_active_ms = pdata->echo_active_ms;
-       } else {
-               ret = pps_gpio_setup(pdev);
-               if (ret)
-                       return -EINVAL;
-       }
+       ret = pps_gpio_setup(dev);
+       if (ret)
+               return -EINVAL;
 
        /* IRQ setup */
        ret = gpiod_to_irq(data->gpio_pin);
        if (ret < 0) {
-               dev_err(&pdev->dev, "failed to map GPIO to IRQ: %d\n", ret);
+               dev_err(dev, "failed to map GPIO to IRQ: %d\n", ret);
                return -EINVAL;
        }
        data->irq = ret;
@@ -218,17 +200,17 @@ static int pps_gpio_probe(struct platform_device *pdev)
                pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR;
        data->pps = pps_register_source(&data->info, pps_default_params);
        if (IS_ERR(data->pps)) {
-               dev_err(&pdev->dev, "failed to register IRQ %d as PPS source\n",
+               dev_err(dev, "failed to register IRQ %d as PPS source\n",
                        data->irq);
                return PTR_ERR(data->pps);
        }
 
        /* register IRQ interrupt handler */
-       ret = devm_request_irq(&pdev->dev, data->irq, pps_gpio_irq_handler,
+       ret = devm_request_irq(dev, data->irq, pps_gpio_irq_handler,
                        get_irqf_trigger_flags(data), data->info.name, data);
        if (ret) {
                pps_unregister_source(data->pps);
-               dev_err(&pdev->dev, "failed to acquire IRQ %d\n", data->irq);
+               dev_err(dev, "failed to acquire IRQ %d\n", data->irq);
                return -EINVAL;
        }
 
@@ -243,11 +225,9 @@ static int pps_gpio_remove(struct platform_device *pdev)
        struct pps_gpio_device_data *data = platform_get_drvdata(pdev);
 
        pps_unregister_source(data->pps);
-       if (data->echo_pin) {
-               del_timer_sync(&data->echo_timer);
-               /* reset echo pin in any case */
-               gpiod_set_value(data->echo_pin, 0);
-       }
+       del_timer_sync(&data->echo_timer);
+       /* reset echo pin in any case */
+       gpiod_set_value(data->echo_pin, 0);
        dev_info(&pdev->dev, "removed IRQ %d as PPS source\n", data->irq);
        return 0;
 }
index ddecf25b5dd40766a75f74c39890dc52c4ece944..d7894f178bd4fa91264020b62acfa65fc271f4f8 100644 (file)
@@ -309,11 +309,20 @@ static bool sanity_check(struct ce_array *ca)
        return ret;
 }
 
+/**
+ * cec_add_elem - Add an element to the CEC array.
+ * @pfn:       page frame number to insert
+ *
+ * Return values:
+ * - <0:       on error
+ * -  0:       on success
+ * - >0:       when the inserted pfn was offlined
+ */
 static int cec_add_elem(u64 pfn)
 {
        struct ce_array *ca = &ce_arr;
+       int count, err, ret = 0;
        unsigned int to = 0;
-       int count, ret = 0;
 
        /*
         * We can be called very early on the identify_cpu() path where we are
@@ -330,8 +339,8 @@ static int cec_add_elem(u64 pfn)
        if (ca->n == MAX_ELEMS)
                WARN_ON(!del_lru_elem_unlocked(ca));
 
-       ret = find_elem(ca, pfn, &to);
-       if (ret < 0) {
+       err = find_elem(ca, pfn, &to);
+       if (err < 0) {
                /*
                 * Shift range [to-end] to make room for one more element.
                 */
index 7b0cd08db446239510377347f3c0490e364e061e..ba020a45f238e764e48a62af325329f34da81779 100644 (file)
@@ -125,7 +125,7 @@ static const struct regulator_ops vid_ops = {
 
 static const struct regulator_desc regulators[] = {
        BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f,
-                     0x80, 600000, 10000, 0x3c),
+                     0x6f, 600000, 10000, 0x3c),
        BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf,
                      16, 1625000, 25000, 0),
        BD9571MWV_REG("VD25", "vd25", VD25, vid_ops, BD9571MWV_VD25_VID, 0xf,
@@ -134,7 +134,7 @@ static const struct regulator_desc regulators[] = {
                      11, 2800000, 100000, 0),
        BD9571MWV_REG("DVFS", "dvfs", DVFS, reg_ops,
                      BD9571MWV_DVFS_MONIVDAC, 0x7f,
-                     0x80, 600000, 10000, 0x3c),
+                     0x6f, 600000, 10000, 0x3c),
 };
 
 #ifdef CONFIG_PM_SLEEP
@@ -174,7 +174,7 @@ static ssize_t backup_mode_show(struct device *dev,
 {
        struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
 
-       return sprintf(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off");
+       return sysfs_emit(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off");
 }
 
 static ssize_t backup_mode_store(struct device *dev,
@@ -301,7 +301,7 @@ static int bd9571mwv_regulator_probe(struct platform_device *pdev)
                                               &config);
                if (IS_ERR(rdev)) {
                        dev_err(&pdev->dev, "failed to register %s regulator\n",
-                               pdev->name);
+                               regulators[i].name);
                        return PTR_ERR(rdev);
                }
        }
index e62e1d72d94390f0843dbc06fcd2b09aa71915a8..c2442d7798ab73ac01918373031b90c304d61ec2 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/devm-helpers.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
@@ -1842,7 +1843,10 @@ static int spmi_regulator_of_parse(struct device_node *node,
                        return ret;
                }
 
-               INIT_DELAYED_WORK(&vreg->ocp_work, spmi_regulator_vs_ocp_work);
+               ret = devm_delayed_work_autocancel(dev, &vreg->ocp_work,
+                                                  spmi_regulator_vs_ocp_work);
+               if (ret)
+                       return ret;
        }
 
        return 0;
@@ -2157,10 +2161,8 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
                vreg->regmap = regmap;
                if (reg->ocp) {
                        vreg->ocp_irq = platform_get_irq_byname(pdev, reg->ocp);
-                       if (vreg->ocp_irq < 0) {
-                               ret = vreg->ocp_irq;
-                               goto err;
-                       }
+                       if (vreg->ocp_irq < 0)
+                               return vreg->ocp_irq;
                }
                vreg->desc.id = -1;
                vreg->desc.owner = THIS_MODULE;
@@ -2203,8 +2205,7 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
                rdev = devm_regulator_register(dev, &vreg->desc, &config);
                if (IS_ERR(rdev)) {
                        dev_err(dev, "failed to register %s\n", name);
-                       ret = PTR_ERR(rdev);
-                       goto err;
+                       return PTR_ERR(rdev);
                }
 
                INIT_LIST_HEAD(&vreg->node);
@@ -2212,24 +2213,6 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
        }
 
        return 0;
-
-err:
-       list_for_each_entry(vreg, vreg_list, node)
-               if (vreg->ocp_irq)
-                       cancel_delayed_work_sync(&vreg->ocp_work);
-       return ret;
-}
-
-static int qcom_spmi_regulator_remove(struct platform_device *pdev)
-{
-       struct spmi_regulator *vreg;
-       struct list_head *vreg_list = platform_get_drvdata(pdev);
-
-       list_for_each_entry(vreg, vreg_list, node)
-               if (vreg->ocp_irq)
-                       cancel_delayed_work_sync(&vreg->ocp_work);
-
-       return 0;
 }
 
 static struct platform_driver qcom_spmi_regulator_driver = {
@@ -2238,7 +2221,6 @@ static struct platform_driver qcom_spmi_regulator_driver = {
                .of_match_table = qcom_spmi_regulator_match,
        },
        .probe          = qcom_spmi_regulator_probe,
-       .remove         = qcom_spmi_regulator_remove,
 };
 module_platform_driver(qcom_spmi_regulator_driver);
 
index 2667919d76b3410fa7ae828d6d7fe23e5eb525c2..dcb380e868dfdb746201beaeb2037864205f7f87 100644 (file)
@@ -450,6 +450,24 @@ static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
        if (len == 0)
                return NULL;
 
+       /*
+        * GNU binutils do not support multiple address spaces. The GNU
+        * linker's default linker script places IRAM at an arbitrary high
+        * offset, in order to differentiate it from DRAM. Hence we need to
+        * strip the artificial offset in the IRAM addresses coming from the
+        * ELF file.
+        *
+        * The TI proprietary linker would never set those higher IRAM address
+        * bits anyway. PRU architecture limits the program counter to 16-bit
+        * word-address range. This in turn corresponds to 18-bit IRAM
+        * byte-address range for ELF.
+        *
+        * Two more bits are added just in case to make the final 20-bit mask.
+        * Idea is to have a safeguard in case TI decides to add banking
+        * in future SoCs.
+        */
+       da &= 0xfffff;
+
        if (da >= PRU_IRAM_DA &&
            da + len <= PRU_IRAM_DA + pru->mem_regions[PRU_IOMEM_IRAM].size) {
                offset = da - PRU_IRAM_DA;
@@ -585,7 +603,7 @@ pru_rproc_load_elf_segments(struct rproc *rproc, const struct firmware *fw)
                        break;
                }
 
-               if (pru->data->is_k3 && is_iram) {
+               if (pru->data->is_k3) {
                        ret = pru_rproc_memcpy(ptr, elf_data + phdr->p_offset,
                                               filesz);
                        if (ret) {
index 5521c4437ffabd1848f8dcd43d1526bd9d440116..7c007dd7b2000d65d118c8b493ddbca593659e5a 100644 (file)
@@ -56,7 +56,7 @@ static int qcom_pil_info_init(void)
        memset_io(base, 0, resource_size(&imem));
 
        _reloc.base = base;
-       _reloc.num_entries = resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
+       _reloc.num_entries = (u32)resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
 
        return 0;
 }
index d126bb87725059cc60693fda50ae7ae4fcc1f398..ba6a3aa8d9540588e2e7ce4247ecbbfd16bf25f3 100644 (file)
 #ifndef HPSA_CMD_H
 #define HPSA_CMD_H
 
+#include <linux/compiler.h>
+
+#include <linux/build_bug.h> /* static_assert */
+#include <linux/stddef.h> /* offsetof */
+
 /* general boundary definitions */
 #define SENSEINFOBYTES          32 /* may vary between hbas */
 #define SG_ENTRIES_IN_CMD      32 /* Max SG entries excluding chain blocks */
@@ -200,12 +205,10 @@ union u64bit {
        MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */
 
 /* SCSI-3 Commands */
-#pragma pack(1)
-
 #define HPSA_INQUIRY 0x12
 struct InquiryData {
        u8 data_byte[36];
-};
+} __packed;
 
 #define HPSA_REPORT_LOG 0xc2    /* Report Logical LUNs */
 #define HPSA_REPORT_PHYS 0xc3   /* Report Physical LUNs */
@@ -221,7 +224,7 @@ struct raid_map_disk_data {
        u8    xor_mult[2];            /**< XOR multipliers for this position,
                                        *  valid for data disks only */
        u8    reserved[2];
-};
+} __packed;
 
 struct raid_map_data {
        __le32   structure_size;        /* Size of entire structure in bytes */
@@ -247,14 +250,14 @@ struct raid_map_data {
        __le16   dekindex;              /* Data encryption key index. */
        u8    reserved[16];
        struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
-};
+} __packed;
 
 struct ReportLUNdata {
        u8 LUNListLength[4];
        u8 extended_response_flag;
        u8 reserved[3];
        u8 LUN[HPSA_MAX_LUN][8];
-};
+} __packed;
 
 struct ext_report_lun_entry {
        u8 lunid[8];
@@ -269,20 +272,20 @@ struct ext_report_lun_entry {
        u8 lun_count; /* multi-lun device, how many luns */
        u8 redundant_paths;
        u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */
-};
+} __packed;
 
 struct ReportExtendedLUNdata {
        u8 LUNListLength[4];
        u8 extended_response_flag;
        u8 reserved[3];
        struct ext_report_lun_entry LUN[HPSA_MAX_PHYS_LUN];
-};
+} __packed;
 
 struct SenseSubsystem_info {
        u8 reserved[36];
        u8 portname[8];
        u8 reserved1[1108];
-};
+} __packed;
 
 /* BMIC commands */
 #define BMIC_READ 0x26
@@ -317,7 +320,7 @@ union SCSI3Addr {
                u8 Targ:6;
                u8 Mode:2;        /* b10 */
        } LogUnit;
-};
+} __packed;
 
 struct PhysDevAddr {
        u32             TargetId:24;
@@ -325,20 +328,20 @@ struct PhysDevAddr {
        u32             Mode:2;
        /* 2 level target device addr */
        union SCSI3Addr  Target[2];
-};
+} __packed;
 
 struct LogDevAddr {
        u32            VolId:30;
        u32            Mode:2;
        u8             reserved[4];
-};
+} __packed;
 
 union LUNAddr {
        u8               LunAddrBytes[8];
        union SCSI3Addr    SCSI3Lun[4];
        struct PhysDevAddr PhysDev;
        struct LogDevAddr  LogDev;
-};
+} __packed;
 
 struct CommandListHeader {
        u8              ReplyQueue;
@@ -346,7 +349,7 @@ struct CommandListHeader {
        __le16          SGTotal;
        __le64          tag;
        union LUNAddr     LUN;
-};
+} __packed;
 
 struct RequestBlock {
        u8   CDBLen;
@@ -365,18 +368,18 @@ struct RequestBlock {
 #define GET_DIR(tad) (((tad) >> 6) & 0x03)
        u16  Timeout;
        u8   CDB[16];
-};
+} __packed;
 
 struct ErrDescriptor {
        __le64 Addr;
        __le32 Len;
-};
+} __packed;
 
 struct SGDescriptor {
        __le64 Addr;
        __le32 Len;
        __le32 Ext;
-};
+} __packed;
 
 union MoreErrInfo {
        struct {
@@ -390,7 +393,8 @@ union MoreErrInfo {
                u8  offense_num;  /* byte # of offense 0-base */
                u32 offense_value;
        } Invalid_Cmd;
-};
+} __packed;
+
 struct ErrorInfo {
        u8               ScsiStatus;
        u8               SenseLen;
@@ -398,7 +402,7 @@ struct ErrorInfo {
        u32              ResidualCnt;
        union MoreErrInfo  MoreErrInfo;
        u8               SenseInfo[SENSEINFOBYTES];
-};
+} __packed;
 /* Command types */
 #define CMD_IOCTL_PEND  0x01
 #define CMD_SCSI       0x03
@@ -453,6 +457,15 @@ struct CommandList {
        atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */
 } __aligned(COMMANDLIST_ALIGNMENT);
 
+/*
+ * Make sure our embedded atomic variable is aligned. Otherwise we break atomic
+ * operations on architectures that don't support unaligned atomics like IA64.
+ *
+ * The assert guards against reintroduction of an unwanted __packed
+ * attribute to struct CommandList.
+ */
+static_assert(offsetof(struct CommandList, refcount) % __alignof__(atomic_t) == 0);
+
 /* Max S/G elements in I/O accelerator command */
 #define IOACCEL1_MAXSGENTRIES           24
 #define IOACCEL2_MAXSGENTRIES          28
@@ -489,7 +502,7 @@ struct io_accel1_cmd {
        __le64 host_addr;               /* 0x70 - 0x77 */
        u8  CISS_LUN[8];                /* 0x78 - 0x7F */
        struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
-} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
 
 #define IOACCEL1_FUNCTION_SCSIIO        0x00
 #define IOACCEL1_SGLOFFSET              32
@@ -519,7 +532,7 @@ struct ioaccel2_sg_element {
        u8 chain_indicator;
 #define IOACCEL2_CHAIN 0x80
 #define IOACCEL2_LAST_SG 0x40
-};
+} __packed;
 
 /*
  * SCSI Response Format structure for IO Accelerator Mode 2
@@ -559,7 +572,7 @@ struct io_accel2_scsi_response {
        u8 sense_data_len;              /* sense/response data length */
        u8 resid_cnt[4];                /* residual count */
        u8 sense_data_buff[32];         /* sense/response data buffer */
-};
+} __packed;
 
 /*
  * Structure for I/O accelerator (mode 2 or m2) commands.
@@ -592,7 +605,7 @@ struct io_accel2_cmd {
        __le32 tweak_upper;             /* Encryption tweak, upper 4 bytes */
        struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
        struct io_accel2_scsi_response error_data;
-} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
 
 /*
  * defines for Mode 2 command struct
@@ -618,7 +631,7 @@ struct hpsa_tmf_struct {
        __le64 abort_tag;       /* cciss tag of SCSI cmd or TMF to abort */
        __le64 error_ptr;               /* Error Pointer */
        __le32 error_len;               /* Error Length */
-} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
 
 /* Configuration Table Structure */
 struct HostWrite {
@@ -626,7 +639,7 @@ struct HostWrite {
        __le32          command_pool_addr_hi;
        __le32          CoalIntDelay;
        __le32          CoalIntCount;
-};
+} __packed;
 
 #define SIMPLE_MODE     0x02
 #define PERFORMANT_MODE 0x04
@@ -675,7 +688,7 @@ struct CfgTable {
 #define                HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
 #define                HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
        __le32          clear_event_notify;
-};
+} __packed;
 
 #define NUM_BLOCKFETCH_ENTRIES 8
 struct TransTable_struct {
@@ -686,14 +699,14 @@ struct TransTable_struct {
        __le32          RepQCtrAddrHigh32;
 #define MAX_REPLY_QUEUES 64
        struct vals32  RepQAddr[MAX_REPLY_QUEUES];
-};
+} __packed;
 
 struct hpsa_pci_info {
        unsigned char   bus;
        unsigned char   dev_fn;
        unsigned short  domain;
        u32             board_id;
-};
+} __packed;
 
 struct bmic_identify_controller {
        u8      configured_logical_drive_count; /* offset 0 */
@@ -702,7 +715,7 @@ struct bmic_identify_controller {
        u8      pad2[136];
        u8      controller_mode;        /* offset 292 */
        u8      pad3[32];
-};
+} __packed;
 
 
 struct bmic_identify_physical_device {
@@ -845,7 +858,7 @@ struct bmic_identify_physical_device {
        u8     max_link_rate[256];
        u8     neg_phys_link_rate[256];
        u8     box_conn_name[8];
-} __attribute((aligned(512)));
+} __packed __attribute((aligned(512)));
 
 struct bmic_sense_subsystem_info {
        u8      primary_slot_number;
@@ -858,7 +871,7 @@ struct bmic_sense_subsystem_info {
        u8      secondary_array_serial_number[32];
        u8      secondary_cache_serial_number[32];
        u8      pad[332];
-};
+} __packed;
 
 struct bmic_sense_storage_box_params {
        u8      reserved[36];
@@ -870,7 +883,6 @@ struct bmic_sense_storage_box_params {
        u8      reserver_3[84];
        u8      phys_connector[2];
        u8      reserved_4[296];
-};
+} __packed;
 
-#pragma pack()
 #endif /* HPSA_CMD_H */
index 04633e5157e96ae6c715c66b3376cb7e7a709824..4834219497eeb78c0c079315c8a19596e9b4c7c7 100644 (file)
@@ -3179,9 +3179,10 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
        }
 }
 
-static void iscsi_start_session_recovery(struct iscsi_session *session,
-                                        struct iscsi_conn *conn, int flag)
+void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 {
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
        int old_stop_stage;
 
        mutex_lock(&session->eh_mutex);
@@ -3239,27 +3240,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
        spin_unlock_bh(&session->frwd_lock);
        mutex_unlock(&session->eh_mutex);
 }
-
-void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
-{
-       struct iscsi_conn *conn = cls_conn->dd_data;
-       struct iscsi_session *session = conn->session;
-
-       switch (flag) {
-       case STOP_CONN_RECOVER:
-               cls_conn->state = ISCSI_CONN_FAILED;
-               break;
-       case STOP_CONN_TERM:
-               cls_conn->state = ISCSI_CONN_DOWN;
-               break;
-       default:
-               iscsi_conn_printk(KERN_ERR, conn,
-                                 "invalid stop flag %d\n", flag);
-               return;
-       }
-
-       iscsi_start_session_recovery(session, conn, flag);
-}
 EXPORT_SYMBOL_GPL(iscsi_conn_stop);
 
 int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
index 024e5a550759ce5062a31a646f071aacbe3f1bd7..8b9a39077dbabb8210a9dbb4d1c896619f37ad1e 100644 (file)
@@ -201,18 +201,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
                memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
                task->total_xfer_len = qc->nbytes;
                task->num_scatter = qc->n_elem;
+               task->data_dir = qc->dma_dir;
+       } else if (qc->tf.protocol == ATA_PROT_NODATA) {
+               task->data_dir = DMA_NONE;
        } else {
                for_each_sg(qc->sg, sg, qc->n_elem, si)
                        xfer += sg_dma_len(sg);
 
                task->total_xfer_len = xfer;
                task->num_scatter = si;
-       }
-
-       if (qc->tf.protocol == ATA_PROT_NODATA)
-               task->data_dir = DMA_NONE;
-       else
                task->data_dir = qc->dma_dir;
+       }
        task->scatter = qc->sg;
        task->ata_task.retry_count = 1;
        task->task_state_flags = SAS_TASK_STATE_PENDING;
index 49bf2f70a470e1ee0d9f8335fe1c28e416893d9c..31e5455d280cb227e33114d5bc9723e2a2678b83 100644 (file)
@@ -223,7 +223,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
                PM8001_EVENT_LOG_SIZE;
        pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option         = 0x01;
        pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt          = 0x01;
-       for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
+       for (i = 0; i < pm8001_ha->max_q_num; i++) {
                pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt  =
                        PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
                pm8001_ha->inbnd_q_tbl[i].upper_base_addr       =
@@ -249,7 +249,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
                pm8001_ha->inbnd_q_tbl[i].producer_idx          = 0;
                pm8001_ha->inbnd_q_tbl[i].consumer_index        = 0;
        }
-       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
+       for (i = 0; i < pm8001_ha->max_q_num; i++) {
                pm8001_ha->outbnd_q_tbl[i].element_size_cnt     =
                        PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
                pm8001_ha->outbnd_q_tbl[i].upper_base_addr      =
@@ -671,9 +671,9 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
        read_outbnd_queue_table(pm8001_ha);
        /* update main config table ,inbound table and outbound table */
        update_main_config_table(pm8001_ha);
-       for (i = 0; i < PM8001_MAX_INB_NUM; i++)
+       for (i = 0; i < pm8001_ha->max_q_num; i++)
                update_inbnd_queue_table(pm8001_ha, i);
-       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
+       for (i = 0; i < pm8001_ha->max_q_num; i++)
                update_outbnd_queue_table(pm8001_ha, i);
        /* 8081 controller donot require these operations */
        if (deviceid != 0x8081 && deviceid != 0x0042) {
index f4bf62b007a08ba818e36ac2633a7a13fb3cff19..441f0152193f726c88ceb6afe44550dda9ba3a6e 100644 (file)
@@ -2474,10 +2474,22 @@ static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
         * it works.
         */
        mutex_lock(&conn_mutex);
+       switch (flag) {
+       case STOP_CONN_RECOVER:
+               conn->state = ISCSI_CONN_FAILED;
+               break;
+       case STOP_CONN_TERM:
+               conn->state = ISCSI_CONN_DOWN;
+               break;
+       default:
+               iscsi_cls_conn_printk(KERN_ERR, conn,
+                                     "invalid stop flag %d\n", flag);
+               goto unlock;
+       }
+
        conn->transport->stop_conn(conn, flag);
-       conn->state = ISCSI_CONN_DOWN;
+unlock:
        mutex_unlock(&conn_mutex);
-
 }
 
 static void stop_conn_work_fn(struct work_struct *work)
@@ -2968,7 +2980,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
                mutex_lock(&conn->ep_mutex);
                conn->ep = NULL;
                mutex_unlock(&conn->ep_mutex);
-               conn->state = ISCSI_CONN_DOWN;
+               conn->state = ISCSI_CONN_FAILED;
        }
 
        transport->ep_disconnect(ep);
index 1e939a2a387f3f60d26cc35577aff20d22522534..98a34ed10f1a00c0d214380222deceed18492a0d 100644 (file)
@@ -541,7 +541,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
        res = mutex_lock_interruptible(&rport->mutex);
        if (res)
                goto out;
-       if (rport->state != SRP_RPORT_FAIL_FAST)
+       if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
                /*
                 * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
                 * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
index c86760788c72c9a6703dd3b997e5e4570d8100a2..d3d05e997c13548590522447549934a1b38df58d 100644 (file)
@@ -6386,37 +6386,34 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request *req;
        unsigned long flags;
-       int free_slot, task_tag, err;
+       int task_tag, err;
 
        /*
-        * Get free slot, sleep if slots are unavailable.
-        * Even though we use wait_event() which sleeps indefinitely,
-        * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+        * blk_get_request() is used here only to get a free tag.
         */
        req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
        req->end_io_data = &wait;
-       free_slot = req->tag;
-       WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
        ufshcd_hold(hba, false);
 
        spin_lock_irqsave(host->host_lock, flags);
-       task_tag = hba->nutrs + free_slot;
+       blk_mq_start_request(req);
 
+       task_tag = req->tag;
        treq->req_header.dword_0 |= cpu_to_be32(task_tag);
 
-       memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
-       ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+       memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
+       ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
 
        /* send command to the controller */
-       __set_bit(free_slot, &hba->outstanding_tasks);
+       __set_bit(task_tag, &hba->outstanding_tasks);
 
        /* Make sure descriptors are ready before ringing the task doorbell */
        wmb();
 
-       ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+       ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
        /* Make sure that doorbell is committed immediately */
        wmb();
 
@@ -6436,24 +6433,24 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
                ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
                dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
                                __func__, tm_function);
-               if (ufshcd_clear_tm_cmd(hba, free_slot))
-                       dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
-                                       __func__, free_slot);
+               if (ufshcd_clear_tm_cmd(hba, task_tag))
+                       dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
+                                       __func__, task_tag);
                err = -ETIMEDOUT;
        } else {
                err = 0;
-               memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
+               memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
 
                ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
        }
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       __clear_bit(free_slot, &hba->outstanding_tasks);
+       __clear_bit(task_tag, &hba->outstanding_tasks);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+       ufshcd_release(hba);
        blk_put_request(req);
 
-       ufshcd_release(hba);
        return err;
 }
 
index a14684ffe4c1a8ef459a1ce9c5e6691b9e7e6601..ca4f4ca413f11d6b2c0a29ab528da14042f395d2 100644 (file)
@@ -179,6 +179,21 @@ static unsigned int __init save_reg(struct intc_desc_int *d,
        return 0;
 }
 
+static bool __init intc_map(struct irq_domain *domain, int irq)
+{
+       if (!irq_to_desc(irq) && irq_alloc_desc_at(irq, NUMA_NO_NODE) != irq) {
+               pr_err("uname to allocate IRQ %d\n", irq);
+               return false;
+       }
+
+       if (irq_domain_associate(domain, irq, irq)) {
+               pr_err("domain association failure\n");
+               return false;
+       }
+
+       return true;
+}
+
 int __init register_intc_controller(struct intc_desc *desc)
 {
        unsigned int i, k, smp;
@@ -311,24 +326,12 @@ int __init register_intc_controller(struct intc_desc *desc)
        for (i = 0; i < hw->nr_vectors; i++) {
                struct intc_vect *vect = hw->vectors + i;
                unsigned int irq = evt2irq(vect->vect);
-               int res;
 
                if (!vect->enum_id)
                        continue;
 
-               res = irq_create_identity_mapping(d->domain, irq);
-               if (unlikely(res)) {
-                       if (res == -EEXIST) {
-                               res = irq_domain_associate(d->domain, irq, irq);
-                               if (unlikely(res)) {
-                                       pr_err("domain association failure\n");
-                                       continue;
-                               }
-                       } else {
-                               pr_err("can't identity map IRQ %d\n", irq);
-                               continue;
-                       }
-               }
+               if (!intc_map(d->domain, irq))
+                       continue;
 
                intc_irq_xlate_set(irq, vect->enum_id, d);
                intc_register_irq(desc, d, vect->enum_id, irq);
@@ -345,22 +348,8 @@ int __init register_intc_controller(struct intc_desc *desc)
                         * IRQ support, each vector still needs to have
                         * its own backing irq_desc.
                         */
-                       res = irq_create_identity_mapping(d->domain, irq2);
-                       if (unlikely(res)) {
-                               if (res == -EEXIST) {
-                                       res = irq_domain_associate(d->domain,
-                                                                  irq2, irq2);
-                                       if (unlikely(res)) {
-                                               pr_err("domain association "
-                                                      "failure\n");
-                                               continue;
-                                       }
-                               } else {
-                                       pr_err("can't identity map IRQ %d\n",
-                                              irq);
-                                       continue;
-                               }
-                       }
+                       if (!intc_map(d->domain, irq2))
+                               continue;
 
                        vect2->enum_id = 0;
 
index a1b9be1d105a0b921e48de44d39558f90965c6dc..fde4edd83c14cb33d009c518070167240b2344c4 100644 (file)
@@ -186,7 +186,7 @@ struct qm_eqcr_entry {
        __be32 tag;
        struct qm_fd fd;
        u8 __reserved3[32];
-} __packed;
+} __packed __aligned(8);
 #define QM_EQCR_VERB_VBIT              0x80
 #define QM_EQCR_VERB_CMD_MASK          0x61    /* but only one value; */
 #define QM_EQCR_VERB_CMD_ENQUEUE       0x01
index 1fd29f93ff6d6ec51d31c8eb85c375b3e9f76833..5bdfb1565c14d2334209d4aaa1927f7f4eff83ee 100644 (file)
@@ -756,6 +756,9 @@ int geni_icc_get(struct geni_se *se, const char *icc_ddr)
        int i, err;
        const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
 
+       if (has_acpi_companion(se->dev))
+               return 0;
+
        for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
                if (!icc_names[i])
                        continue;
index bf1e250d50dd9b715c72b36c56089e6eb323d283..986776787b9eaddf17ef3ca2ff472623eb94fead 100644 (file)
@@ -20,7 +20,7 @@ soundwire-cadence-y := cadence_master.o
 obj-$(CONFIG_SOUNDWIRE_CADENCE) += soundwire-cadence.o
 
 #Intel driver
-soundwire-intel-y :=   intel.o intel_init.o
+soundwire-intel-y :=   intel.o intel_init.o dmi-quirks.o
 obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o
 
 #Qualcomm driver
index 46885429928ab817b1d0b07950ad457db28a0119..a9e0aa72654dd8a940c0d4e8773920405843ab0a 100644 (file)
@@ -44,13 +44,13 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
        }
 
        ret = sdw_get_id(bus);
-       if (ret) {
+       if (ret < 0) {
                dev_err(parent, "Failed to get bus id\n");
                return ret;
        }
 
        ret = sdw_master_device_add(bus, parent, fwnode);
-       if (ret) {
+       if (ret < 0) {
                dev_err(parent, "Failed to add master device at link %d\n",
                        bus->link_id);
                return ret;
@@ -121,7 +121,7 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
        else
                ret = -ENOTSUPP; /* No ACPI/DT so error out */
 
-       if (ret) {
+       if (ret < 0) {
                dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
                return ret;
        }
@@ -422,7 +422,7 @@ sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
 
        ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
                           SDW_MSG_FLAG_READ, &buf);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        ret = sdw_transfer(bus, &msg);
@@ -440,7 +440,7 @@ sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
 
        ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
                           SDW_MSG_FLAG_WRITE, &value);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        return sdw_transfer(bus, &msg);
@@ -454,7 +454,7 @@ int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
 
        ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
                           SDW_MSG_FLAG_READ, &buf);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        ret = sdw_transfer_unlocked(bus, &msg);
@@ -472,7 +472,7 @@ int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 val
 
        ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
                           SDW_MSG_FLAG_WRITE, &value);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        return sdw_transfer_unlocked(bus, &msg);
@@ -593,7 +593,7 @@ EXPORT_SYMBOL(sdw_write);
 /* called with bus_lock held */
 static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
 {
-       struct sdw_slave *slave = NULL;
+       struct sdw_slave *slave;
 
        list_for_each_entry(slave, &bus->slaves, node) {
                if (slave->dev_num == i)
@@ -603,7 +603,7 @@ static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
        return NULL;
 }
 
-static int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
+int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
 {
        if (slave->id.mfg_id != id.mfg_id ||
            slave->id.part_id != id.part_id ||
@@ -614,6 +614,7 @@ static int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
 
        return 0;
 }
+EXPORT_SYMBOL(sdw_compare_devid);
 
 /* called with bus_lock held */
 static int sdw_get_device_num(struct sdw_slave *slave)
@@ -698,6 +699,7 @@ void sdw_extract_slave_id(struct sdw_bus *bus,
                "SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
                id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
 }
+EXPORT_SYMBOL(sdw_extract_slave_id);
 
 static int sdw_program_device_num(struct sdw_bus *bus)
 {
@@ -705,7 +707,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
        struct sdw_slave *slave, *_s;
        struct sdw_slave_id id;
        struct sdw_msg msg;
-       bool found = false;
+       bool found;
        int count = 0, ret;
        u64 addr;
 
@@ -737,6 +739,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
 
                sdw_extract_slave_id(bus, addr, &id);
 
+               found = false;
                /* Now compare with entries */
                list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
                        if (sdw_compare_devid(slave, id) == 0) {
@@ -749,7 +752,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
                                 * dev_num
                                 */
                                ret = sdw_assign_device_num(slave);
-                               if (ret) {
+                               if (ret < 0) {
                                        dev_err(bus->dev,
                                                "Assign dev_num failed:%d\n",
                                                ret);
@@ -875,14 +878,18 @@ static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
                if (wake_en)
                        val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
        } else {
-               val = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
-
+               ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
+               if (ret < 0) {
+                       dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
+                       return ret;
+               }
+               val = ret;
                val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
        }
 
        ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);
 
-       if (ret != 0)
+       if (ret < 0)
                dev_err(&slave->dev,
                        "Clock Stop prepare failed for slave: %d", ret);
 
@@ -895,11 +902,15 @@ static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
        int val;
 
        do {
-               val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT) &
-                       SDW_SCP_STAT_CLK_STP_NF;
+               val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
+               if (val < 0) {
+                       dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
+                       return val;
+               }
+               val &= SDW_SCP_STAT_CLK_STP_NF;
                if (!val) {
-                       dev_info(bus->dev, "clock stop prep/de-prep done slave:%d",
-                                dev_num);
+                       dev_dbg(bus->dev, "clock stop prep/de-prep done slave:%d",
+                               dev_num);
                        return 0;
                }
 
@@ -1253,6 +1264,7 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
 static int sdw_initialize_slave(struct sdw_slave *slave)
 {
        struct sdw_slave_prop *prop = &slave->prop;
+       int status;
        int ret;
        u8 val;
 
@@ -1260,6 +1272,44 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
        if (ret < 0)
                return ret;
 
+       if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
+               /* Clear bus clash interrupt before enabling interrupt mask */
+               status = sdw_read_no_pm(slave, SDW_SCP_INT1);
+               if (status < 0) {
+                       dev_err(&slave->dev,
+                               "SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
+                       return status;
+               }
+               if (status & SDW_SCP_INT1_BUS_CLASH) {
+                       dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
+                       ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
+                       if (ret < 0) {
+                               dev_err(&slave->dev,
+                                       "SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
+                               return ret;
+                       }
+               }
+       }
+       if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
+           !(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
+               /* Clear parity interrupt before enabling interrupt mask */
+               status = sdw_read_no_pm(slave, SDW_SCP_INT1);
+               if (status < 0) {
+                       dev_err(&slave->dev,
+                               "SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
+                       return status;
+               }
+               if (status & SDW_SCP_INT1_PARITY) {
+                       dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
+                       ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
+                       if (ret < 0) {
+                               dev_err(&slave->dev,
+                                       "SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
+                               return ret;
+                       }
+               }
+       }
+
        /*
         * Set SCP_INT1_MASK register, typically bus clash and
         * implementation-defined interrupt mask. The Parity detection
@@ -1589,7 +1639,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
                ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
                if (ret < 0) {
                        dev_err(&slave->dev,
-                               "SDW_SCP_INT1 read failed:%d\n", ret);
+                               "SDW_SCP_INT1 recheck read failed:%d\n", ret);
                        goto io_err;
                }
                _buf = ret;
@@ -1597,7 +1647,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
                ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, _buf2);
                if (ret < 0) {
                        dev_err(&slave->dev,
-                               "SDW_SCP_INT2/3 read failed:%d\n", ret);
+                               "SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
                        goto io_err;
                }
 
@@ -1605,7 +1655,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
                        ret = sdw_read_no_pm(slave, SDW_DP0_INT);
                        if (ret < 0) {
                                dev_err(&slave->dev,
-                                       "SDW_DP0_INT read failed:%d\n", ret);
+                                       "SDW_DP0_INT recheck read failed:%d\n", ret);
                                goto io_err;
                        }
                        sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
@@ -1701,7 +1751,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
        if (status[0] == SDW_SLAVE_ATTACHED) {
                dev_dbg(bus->dev, "Slave attached, programming device number\n");
                ret = sdw_program_device_num(bus);
-               if (ret)
+               if (ret < 0)
                        dev_err(bus->dev, "Slave attach failed: %d\n", ret);
                /*
                 * programming a device number will have side effects,
@@ -1735,7 +1785,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
 
                case SDW_SLAVE_ALERT:
                        ret = sdw_handle_slave_alerts(slave);
-                       if (ret)
+                       if (ret < 0)
                                dev_err(&slave->dev,
                                        "Slave %d alert handling failed: %d\n",
                                        i, ret);
@@ -1754,7 +1804,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
                        attached_initializing = true;
 
                        ret = sdw_initialize_slave(slave);
-                       if (ret)
+                       if (ret < 0)
                                dev_err(&slave->dev,
                                        "Slave %d initialization failed: %d\n",
                                        i, ret);
@@ -1768,7 +1818,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
                }
 
                ret = sdw_update_slave_status(slave, status[i]);
-               if (ret)
+               if (ret < 0)
                        dev_err(&slave->dev,
                                "Update Slave status failed:%d\n", ret);
                if (attached_initializing) {
index 2e049d39c6e5c667c7906174e49210688966f323..40354469860a746f2e52a180170941abbdc283b7 100644 (file)
@@ -7,6 +7,8 @@
 #define DEFAULT_BANK_SWITCH_TIMEOUT 3000
 #define DEFAULT_PROBE_TIMEOUT       2000
 
+u64 sdw_dmi_override_adr(struct sdw_bus *bus, u64 addr);
+
 #if IS_ENABLED(CONFIG_ACPI)
 int sdw_acpi_find_slaves(struct sdw_bus *bus);
 #else
index 575b9bad99d510459fca1fed8fc22bc4e1f2af64..893296f3fe395ea7fdf069b0def0010a6b71df91 100644 (file)
@@ -82,6 +82,7 @@ static int sdw_drv_probe(struct device *dev)
        struct sdw_slave *slave = dev_to_sdw_dev(dev);
        struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
        const struct sdw_device_id *id;
+       const char *name;
        int ret;
 
        /*
@@ -108,7 +109,10 @@ static int sdw_drv_probe(struct device *dev)
 
        ret = drv->probe(slave, id);
        if (ret) {
-               dev_err(dev, "Probe of %s failed: %d\n", drv->name, ret);
+               name = drv->name;
+               if (!name)
+                       name = drv->driver.name;
+               dev_err(dev, "Probe of %s failed: %d\n", name, ret);
                dev_pm_domain_detach(dev, false);
                return ret;
        }
@@ -174,11 +178,16 @@ static void sdw_drv_shutdown(struct device *dev)
  */
 int __sdw_register_driver(struct sdw_driver *drv, struct module *owner)
 {
+       const char *name;
+
        drv->driver.bus = &sdw_bus_type;
 
        if (!drv->probe) {
-               pr_err("driver %s didn't provide SDW probe routine\n",
-                      drv->name);
+               name = drv->name;
+               if (!name)
+                       name = drv->driver.name;
+
+               pr_err("driver %s didn't provide SDW probe routine\n", name);
                return -EINVAL;
        }
 
index d05442e646a38317c125ef5e9bf5606deff1c130..192dac10f0c2c0d326b297e09cf2d517abc79186 100644 (file)
@@ -905,7 +905,7 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
 EXPORT_SYMBOL(sdw_cdns_irq);
 
 /**
- * To update slave status in a work since we will need to handle
+ * cdns_update_slave_status_work - update slave status in a work since we will need to handle
  * other interrupts eg. CDNS_MCP_INT_RX_WL during the update slave
  * process.
  * @work: cdns worker thread
@@ -968,7 +968,7 @@ int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
 EXPORT_SYMBOL(sdw_cdns_exit_reset);
 
 /**
- * sdw_cdns_enable_slave_interrupt() - Enable SDW slave interrupts
+ * cdns_enable_slave_interrupts() - Enable SDW slave interrupts
  * @cdns: Cadence instance
  * @state: boolean for true/false
  */
@@ -1450,10 +1450,12 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
        }
 
        /* Prepare slaves for clock stop */
-       ret = sdw_bus_prep_clk_stop(&cdns->bus);
-       if (ret < 0) {
-               dev_err(cdns->dev, "prepare clock stop failed %d", ret);
-               return ret;
+       if (slave_present) {
+               ret = sdw_bus_prep_clk_stop(&cdns->bus);
+               if (ret < 0 && ret != -ENODATA) {
+                       dev_err(cdns->dev, "prepare clock stop failed %d\n", ret);
+                       return ret;
+               }
        }
 
        /*
@@ -1462,7 +1464,7 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
         */
        ret = sdw_bus_clk_stop(&cdns->bus);
        if (ret < 0 && slave_present && ret != -ENODATA) {
-               dev_err(cdns->dev, "bus clock stop failed %d", ret);
+               dev_err(cdns->dev, "bus clock stop failed %d\n", ret);
                return ret;
        }
 
diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
new file mode 100644 (file)
index 0000000..82061c1
--- /dev/null
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2021 Intel Corporation.
+
+/*
+ * Soundwire DMI quirks
+ */
+
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/soundwire/sdw.h>
+#include "bus.h"
+
+struct adr_remap {
+       u64 adr;
+       u64 remapped_adr;
+};
+
+/*
+ * HP Spectre x360 Convertible devices do not expose the correct _ADR
+ * in the DSDT.
+ * Remap the bad _ADR values to the ones reported by hardware
+ */
+static const struct adr_remap hp_spectre_360[] = {
+       {
+               0x000010025D070100,
+               0x000020025D071100
+       },
+       {
+               0x000110025d070100,
+               0x000120025D130800
+       },
+       {}
+};
+
+/*
+ * The initial version of the Dell SKU 0A3E did not expose the devices
+ * on the correct links.
+ */
+static const struct adr_remap dell_sku_0A3E[] = {
+       /* rt715 on link0 */
+       {
+               0x00020025d071100,
+               0x00021025d071500
+       },
+       /* rt711 on link1 */
+       {
+               0x000120025d130800,
+               0x000120025d071100,
+       },
+       /* rt1308 on link2 */
+       {
+               0x000220025d071500,
+               0x000220025d130800
+       },
+       {}
+};
+
+static const struct dmi_system_id adr_remap_quirk_table[] = {
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible"),
+               },
+               .driver_data = (void *)hp_spectre_360,
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A3E")
+               },
+               .driver_data = (void *)dell_sku_0A3E,
+       },
+       {}
+};
+
+u64 sdw_dmi_override_adr(struct sdw_bus *bus, u64 addr)
+{
+       const struct dmi_system_id *dmi_id;
+
+       /* check if any address remap quirk applies */
+       dmi_id = dmi_first_match(adr_remap_quirk_table);
+       if (dmi_id) {
+               struct adr_remap *map = dmi_id->driver_data;
+
+               for (map = dmi_id->driver_data; map->adr; map++) {
+                       if (map->adr == addr) {
+                               dev_dbg(bus->dev, "remapped _ADR 0x%llx as 0x%llx\n",
+                                       addr, map->remapped_adr);
+                               addr = map->remapped_adr;
+                               break;
+                       }
+               }
+       }
+
+       return addr;
+}
index 0bdef38c9a301f92c55d83781bcd02ea0206c676..84d1295870842034958a9a1b256087eb6bcbfc67 100644 (file)
@@ -62,7 +62,7 @@ static void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
                                              sample_int, port_bo, port_bo >> 8,
                                              t_data->hstart,
                                              t_data->hstop,
-                                             (SDW_BLK_GRP_CNT_1 * ch), 0x0);
+                                             SDW_BLK_PKG_PER_PORT, 0x0);
 
                        sdw_fill_port_params(&p_rt->port_params,
                                             p_rt->num, bps,
@@ -95,7 +95,7 @@ static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
        struct sdw_bus *bus = m_rt->bus;
        struct sdw_bus_params *b_params = &bus->params;
        int sample_int, hstart = 0;
-       unsigned int rate, bps, ch, no_ch;
+       unsigned int rate, bps, ch;
 
        rate = m_rt->stream->params.rate;
        bps = m_rt->stream->params.bps;
@@ -110,12 +110,11 @@ static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
        t_data.hstart = hstart;
 
        list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
-               no_ch = sdw_ch_mask_to_ch(p_rt->ch_mask);
 
                sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
                                      false, SDW_BLK_GRP_CNT_1, sample_int,
                                      port_bo, port_bo >> 8, hstart, hstop,
-                                     (SDW_BLK_GRP_CNT_1 * no_ch), 0x0);
+                                     SDW_BLK_PKG_PER_PORT, 0x0);
 
                sdw_fill_port_params(&p_rt->port_params,
                                     p_rt->num, bps,
@@ -143,7 +142,7 @@ static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
 static void _sdw_compute_port_params(struct sdw_bus *bus,
                                     struct sdw_group_params *params, int count)
 {
-       struct sdw_master_runtime *m_rt = NULL;
+       struct sdw_master_runtime *m_rt;
        int hstop = bus->params.col - 1;
        int block_offset, port_bo, i;
 
@@ -169,7 +168,7 @@ static int sdw_compute_group_params(struct sdw_bus *bus,
                                    struct sdw_group_params *params,
                                    int *rates, int count)
 {
-       struct sdw_master_runtime *m_rt = NULL;
+       struct sdw_master_runtime *m_rt;
        int sel_col = bus->params.col;
        unsigned int rate, bps, ch;
        int i, column_needed = 0;
@@ -406,14 +405,14 @@ int sdw_compute_params(struct sdw_bus *bus)
        /* Computes clock frequency, frame shape and frame frequency */
        ret = sdw_compute_bus_params(bus);
        if (ret < 0) {
-               dev_err(bus->dev, "Compute bus params failed: %d", ret);
+               dev_err(bus->dev, "Compute bus params failed: %d\n", ret);
                return ret;
        }
 
        /* Compute transport and port params */
        ret = sdw_compute_port_params(bus);
        if (ret < 0) {
-               dev_err(bus->dev, "Compute transport params failed: %d", ret);
+               dev_err(bus->dev, "Compute transport params failed: %d\n", ret);
                return ret;
        }
 
index a2d5cdaa9998b66a390b8f9b5dd0a1703ca52436..fd95f94630b1c59e4842f1a60c2d0167f119d96e 100644 (file)
@@ -561,8 +561,6 @@ static int intel_link_power_down(struct sdw_intel *sdw)
                ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
        }
 
-       link_control = intel_readl(shim, SDW_SHIM_LCTL);
-
        mutex_unlock(sdw->link_res->shim_lock);
 
        if (ret < 0) {
@@ -997,7 +995,7 @@ static int intel_prepare(struct snd_pcm_substream *substream,
 
        dma = snd_soc_dai_get_dma_data(dai, substream);
        if (!dma) {
-               dev_err(dai->dev, "failed to get dma data in %s",
+               dev_err(dai->dev, "failed to get dma data in %s\n",
                        __func__);
                return -EIO;
        }
@@ -1061,7 +1059,7 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
 
        ret = intel_free_stream(sdw, substream, dai, sdw->instance);
        if (ret < 0) {
-               dev_err(dai->dev, "intel_free_stream: failed %d", ret);
+               dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
                return ret;
        }
 
@@ -1286,6 +1284,9 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
        if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
                prop->hw_disabled = true;
 
+       prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH |
+               SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;
+
        return 0;
 }
 
@@ -1302,6 +1303,7 @@ static int intel_prop_read(struct sdw_bus *bus)
 
 static struct sdw_master_ops sdw_intel_ops = {
        .read_prop = sdw_master_read_prop,
+       .override_adr = sdw_dmi_override_adr,
        .xfer_msg = cdns_xfer_msg,
        .xfer_msg_defer = cdns_xfer_msg_defer,
        .reset_page_addr = cdns_reset_page_addr,
@@ -1630,7 +1632,7 @@ static int __maybe_unused intel_suspend(struct device *dev)
 
        ret = intel_link_power_down(sdw);
        if (ret) {
-               dev_err(dev, "Link power down failed: %d", ret);
+               dev_err(dev, "Link power down failed: %d\n", ret);
                return ret;
        }
 
@@ -1665,7 +1667,7 @@ static int __maybe_unused intel_suspend_runtime(struct device *dev)
 
                ret = intel_link_power_down(sdw);
                if (ret) {
-                       dev_err(dev, "Link power down failed: %d", ret);
+                       dev_err(dev, "Link power down failed: %d\n", ret);
                        return ret;
                }
 
@@ -1689,7 +1691,7 @@ static int __maybe_unused intel_suspend_runtime(struct device *dev)
 
                ret = intel_link_power_down(sdw);
                if (ret) {
-                       dev_err(dev, "Link power down failed: %d", ret);
+                       dev_err(dev, "Link power down failed: %d\n", ret);
                        return ret;
                }
 
@@ -1738,7 +1740,7 @@ static int __maybe_unused intel_resume(struct device *dev)
 
        ret = intel_init(sdw);
        if (ret) {
-               dev_err(dev, "%s failed: %d", __func__, ret);
+               dev_err(dev, "%s failed: %d\n", __func__, ret);
                return ret;
        }
 
@@ -1822,7 +1824,7 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
        if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
                ret = intel_init(sdw);
                if (ret) {
-                       dev_err(dev, "%s failed: %d", __func__, ret);
+                       dev_err(dev, "%s failed: %d\n", __func__, ret);
                        return ret;
                }
 
@@ -1867,7 +1869,7 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
        } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) {
                ret = intel_init(sdw);
                if (ret) {
-                       dev_err(dev, "%s failed: %d", __func__, ret);
+                       dev_err(dev, "%s failed: %d\n", __func__, ret);
                        return ret;
                }
 
@@ -1945,7 +1947,7 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
 
                ret = intel_init(sdw);
                if (ret) {
-                       dev_err(dev, "%s failed: %d", __func__, ret);
+                       dev_err(dev, "%s failed: %d\n", __func__, ret);
                        return ret;
                }
 
index 05b726cdfebc5804a7a2608e45341f8dbbf54511..30ce95ec2d703bec11b05b218741a63e7d9f623f 100644 (file)
@@ -178,6 +178,15 @@ static struct sdw_intel_ctx
                link->pdev = pdev;
                link->cdns = platform_get_drvdata(pdev);
 
+               if (!link->cdns) {
+                       dev_err(&adev->dev, "failed to get link->cdns\n");
+                       /*
+                        * The err label will subtract 1 from i, but this ldev
+                        * still needs intel_link_dev_unregister, so add 1 now.
+                        */
+                       i++;
+                       goto err;
+               }
                list_add_tail(&link->list, &ctx->link_list);
                bus = &link->cdns->bus;
                /* Calculate number of slaves */
index 6d22df01f35471e8a8803c09d6ea074a756e9734..2827085a323b85078641c055019ea2346c89c203 100644 (file)
 #define SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK                   BIT(1)
 #define SWRM_COMP_CFG_ENABLE_MSK                               BIT(0)
 #define SWRM_COMP_PARAMS                                       0x100
+#define SWRM_COMP_PARAMS_WR_FIFO_DEPTH                         GENMASK(14, 10)
+#define SWRM_COMP_PARAMS_RD_FIFO_DEPTH                         GENMASK(19, 15)
 #define SWRM_COMP_PARAMS_DOUT_PORTS_MASK                       GENMASK(4, 0)
 #define SWRM_COMP_PARAMS_DIN_PORTS_MASK                                GENMASK(9, 5)
 #define SWRM_INTERRUPT_STATUS                                  0x200
 #define SWRM_INTERRUPT_STATUS_RMSK                             GENMASK(16, 0)
+#define SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ                   BIT(0)
 #define SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED               BIT(1)
 #define SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS         BIT(2)
+#define SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET                 BIT(3)
+#define SWRM_INTERRUPT_STATUS_RD_FIFO_OVERFLOW                 BIT(4)
+#define SWRM_INTERRUPT_STATUS_RD_FIFO_UNDERFLOW                        BIT(5)
+#define SWRM_INTERRUPT_STATUS_WR_CMD_FIFO_OVERFLOW             BIT(6)
 #define SWRM_INTERRUPT_STATUS_CMD_ERROR                                BIT(7)
+#define SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION              BIT(8)
+#define SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH                BIT(9)
 #define SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED          BIT(10)
+#define SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED_V2             BIT(13)
+#define SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED_V2              BIT(14)
+#define SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP               BIT(16)
+#define SWRM_INTERRUPT_MAX                                     17
 #define SWRM_INTERRUPT_MASK_ADDR                               0x204
 #define SWRM_INTERRUPT_CLEAR                                   0x208
 #define SWRM_INTERRUPT_CPU_EN                                  0x210
 #define SWRM_CMD_FIFO_WR_CMD                                   0x300
 #define SWRM_CMD_FIFO_RD_CMD                                   0x304
 #define SWRM_CMD_FIFO_CMD                                      0x308
+#define SWRM_CMD_FIFO_FLUSH                                    0x1
 #define SWRM_CMD_FIFO_STATUS                                   0x30C
+#define SWRM_RD_CMD_FIFO_CNT_MASK                              GENMASK(20, 16)
+#define SWRM_WR_CMD_FIFO_CNT_MASK                              GENMASK(12, 8)
 #define SWRM_CMD_FIFO_CFG_ADDR                                 0x314
+#define SWRM_CONTINUE_EXEC_ON_CMD_IGNORE                       BIT(31)
 #define SWRM_RD_WR_CMD_RETRIES                                 0x7
 #define SWRM_CMD_FIFO_RD_FIFO_ADDR                             0x318
+#define SWRM_RD_FIFO_CMD_ID_MASK                               GENMASK(11, 8)
 #define SWRM_ENUMERATOR_CFG_ADDR                               0x500
+#define SWRM_ENUMERATOR_SLAVE_DEV_ID_1(m)              (0x530 + 0x8 * (m))
+#define SWRM_ENUMERATOR_SLAVE_DEV_ID_2(m)              (0x534 + 0x8 * (m))
 #define SWRM_MCP_FRAME_CTRL_BANK_ADDR(m)               (0x101C + 0x40 * (m))
 #define SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK                 GENMASK(2, 0)
 #define SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK                 GENMASK(7, 3)
+#define SWRM_MCP_BUS_CTRL                                      0x1044
+#define SWRM_MCP_BUS_CLK_START                                 BIT(1)
 #define SWRM_MCP_CFG_ADDR                                      0x1048
 #define SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK              GENMASK(21, 17)
 #define SWRM_DEF_CMD_NO_PINGS                                  0x1f
 #define SWRM_MCP_STATUS_BANK_NUM_MASK                          BIT(0)
 #define SWRM_MCP_SLV_STATUS                                    0x1090
 #define SWRM_MCP_SLV_STATUS_MASK                               GENMASK(1, 0)
+#define SWRM_MCP_SLV_STATUS_SZ                                 2
 #define SWRM_DP_PORT_CTRL_BANK(n, m)   (0x1124 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DP_PORT_CTRL_2_BANK(n, m) (0x1128 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DP_BLOCK_CTRL_1(n)                (0x112C + 0x100 * (n - 1))
+#define SWRM_DP_BLOCK_CTRL2_BANK(n, m) (0x1130 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DP_PORT_HCTRL_BANK(n, m)  (0x1134 + 0x100 * (n - 1) + 0x40 * m)
 #define SWRM_DP_BLOCK_CTRL3_BANK(n, m) (0x1138 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DIN_DPn_PCM_PORT_CTRL(n)  (0x1054 + 0x100 * (n - 1))
+
 #define SWRM_DP_PORT_CTRL_EN_CHAN_SHFT                         0x18
 #define SWRM_DP_PORT_CTRL_OFFSET2_SHFT                         0x10
 #define SWRM_DP_PORT_CTRL_OFFSET1_SHFT                         0x08
 #define SWRM_SPECIAL_CMD_ID    0xF
 #define MAX_FREQ_NUM           1
 #define TIMEOUT_MS             (2 * HZ)
-#define QCOM_SWRM_MAX_RD_LEN   0xf
+#define QCOM_SWRM_MAX_RD_LEN   0x1
 #define QCOM_SDW_MAX_PORTS     14
 #define DEFAULT_CLK_FREQ       9600000
 #define SWRM_MAX_DAIS          0xF
+#define SWR_INVALID_PARAM 0xFF
+#define SWR_HSTOP_MAX_VAL 0xF
+#define SWR_HSTART_MIN_VAL 0x0
+#define SWR_BROADCAST_CMD_ID    0x0F
+#define SWR_MAX_CMD_ID 14
+#define MAX_FIFO_RD_RETRY 3
+#define SWR_OVERFLOW_RETRY_COUNT 30
 
 struct qcom_swrm_port_config {
        u8 si;
        u8 off1;
        u8 off2;
        u8 bp_mode;
+       u8 hstart;
+       u8 hstop;
+       u8 word_length;
+       u8 blk_group_count;
+       u8 lane_control;
 };
 
 struct qcom_swrm_ctrl {
@@ -86,10 +127,9 @@ struct qcom_swrm_ctrl {
        struct device *dev;
        struct regmap *regmap;
        void __iomem *mmio;
-       struct completion *comp;
+       struct completion broadcast;
+       struct completion enumeration;
        struct work_struct slave_work;
-       /* read/write lock */
-       spinlock_t comp_lock;
        /* Port alloc/free lock */
        struct mutex port_lock;
        struct clk *hclk;
@@ -103,11 +143,17 @@ struct qcom_swrm_ctrl {
        int rows_index;
        unsigned long dout_port_mask;
        unsigned long din_port_mask;
+       u32 intr_mask;
+       u8 rcmd_id;
+       u8 wcmd_id;
        struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS];
        struct sdw_stream_runtime *sruntime[SWRM_MAX_DAIS];
        enum sdw_slave_status status[SDW_MAX_DEVICES];
        int (*reg_read)(struct qcom_swrm_ctrl *ctrl, int reg, u32 *val);
        int (*reg_write)(struct qcom_swrm_ctrl *ctrl, int reg, int val);
+       u32 slave_status;
+       u32 wr_fifo_depth;
+       u32 rd_fifo_depth;
 };
 
 struct qcom_swrm_data {
@@ -181,77 +227,180 @@ static int qcom_swrm_cpu_reg_write(struct qcom_swrm_ctrl *ctrl, int reg,
        return SDW_CMD_OK;
 }
 
-static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *ctrl, u8 cmd_data,
+static u32 swrm_get_packed_reg_val(u8 *cmd_id, u8 cmd_data,
+                                  u8 dev_addr, u16 reg_addr)
+{
+       u32 val;
+       u8 id = *cmd_id;
+
+       if (id != SWR_BROADCAST_CMD_ID) {
+               if (id < SWR_MAX_CMD_ID)
+                       id += 1;
+               else
+                       id = 0;
+               *cmd_id = id;
+       }
+       val = SWRM_REG_VAL_PACK(cmd_data, dev_addr, id, reg_addr);
+
+       return val;
+}
+
+static int swrm_wait_for_rd_fifo_avail(struct qcom_swrm_ctrl *swrm)
+{
+       u32 fifo_outstanding_data, value;
+       int fifo_retry_count = SWR_OVERFLOW_RETRY_COUNT;
+
+       do {
+               /* Check for fifo underflow during read */
+               swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+               fifo_outstanding_data = FIELD_GET(SWRM_RD_CMD_FIFO_CNT_MASK, value);
+
+               /* Check if read data is available in read fifo */
+               if (fifo_outstanding_data > 0)
+                       return 0;
+
+               usleep_range(500, 510);
+       } while (fifo_retry_count--);
+
+       if (fifo_outstanding_data == 0) {
+               dev_err_ratelimited(swrm->dev, "%s err read underflow\n", __func__);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int swrm_wait_for_wr_fifo_avail(struct qcom_swrm_ctrl *swrm)
+{
+       u32 fifo_outstanding_cmds, value;
+       int fifo_retry_count = SWR_OVERFLOW_RETRY_COUNT;
+
+       do {
+               /* Check for fifo overflow during write */
+               swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+               fifo_outstanding_cmds = FIELD_GET(SWRM_WR_CMD_FIFO_CNT_MASK, value);
+
+               /* Check for space in write fifo before writing */
+               if (fifo_outstanding_cmds < swrm->wr_fifo_depth)
+                       return 0;
+
+               usleep_range(500, 510);
+       } while (fifo_retry_count--);
+
+       if (fifo_outstanding_cmds == swrm->wr_fifo_depth) {
+               dev_err_ratelimited(swrm->dev, "%s err write overflow\n", __func__);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data,
                                     u8 dev_addr, u16 reg_addr)
 {
-       DECLARE_COMPLETION_ONSTACK(comp);
-       unsigned long flags;
+
        u32 val;
-       int ret;
+       int ret = 0;
+       u8 cmd_id = 0x0;
 
-       spin_lock_irqsave(&ctrl->comp_lock, flags);
-       ctrl->comp = &comp;
-       spin_unlock_irqrestore(&ctrl->comp_lock, flags);
-       val = SWRM_REG_VAL_PACK(cmd_data, dev_addr,
-                               SWRM_SPECIAL_CMD_ID, reg_addr);
-       ret = ctrl->reg_write(ctrl, SWRM_CMD_FIFO_WR_CMD, val);
-       if (ret)
-               goto err;
+       if (dev_addr == SDW_BROADCAST_DEV_NUM) {
+               cmd_id = SWR_BROADCAST_CMD_ID;
+               val = swrm_get_packed_reg_val(&cmd_id, cmd_data,
+                                             dev_addr, reg_addr);
+       } else {
+               val = swrm_get_packed_reg_val(&swrm->wcmd_id, cmd_data,
+                                             dev_addr, reg_addr);
+       }
 
-       ret = wait_for_completion_timeout(ctrl->comp,
-                                         msecs_to_jiffies(TIMEOUT_MS));
+       if (swrm_wait_for_wr_fifo_avail(swrm))
+               return SDW_CMD_FAIL_OTHER;
+
+       /* It's assumed that write is okay as we do not get any status back */
+       swrm->reg_write(swrm, SWRM_CMD_FIFO_WR_CMD, val);
+
+       /* version 1.3 or less */
+       if (swrm->version <= 0x01030000)
+               usleep_range(150, 155);
+
+       if (cmd_id == SWR_BROADCAST_CMD_ID) {
+               /*
+                * Wait (up to TIMEOUT_MS) for the MSM soundwire variant to
+                * signal that the broadcast command has completed.
+                */
+               ret = wait_for_completion_timeout(&swrm->broadcast,
+                                                 msecs_to_jiffies(TIMEOUT_MS));
+               if (!ret)
+                       ret = SDW_CMD_IGNORED;
+               else
+                       ret = SDW_CMD_OK;
 
-       if (!ret)
-               ret = SDW_CMD_IGNORED;
-       else
+       } else {
                ret = SDW_CMD_OK;
-err:
-       spin_lock_irqsave(&ctrl->comp_lock, flags);
-       ctrl->comp = NULL;
-       spin_unlock_irqrestore(&ctrl->comp_lock, flags);
-
+       }
        return ret;
 }
 
-static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *ctrl,
+static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *swrm,
                                     u8 dev_addr, u16 reg_addr,
                                     u32 len, u8 *rval)
 {
-       int i, ret;
-       u32 val;
-       DECLARE_COMPLETION_ONSTACK(comp);
-       unsigned long flags;
+       u32 cmd_data, cmd_id, val, retry_attempt = 0;
+
+       val = swrm_get_packed_reg_val(&swrm->rcmd_id, len, dev_addr, reg_addr);
+
+       /* wait for FIFO RD to complete to avoid overflow */
+       usleep_range(100, 105);
+       swrm->reg_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
+       /* wait for FIFO RD CMD complete to avoid overflow */
+       usleep_range(250, 255);
+
+       if (swrm_wait_for_rd_fifo_avail(swrm))
+               return SDW_CMD_FAIL_OTHER;
+
+       do {
+               swrm->reg_read(swrm, SWRM_CMD_FIFO_RD_FIFO_ADDR, &cmd_data);
+               rval[0] = cmd_data & 0xFF;
+               cmd_id = FIELD_GET(SWRM_RD_FIFO_CMD_ID_MASK, cmd_data);
+
+               if (cmd_id != swrm->rcmd_id) {
+                       if (retry_attempt < (MAX_FIFO_RD_RETRY - 1)) {
+                               /* wait 500 us before retry on fifo read failure */
+                               usleep_range(500, 505);
+                               swrm->reg_write(swrm, SWRM_CMD_FIFO_CMD,
+                                               SWRM_CMD_FIFO_FLUSH);
+                               swrm->reg_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
+                       }
+                       retry_attempt++;
+               } else {
+                       return SDW_CMD_OK;
+               }
 
-       spin_lock_irqsave(&ctrl->comp_lock, flags);
-       ctrl->comp = &comp;
-       spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+       } while (retry_attempt < MAX_FIFO_RD_RETRY);
 
-       val = SWRM_REG_VAL_PACK(len, dev_addr, SWRM_SPECIAL_CMD_ID, reg_addr);
-       ret = ctrl->reg_write(ctrl, SWRM_CMD_FIFO_RD_CMD, val);
-       if (ret)
-               goto err;
+       dev_err(swrm->dev, "failed to read fifo: reg: 0x%x, rcmd_id: 0x%x,\
+               dev_num: 0x%x, cmd_data: 0x%x\n",
+               reg_addr, swrm->rcmd_id, dev_addr, cmd_data);
 
-       ret = wait_for_completion_timeout(ctrl->comp,
-                                         msecs_to_jiffies(TIMEOUT_MS));
+       return SDW_CMD_IGNORED;
+}
 
-       if (!ret) {
-               ret = SDW_CMD_IGNORED;
-               goto err;
-       } else {
-               ret = SDW_CMD_OK;
-       }
+static int qcom_swrm_get_alert_slave_dev_num(struct qcom_swrm_ctrl *ctrl)
+{
+       u32 val, status;
+       int dev_num;
 
-       for (i = 0; i < len; i++) {
-               ctrl->reg_read(ctrl, SWRM_CMD_FIFO_RD_FIFO_ADDR, &val);
-               rval[i] = val & 0xFF;
-       }
+       ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
 
-err:
-       spin_lock_irqsave(&ctrl->comp_lock, flags);
-       ctrl->comp = NULL;
-       spin_unlock_irqrestore(&ctrl->comp_lock, flags);
+       for (dev_num = 0; dev_num < SDW_MAX_DEVICES; dev_num++) {
+               status = (val >> (dev_num * SWRM_MCP_SLV_STATUS_SZ));
 
-       return ret;
+               if ((status & SWRM_MCP_SLV_STATUS_MASK) == SDW_SLAVE_ALERT) {
+                       ctrl->status[dev_num] = status;
+                       return dev_num;
+               }
+       }
+
+       return -EINVAL;
 }
 
 static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
@@ -260,6 +409,7 @@ static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
        int i;
 
        ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
+       ctrl->slave_status = val;
 
        for (i = 0; i < SDW_MAX_DEVICES; i++) {
                u32 s;
@@ -270,42 +420,188 @@ static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
        }
 }
 
-static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
+static void qcom_swrm_set_slave_dev_num(struct sdw_bus *bus,
+                                       struct sdw_slave *slave, int devnum)
 {
-       struct qcom_swrm_ctrl *ctrl = dev_id;
-       u32 sts, value;
-       unsigned long flags;
+       struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+       u32 status;
+
+       ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &status);
+       status = (status >> (devnum * SWRM_MCP_SLV_STATUS_SZ));
+       status &= SWRM_MCP_SLV_STATUS_MASK;
+
+       if (status == SDW_SLAVE_ATTACHED) {
+               if (slave)
+                       slave->dev_num = devnum;
+               mutex_lock(&bus->bus_lock);
+               set_bit(devnum, bus->assigned);
+               mutex_unlock(&bus->bus_lock);
+       }
+}
 
-       ctrl->reg_read(ctrl, SWRM_INTERRUPT_STATUS, &sts);
+static int qcom_swrm_enumerate(struct sdw_bus *bus)
+{
+       struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+       struct sdw_slave *slave, *_s;
+       struct sdw_slave_id id;
+       u32 val1, val2;
+       bool found;
+       u64 addr;
+       int i;
+       char *buf1 = (char *)&val1, *buf2 = (char *)&val2;
+
+       for (i = 1; i <= SDW_MAX_DEVICES; i++) {
+               /*SCP_Devid5 - Devid 4*/
+               ctrl->reg_read(ctrl, SWRM_ENUMERATOR_SLAVE_DEV_ID_1(i), &val1);
+
+               /*SCP_Devid3 - DevId 2 Devid 1 Devid 0*/
+               ctrl->reg_read(ctrl, SWRM_ENUMERATOR_SLAVE_DEV_ID_2(i), &val2);
+
+               if (!val1 && !val2)
+                       break;
+
+               addr = buf2[1] | (buf2[0] << 8) | (buf1[3] << 16) |
+                       ((u64)buf1[2] << 24) | ((u64)buf1[1] << 32) |
+                       ((u64)buf1[0] << 40);
+
+               sdw_extract_slave_id(bus, addr, &id);
+               found = false;
+               /* Now compare with entries */
+               list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
+                       if (sdw_compare_devid(slave, id) == 0) {
+                               qcom_swrm_set_slave_dev_num(bus, slave, i);
+                               found = true;
+                               break;
+                       }
+               }
 
-       if (sts & SWRM_INTERRUPT_STATUS_CMD_ERROR) {
-               ctrl->reg_read(ctrl, SWRM_CMD_FIFO_STATUS, &value);
-               dev_err_ratelimited(ctrl->dev,
-                                   "CMD error, fifo status 0x%x\n",
-                                    value);
-               ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CMD, 0x1);
+               if (!found) {
+                       qcom_swrm_set_slave_dev_num(bus, NULL, i);
+                       sdw_slave_add(bus, &id, NULL);
+               }
        }
 
-       if ((sts & SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED) ||
-           sts & SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS)
-               schedule_work(&ctrl->slave_work);
-
-       /**
-        * clear the interrupt before complete() is called, as complete can
-        * schedule new read/writes which require interrupts, clearing the
-        * interrupt would avoid missing interrupts in such cases.
-        */
-       ctrl->reg_write(ctrl, SWRM_INTERRUPT_CLEAR, sts);
-
-       if (sts & SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED) {
-               spin_lock_irqsave(&ctrl->comp_lock, flags);
-               if (ctrl->comp)
-                       complete(ctrl->comp);
-               spin_unlock_irqrestore(&ctrl->comp_lock, flags);
-       }
+       complete(&ctrl->enumeration);
+       return 0;
+}
+
+static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
+{
+       struct qcom_swrm_ctrl *swrm = dev_id;
+       u32 value, intr_sts, intr_sts_masked, slave_status;
+       u32 i;
+       int devnum;
+       int ret = IRQ_HANDLED;
+
+       swrm->reg_read(swrm, SWRM_INTERRUPT_STATUS, &intr_sts);
+       intr_sts_masked = intr_sts & swrm->intr_mask;
+
+       do {
+               for (i = 0; i < SWRM_INTERRUPT_MAX; i++) {
+                       value = intr_sts_masked & BIT(i);
+                       if (!value)
+                               continue;
+
+                       switch (value) {
+                       case SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ:
+                               devnum = qcom_swrm_get_alert_slave_dev_num(swrm);
+                               if (devnum < 0) {
+                                       dev_err_ratelimited(swrm->dev,
+                                           "no slave alert found.spurious interrupt\n");
+                               } else {
+                                       sdw_handle_slave_status(&swrm->bus, swrm->status);
+                               }
 
-       return IRQ_HANDLED;
+                               break;
+                       case SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED:
+                       case SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS:
+                               dev_err_ratelimited(swrm->dev, "%s: SWR new slave attached\n",
+                                       __func__);
+                               swrm->reg_read(swrm, SWRM_MCP_SLV_STATUS, &slave_status);
+                               if (swrm->slave_status == slave_status) {
+                                       dev_err(swrm->dev, "Slave status not changed %x\n",
+                                               slave_status);
+                               } else {
+                                       qcom_swrm_get_device_status(swrm);
+                                       qcom_swrm_enumerate(&swrm->bus);
+                                       sdw_handle_slave_status(&swrm->bus, swrm->status);
+                               }
+                               break;
+                       case SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET:
+                               dev_err_ratelimited(swrm->dev,
+                                               "%s: SWR bus clsh detected\n",
+                                               __func__);
+                               swrm->intr_mask &= ~SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET;
+                               swrm->reg_write(swrm, SWRM_INTERRUPT_CPU_EN, swrm->intr_mask);
+                               break;
+                       case SWRM_INTERRUPT_STATUS_RD_FIFO_OVERFLOW:
+                               swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+                               dev_err_ratelimited(swrm->dev,
+                                       "%s: SWR read FIFO overflow fifo status 0x%x\n",
+                                       __func__, value);
+                               break;
+                       case SWRM_INTERRUPT_STATUS_RD_FIFO_UNDERFLOW:
+                               swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+                               dev_err_ratelimited(swrm->dev,
+                                       "%s: SWR read FIFO underflow fifo status 0x%x\n",
+                                       __func__, value);
+                               break;
+                       case SWRM_INTERRUPT_STATUS_WR_CMD_FIFO_OVERFLOW:
+                               swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+                               dev_err(swrm->dev,
+                                       "%s: SWR write FIFO overflow fifo status %x\n",
+                                       __func__, value);
+                               swrm->reg_write(swrm, SWRM_CMD_FIFO_CMD, 0x1);
+                               break;
+                       case SWRM_INTERRUPT_STATUS_CMD_ERROR:
+                               swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+                               dev_err_ratelimited(swrm->dev,
+                                       "%s: SWR CMD error, fifo status 0x%x, flushing fifo\n",
+                                       __func__, value);
+                               swrm->reg_write(swrm, SWRM_CMD_FIFO_CMD, 0x1);
+                               break;
+                       case SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION:
+                               dev_err_ratelimited(swrm->dev,
+                                               "%s: SWR Port collision detected\n",
+                                               __func__);
+                               swrm->intr_mask &= ~SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION;
+                               swrm->reg_write(swrm,
+                                       SWRM_INTERRUPT_CPU_EN, swrm->intr_mask);
+                               break;
+                       case SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH:
+                               dev_err_ratelimited(swrm->dev,
+                                       "%s: SWR read enable valid mismatch\n",
+                                       __func__);
+                               swrm->intr_mask &=
+                                       ~SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH;
+                               swrm->reg_write(swrm,
+                                       SWRM_INTERRUPT_CPU_EN, swrm->intr_mask);
+                               break;
+                       case SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED:
+                               complete(&swrm->broadcast);
+                               break;
+                       case SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED_V2:
+                               break;
+                       case SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED_V2:
+                               break;
+                       case SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP:
+                               break;
+                       default:
+                               dev_err_ratelimited(swrm->dev,
+                                               "%s: SWR unknown interrupt value: %d\n",
+                                               __func__, value);
+                               ret = IRQ_NONE;
+                               break;
+                       }
+               }
+               swrm->reg_write(swrm, SWRM_INTERRUPT_CLEAR, intr_sts);
+               swrm->reg_read(swrm, SWRM_INTERRUPT_STATUS, &intr_sts);
+               intr_sts_masked = intr_sts & swrm->intr_mask;
+       } while (intr_sts_masked);
+
+       return ret;
 }
+
 static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
 {
        u32 val;
@@ -316,9 +612,10 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
 
        ctrl->reg_write(ctrl, SWRM_MCP_FRAME_CTRL_BANK_ADDR(0), val);
 
-       /* Disable Auto enumeration */
-       ctrl->reg_write(ctrl, SWRM_ENUMERATOR_CFG_ADDR, 0);
+       /* Enable Auto enumeration */
+       ctrl->reg_write(ctrl, SWRM_ENUMERATOR_CFG_ADDR, 1);
 
+       ctrl->intr_mask = SWRM_INTERRUPT_STATUS_RMSK;
        /* Mask soundwire interrupts */
        ctrl->reg_write(ctrl, SWRM_INTERRUPT_MASK_ADDR,
                        SWRM_INTERRUPT_STATUS_RMSK);
@@ -328,8 +625,17 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
        u32p_replace_bits(&val, SWRM_DEF_CMD_NO_PINGS, SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK);
        ctrl->reg_write(ctrl, SWRM_MCP_CFG_ADDR, val);
 
+       ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, SWRM_MCP_BUS_CLK_START);
        /* Configure number of retries of a read/write cmd */
-       ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR, SWRM_RD_WR_CMD_RETRIES);
+       if (ctrl->version > 0x01050001) {
+               /* Only for versions >= 1.5.1 */
+               ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR,
+                               SWRM_RD_WR_CMD_RETRIES |
+                               SWRM_CONTINUE_EXEC_ON_CMD_IGNORE);
+       } else {
+               ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR,
+                               SWRM_RD_WR_CMD_RETRIES);
+       }
 
        /* Set IRQ to PULSE */
        ctrl->reg_write(ctrl, SWRM_COMP_CFG_ADDR,
@@ -341,6 +647,11 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
                ctrl->reg_write(ctrl, SWRM_INTERRUPT_CPU_EN,
                                SWRM_INTERRUPT_STATUS_RMSK);
        }
+       ctrl->slave_status = 0;
+       ctrl->reg_read(ctrl, SWRM_COMP_PARAMS, &val);
+       ctrl->rd_fifo_depth = FIELD_GET(SWRM_COMP_PARAMS_RD_FIFO_DEPTH, val);
+       ctrl->wr_fifo_depth = FIELD_GET(SWRM_COMP_PARAMS_WR_FIFO_DEPTH, val);
+
        return 0;
 }
 
@@ -396,8 +707,11 @@ static int qcom_swrm_port_params(struct sdw_bus *bus,
                                 struct sdw_port_params *p_params,
                                 unsigned int bank)
 {
-       /* TBD */
-       return 0;
+       struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+
+       return ctrl->reg_write(ctrl, SWRM_DP_BLOCK_CTRL_1(p_params->num),
+                              p_params->bps - 1);
+
 }
 
 static int qcom_swrm_transport_params(struct sdw_bus *bus,
@@ -405,22 +719,57 @@ static int qcom_swrm_transport_params(struct sdw_bus *bus,
                                      enum sdw_reg_bank bank)
 {
        struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+       struct qcom_swrm_port_config *pcfg;
        u32 value;
        int reg = SWRM_DP_PORT_CTRL_BANK((params->port_num), bank);
        int ret;
 
-       value = params->offset1 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT;
-       value |= params->offset2 << SWRM_DP_PORT_CTRL_OFFSET2_SHFT;
-       value |= params->sample_interval - 1;
+       pcfg = &ctrl->pconfig[params->port_num];
+
+       value = pcfg->off1 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT;
+       value |= pcfg->off2 << SWRM_DP_PORT_CTRL_OFFSET2_SHFT;
+       value |= pcfg->si;
 
        ret = ctrl->reg_write(ctrl, reg, value);
+       if (ret)
+               goto err;
 
-       if (!ret && params->blk_pkg_mode) {
-               reg = SWRM_DP_BLOCK_CTRL3_BANK(params->port_num, bank);
+       if (pcfg->lane_control != SWR_INVALID_PARAM) {
+               reg = SWRM_DP_PORT_CTRL_2_BANK(params->port_num, bank);
+               value = pcfg->lane_control;
+               ret = ctrl->reg_write(ctrl, reg, value);
+               if (ret)
+                       goto err;
+       }
+
+       if (pcfg->blk_group_count != SWR_INVALID_PARAM) {
+               reg = SWRM_DP_BLOCK_CTRL2_BANK(params->port_num, bank);
+               value = pcfg->blk_group_count;
+               ret = ctrl->reg_write(ctrl, reg, value);
+               if (ret)
+                       goto err;
+       }
+
+       if (pcfg->hstart != SWR_INVALID_PARAM
+                       && pcfg->hstop != SWR_INVALID_PARAM) {
+               reg = SWRM_DP_PORT_HCTRL_BANK(params->port_num, bank);
+               value = (pcfg->hstop << 4) | pcfg->hstart;
+               ret = ctrl->reg_write(ctrl, reg, value);
+       } else {
+               reg = SWRM_DP_PORT_HCTRL_BANK(params->port_num, bank);
+               value = (SWR_HSTOP_MAX_VAL << 4) | SWR_HSTART_MIN_VAL;
+               ret = ctrl->reg_write(ctrl, reg, value);
+       }
+
+       if (ret)
+               goto err;
 
-               ret = ctrl->reg_write(ctrl, reg, 1);
+       if (pcfg->bp_mode != SWR_INVALID_PARAM) {
+               reg = SWRM_DP_BLOCK_CTRL3_BANK(params->port_num, bank);
+               ret = ctrl->reg_write(ctrl, reg, pcfg->bp_mode);
        }
 
+err:
        return ret;
 }
 
@@ -460,27 +809,50 @@ static int qcom_swrm_compute_params(struct sdw_bus *bus)
        struct sdw_slave_runtime *s_rt;
        struct sdw_port_runtime *p_rt;
        struct qcom_swrm_port_config *pcfg;
-       int i = 0;
+       struct sdw_slave *slave;
+       unsigned int m_port;
+       int i = 1;
 
        list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
                list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
-                       pcfg = &ctrl->pconfig[p_rt->num - 1];
+                       pcfg = &ctrl->pconfig[p_rt->num];
                        p_rt->transport_params.port_num = p_rt->num;
-                       p_rt->transport_params.sample_interval = pcfg->si + 1;
-                       p_rt->transport_params.offset1 = pcfg->off1;
-                       p_rt->transport_params.offset2 = pcfg->off2;
-                       p_rt->transport_params.blk_pkg_mode = pcfg->bp_mode;
+                       if (pcfg->word_length != SWR_INVALID_PARAM) {
+                               sdw_fill_port_params(&p_rt->port_params,
+                                            p_rt->num,  pcfg->word_length + 1,
+                                            SDW_PORT_FLOW_MODE_ISOCH,
+                                            SDW_PORT_DATA_MODE_NORMAL);
+                       }
+
                }
 
                list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+                       slave = s_rt->slave;
                        list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
-                               pcfg = &ctrl->pconfig[i];
+                               m_port = slave->m_port_map[p_rt->num];
+                               /* port config starts at offset 0 so -1 from actual port number */
+                               if (m_port)
+                                       pcfg = &ctrl->pconfig[m_port];
+                               else
+                                       pcfg = &ctrl->pconfig[i];
                                p_rt->transport_params.port_num = p_rt->num;
                                p_rt->transport_params.sample_interval =
                                        pcfg->si + 1;
                                p_rt->transport_params.offset1 = pcfg->off1;
                                p_rt->transport_params.offset2 = pcfg->off2;
                                p_rt->transport_params.blk_pkg_mode = pcfg->bp_mode;
+                               p_rt->transport_params.blk_grp_ctrl = pcfg->blk_group_count;
+
+                               p_rt->transport_params.hstart = pcfg->hstart;
+                               p_rt->transport_params.hstop = pcfg->hstop;
+                               p_rt->transport_params.lane_ctrl = pcfg->lane_control;
+                               if (pcfg->word_length != SWR_INVALID_PARAM) {
+                                       sdw_fill_port_params(&p_rt->port_params,
+                                                    p_rt->num,
+                                                    pcfg->word_length + 1,
+                                                    SDW_PORT_FLOW_MODE_ISOCH,
+                                                    SDW_PORT_DATA_MODE_NORMAL);
+                               }
                                i++;
                        }
                }
@@ -493,16 +865,6 @@ static u32 qcom_swrm_freq_tbl[MAX_FREQ_NUM] = {
        DEFAULT_CLK_FREQ,
 };
 
-static void qcom_swrm_slave_wq(struct work_struct *work)
-{
-       struct qcom_swrm_ctrl *ctrl =
-                       container_of(work, struct qcom_swrm_ctrl, slave_work);
-
-       qcom_swrm_get_device_status(ctrl);
-       sdw_handle_slave_status(&ctrl->bus, ctrl->status);
-}
-
-
 static void qcom_swrm_stream_free_ports(struct qcom_swrm_ctrl *ctrl,
                                        struct sdw_stream_runtime *stream)
 {
@@ -519,7 +881,7 @@ static void qcom_swrm_stream_free_ports(struct qcom_swrm_ctrl *ctrl,
                        port_mask = &ctrl->din_port_mask;
 
                list_for_each_entry(p_rt, &m_rt->port_list, port_node)
-                       clear_bit(p_rt->num - 1, port_mask);
+                       clear_bit(p_rt->num, port_mask);
        }
 
        mutex_unlock(&ctrl->port_lock);
@@ -535,8 +897,10 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
        struct sdw_master_runtime *m_rt;
        struct sdw_slave_runtime *s_rt;
        struct sdw_port_runtime *p_rt;
+       struct sdw_slave *slave;
        unsigned long *port_mask;
        int i, maxport, pn, nports = 0, ret = 0;
+       unsigned int m_port;
 
        mutex_lock(&ctrl->port_lock);
        list_for_each_entry(m_rt, &stream->master_list, stream_node) {
@@ -549,16 +913,22 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
                }
 
                list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+                       slave = s_rt->slave;
                        list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
+                               m_port = slave->m_port_map[p_rt->num];
                                /* Port numbers start from 1 - 14*/
-                               pn = find_first_zero_bit(port_mask, maxport);
-                               if (pn > (maxport - 1)) {
+                               if (m_port)
+                                       pn = m_port;
+                               else
+                                       pn = find_first_zero_bit(port_mask, maxport);
+
+                               if (pn > maxport) {
                                        dev_err(ctrl->dev, "All ports busy\n");
                                        ret = -EBUSY;
                                        goto err;
                                }
                                set_bit(pn, port_mask);
-                               pconfig[nports].num = pn + 1;
+                               pconfig[nports].num = pn;
                                pconfig[nports].ch_mask = p_rt->ch_mask;
                                nports++;
                        }
@@ -580,7 +950,7 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
 err:
        if (ret) {
                for (i = 0; i < nports; i++)
-                       clear_bit(pconfig[i].num - 1, port_mask);
+                       clear_bit(pconfig[i].num, port_mask);
        }
 
        mutex_unlock(&ctrl->port_lock);
@@ -652,7 +1022,7 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
                ret = snd_soc_dai_set_sdw_stream(codec_dai, sruntime,
                                                 substream->stream);
                if (ret < 0 && ret != -ENOTSUPP) {
-                       dev_err(dai->dev, "Failed to set sdw stream on %s",
+                       dev_err(dai->dev, "Failed to set sdw stream on %s\n",
                                codec_dai->name);
                        sdw_release_stream(sruntime);
                        return ret;
@@ -728,6 +1098,11 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
        u8 off2[QCOM_SDW_MAX_PORTS];
        u8 si[QCOM_SDW_MAX_PORTS];
        u8 bp_mode[QCOM_SDW_MAX_PORTS] = { 0, };
+       u8 hstart[QCOM_SDW_MAX_PORTS];
+       u8 hstop[QCOM_SDW_MAX_PORTS];
+       u8 word_length[QCOM_SDW_MAX_PORTS];
+       u8 blk_group_count[QCOM_SDW_MAX_PORTS];
+       u8 lane_control[QCOM_SDW_MAX_PORTS];
        int i, ret, nports, val;
 
        ctrl->reg_read(ctrl, SWRM_COMP_PARAMS, &val);
@@ -754,6 +1129,9 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
        ctrl->num_dout_ports = val;
 
        nports = ctrl->num_dout_ports + ctrl->num_din_ports;
+       /* Valid port numbers are from 1-14, so mask out port 0 explicitly */
+       set_bit(0, &ctrl->dout_port_mask);
+       set_bit(0, &ctrl->din_port_mask);
 
        ret = of_property_read_u8_array(np, "qcom,ports-offset1",
                                        off1, nports);
@@ -772,11 +1150,35 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
 
        ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode",
                                        bp_mode, nports);
+       if (ret)
+               return ret;
+
+       memset(hstart, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+       of_property_read_u8_array(np, "qcom,ports-hstart", hstart, nports);
+
+       memset(hstop, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+       of_property_read_u8_array(np, "qcom,ports-hstop", hstop, nports);
+
+       memset(word_length, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+       of_property_read_u8_array(np, "qcom,ports-word-length", word_length, nports);
+
+       memset(blk_group_count, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+       of_property_read_u8_array(np, "qcom,ports-block-group-count", blk_group_count, nports);
+
+       memset(lane_control, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+       of_property_read_u8_array(np, "qcom,ports-lane-control", lane_control, nports);
+
        for (i = 0; i < nports; i++) {
-               ctrl->pconfig[i].si = si[i];
-               ctrl->pconfig[i].off1 = off1[i];
-               ctrl->pconfig[i].off2 = off2[i];
-               ctrl->pconfig[i].bp_mode = bp_mode[i];
+               /* Valid port number range is from 1-14 */
+               ctrl->pconfig[i + 1].si = si[i];
+               ctrl->pconfig[i + 1].off1 = off1[i];
+               ctrl->pconfig[i + 1].off2 = off2[i];
+               ctrl->pconfig[i + 1].bp_mode = bp_mode[i];
+               ctrl->pconfig[i + 1].hstart = hstart[i];
+               ctrl->pconfig[i + 1].hstop = hstop[i];
+               ctrl->pconfig[i + 1].word_length = word_length[i];
+               ctrl->pconfig[i + 1].blk_group_count = blk_group_count[i];
+               ctrl->pconfig[i + 1].lane_control = lane_control[i];
        }
 
        return 0;
@@ -833,9 +1235,9 @@ static int qcom_swrm_probe(struct platform_device *pdev)
 
        ctrl->dev = dev;
        dev_set_drvdata(&pdev->dev, ctrl);
-       spin_lock_init(&ctrl->comp_lock);
        mutex_init(&ctrl->port_lock);
-       INIT_WORK(&ctrl->slave_work, qcom_swrm_slave_wq);
+       init_completion(&ctrl->broadcast);
+       init_completion(&ctrl->enumeration);
 
        ctrl->bus.ops = &qcom_swrm_ops;
        ctrl->bus.port_ops = &qcom_swrm_port_ops;
@@ -882,6 +1284,8 @@ static int qcom_swrm_probe(struct platform_device *pdev)
        }
 
        qcom_swrm_init(ctrl);
+       wait_for_completion_timeout(&ctrl->enumeration,
+                                   msecs_to_jiffies(TIMEOUT_MS));
        ret = qcom_swrm_register_dais(ctrl);
        if (ret)
                goto err_master_add;
index 180f38bd003bcacd622af7b50c4a89dc8c213a13..0eed38a79c6dd1a01d95bb50e556a42018784ac9 100644 (file)
@@ -88,6 +88,7 @@ int sdw_slave_add(struct sdw_bus *bus,
 
        return ret;
 }
+EXPORT_SYMBOL(sdw_slave_add);
 
 #if IS_ENABLED(CONFIG_ACPI)
 
@@ -95,7 +96,7 @@ static bool find_slave(struct sdw_bus *bus,
                       struct acpi_device *adev,
                       struct sdw_slave_id *id)
 {
-       unsigned long long addr;
+       u64 addr;
        unsigned int link_id;
        acpi_status status;
 
@@ -108,6 +109,12 @@ static bool find_slave(struct sdw_bus *bus,
                return false;
        }
 
+       if (bus->ops->override_adr)
+               addr = bus->ops->override_adr(bus, addr);
+
+       if (!addr)
+               return false;
+
        /* Extract link id from ADR, Bit 51 to 48 (included) */
        link_id = SDW_DISCO_LINK_ID(addr);
 
index 1099b5d1262be6cdfb66e1dc5a7ffddacc884aa9..1eaedaaba09441cc5086d424463e1fae0014d226 100644 (file)
@@ -261,7 +261,7 @@ static int sdw_program_master_port_params(struct sdw_bus *bus,
  */
 static int sdw_program_port_params(struct sdw_master_runtime *m_rt)
 {
-       struct sdw_slave_runtime *s_rt = NULL;
+       struct sdw_slave_runtime *s_rt;
        struct sdw_bus *bus = m_rt->bus;
        struct sdw_port_runtime *p_rt;
        int ret = 0;
@@ -1375,8 +1375,16 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
        }
 
        ret = sdw_config_stream(&slave->dev, stream, stream_config, true);
-       if (ret)
+       if (ret) {
+               /*
+                * sdw_release_master_stream will release s_rt in slave_rt_list in
+                * stream_error case, but s_rt is only added to slave_rt_list
+                * when sdw_config_stream is successful, so free s_rt explicitly
+                * when sdw_config_stream is failed.
+                */
+               kfree(s_rt);
                goto stream_error;
+       }
 
        list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list);
 
@@ -1449,7 +1457,7 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
 static void sdw_acquire_bus_lock(struct sdw_stream_runtime *stream)
 {
        struct sdw_master_runtime *m_rt;
-       struct sdw_bus *bus = NULL;
+       struct sdw_bus *bus;
 
        /* Iterate for all Master(s) in Master list */
        list_for_each_entry(m_rt, &stream->master_list, stream_node) {
@@ -1470,8 +1478,8 @@ static void sdw_acquire_bus_lock(struct sdw_stream_runtime *stream)
  */
 static void sdw_release_bus_lock(struct sdw_stream_runtime *stream)
 {
-       struct sdw_master_runtime *m_rt = NULL;
-       struct sdw_bus *bus = NULL;
+       struct sdw_master_runtime *m_rt;
+       struct sdw_bus *bus;
 
        /* Iterate for all Master(s) in Master list */
        list_for_each_entry_reverse(m_rt, &stream->master_list, stream_node) {
@@ -1513,7 +1521,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
                if (bus->compute_params) {
                        ret = bus->compute_params(bus);
                        if (ret < 0) {
-                               dev_err(bus->dev, "Compute params failed: %d",
+                               dev_err(bus->dev, "Compute params failed: %d\n",
                                        ret);
                                return ret;
                        }
@@ -1791,7 +1799,7 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
                if (bus->compute_params) {
                        ret = bus->compute_params(bus);
                        if (ret < 0) {
-                               dev_err(bus->dev, "Compute params failed: %d",
+                               dev_err(bus->dev, "Compute params failed: %d\n",
                                        ret);
                                return ret;
                        }
@@ -1855,7 +1863,7 @@ static int set_stream(struct snd_pcm_substream *substream,
        for_each_rtd_dais(rtd, i, dai) {
                ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream);
                if (ret < 0) {
-                       dev_err(rtd->dev, "failed to set stream pointer on dai %s", dai->name);
+                       dev_err(rtd->dev, "failed to set stream pointer on dai %s\n", dai->name);
                        break;
                }
        }
@@ -1888,7 +1896,7 @@ int sdw_startup_stream(void *sdw_substream)
 
        sdw_stream = sdw_alloc_stream(name);
        if (!sdw_stream) {
-               dev_err(rtd->dev, "alloc stream failed for substream DAI %s", substream->name);
+               dev_err(rtd->dev, "alloc stream failed for substream DAI %s\n", substream->name);
                ret = -ENOMEM;
                goto error;
        }
@@ -1927,7 +1935,7 @@ void sdw_shutdown_stream(void *sdw_substream)
        sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream);
 
        if (IS_ERR(sdw_stream)) {
-               dev_err(rtd->dev, "no stream found for DAI %s", dai->name);
+               dev_err(rtd->dev, "no stream found for DAI %s\n", dai->name);
                return;
        }
 
index d0e7ed8f28cc8d7db00c36a533d95690b674c4ee..e5c443bfbdf9d96523ddbe0817f963c34d26f514 100644 (file)
@@ -1166,6 +1166,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
        target_get_sess_cmd(&cmd->se_cmd, true);
 
+       cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
        cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb);
        if (cmd->sense_reason) {
                if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
@@ -1180,8 +1181,6 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        if (cmd->sense_reason)
                goto attach_cmd;
 
-       /* only used for printks or comparing with ->ref_task_tag */
-       cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
        cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
        if (cmd->sense_reason)
                goto attach_cmd;
index 620bcf586ee2416fd5c193b0dc84057d5d5ec777..c44fad2b9fbbf1059bffd9ccfae1716c1299d224 100644 (file)
@@ -347,7 +347,7 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
        ret = tb_retimer_nvm_add(rt);
        if (ret) {
                dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
-               device_del(&rt->dev);
+               device_unregister(&rt->dev);
                return ret;
        }
 
@@ -406,7 +406,7 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
  */
 int tb_retimer_scan(struct tb_port *port)
 {
-       u32 status[TB_MAX_RETIMER_INDEX] = {};
+       u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
        int ret, i, last_idx = 0;
 
        if (!port->cap_usb4)
index 202ee81cfc2ba4de6f37ceaa74cecf0a29f0406e..5531f3afeb2130a958ae2d398c9a58eb36c7d9c1 100644 (file)
@@ -165,4 +165,21 @@ config UIO_HV_GENERIC
          to network and storage devices from userspace.
 
          If you compile this as a module, it will be called uio_hv_generic.
+
+config UIO_DFL
+       tristate "Generic driver for DFL (Device Feature List) bus"
+       depends on FPGA_DFL
+       help
+         Generic DFL (Device Feature List) driver for Userspace I/O devices.
+         It is useful to provide direct access to DFL devices from userspace.
+         A sample userspace application using this driver is available for
+         download in a git repository:
+
+           git clone https://github.com/OPAE/opae-sdk.git
+
+         It could be found at:
+
+           opae-sdk/tools/libopaeuio/
+
+         If you compile this as a module, it will be called uio_dfl.
 endif
index c285dd2a4539b80fd9b957f5dc5f947d0ac2e04d..f2f416a142286b791be31dc0f0e234b2124e75df 100644 (file)
@@ -11,3 +11,4 @@ obj-$(CONFIG_UIO_PRUSS)         += uio_pruss.o
 obj-$(CONFIG_UIO_MF624)         += uio_mf624.o
 obj-$(CONFIG_UIO_FSL_ELBC_GPCM)        += uio_fsl_elbc_gpcm.o
 obj-$(CONFIG_UIO_HV_GENERIC)   += uio_hv_generic.o
+obj-$(CONFIG_UIO_DFL)  += uio_dfl.o
diff --git a/drivers/uio/uio_dfl.c b/drivers/uio/uio_dfl.c
new file mode 100644 (file)
index 0000000..89c0fc7
--- /dev/null
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic DFL driver for Userspace I/O devicess
+ *
+ * Copyright (C) 2021 Intel Corporation, Inc.
+ */
+#include <linux/dfl.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/uio_driver.h>
+
+#define DRIVER_NAME "uio_dfl"
+
+static int uio_dfl_probe(struct dfl_device *ddev)
+{
+       struct resource *r = &ddev->mmio_res;
+       struct device *dev = &ddev->dev;
+       struct uio_info *uioinfo;
+       struct uio_mem *uiomem;
+       int ret;
+
+       uioinfo = devm_kzalloc(dev, sizeof(struct uio_info), GFP_KERNEL);
+       if (!uioinfo)
+               return -ENOMEM;
+
+       uioinfo->name = DRIVER_NAME;
+       uioinfo->version = "0";
+
+       uiomem = &uioinfo->mem[0];
+       uiomem->memtype = UIO_MEM_PHYS;
+       uiomem->addr = r->start & PAGE_MASK;
+       uiomem->offs = r->start & ~PAGE_MASK;
+       uiomem->size = (uiomem->offs + resource_size(r)
+                       + PAGE_SIZE - 1) & PAGE_MASK;
+       uiomem->name = r->name;
+
+       /* Irq is yet to be supported */
+       uioinfo->irq = UIO_IRQ_NONE;
+
+       ret = devm_uio_register_device(dev, uioinfo);
+       if (ret)
+               dev_err(dev, "unable to register uio device\n");
+
+       return ret;
+}
+
+#define FME_FEATURE_ID_ETH_GROUP       0x10
+
+static const struct dfl_device_id uio_dfl_ids[] = {
+       { FME_ID, FME_FEATURE_ID_ETH_GROUP },
+       { }
+};
+MODULE_DEVICE_TABLE(dfl, uio_dfl_ids);
+
+static struct dfl_driver uio_dfl_driver = {
+       .drv = {
+               .name = DRIVER_NAME,
+       },
+       .id_table       = uio_dfl_ids,
+       .probe          = uio_dfl_probe,
+};
+module_dfl_driver(uio_dfl_driver);
+
+MODULE_DESCRIPTION("Generic DFL driver for Userspace I/O devices");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
index f2ebbacd932e943caba483be1fa45042b2c6ed1a..d7d4bdd57f46fbe4c47c9e99a6b67d8bb7cddc5a 100644 (file)
@@ -1128,6 +1128,10 @@ static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
                return -ESHUTDOWN;
        }
 
+       /* Requests has been dequeued during disabling endpoint. */
+       if (!(pep->ep_state & EP_ENABLED))
+               return 0;
+
        spin_lock_irqsave(&pdev->lock, flags);
        ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
        spin_unlock_irqrestore(&pdev->lock, flags);
index 8f1de1fbbeedfc870c1ce1ece01703b6462206c6..d8d3892e5a69af32b6c15f00dff3976edc0e2680 100644 (file)
@@ -63,6 +63,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
 
                dev_info(dev, "stub up\n");
 
+               mutex_lock(&sdev->ud.sysfs_lock);
                spin_lock_irq(&sdev->ud.lock);
 
                if (sdev->ud.status != SDEV_ST_AVAILABLE) {
@@ -87,13 +88,13 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
                tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx");
                if (IS_ERR(tcp_rx)) {
                        sockfd_put(socket);
-                       return -EINVAL;
+                       goto unlock_mutex;
                }
                tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx");
                if (IS_ERR(tcp_tx)) {
                        kthread_stop(tcp_rx);
                        sockfd_put(socket);
-                       return -EINVAL;
+                       goto unlock_mutex;
                }
 
                /* get task structs now */
@@ -112,6 +113,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
                wake_up_process(sdev->ud.tcp_rx);
                wake_up_process(sdev->ud.tcp_tx);
 
+               mutex_unlock(&sdev->ud.sysfs_lock);
+
        } else {
                dev_info(dev, "stub down\n");
 
@@ -122,6 +125,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
                spin_unlock_irq(&sdev->ud.lock);
 
                usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN);
+               mutex_unlock(&sdev->ud.sysfs_lock);
        }
 
        return count;
@@ -130,6 +134,8 @@ sock_err:
        sockfd_put(socket);
 err:
        spin_unlock_irq(&sdev->ud.lock);
+unlock_mutex:
+       mutex_unlock(&sdev->ud.sysfs_lock);
        return -EINVAL;
 }
 static DEVICE_ATTR_WO(usbip_sockfd);
@@ -270,6 +276,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
        sdev->ud.side           = USBIP_STUB;
        sdev->ud.status         = SDEV_ST_AVAILABLE;
        spin_lock_init(&sdev->ud.lock);
+       mutex_init(&sdev->ud.sysfs_lock);
        sdev->ud.tcp_socket     = NULL;
        sdev->ud.sockfd         = -1;
 
index d60ce17d3dd2acf786771334378f7b9b76e1a29d..ea2a20e6d27d306ad841a1b0953a4ca99ea486d7 100644 (file)
@@ -263,6 +263,9 @@ struct usbip_device {
        /* lock for status */
        spinlock_t lock;
 
+       /* mutex for synchronizing sysfs store paths */
+       struct mutex sysfs_lock;
+
        int sockfd;
        struct socket *tcp_socket;
 
index 5d88917c963149b0ba1daa02cc4da4e84ca3c106..086ca76dd0531acfced7450ed2a321f06428bdf1 100644 (file)
@@ -70,6 +70,7 @@ static void event_handler(struct work_struct *work)
        while ((ud = get_event()) != NULL) {
                usbip_dbg_eh("pending event %lx\n", ud->event);
 
+               mutex_lock(&ud->sysfs_lock);
                /*
                 * NOTE: shutdown must come first.
                 * Shutdown the device.
@@ -90,6 +91,7 @@ static void event_handler(struct work_struct *work)
                        ud->eh_ops.unusable(ud);
                        unset_event(ud, USBIP_EH_UNUSABLE);
                }
+               mutex_unlock(&ud->sysfs_lock);
 
                wake_up(&ud->eh_waitq);
        }
index a20a8380ca0c97ab318153e0373333e356f95317..4ba6bcdaa8e9d49b972d7aa608ad9cc7bcd1233c 100644 (file)
@@ -1101,6 +1101,7 @@ static void vhci_device_init(struct vhci_device *vdev)
        vdev->ud.side   = USBIP_VHCI;
        vdev->ud.status = VDEV_ST_NULL;
        spin_lock_init(&vdev->ud.lock);
+       mutex_init(&vdev->ud.sysfs_lock);
 
        INIT_LIST_HEAD(&vdev->priv_rx);
        INIT_LIST_HEAD(&vdev->priv_tx);
index c4b4256e5dad3b0f3d432dfa86e86133f9305b10..e2847cd3e6e363767a6dbf411f24bb628eb29566 100644 (file)
@@ -185,6 +185,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
 
        usbip_dbg_vhci_sysfs("enter\n");
 
+       mutex_lock(&vdev->ud.sysfs_lock);
+
        /* lock */
        spin_lock_irqsave(&vhci->lock, flags);
        spin_lock(&vdev->ud.lock);
@@ -195,6 +197,7 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
                /* unlock */
                spin_unlock(&vdev->ud.lock);
                spin_unlock_irqrestore(&vhci->lock, flags);
+               mutex_unlock(&vdev->ud.sysfs_lock);
 
                return -EINVAL;
        }
@@ -205,6 +208,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
 
        usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
 
+       mutex_unlock(&vdev->ud.sysfs_lock);
+
        return 0;
 }
 
@@ -349,30 +354,36 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
        else
                vdev = &vhci->vhci_hcd_hs->vdev[rhport];
 
+       mutex_lock(&vdev->ud.sysfs_lock);
+
        /* Extract socket from fd. */
        socket = sockfd_lookup(sockfd, &err);
        if (!socket) {
                dev_err(dev, "failed to lookup sock");
-               return -EINVAL;
+               err = -EINVAL;
+               goto unlock_mutex;
        }
        if (socket->type != SOCK_STREAM) {
                dev_err(dev, "Expecting SOCK_STREAM - found %d",
                        socket->type);
                sockfd_put(socket);
-               return -EINVAL;
+               err = -EINVAL;
+               goto unlock_mutex;
        }
 
        /* create threads before locking */
        tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
        if (IS_ERR(tcp_rx)) {
                sockfd_put(socket);
-               return -EINVAL;
+               err = -EINVAL;
+               goto unlock_mutex;
        }
        tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx");
        if (IS_ERR(tcp_tx)) {
                kthread_stop(tcp_rx);
                sockfd_put(socket);
-               return -EINVAL;
+               err = -EINVAL;
+               goto unlock_mutex;
        }
 
        /* get task structs now */
@@ -397,7 +408,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
                 * Will be retried from userspace
                 * if there's another free port.
                 */
-               return -EBUSY;
+               err = -EBUSY;
+               goto unlock_mutex;
        }
 
        dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n",
@@ -423,7 +435,15 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
 
        rh_port_connect(vdev, speed);
 
+       dev_info(dev, "Device attached\n");
+
+       mutex_unlock(&vdev->ud.sysfs_lock);
+
        return count;
+
+unlock_mutex:
+       mutex_unlock(&vdev->ud.sysfs_lock);
+       return err;
 }
 static DEVICE_ATTR_WO(attach);
 
index c8eeabdd9b5685a1310748eedc4386b22a74af68..2bc428f2e26108a1b1c201206d2feaa4c5235560 100644 (file)
@@ -572,6 +572,7 @@ static int init_vudc_hw(struct vudc *udc)
        init_waitqueue_head(&udc->tx_waitq);
 
        spin_lock_init(&ud->lock);
+       mutex_init(&ud->sysfs_lock);
        ud->status = SDEV_ST_AVAILABLE;
        ud->side = USBIP_VUDC;
 
index 7383a543c6d121b814671fdb8ba89a501f98d773..f7633ee655a17bed82b5baf9cffefc69b6ffd7f0 100644 (file)
@@ -112,6 +112,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
                dev_err(dev, "no device");
                return -ENODEV;
        }
+       mutex_lock(&udc->ud.sysfs_lock);
        spin_lock_irqsave(&udc->lock, flags);
        /* Don't export what we don't have */
        if (!udc->driver || !udc->pullup) {
@@ -187,6 +188,8 @@ static ssize_t usbip_sockfd_store(struct device *dev,
 
                wake_up_process(udc->ud.tcp_rx);
                wake_up_process(udc->ud.tcp_tx);
+
+               mutex_unlock(&udc->ud.sysfs_lock);
                return count;
 
        } else {
@@ -207,6 +210,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
        }
 
        spin_unlock_irqrestore(&udc->lock, flags);
+       mutex_unlock(&udc->ud.sysfs_lock);
 
        return count;
 
@@ -216,6 +220,7 @@ unlock_ud:
        spin_unlock_irq(&udc->ud.lock);
 unlock:
        spin_unlock_irqrestore(&udc->lock, flags);
+       mutex_unlock(&udc->ud.sysfs_lock);
 
        return ret;
 }
index 08f742fd24099e4f5e616c4818336dfe9fd26ded..b6cc53ba980cccc1c90f0d952f7359bd59e7a137 100644 (file)
@@ -4,9 +4,13 @@
 #ifndef __MLX5_VDPA_H__
 #define __MLX5_VDPA_H__
 
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/vdpa.h>
 #include <linux/mlx5/driver.h>
 
+#define MLX5V_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+
 struct mlx5_vdpa_direct_mr {
        u64 start;
        u64 end;
index d300f799efcd1f982a41a550c61efc964cfb04e6..800cfd1967adaf75ac2b1df8c8d5dad94162aa51 100644 (file)
@@ -219,6 +219,11 @@ static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_m
        mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
 }
 
+static struct device *get_dma_device(struct mlx5_vdpa_dev *mvdev)
+{
+       return &mvdev->mdev->pdev->dev;
+}
+
 static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
                         struct vhost_iotlb *iotlb)
 {
@@ -234,7 +239,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
        u64 pa;
        u64 paend;
        struct scatterlist *sg;
-       struct device *dma = mvdev->mdev->device;
+       struct device *dma = get_dma_device(mvdev);
 
        for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
             map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
@@ -273,8 +278,10 @@ done:
        mr->log_size = log_entity_size;
        mr->nsg = nsg;
        mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
-       if (!mr->nent)
+       if (!mr->nent) {
+               err = -ENOMEM;
                goto err_map;
+       }
 
        err = create_direct_mr(mvdev, mr);
        if (err)
@@ -291,7 +298,7 @@ err_map:
 
 static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
 {
-       struct device *dma = mvdev->mdev->device;
+       struct device *dma = get_dma_device(mvdev);
 
        destroy_direct_mr(mvdev, mr);
        dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
index 96e6421c5d1cf896447b9db566be421d560e3943..6521cbd0f5c2784fea9ee4956a4e3ba9faaf1452 100644 (file)
@@ -246,7 +246,8 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
        if (err)
                goto err_key;
 
-       kick_addr = pci_resource_start(mdev->pdev, 0) + offset;
+       kick_addr = mdev->bar_addr + offset;
+
        res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
        if (!res->kick_addr) {
                err = -ENOMEM;
index 71397fdafa6a4c013f680afefb51acf74108ad57..4d2809c7d4e32f9b0919b8269fbc1ba0217a437b 100644 (file)
@@ -820,7 +820,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
        MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
        MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
        MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
-                !!(ndev->mvdev.actual_features & VIRTIO_F_VERSION_1));
+                !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
        MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
        MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
        MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
@@ -1169,6 +1169,7 @@ static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
                return;
        }
        mvq->avail_idx = attr.available_index;
+       mvq->used_idx = attr.used_index;
 }
 
 static void suspend_vqs(struct mlx5_vdpa_net *ndev)
@@ -1426,6 +1427,7 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
                return -EINVAL;
        }
 
+       mvq->used_idx = state->avail_index;
        mvq->avail_idx = state->avail_index;
        return 0;
 }
@@ -1443,7 +1445,11 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
         * that cares about emulating the index after vq is stopped.
         */
        if (!mvq->initialized) {
-               state->avail_index = mvq->avail_idx;
+               /* Firmware returns a wrong value for the available index.
+                * Since both values should be identical, we take the value of
+                * used_idx which is reported correctly.
+                */
+               state->avail_index = mvq->used_idx;
                return 0;
        }
 
@@ -1452,7 +1458,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
                mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
                return err;
        }
-       state->avail_index = attr.available_index;
+       state->avail_index = attr.used_index;
        return 0;
 }
 
@@ -1540,21 +1546,11 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
        }
 }
 
-static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
-{
-       int i;
-
-       for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
-               ndev->vqs[i].avail_idx = 0;
-               ndev->vqs[i].used_idx = 0;
-       }
-}
-
 /* TODO: cross-endian support */
 static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
 {
        return virtio_legacy_is_little_endian() ||
-               (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
+               (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
 }
 
 static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
@@ -1785,7 +1781,6 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
        if (!status) {
                mlx5_vdpa_info(mvdev, "performing device reset\n");
                teardown_driver(ndev);
-               clear_virtqueues(ndev);
                mlx5_vdpa_destroy_mr(&ndev->mvdev);
                ndev->mvdev.status = 0;
                ndev->mvdev.mlx_features = 0;
@@ -1907,6 +1902,19 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
        .free = mlx5_vdpa_free,
 };
 
+static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+       u16 hw_mtu;
+       int err;
+
+       err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+       if (err)
+               return err;
+
+       *mtu = hw_mtu - MLX5V_ETH_HARD_MTU;
+       return 0;
+}
+
 static int alloc_resources(struct mlx5_vdpa_net *ndev)
 {
        struct mlx5_vdpa_net_resources *res = &ndev->res;
@@ -1992,7 +2000,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
        init_mvqs(ndev);
        mutex_init(&ndev->reslock);
        config = &ndev->config;
-       err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu);
+       err = query_mtu(mdev, &ndev->mtu);
        if (err)
                goto err_mtu;
 
index 65e7e6b44578c29b3297225096f6bca07ea1cdc9..5023e23db3bcb5824e9bdf4091dbbdbf274b7d61 100644 (file)
@@ -1656,6 +1656,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
 
        index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
 
+       if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+               return -EINVAL;
        if (vma->vm_end < vma->vm_start)
                return -EINVAL;
        if ((vma->vm_flags & VM_SHARED) == 0)
@@ -1664,7 +1666,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
                int regnum = index - VFIO_PCI_NUM_REGIONS;
                struct vfio_pci_region *region = vdev->region + regnum;
 
-               if (region && region->ops && region->ops->mmap &&
+               if (region->ops && region->ops->mmap &&
                    (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
                        return region->ops->mmap(vdev, region, vma);
                return -EINVAL;
index e0a27e3362935651067c87f7afc1099d54978924..bfa4c6ef554e58e98b5737690181e9ce228627a5 100644 (file)
@@ -745,9 +745,11 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
        const struct vdpa_config_ops *ops = vdpa->config;
        int r = 0;
 
+       mutex_lock(&dev->mutex);
+
        r = vhost_dev_check_owner(dev);
        if (r)
-               return r;
+               goto unlock;
 
        switch (msg->type) {
        case VHOST_IOTLB_UPDATE:
@@ -768,6 +770,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
                r = -EINVAL;
                break;
        }
+unlock:
+       mutex_unlock(&dev->mutex);
 
        return r;
 }
index 757d5c3f620b78bd55d7c7ebdb87849868dfe25c..ff09e57f3c3801eac7832b14fd1704907c0b84f0 100644 (file)
@@ -101,17 +101,17 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
                if (!len)
                        return 0;
 
-               cmap->red = kmalloc(size, flags);
+               cmap->red = kzalloc(size, flags);
                if (!cmap->red)
                        goto fail;
-               cmap->green = kmalloc(size, flags);
+               cmap->green = kzalloc(size, flags);
                if (!cmap->green)
                        goto fail;
-               cmap->blue = kmalloc(size, flags);
+               cmap->blue = kzalloc(size, flags);
                if (!cmap->blue)
                        goto fail;
                if (transp) {
-                       cmap->transp = kmalloc(size, flags);
+                       cmap->transp = kzalloc(size, flags);
                        if (!cmap->transp)
                                goto fail;
                } else {
index 4dc9077dd2ac04d07401e210b13fcb052b0f6ab1..a7e6eea2c4a1d09ca364f84e6c2961967f514047 100644 (file)
@@ -308,7 +308,7 @@ static inline int synthvid_send(struct hv_device *hdev,
                               VM_PKT_DATA_INBAND, 0);
 
        if (ret)
-               pr_err("Unable to send packet via vmbus\n");
+               pr_err_ratelimited("Unable to send packet via vmbus; error %d\n", ret);
 
        return ret;
 }
index 7804a2492ad70c775c4a4f9b58bab80b6ae879c2..0d002a355a93627d7eed4f3902ffe690a76cb8ec 100644 (file)
@@ -94,7 +94,7 @@ int acrn_vm_destroy(struct acrn_vm *vm)
 }
 
 /**
- * acrn_inject_msi() - Inject a MSI interrupt into a User VM
+ * acrn_msi_inject() - Inject a MSI interrupt into a User VM
  * @vm:                User VM
  * @msi_addr:  The MSI address
  * @msi_data:  The MSI data
index c281fe5ed68875eab8bbb1d22fae02650b2d4f31..9dcb5a54f7fc411990de615910c980193b9a7524 100644 (file)
@@ -90,7 +90,7 @@ static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
                             struct bin_attribute *bin_attr, char *buf,
                             loff_t off, size_t count)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        return w1_ds2780_io(dev, buf, off, count, 0);
 }
 
index f0d393ae070b87a7b3bbd10f5456fb4114793984..2cb7c020b6075eb2135d8eec760cea5d551767d1 100644 (file)
@@ -87,7 +87,7 @@ static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
                             struct bin_attribute *bin_attr, char *buf,
                             loff_t off, size_t count)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        return w1_ds2781_io(dev, buf, off, count, 0);
 }
 
index 206186db727d4f184ca99cfc538e508cebcdd8d1..6b5d12ba1b6551fdb102338f501e816f26e4275e 100644 (file)
@@ -291,20 +291,7 @@ static struct w1_family w1_family_0d = {
        .fops = &w1_f0d_fops,
 };
 
-static int __init w1_f0d_init(void)
-{
-       pr_info("%s()\n", __func__);
-       return w1_register_family(&w1_family_0d);
-}
-
-static void __exit w1_f0d_fini(void)
-{
-       pr_info("%s()\n", __func__);
-       w1_unregister_family(&w1_family_0d);
-}
-
-module_init(w1_f0d_init);
-module_exit(w1_f0d_fini);
+module_w1_family(w1_family_0d);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Andrew Worsley amworsley@gmail.com");
index 6b00db7169ab311d3f0c9b707fd0a6422fbbb058..aed10b72fc9988c1e937970762368271acced9f7 100644 (file)
@@ -752,18 +752,4 @@ static struct w1_family w1_family_19 = {
        .fops = &w1_f19_fops,
 };
 
-
-/* Module init and remove functions. */
-static int __init w1_f19_init(void)
-{
-       return w1_register_family(&w1_family_19);
-}
-
-static void __exit w1_f19_fini(void)
-{
-       w1_unregister_family(&w1_family_19);
-}
-
-module_init(w1_f19_init);
-module_exit(w1_f19_fini);
-
+module_w1_family(w1_family_19);
index 976eea28f268a62f3eb8332f971161f3ed4ad44f..9d08a1c9c44564b1ebf28863f1eb04417110ec35 100644 (file)
@@ -63,8 +63,8 @@ static u16 bulk_read_device_counter; /* =0 as per C standard */
 #define EEPROM_CMD_READ     "restore"  /* cmd for read eeprom sysfs */
 #define BULK_TRIGGER_CMD    "trigger"  /* cmd to trigger a bulk read */
 
-#define MIN_TEMP       -55     /* min temperature that can be mesured */
-#define MAX_TEMP       125     /* max temperature that can be mesured */
+#define MIN_TEMP       -55     /* min temperature that can be measured */
+#define MAX_TEMP       125     /* max temperature that can be measured */
 
 /* Allowed values for sysfs conv_time attribute */
 #define CONV_TIME_DEFAULT 0
@@ -906,8 +906,7 @@ static inline int temperature_from_RAM(struct w1_slave *sl, u8 rom[9])
 static inline s8 int_to_short(int i)
 {
        /* Prepare to cast to short by eliminating out of range values */
-       i = i > MAX_TEMP ? MAX_TEMP : i;
-       i = i < MIN_TEMP ? MIN_TEMP : i;
+       i = clamp(i, MIN_TEMP, MAX_TEMP);
        return (s8) i;
 }
 
index e5dcb26d85f0a95c9046f0c976e52f9d92d1de0e..1635f421ef2c380ecc69cfe106d60304b96874e4 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Watchdog driver for Marvell Armada 37xx SoCs
  *
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/clk.h>
@@ -366,7 +366,7 @@ static struct platform_driver armada_37xx_wdt_driver = {
 
 module_platform_driver(armada_37xx_wdt_driver);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("Armada 37xx CPU Watchdog");
 
 MODULE_LICENSE("GPL v2");
index 258dfcf9cbdada75e1b4073d1dc818ca9d913c22..2b9017e1cd91f8db5f70d26d0bacf887acac18a6 100644 (file)
@@ -8,6 +8,7 @@
  * Rewritten by Aaro Koskinen.
  */
 
+#include <linux/devm-helpers.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/device.h>
@@ -127,9 +128,12 @@ static int retu_wdt_probe(struct platform_device *pdev)
        wdev->rdev              = rdev;
        wdev->dev               = &pdev->dev;
 
-       INIT_DELAYED_WORK(&wdev->ping_work, retu_wdt_ping_work);
+       ret = devm_delayed_work_autocancel(&pdev->dev, &wdev->ping_work,
+                                          retu_wdt_ping_work);
+       if (ret)
+               return ret;
 
-       ret = watchdog_register_device(retu_wdt);
+       ret = devm_watchdog_register_device(&pdev->dev, retu_wdt);
        if (ret < 0)
                return ret;
 
@@ -138,25 +142,11 @@ static int retu_wdt_probe(struct platform_device *pdev)
        else
                retu_wdt_ping_enable(wdev);
 
-       platform_set_drvdata(pdev, retu_wdt);
-
-       return 0;
-}
-
-static int retu_wdt_remove(struct platform_device *pdev)
-{
-       struct watchdog_device *wdog = platform_get_drvdata(pdev);
-       struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
-
-       watchdog_unregister_device(wdog);
-       cancel_delayed_work_sync(&wdev->ping_work);
-
        return 0;
 }
 
 static struct platform_driver retu_wdt_driver = {
        .probe          = retu_wdt_probe,
-       .remove         = retu_wdt_remove,
        .driver         = {
                .name   = "retu-wdt",
        },
index ea0efd290c3722c34a7f788b724bbbc77493b3fb..5f1ce59b44b95c5bdd4aad5a04dcd61559585eca 100644 (file)
@@ -238,37 +238,6 @@ config XEN_PRIVCMD
        depends on XEN
        default m
 
-config XEN_STUB
-       bool "Xen stub drivers"
-       depends on XEN && X86_64 && BROKEN
-       help
-         Allow kernel to install stub drivers, to reserve space for Xen drivers,
-         i.e. memory hotplug and cpu hotplug, and to block native drivers loaded,
-         so that real Xen drivers can be modular.
-
-         To enable Xen features like cpu and memory hotplug, select Y here.
-
-config XEN_ACPI_HOTPLUG_MEMORY
-       tristate "Xen ACPI memory hotplug"
-       depends on XEN_DOM0 && XEN_STUB && ACPI
-       help
-         This is Xen ACPI memory hotplug.
-
-         Currently Xen only support ACPI memory hot-add. If you want
-         to hot-add memory at runtime (the hot-added memory cannot be
-         removed until machine stop), select Y/M here, otherwise select N.
-
-config XEN_ACPI_HOTPLUG_CPU
-       tristate "Xen ACPI cpu hotplug"
-       depends on XEN_DOM0 && XEN_STUB && ACPI
-       select ACPI_CONTAINER
-       help
-         Xen ACPI cpu enumerating and hotplugging
-
-         For hotplugging, currently Xen only support ACPI cpu hotadd.
-         If you want to hotadd cpu at runtime (the hotadded cpu cannot
-         be removed until machine stop), select Y/M here.
-
 config XEN_ACPI_PROCESSOR
        tristate "Xen ACPI processor"
        depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
index c3621b9f4012f4128522e88fecb510f0e59c005a..3434593455b2884984004e203d6225bafc0fcc6b 100644 (file)
@@ -26,9 +26,6 @@ obj-$(CONFIG_SWIOTLB_XEN)             += swiotlb-xen.o
 obj-$(CONFIG_XEN_MCE_LOG)              += mcelog.o
 obj-$(CONFIG_XEN_PCIDEV_BACKEND)       += xen-pciback/
 obj-$(CONFIG_XEN_PRIVCMD)              += xen-privcmd.o
-obj-$(CONFIG_XEN_STUB)                 += xen-stub.o
-obj-$(CONFIG_XEN_ACPI_HOTPLUG_MEMORY)  += xen-acpi-memhotplug.o
-obj-$(CONFIG_XEN_ACPI_HOTPLUG_CPU)     += xen-acpi-cpuhotplug.o
 obj-$(CONFIG_XEN_ACPI_PROCESSOR)       += xen-acpi-processor.o
 obj-$(CONFIG_XEN_EFI)                  += efi.o
 obj-$(CONFIG_XEN_SCSI_BACKEND)         += xen-scsiback.o
index 8236e2364eeb4e4415d2ab53348dc2306ce60126..7bbfd58958bcc542049e5763bb1d4df1bcbcadb6 100644 (file)
@@ -110,7 +110,7 @@ struct irq_info {
        unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
        unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
        u64 eoi_time;           /* Time in jiffies when to EOI. */
-       spinlock_t lock;
+       raw_spinlock_t lock;
 
        union {
                unsigned short virq;
@@ -312,7 +312,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
        info->evtchn = evtchn;
        info->cpu = cpu;
        info->mask_reason = EVT_MASK_REASON_EXPLICIT;
-       spin_lock_init(&info->lock);
+       raw_spin_lock_init(&info->lock);
 
        ret = set_evtchn_to_irq(evtchn, irq);
        if (ret < 0)
@@ -472,28 +472,28 @@ static void do_mask(struct irq_info *info, u8 reason)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&info->lock, flags);
+       raw_spin_lock_irqsave(&info->lock, flags);
 
        if (!info->mask_reason)
                mask_evtchn(info->evtchn);
 
        info->mask_reason |= reason;
 
-       spin_unlock_irqrestore(&info->lock, flags);
+       raw_spin_unlock_irqrestore(&info->lock, flags);
 }
 
 static void do_unmask(struct irq_info *info, u8 reason)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&info->lock, flags);
+       raw_spin_lock_irqsave(&info->lock, flags);
 
        info->mask_reason &= ~reason;
 
        if (!info->mask_reason)
                unmask_evtchn(info->evtchn);
 
-       spin_unlock_irqrestore(&info->lock, flags);
+       raw_spin_unlock_irqrestore(&info->lock, flags);
 }
 
 #ifdef CONFIG_X86
index cdc6daa7a9f66f9a534e619af244bc8843bcccef..1bcdd52277716edebc9ad4e4807b87a5c181f715 100644 (file)
@@ -345,41 +345,6 @@ static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-/* Sync with Xen hypervisor after cpu hotadded */
-void xen_pcpu_hotplug_sync(void)
-{
-       schedule_work(&xen_pcpu_work);
-}
-EXPORT_SYMBOL_GPL(xen_pcpu_hotplug_sync);
-
-/*
- * For hypervisor presented cpu, return logic cpu id;
- * For hypervisor non-presented cpu, return -ENODEV.
- */
-int xen_pcpu_id(uint32_t acpi_id)
-{
-       int cpu_id = 0, max_id = 0;
-       struct xen_platform_op op;
-
-       op.cmd = XENPF_get_cpuinfo;
-       while (cpu_id <= max_id) {
-               op.u.pcpu_info.xen_cpuid = cpu_id;
-               if (HYPERVISOR_platform_op(&op)) {
-                       cpu_id++;
-                       continue;
-               }
-
-               if (acpi_id == op.u.pcpu_info.acpi_id)
-                       return cpu_id;
-               if (op.u.pcpu_info.max_present > max_id)
-                       max_id = op.u.pcpu_info.max_present;
-               cpu_id++;
-       }
-
-       return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(xen_pcpu_id);
-
 static int __init xen_pcpu_init(void)
 {
        int irq, ret;
index 108edbcbc040f753af3ec5df375cf763bbd5eeb1..152dd33bb223673a59e1ff4a74d3bb2bd2bad3bd 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/math64.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/static_call.h>
 
 #include <asm/paravirt.h>
 #include <asm/xen/hypervisor.h>
@@ -175,7 +176,7 @@ void __init xen_time_setup_guest(void)
        xen_runstate_remote = !HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                        VMASST_TYPE_runstate_update_flag);
 
-       pv_ops.time.steal_clock = xen_steal_clock;
+       static_call_update(pv_steal_clock, xen_steal_clock);
 
        static_key_slow_inc(&paravirt_steal_enabled);
        if (xen_runstate_remote)
diff --git a/drivers/xen/xen-acpi-cpuhotplug.c b/drivers/xen/xen-acpi-cpuhotplug.c
deleted file mode 100644 (file)
index 00ab1ec..0000000
+++ /dev/null
@@ -1,446 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012 Intel Corporation
- *    Author: Liu Jinsong <jinsong.liu@intel.com>
- *    Author: Jiang Yunhong <yunhong.jiang@intel.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/cpu.h>
-#include <linux/acpi.h>
-#include <linux/uaccess.h>
-#include <acpi/processor.h>
-#include <xen/acpi.h>
-#include <xen/interface/platform.h>
-#include <asm/xen/hypercall.h>
-
-#define PREFIX "ACPI:xen_cpu_hotplug:"
-
-#define INSTALL_NOTIFY_HANDLER         0
-#define UNINSTALL_NOTIFY_HANDLER       1
-
-static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr);
-
-/* --------------------------------------------------------------------------
-                               Driver Interface
--------------------------------------------------------------------------- */
-
-static int xen_acpi_processor_enable(struct acpi_device *device)
-{
-       acpi_status status = 0;
-       unsigned long long value;
-       union acpi_object object = { 0 };
-       struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-       struct acpi_processor *pr = acpi_driver_data(device);
-
-       if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
-               /* Declared with "Processor" statement; match ProcessorID */
-               status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
-               if (ACPI_FAILURE(status)) {
-                       pr_err(PREFIX "Evaluating processor object\n");
-                       return -ENODEV;
-               }
-
-               pr->acpi_id = object.processor.proc_id;
-       } else {
-               /* Declared with "Device" statement; match _UID */
-               status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
-                                               NULL, &value);
-               if (ACPI_FAILURE(status)) {
-                       pr_err(PREFIX "Evaluating processor _UID\n");
-                       return -ENODEV;
-               }
-
-               pr->acpi_id = value;
-       }
-
-       pr->id = xen_pcpu_id(pr->acpi_id);
-
-       if (invalid_logical_cpuid(pr->id))
-               /* This cpu is not presented at hypervisor, try to hotadd it */
-               if (ACPI_FAILURE(xen_acpi_cpu_hotadd(pr))) {
-                       pr_err(PREFIX "Hotadd CPU (acpi_id = %d) failed.\n",
-                                       pr->acpi_id);
-                       return -ENODEV;
-               }
-
-       return 0;
-}
-
-static int xen_acpi_processor_add(struct acpi_device *device)
-{
-       int ret;
-       struct acpi_processor *pr;
-
-       if (!device)
-               return -EINVAL;
-
-       pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
-       if (!pr)
-               return -ENOMEM;
-
-       pr->handle = device->handle;
-       strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
-       strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
-       device->driver_data = pr;
-
-       ret = xen_acpi_processor_enable(device);
-       if (ret)
-               pr_err(PREFIX "Error when enabling Xen processor\n");
-
-       return ret;
-}
-
-static int xen_acpi_processor_remove(struct acpi_device *device)
-{
-       struct acpi_processor *pr;
-
-       if (!device)
-               return -EINVAL;
-
-       pr = acpi_driver_data(device);
-       if (!pr)
-               return -EINVAL;
-
-       kfree(pr);
-       return 0;
-}
-
-/*--------------------------------------------------------------
-               Acpi processor hotplug support
---------------------------------------------------------------*/
-
-static int is_processor_present(acpi_handle handle)
-{
-       acpi_status status;
-       unsigned long long sta = 0;
-
-
-       status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
-
-       if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
-               return 1;
-
-       /*
-        * _STA is mandatory for a processor that supports hot plug
-        */
-       if (status == AE_NOT_FOUND)
-               pr_info(PREFIX "Processor does not support hot plug\n");
-       else
-               pr_info(PREFIX "Processor Device is not present");
-       return 0;
-}
-
-static int xen_apic_id(acpi_handle handle)
-{
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *obj;
-       struct acpi_madt_local_apic *lapic;
-       int apic_id;
-
-       if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
-               return -EINVAL;
-
-       if (!buffer.length || !buffer.pointer)
-               return -EINVAL;
-
-       obj = buffer.pointer;
-       if (obj->type != ACPI_TYPE_BUFFER ||
-           obj->buffer.length < sizeof(*lapic)) {
-               kfree(buffer.pointer);
-               return -EINVAL;
-       }
-
-       lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
-
-       if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
-           !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
-               kfree(buffer.pointer);
-               return -EINVAL;
-       }
-
-       apic_id = (uint32_t)lapic->id;
-       kfree(buffer.pointer);
-       buffer.length = ACPI_ALLOCATE_BUFFER;
-       buffer.pointer = NULL;
-
-       return apic_id;
-}
-
-static int xen_hotadd_cpu(struct acpi_processor *pr)
-{
-       int cpu_id, apic_id, pxm;
-       struct xen_platform_op op;
-
-       apic_id = xen_apic_id(pr->handle);
-       if (apic_id < 0) {
-               pr_err(PREFIX "Failed to get apic_id for acpi_id %d\n",
-                               pr->acpi_id);
-               return -ENODEV;
-       }
-
-       pxm = xen_acpi_get_pxm(pr->handle);
-       if (pxm < 0) {
-               pr_err(PREFIX "Failed to get _PXM for acpi_id %d\n",
-                               pr->acpi_id);
-               return pxm;
-       }
-
-       op.cmd = XENPF_cpu_hotadd;
-       op.u.cpu_add.apic_id = apic_id;
-       op.u.cpu_add.acpi_id = pr->acpi_id;
-       op.u.cpu_add.pxm = pxm;
-
-       cpu_id = HYPERVISOR_platform_op(&op);
-       if (cpu_id < 0)
-               pr_err(PREFIX "Failed to hotadd CPU for acpi_id %d\n",
-                               pr->acpi_id);
-
-       return cpu_id;
-}
-
-static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr)
-{
-       if (!is_processor_present(pr->handle))
-               return AE_ERROR;
-
-       pr->id = xen_hotadd_cpu(pr);
-       if (invalid_logical_cpuid(pr->id))
-               return AE_ERROR;
-
-       /*
-        * Sync with Xen hypervisor, providing new /sys/.../xen_cpuX
-        * interface after cpu hotadded.
-        */
-       xen_pcpu_hotplug_sync();
-
-       return AE_OK;
-}
-
-static int acpi_processor_device_remove(struct acpi_device *device)
-{
-       pr_debug(PREFIX "Xen does not support CPU hotremove\n");
-
-       return -ENOSYS;
-}
-
-static void acpi_processor_hotplug_notify(acpi_handle handle,
-                                         u32 event, void *data)
-{
-       struct acpi_processor *pr;
-       struct acpi_device *device = NULL;
-       u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
-       int result;
-
-       acpi_scan_lock_acquire();
-
-       switch (event) {
-       case ACPI_NOTIFY_BUS_CHECK:
-       case ACPI_NOTIFY_DEVICE_CHECK:
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                       "Processor driver received %s event\n",
-                       (event == ACPI_NOTIFY_BUS_CHECK) ?
-                       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));
-
-               if (!is_processor_present(handle))
-                       break;
-
-               acpi_bus_get_device(handle, &device);
-               if (acpi_device_enumerated(device))
-                       break;
-
-               result = acpi_bus_scan(handle);
-               if (result) {
-                       pr_err(PREFIX "Unable to add the device\n");
-                       break;
-               }
-               device = NULL;
-               acpi_bus_get_device(handle, &device);
-               if (!acpi_device_enumerated(device)) {
-                       pr_err(PREFIX "Missing device object\n");
-                       break;
-               }
-               ost_code = ACPI_OST_SC_SUCCESS;
-               break;
-
-       case ACPI_NOTIFY_EJECT_REQUEST:
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "received ACPI_NOTIFY_EJECT_REQUEST\n"));
-
-               if (acpi_bus_get_device(handle, &device)) {
-                       pr_err(PREFIX "Device don't exist, dropping EJECT\n");
-                       break;
-               }
-               pr = acpi_driver_data(device);
-               if (!pr) {
-                       pr_err(PREFIX "Driver data is NULL, dropping EJECT\n");
-                       break;
-               }
-
-               /*
-                * TBD: implement acpi_processor_device_remove if Xen support
-                * CPU hotremove in the future.
-                */
-               acpi_processor_device_remove(device);
-               break;
-
-       default:
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "Unsupported event [0x%x]\n", event));
-
-               /* non-hotplug event; possibly handled by other handler */
-               goto out;
-       }
-
-       (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
-
-out:
-       acpi_scan_lock_release();
-}
-
-static acpi_status is_processor_device(acpi_handle handle)
-{
-       struct acpi_device_info *info;
-       char *hid;
-       acpi_status status;
-
-       status = acpi_get_object_info(handle, &info);
-       if (ACPI_FAILURE(status))
-               return status;
-
-       if (info->type == ACPI_TYPE_PROCESSOR) {
-               kfree(info);
-               return AE_OK;   /* found a processor object */
-       }
-
-       if (!(info->valid & ACPI_VALID_HID)) {
-               kfree(info);
-               return AE_ERROR;
-       }
-
-       hid = info->hardware_id.string;
-       if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) {
-               kfree(info);
-               return AE_ERROR;
-       }
-
-       kfree(info);
-       return AE_OK;   /* found a processor device object */
-}
-
-static acpi_status
-processor_walk_namespace_cb(acpi_handle handle,
-                           u32 lvl, void *context, void **rv)
-{
-       acpi_status status;
-       int *action = context;
-
-       status = is_processor_device(handle);
-       if (ACPI_FAILURE(status))
-               return AE_OK;   /* not a processor; continue to walk */
-
-       switch (*action) {
-       case INSTALL_NOTIFY_HANDLER:
-               acpi_install_notify_handler(handle,
-                                           ACPI_SYSTEM_NOTIFY,
-                                           acpi_processor_hotplug_notify,
-                                           NULL);
-               break;
-       case UNINSTALL_NOTIFY_HANDLER:
-               acpi_remove_notify_handler(handle,
-                                          ACPI_SYSTEM_NOTIFY,
-                                          acpi_processor_hotplug_notify);
-               break;
-       default:
-               break;
-       }
-
-       /* found a processor; skip walking underneath */
-       return AE_CTRL_DEPTH;
-}
-
-static
-void acpi_processor_install_hotplug_notify(void)
-{
-       int action = INSTALL_NOTIFY_HANDLER;
-       acpi_walk_namespace(ACPI_TYPE_ANY,
-                           ACPI_ROOT_OBJECT,
-                           ACPI_UINT32_MAX,
-                           processor_walk_namespace_cb, NULL, &action, NULL);
-}
-
-static
-void acpi_processor_uninstall_hotplug_notify(void)
-{
-       int action = UNINSTALL_NOTIFY_HANDLER;
-       acpi_walk_namespace(ACPI_TYPE_ANY,
-                           ACPI_ROOT_OBJECT,
-                           ACPI_UINT32_MAX,
-                           processor_walk_namespace_cb, NULL, &action, NULL);
-}
-
-static const struct acpi_device_id processor_device_ids[] = {
-       {ACPI_PROCESSOR_OBJECT_HID, 0},
-       {ACPI_PROCESSOR_DEVICE_HID, 0},
-       {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, processor_device_ids);
-
-static struct acpi_driver xen_acpi_processor_driver = {
-       .name = "processor",
-       .class = ACPI_PROCESSOR_CLASS,
-       .ids = processor_device_ids,
-       .ops = {
-               .add = xen_acpi_processor_add,
-               .remove = xen_acpi_processor_remove,
-               },
-};
-
-static int __init xen_acpi_processor_init(void)
-{
-       int result = 0;
-
-       if (!xen_initial_domain())
-               return -ENODEV;
-
-       /* unregister the stub which only used to reserve driver space */
-       xen_stub_processor_exit();
-
-       result = acpi_bus_register_driver(&xen_acpi_processor_driver);
-       if (result < 0) {
-               xen_stub_processor_init();
-               return result;
-       }
-
-       acpi_processor_install_hotplug_notify();
-       return 0;
-}
-
-static void __exit xen_acpi_processor_exit(void)
-{
-       if (!xen_initial_domain())
-               return;
-
-       acpi_processor_uninstall_hotplug_notify();
-
-       acpi_bus_unregister_driver(&xen_acpi_processor_driver);
-
-       /*
-        * stub reserve space again to prevent any chance of native
-        * driver loading.
-        */
-       xen_stub_processor_init();
-       return;
-}
-
-module_init(xen_acpi_processor_init);
-module_exit(xen_acpi_processor_exit);
-ACPI_MODULE_NAME("xen-acpi-cpuhotplug");
-MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>");
-MODULE_DESCRIPTION("Xen Hotplug CPU Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c
deleted file mode 100644 (file)
index f914b72..0000000
+++ /dev/null
@@ -1,475 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012 Intel Corporation
- *    Author: Liu Jinsong <jinsong.liu@intel.com>
- *    Author: Jiang Yunhong <yunhong.jiang@intel.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/acpi.h>
-#include <xen/acpi.h>
-#include <xen/interface/platform.h>
-#include <asm/xen/hypercall.h>
-
-#define PREFIX "ACPI:xen_memory_hotplug:"
-
-struct acpi_memory_info {
-       struct list_head list;
-       u64 start_addr;         /* Memory Range start physical addr */
-       u64 length;             /* Memory Range length */
-       unsigned short caching; /* memory cache attribute */
-       unsigned short write_protect;   /* memory read/write attribute */
-                               /* copied from buffer getting from _CRS */
-       unsigned int enabled:1;
-};
-
-struct acpi_memory_device {
-       struct acpi_device *device;
-       struct list_head res_list;
-};
-
-static bool acpi_hotmem_initialized __read_mostly;
-
-static int xen_hotadd_memory(int pxm, struct acpi_memory_info *info)
-{
-       int rc;
-       struct xen_platform_op op;
-
-       op.cmd = XENPF_mem_hotadd;
-       op.u.mem_add.spfn = info->start_addr >> PAGE_SHIFT;
-       op.u.mem_add.epfn = (info->start_addr + info->length) >> PAGE_SHIFT;
-       op.u.mem_add.pxm = pxm;
-
-       rc = HYPERVISOR_dom0_op(&op);
-       if (rc)
-               pr_err(PREFIX "Xen Hotplug Memory Add failed on "
-                       "0x%lx -> 0x%lx, _PXM: %d, error: %d\n",
-                       (unsigned long)info->start_addr,
-                       (unsigned long)(info->start_addr + info->length),
-                       pxm, rc);
-
-       return rc;
-}
-
-static int xen_acpi_memory_enable_device(struct acpi_memory_device *mem_device)
-{
-       int pxm, result;
-       int num_enabled = 0;
-       struct acpi_memory_info *info;
-
-       if (!mem_device)
-               return -EINVAL;
-
-       pxm = xen_acpi_get_pxm(mem_device->device->handle);
-       if (pxm < 0)
-               return pxm;
-
-       list_for_each_entry(info, &mem_device->res_list, list) {
-               if (info->enabled) { /* just sanity check...*/
-                       num_enabled++;
-                       continue;
-               }
-
-               if (!info->length)
-                       continue;
-
-               result = xen_hotadd_memory(pxm, info);
-               if (result)
-                       continue;
-               info->enabled = 1;
-               num_enabled++;
-       }
-
-       if (!num_enabled)
-               return -ENODEV;
-
-       return 0;
-}
-
-static acpi_status
-acpi_memory_get_resource(struct acpi_resource *resource, void *context)
-{
-       struct acpi_memory_device *mem_device = context;
-       struct acpi_resource_address64 address64;
-       struct acpi_memory_info *info, *new;
-       acpi_status status;
-
-       status = acpi_resource_to_address64(resource, &address64);
-       if (ACPI_FAILURE(status) ||
-           (address64.resource_type != ACPI_MEMORY_RANGE))
-               return AE_OK;
-
-       list_for_each_entry(info, &mem_device->res_list, list) {
-               if ((info->caching == address64.info.mem.caching) &&
-                   (info->write_protect == address64.info.mem.write_protect) &&
-                   (info->start_addr + info->length == address64.address.minimum)) {
-                       info->length += address64.address.address_length;
-                       return AE_OK;
-               }
-       }
-
-       new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL);
-       if (!new)
-               return AE_ERROR;
-
-       INIT_LIST_HEAD(&new->list);
-       new->caching = address64.info.mem.caching;
-       new->write_protect = address64.info.mem.write_protect;
-       new->start_addr = address64.address.minimum;
-       new->length = address64.address.address_length;
-       list_add_tail(&new->list, &mem_device->res_list);
-
-       return AE_OK;
-}
-
-static int
-acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
-{
-       acpi_status status;
-       struct acpi_memory_info *info, *n;
-
-       if (!list_empty(&mem_device->res_list))
-               return 0;
-
-       status = acpi_walk_resources(mem_device->device->handle,
-               METHOD_NAME__CRS, acpi_memory_get_resource, mem_device);
-
-       if (ACPI_FAILURE(status)) {
-               list_for_each_entry_safe(info, n, &mem_device->res_list, list)
-                       kfree(info);
-               INIT_LIST_HEAD(&mem_device->res_list);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int acpi_memory_get_device(acpi_handle handle,
-                                 struct acpi_memory_device **mem_device)
-{
-       struct acpi_device *device = NULL;
-       int result = 0;
-
-       acpi_scan_lock_acquire();
-
-       acpi_bus_get_device(handle, &device);
-       if (acpi_device_enumerated(device))
-               goto end;
-
-       /*
-        * Now add the notified device.  This creates the acpi_device
-        * and invokes .add function
-        */
-       result = acpi_bus_scan(handle);
-       if (result) {
-               pr_warn(PREFIX "ACPI namespace scan failed\n");
-               result = -EINVAL;
-               goto out;
-       }
-       device = NULL;
-       acpi_bus_get_device(handle, &device);
-       if (!acpi_device_enumerated(device)) {
-               pr_warn(PREFIX "Missing device object\n");
-               result = -EINVAL;
-               goto out;
-       }
-
-end:
-       *mem_device = acpi_driver_data(device);
-       if (!(*mem_device)) {
-               pr_err(PREFIX "driver data not found\n");
-               result = -ENODEV;
-               goto out;
-       }
-
-out:
-       acpi_scan_lock_release();
-       return result;
-}
-
-static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
-{
-       unsigned long long current_status;
-
-       /* Get device present/absent information from the _STA */
-       if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
-                               "_STA", NULL, &current_status)))
-               return -ENODEV;
-       /*
-        * Check for device status. Device should be
-        * present/enabled/functioning.
-        */
-       if (!((current_status & ACPI_STA_DEVICE_PRESENT)
-             && (current_status & ACPI_STA_DEVICE_ENABLED)
-             && (current_status & ACPI_STA_DEVICE_FUNCTIONING)))
-               return -ENODEV;
-
-       return 0;
-}
-
-static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
-{
-       pr_debug(PREFIX "Xen does not support memory hotremove\n");
-
-       return -ENOSYS;
-}
-
-static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
-{
-       struct acpi_memory_device *mem_device;
-       struct acpi_device *device;
-       u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
-
-       switch (event) {
-       case ACPI_NOTIFY_BUS_CHECK:
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                       "\nReceived BUS CHECK notification for device\n"));
-               fallthrough;
-       case ACPI_NOTIFY_DEVICE_CHECK:
-               if (event == ACPI_NOTIFY_DEVICE_CHECK)
-                       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                       "\nReceived DEVICE CHECK notification for device\n"));
-
-               if (acpi_memory_get_device(handle, &mem_device)) {
-                       pr_err(PREFIX "Cannot find driver data\n");
-                       break;
-               }
-
-               ost_code = ACPI_OST_SC_SUCCESS;
-               break;
-
-       case ACPI_NOTIFY_EJECT_REQUEST:
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                       "\nReceived EJECT REQUEST notification for device\n"));
-
-               acpi_scan_lock_acquire();
-               if (acpi_bus_get_device(handle, &device)) {
-                       acpi_scan_lock_release();
-                       pr_err(PREFIX "Device doesn't exist\n");
-                       break;
-               }
-               mem_device = acpi_driver_data(device);
-               if (!mem_device) {
-                       acpi_scan_lock_release();
-                       pr_err(PREFIX "Driver Data is NULL\n");
-                       break;
-               }
-
-               /*
-                * TBD: implement acpi_memory_disable_device and invoke
-                * acpi_bus_remove if Xen support hotremove in the future
-                */
-               acpi_memory_disable_device(mem_device);
-               acpi_scan_lock_release();
-               break;
-
-       default:
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "Unsupported event [0x%x]\n", event));
-               /* non-hotplug event; possibly handled by other handler */
-               return;
-       }
-
-       (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
-       return;
-}
-
-static int xen_acpi_memory_device_add(struct acpi_device *device)
-{
-       int result;
-       struct acpi_memory_device *mem_device = NULL;
-
-
-       if (!device)
-               return -EINVAL;
-
-       mem_device = kzalloc(sizeof(struct acpi_memory_device), GFP_KERNEL);
-       if (!mem_device)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&mem_device->res_list);
-       mem_device->device = device;
-       sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
-       sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
-       device->driver_data = mem_device;
-
-       /* Get the range from the _CRS */
-       result = acpi_memory_get_device_resources(mem_device);
-       if (result) {
-               kfree(mem_device);
-               return result;
-       }
-
-       /*
-        * For booting existed memory devices, early boot code has recognized
-        * memory area by EFI/E820. If DSDT shows these memory devices on boot,
-        * hotplug is not necessary for them.
-        * For hot-added memory devices during runtime, it need hypercall to
-        * Xen hypervisor to add memory.
-        */
-       if (!acpi_hotmem_initialized)
-               return 0;
-
-       if (!acpi_memory_check_device(mem_device))
-               result = xen_acpi_memory_enable_device(mem_device);
-
-       return result;
-}
-
-static int xen_acpi_memory_device_remove(struct acpi_device *device)
-{
-       struct acpi_memory_device *mem_device = NULL;
-
-       if (!device || !acpi_driver_data(device))
-               return -EINVAL;
-
-       mem_device = acpi_driver_data(device);
-       kfree(mem_device);
-
-       return 0;
-}
-
-/*
- * Helper function to check for memory device
- */
-static acpi_status is_memory_device(acpi_handle handle)
-{
-       char *hardware_id;
-       acpi_status status;
-       struct acpi_device_info *info;
-
-       status = acpi_get_object_info(handle, &info);
-       if (ACPI_FAILURE(status))
-               return status;
-
-       if (!(info->valid & ACPI_VALID_HID)) {
-               kfree(info);
-               return AE_ERROR;
-       }
-
-       hardware_id = info->hardware_id.string;
-       if ((hardware_id == NULL) ||
-           (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
-               status = AE_ERROR;
-
-       kfree(info);
-       return status;
-}
-
-static acpi_status
-acpi_memory_register_notify_handler(acpi_handle handle,
-                                   u32 level, void *ctxt, void **retv)
-{
-       acpi_status status;
-
-       status = is_memory_device(handle);
-       if (ACPI_FAILURE(status))
-               return AE_OK;   /* continue */
-
-       status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
-                                            acpi_memory_device_notify, NULL);
-       /* continue */
-       return AE_OK;
-}
-
-static acpi_status
-acpi_memory_deregister_notify_handler(acpi_handle handle,
-                                     u32 level, void *ctxt, void **retv)
-{
-       acpi_status status;
-
-       status = is_memory_device(handle);
-       if (ACPI_FAILURE(status))
-               return AE_OK;   /* continue */
-
-       status = acpi_remove_notify_handler(handle,
-                                           ACPI_SYSTEM_NOTIFY,
-                                           acpi_memory_device_notify);
-
-       return AE_OK;   /* continue */
-}
-
-static const struct acpi_device_id memory_device_ids[] = {
-       {ACPI_MEMORY_DEVICE_HID, 0},
-       {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, memory_device_ids);
-
-static struct acpi_driver xen_acpi_memory_device_driver = {
-       .name = "acpi_memhotplug",
-       .class = ACPI_MEMORY_DEVICE_CLASS,
-       .ids = memory_device_ids,
-       .ops = {
-               .add = xen_acpi_memory_device_add,
-               .remove = xen_acpi_memory_device_remove,
-               },
-};
-
-static int __init xen_acpi_memory_device_init(void)
-{
-       int result;
-       acpi_status status;
-
-       if (!xen_initial_domain())
-               return -ENODEV;
-
-       /* unregister the stub which only used to reserve driver space */
-       xen_stub_memory_device_exit();
-
-       result = acpi_bus_register_driver(&xen_acpi_memory_device_driver);
-       if (result < 0) {
-               xen_stub_memory_device_init();
-               return -ENODEV;
-       }
-
-       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
-                                    ACPI_UINT32_MAX,
-                                    acpi_memory_register_notify_handler,
-                                    NULL, NULL, NULL);
-
-       if (ACPI_FAILURE(status)) {
-               pr_warn(PREFIX "walk_namespace failed\n");
-               acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);
-               xen_stub_memory_device_init();
-               return -ENODEV;
-       }
-
-       acpi_hotmem_initialized = true;
-       return 0;
-}
-
-static void __exit xen_acpi_memory_device_exit(void)
-{
-       acpi_status status;
-
-       if (!xen_initial_domain())
-               return;
-
-       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
-                                    ACPI_UINT32_MAX,
-                                    acpi_memory_deregister_notify_handler,
-                                    NULL, NULL, NULL);
-       if (ACPI_FAILURE(status))
-               pr_warn(PREFIX "walk_namespace failed\n");
-
-       acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);
-
-       /*
-        * stub reserve space again to prevent any chance of native
-        * driver loading.
-        */
-       xen_stub_memory_device_init();
-       return;
-}
-
-module_init(xen_acpi_memory_device_init);
-module_exit(xen_acpi_memory_device_exit);
-ACPI_MODULE_NAME("xen-acpi-memhotplug");
-MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>");
-MODULE_DESCRIPTION("Xen Hotplug Mem Driver");
-MODULE_LICENSE("GPL");
index cb904ac830064672d8125a0bb6f86a79a0bed346..f8e4faa96ad66f1e5178ca0d9cf36b0b45d5b0b4 100644 (file)
@@ -802,7 +802,7 @@ static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
                        "guest with no AER driver should have been killed\n");
                goto end;
        }
-       result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);
+       result = common_process(psdev, pci_channel_io_normal, XEN_PCI_OP_aer_slotreset, result);
 
        if (result == PCI_ERS_RESULT_NONE ||
                result == PCI_ERS_RESULT_DISCONNECT) {
@@ -859,7 +859,7 @@ static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
                        "guest with no AER driver should have been killed\n");
                goto end;
        }
-       result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);
+       result = common_process(psdev, pci_channel_io_normal, XEN_PCI_OP_aer_mmio, result);
 
        if (result == PCI_ERS_RESULT_NONE ||
                result == PCI_ERS_RESULT_DISCONNECT) {
@@ -970,7 +970,7 @@ static void xen_pcibk_error_resume(struct pci_dev *dev)
                kill_domain_by_device(psdev);
                goto end;
        }
-       common_process(psdev, 1, XEN_PCI_OP_aer_resume,
+       common_process(psdev, pci_channel_io_normal, XEN_PCI_OP_aer_resume,
                       PCI_ERS_RESULT_RECOVERED);
 end:
        if (psdev)
index 5447b5ab7c7666d207b288092fbb90f2783aa71a..4162d0e7e00d7ed5334d20c2ad2ac226fe27837f 100644 (file)
@@ -233,7 +233,6 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
                                        unsigned int *devfn)
 {
        struct pci_dev_entry *entry;
-       struct pci_dev *dev = NULL;
        struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
        int found = 0, slot;
 
@@ -242,11 +241,7 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
                list_for_each_entry(entry,
                            &vpci_dev->dev_list[slot],
                            list) {
-                       dev = entry->dev;
-                       if (dev && dev->bus->number == pcidev->bus->number
-                               && pci_domain_nr(dev->bus) ==
-                                       pci_domain_nr(pcidev->bus)
-                               && dev->devfn == pcidev->devfn) {
+                       if (entry->dev == pcidev) {
                                found = 1;
                                *domain = 0;
                                *bus = 0;
diff --git a/drivers/xen/xen-stub.c b/drivers/xen/xen-stub.c
deleted file mode 100644 (file)
index 3be4e74..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * xen-stub.c - stub drivers to reserve space for Xen
- *
- * Copyright (C) 2012 Intel Corporation
- *    Author: Liu Jinsong <jinsong.liu@intel.com>
- *    Author: Jiang Yunhong <yunhong.jiang@intel.com>
- *
- * Copyright (C) 2012 Oracle Inc
- *    Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/acpi.h>
-#include <xen/acpi.h>
-
-#ifdef CONFIG_ACPI
-
-/*--------------------------------------------
-       stub driver for Xen memory hotplug
---------------------------------------------*/
-
-static const struct acpi_device_id memory_device_ids[] = {
-       {ACPI_MEMORY_DEVICE_HID, 0},
-       {"", 0},
-};
-
-static struct acpi_driver xen_stub_memory_device_driver = {
-       /* same name as native memory driver to block native loaded */
-       .name = "acpi_memhotplug",
-       .class = ACPI_MEMORY_DEVICE_CLASS,
-       .ids = memory_device_ids,
-};
-
-int xen_stub_memory_device_init(void)
-{
-       if (!xen_initial_domain())
-               return -ENODEV;
-
-       /* just reserve space for Xen, block native driver loaded */
-       return acpi_bus_register_driver(&xen_stub_memory_device_driver);
-}
-EXPORT_SYMBOL_GPL(xen_stub_memory_device_init);
-subsys_initcall(xen_stub_memory_device_init);
-
-void xen_stub_memory_device_exit(void)
-{
-       acpi_bus_unregister_driver(&xen_stub_memory_device_driver);
-}
-EXPORT_SYMBOL_GPL(xen_stub_memory_device_exit);
-
-
-/*--------------------------------------------
-       stub driver for Xen cpu hotplug
---------------------------------------------*/
-
-static const struct acpi_device_id processor_device_ids[] = {
-       {ACPI_PROCESSOR_OBJECT_HID, 0},
-       {ACPI_PROCESSOR_DEVICE_HID, 0},
-       {"", 0},
-};
-
-static struct acpi_driver xen_stub_processor_driver = {
-       /* same name as native processor driver to block native loaded */
-       .name = "processor",
-       .class = ACPI_PROCESSOR_CLASS,
-       .ids = processor_device_ids,
-};
-
-int xen_stub_processor_init(void)
-{
-       if (!xen_initial_domain())
-               return -ENODEV;
-
-       /* just reserve space for Xen, block native driver loaded */
-       return acpi_bus_register_driver(&xen_stub_processor_driver);
-}
-EXPORT_SYMBOL_GPL(xen_stub_processor_init);
-subsys_initcall(xen_stub_processor_init);
-
-void xen_stub_processor_exit(void)
-{
-       acpi_bus_unregister_driver(&xen_stub_processor_driver);
-}
-EXPORT_SYMBOL_GPL(xen_stub_processor_exit);
-
-#endif
index 1f972b75a9ab66eb098df7e8669a572cfea12214..eeb3ebe11d7ae4c19c157239d1cc37fc1af63a60 100644 (file)
 /* Pseudo write pointer value for conventional zone */
 #define WP_CONVENTIONAL ((u64)-2)
 
+/*
+ * Location of the first zone of superblock logging zone pairs.
+ *
+ * - primary superblock:    0B (zone 0)
+ * - first copy:          512G (zone starting at that offset)
+ * - second copy:           4T (zone starting at that offset)
+ */
+#define BTRFS_SB_LOG_PRIMARY_OFFSET    (0ULL)
+#define BTRFS_SB_LOG_FIRST_OFFSET      (512ULL * SZ_1G)
+#define BTRFS_SB_LOG_SECOND_OFFSET     (4096ULL * SZ_1G)
+
+#define BTRFS_SB_LOG_FIRST_SHIFT       const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
+#define BTRFS_SB_LOG_SECOND_SHIFT      const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
+
 /* Number of superblock log zones */
 #define BTRFS_NR_SB_LOG_ZONES 2
 
+/*
+ * Maximum supported zone size. Currently, SMR disks have a zone size of
+ * 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range. We do not
+ * expect the zone size to become larger than 8GiB in the near future.
+ */
+#define BTRFS_MAX_ZONE_SIZE            SZ_8G
+
 static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
 {
        struct blk_zone *zones = data;
@@ -111,23 +132,22 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
 }
 
 /*
- * The following zones are reserved as the circular buffer on ZONED btrfs.
- *  - The primary superblock: zones 0 and 1
- *  - The first copy: zones 16 and 17
- *  - The second copy: zones 1024 or zone at 256GB which is minimum, and
- *                     the following one
+ * Get the first zone number of the superblock mirror
  */
 static inline u32 sb_zone_number(int shift, int mirror)
 {
-       ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
+       u64 zone;
 
+       ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
        switch (mirror) {
-       case 0: return 0;
-       case 1: return 16;
-       case 2: return min_t(u64, btrfs_sb_offset(mirror) >> shift, 1024);
+       case 0: zone = 0; break;
+       case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
+       case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
        }
 
-       return 0;
+       ASSERT(zone <= U32_MAX);
+
+       return (u32)zone;
 }
 
 /*
@@ -300,10 +320,21 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
                zone_sectors = bdev_zone_sectors(bdev);
        }
 
-       nr_sectors = bdev_nr_sectors(bdev);
        /* Check if it's power of 2 (see is_power_of_2) */
        ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
        zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
+
+       /* We reject devices with a zone size larger than 8GB */
+       if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
+               btrfs_err_in_rcu(fs_info,
+               "zoned: %s: zone size %llu larger than supported maximum %llu",
+                                rcu_str_deref(device->name),
+                                zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       nr_sectors = bdev_nr_sectors(bdev);
        zone_info->zone_size_shift = ilog2(zone_info->zone_size);
        zone_info->max_zone_append_size =
                (u64)queue_max_zone_append_sectors(queue) << SECTOR_SHIFT;
index fe03cbdae9592fe015d5c98738587afe1c5d14c1..bf52e9326ebe8ef3c4eeebb9c4141d7661b67ace 100644 (file)
@@ -18,6 +18,7 @@ config CIFS
        select CRYPTO_AES
        select CRYPTO_LIB_DES
        select KEYS
+       select DNS_RESOLVER
        help
          This is the client VFS module for the SMB3 family of NAS protocols,
          (including support for the most recent, most secure dialect SMB3.1.1)
@@ -112,7 +113,6 @@ config CIFS_WEAK_PW_HASH
 config CIFS_UPCALL
        bool "Kerberos/SPNEGO advanced session setup"
        depends on CIFS
-       select DNS_RESOLVER
        help
          Enables an upcall mechanism for CIFS which accesses userspace helper
          utilities to provide SPNEGO packaged (RFC 4178) Kerberos tickets
@@ -179,7 +179,6 @@ config CIFS_DEBUG_DUMP_KEYS
 config CIFS_DFS_UPCALL
        bool "DFS feature support"
        depends on CIFS
-       select DNS_RESOLVER
        help
          Distributed File System (DFS) support is used to access shares
          transparently in an enterprise name space, even if the share
index 5213b20843b500d68c7283015437800c1012823d..3ee3b7de4dedffbd67b6eccdc80d6c65597c1cea 100644 (file)
@@ -10,13 +10,14 @@ cifs-y := trace.o cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o \
          cifs_unicode.o nterr.o cifsencrypt.o \
          readdir.o ioctl.o sess.o export.o smb1ops.o unc.o winucase.o \
          smb2ops.o smb2maperror.o smb2transport.o \
-         smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o
+         smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \
+         dns_resolve.o
 
 cifs-$(CONFIG_CIFS_XATTR) += xattr.o
 
 cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
 
-cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o dfs_cache.o
+cifs-$(CONFIG_CIFS_DFS_UPCALL) += cifs_dfs_ref.o dfs_cache.o
 
 cifs-$(CONFIG_CIFS_SWN_UPCALL) += netlink.o cifs_swn.o
 
index 099ad9f3660bb28db1b6a9aea9538282b41c6455..5ddd20b62484de389a5ee67654b4375559bf874e 100644 (file)
@@ -476,7 +476,8 @@ static int cifs_show_devname(struct seq_file *m, struct dentry *root)
                seq_puts(m, "none");
        else {
                convert_delimiter(devname, '/');
-               seq_puts(m, devname);
+               /* escape all spaces in share names */
+               seq_escape(m, devname, " \t");
                kfree(devname);
        }
        return 0;
index 67c056a9a519ce76033e2e40f14be129ed22d6a5..ec824ab8c5ca38a916484e6ddb1586ba3d673c4b 100644 (file)
@@ -1283,8 +1283,6 @@ struct cifs_aio_ctx {
        bool                    direct_io;
 };
 
-struct cifs_readdata;
-
 /* asynchronous read support */
 struct cifs_readdata {
        struct kref                     refcount;
index eec8a2052da226bdda1cd4e61a68b367d0201d7a..24668eb006c63f9d09a40ad6518213353915f85f 100644 (file)
@@ -87,7 +87,6 @@ static void cifs_prune_tlinks(struct work_struct *work);
  *
  * This should be called with server->srv_mutex held.
  */
-#ifdef CONFIG_CIFS_DFS_UPCALL
 static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
 {
        int rc;
@@ -124,6 +123,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
        return !rc ? -1 : 0;
 }
 
+#ifdef CONFIG_CIFS_DFS_UPCALL
 /* These functions must be called with server->srv_mutex held */
 static void reconn_set_next_dfs_target(struct TCP_Server_Info *server,
                                       struct cifs_sb_info *cifs_sb,
@@ -321,14 +321,29 @@ cifs_reconnect(struct TCP_Server_Info *server)
 #endif
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
+               if (cifs_sb && cifs_sb->origin_fullpath)
                        /*
                         * Set up next DFS target server (if any) for reconnect. If DFS
                         * feature is disabled, then we will retry last server we
                         * connected to before.
                         */
                        reconn_set_next_dfs_target(server, cifs_sb, &tgt_list, &tgt_it);
+               else {
+#endif
+                       /*
+                        * Resolve the hostname again to make sure that IP address is up-to-date.
+                        */
+                       rc = reconn_set_ipaddr_from_hostname(server);
+                       if (rc) {
+                               cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+                                               __func__, rc);
+                       }
+
+#ifdef CONFIG_CIFS_DFS_UPCALL
+               }
 #endif
 
+
 #ifdef CONFIG_CIFS_SWN_UPCALL
                }
 #endif
index 128d63df5bfb62a6331c0687325212cbecaaabb7..ef5ca22bfb3eaa2371ceb3104077217c8e99a94f 100644 (file)
@@ -175,10 +175,10 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
        ret = call_mmap(vma->vm_file, vma);
 
        if (ret) {
-               /* if call_mmap fails, our caller will put coda_file so we
-                * should drop the reference to the host_file that we got.
+               /* if call_mmap fails, our caller will put host_file so we
+                * should drop the reference to the coda_file that we got.
                 */
-               fput(host_file);
+               fput(coda_file);
                kfree(cvm_ops);
        } else {
                /* here we add redirects for the open/close vm_operations */
index a5f5c30368a203d5c1f901ac00cdf1093ba4834f..2d0c8922f63506ce99f4813cc1c0bd314adeba0f 100644 (file)
@@ -14,16 +14,30 @@ config FS_ENCRYPTION
          F2FS and UBIFS make use of this feature.
 
 # Filesystems supporting encryption must select this if FS_ENCRYPTION.  This
-# allows the algorithms to be built as modules when all the filesystems are.
+# allows the algorithms to be built as modules when all the filesystems are,
+# whereas selecting them from FS_ENCRYPTION would force them to be built-in.
+#
+# Note: this option only pulls in the algorithms that filesystem encryption
+# needs "by default".  If userspace will use "non-default" encryption modes such
+# as Adiantum encryption, then those other modes need to be explicitly enabled
+# in the crypto API; see Documentation/filesystems/fscrypt.rst for details.
+#
+# Also note that this option only pulls in the generic implementations of the
+# algorithms, not any per-architecture optimized implementations.  It is
+# strongly recommended to enable optimized implementations too.  It is safe to
+# disable these generic implementations if corresponding optimized
+# implementations will always be available too; for this reason, these are soft
+# dependencies ('imply' rather than 'select').  Only disable these generic
+# implementations if you're sure they will never be needed, though.
 config FS_ENCRYPTION_ALGS
        tristate
-       select CRYPTO_AES
-       select CRYPTO_CBC
-       select CRYPTO_CTS
-       select CRYPTO_ECB
-       select CRYPTO_HMAC
-       select CRYPTO_SHA512
-       select CRYPTO_XTS
+       imply CRYPTO_AES
+       imply CRYPTO_CBC
+       imply CRYPTO_CTS
+       imply CRYPTO_ECB
+       imply CRYPTO_HMAC
+       imply CRYPTO_SHA512
+       imply CRYPTO_XTS
 
 config FS_ENCRYPTION_INLINE_CRYPT
        bool "Enable fscrypt to use inline crypto"
index 686e0ad287880c15406a479a3acc209a8fa1396f..9979d981e9beb8f8ad8b84e6a08feaee43c0e786 100644 (file)
@@ -773,7 +773,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_atomic_t);
 ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
                               size_t count, loff_t *ppos)
 {
-       char buf[3];
+       char buf[2];
        bool val;
        int r;
        struct dentry *dentry = F_DENTRY(file);
@@ -789,7 +789,6 @@ ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
        else
                buf[0] = 'N';
        buf[1] = '\n';
-       buf[2] = 0x00;
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
 }
 EXPORT_SYMBOL_GPL(debugfs_read_file_bool);
index 22e86ae4dd5a829ebbef4fbdf346d18315446601..1d252164d97b6f989f1836dc27e69e3dc698abdf 100644 (file)
@@ -35,7 +35,7 @@
 static struct vfsmount *debugfs_mount;
 static int debugfs_mount_count;
 static bool debugfs_registered;
-static unsigned int debugfs_allow = DEFAULT_DEBUGFS_ALLOW_BITS;
+static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
 
 /*
  * Don't allow access attributes to be changed whilst the kernel is locked down
index b61491bf31665eebdade68c06c02de2c620a70ee..b2e86e739d7a12294141d82c6bb2f36212997967 100644 (file)
@@ -812,6 +812,7 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
                    struct buffer_head *map_bh)
 {
        int ret = 0;
+       int boundary = sdio->boundary;  /* dio_send_cur_page may clear it */
 
        if (dio->op == REQ_OP_WRITE) {
                /*
@@ -850,10 +851,10 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
        sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
 out:
        /*
-        * If sdio->boundary then we want to schedule the IO now to
+        * If boundary then we want to schedule the IO now to
         * avoid metadata seeks.
         */
-       if (sdio->boundary) {
+       if (boundary) {
                ret = dio_send_cur_page(dio, sdio, map_bh);
                if (sdio->bio)
                        dio_bio_submit(dio, sdio);
index f3a4bac2cbe91522f05c4943bb20bb3485cb2306..f633348029a5a235908f970f6ee2253ed670e3ca 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -629,17 +629,30 @@ int close_fd(unsigned fd)
 }
 EXPORT_SYMBOL(close_fd); /* for ksys_close() */
 
+/**
+ * last_fd - return last valid index into fd table
+ * @cur_fds: files struct
+ *
+ * Context: Either rcu read lock or files_lock must be held.
+ *
+ * Returns: Last valid index into fdtable.
+ */
+static inline unsigned last_fd(struct fdtable *fdt)
+{
+       return fdt->max_fds - 1;
+}
+
 static inline void __range_cloexec(struct files_struct *cur_fds,
                                   unsigned int fd, unsigned int max_fd)
 {
        struct fdtable *fdt;
 
-       if (fd > max_fd)
-               return;
-
+       /* make sure we're using the correct maximum value */
        spin_lock(&cur_fds->file_lock);
        fdt = files_fdtable(cur_fds);
-       bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
+       max_fd = min(last_fd(fdt), max_fd);
+       if (fd <= max_fd)
+               bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
        spin_unlock(&cur_fds->file_lock);
 }
 
index 29e407762626497e3e5b64349e554e1b0ec2df46..743a005a5c64cca77d941392402ffb7c71cc8d51 100644 (file)
@@ -144,7 +144,7 @@ static char *follow_link(char *link)
        char *name, *resolved, *end;
        int n;
 
-       name = __getname();
+       name = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!name) {
                n = -ENOMEM;
                goto out_free;
@@ -173,12 +173,11 @@ static char *follow_link(char *link)
                goto out_free;
        }
 
-       __putname(name);
-       kfree(link);
+       kfree(name);
        return resolved;
 
  out_free:
-       __putname(name);
+       kfree(name);
        return ERR_PTR(n);
 }
 
index 433c4d3c3c1c8c76b93ee0f848a08b3debe4d0bd..4eba531bea5a9206ba7fbd3eadbbabe040eebfa5 100644 (file)
@@ -415,6 +415,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 {
        struct io_wqe *wqe = worker->wqe;
        struct io_wq *wq = wqe->wq;
+       bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
 
        do {
                struct io_wq_work *work;
@@ -444,6 +445,9 @@ get_next:
                        unsigned int hash = io_get_work_hash(work);
 
                        next_hashed = wq_next_work(work);
+
+                       if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
+                               work->flags |= IO_WQ_WORK_CANCEL;
                        wq->do_work(work);
                        io_assign_current_work(worker, NULL);
 
index 65a17d560a7327921e77f9a2be69693977d9d275..dff34975d86bc35d023c4f69efbcbab683a5a580 100644 (file)
@@ -2762,6 +2762,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
        struct io_async_rw *io = req->async_data;
+       bool check_reissue = kiocb->ki_complete == io_complete_rw;
 
        /* add previously done IO, if any */
        if (io && io->bytes_done > 0) {
@@ -2777,6 +2778,18 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
                __io_complete_rw(req, ret, 0, issue_flags);
        else
                io_rw_done(kiocb, ret);
+
+       if (check_reissue && req->flags & REQ_F_REISSUE) {
+               req->flags &= ~REQ_F_REISSUE;
+               if (!io_rw_reissue(req)) {
+                       int cflags = 0;
+
+                       req_set_fail_links(req);
+                       if (req->flags & REQ_F_BUFFER_SELECTED)
+                               cflags = io_put_rw_kbuf(req);
+                       __io_req_complete(req, issue_flags, ret, cflags);
+               }
+       }
 }
 
 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
@@ -3294,6 +3307,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
        ret = io_iter_do_read(req, iter);
 
        if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
+               req->flags &= ~REQ_F_REISSUE;
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto done;
@@ -3417,8 +3431,10 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        else
                ret2 = -EINVAL;
 
-       if (req->flags & REQ_F_REISSUE)
+       if (req->flags & REQ_F_REISSUE) {
+               req->flags &= ~REQ_F_REISSUE;
                ret2 = -EAGAIN;
+       }
 
        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
@@ -6173,7 +6189,6 @@ static void io_wq_submit_work(struct io_wq_work *work)
                ret = -ECANCELED;
 
        if (!ret) {
-               req->flags &= ~REQ_F_REISSUE;
                do {
                        ret = io_issue_sqe(req, 0);
                        /*
@@ -6739,6 +6754,9 @@ static int io_sq_thread(void *data)
        current->flags |= PF_NO_SETAFFINITY;
 
        mutex_lock(&sqd->lock);
+       /* a user may had exited before the thread started */
+       io_run_task_work_head(&sqd->park_task_work);
+
        while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) {
                int ret;
                bool cap_entries, sqt_spin, needs_sched;
@@ -6755,10 +6773,10 @@ static int io_sq_thread(void *data)
                        }
                        cond_resched();
                        mutex_lock(&sqd->lock);
-                       if (did_sig)
-                               break;
                        io_run_task_work();
                        io_run_task_work_head(&sqd->park_task_work);
+                       if (did_sig)
+                               break;
                        timeout = jiffies + sqd->sq_thread_idle;
                        continue;
                }
index 216f16e74351f846980323799a9d6626741b74de..48a2f288e8023f13d72770d339f53fcef8ea2f30 100644 (file)
@@ -579,6 +579,8 @@ static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
        p->stack = p->internal;
        p->dfd = dfd;
        p->name = name;
+       p->path.mnt = NULL;
+       p->path.dentry = NULL;
        p->total_link_count = old ? old->total_link_count : 0;
        p->saved = old;
        current->nameidata = p;
@@ -652,6 +654,8 @@ static void terminate_walk(struct nameidata *nd)
                rcu_read_unlock();
        }
        nd->depth = 0;
+       nd->path.mnt = NULL;
+       nd->path.dentry = NULL;
 }
 
 /* path_put is needed afterwards regardless of success or failure */
@@ -2322,8 +2326,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
        }
 
        nd->root.mnt = NULL;
-       nd->path.mnt = NULL;
-       nd->path.dentry = NULL;
 
        /* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */
        if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) {
@@ -2419,16 +2421,16 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
        while (!(err = link_path_walk(s, nd)) &&
               (s = lookup_last(nd)) != NULL)
                ;
+       if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
+               err = handle_lookup_down(nd);
+               nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please...
+       }
        if (!err)
                err = complete_walk(nd);
 
        if (!err && nd->flags & LOOKUP_DIRECTORY)
                if (!d_can_lookup(nd->path.dentry))
                        err = -ENOTDIR;
-       if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
-               err = handle_lookup_down(nd);
-               nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please...
-       }
        if (!err) {
                *path = nd->path;
                nd->path.mnt = NULL;
index 3bfb4147895a093927165510aac6ae07f83c6f81..ad20403b383fad11ac6d0f05962be179a5372dd8 100644 (file)
@@ -2295,7 +2295,7 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
        struct ocfs2_alloc_context *meta_ac = NULL;
        handle_t *handle = NULL;
        loff_t end = offset + bytes;
-       int ret = 0, credits = 0, locked = 0;
+       int ret = 0, credits = 0;
 
        ocfs2_init_dealloc_ctxt(&dealloc);
 
@@ -2306,13 +2306,6 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
            !dwc->dw_orphaned)
                goto out;
 
-       /* ocfs2_file_write_iter will get i_mutex, so we need not lock if we
-        * are in that context. */
-       if (dwc->dw_writer_pid != task_pid_nr(current)) {
-               inode_lock(inode);
-               locked = 1;
-       }
-
        ret = ocfs2_inode_lock(inode, &di_bh, 1);
        if (ret < 0) {
                mlog_errno(ret);
@@ -2393,8 +2386,6 @@ out:
        if (meta_ac)
                ocfs2_free_alloc_context(meta_ac);
        ocfs2_run_deallocs(osb, &dealloc);
-       if (locked)
-               inode_unlock(inode);
        ocfs2_dio_free_write_ctx(inode, dwc);
 
        return ret;
index 6611c64ca0bef15d223462cba9d1dd9b7c7c5e83..5edc1d0cf115fb6750dd230c4e6351d36f2ee4cf 100644 (file)
@@ -1245,22 +1245,24 @@ int ocfs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                                goto bail_unlock;
                        }
                }
+               down_write(&OCFS2_I(inode)->ip_alloc_sem);
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
                                           2 * ocfs2_quota_trans_credits(sb));
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
-                       goto bail_unlock;
+                       goto bail_unlock_alloc;
                }
                status = __dquot_transfer(inode, transfer_to);
                if (status < 0)
                        goto bail_commit;
        } else {
+               down_write(&OCFS2_I(inode)->ip_alloc_sem);
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
-                       goto bail_unlock;
+                       goto bail_unlock_alloc;
                }
        }
 
@@ -1273,6 +1275,8 @@ int ocfs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 
 bail_commit:
        ocfs2_commit_trans(osb, handle);
+bail_unlock_alloc:
+       up_write(&OCFS2_I(inode)->ip_alloc_sem);
 bail_unlock:
        if (status && inode_locked) {
                ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
index dbfb35fb0ff7921aa4cad53104b583b1cc69dbc9..3847cdc069b557f1a0bf4336fc247ff7a2174a28 100644 (file)
@@ -430,20 +430,11 @@ static int ovl_mmap(struct file *file, struct vm_area_struct *vma)
        if (WARN_ON(file != vma->vm_file))
                return -EIO;
 
-       vma->vm_file = get_file(realfile);
+       vma_set_file(vma, realfile);
 
        old_cred = ovl_override_creds(file_inode(file)->i_sb);
        ret = call_mmap(vma->vm_file, vma);
        revert_creds(old_cred);
-
-       if (ret) {
-               /* Drop reference count from new vm_file value */
-               fput(realfile);
-       } else {
-               /* Drop reference count from previous vm_file value */
-               fput(file);
-       }
-
        ovl_file_accessed(file);
 
        return ret;
index 19434b3c982cd3544c4cf6dfd8dc23a05960701a..09e8ed7d416141406ed5b2eacef0a9de30120bd3 100644 (file)
@@ -150,6 +150,9 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
 
        if (buf->result)
                return -EINVAL;
+       buf->result = verify_dirent_name(name, namlen);
+       if (buf->result < 0)
+               return buf->result;
        d_ino = ino;
        if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
                buf->result = -EOVERFLOW;
@@ -405,6 +408,9 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
 
        if (buf->result)
                return -EINVAL;
+       buf->result = verify_dirent_name(name, namlen);
+       if (buf->result < 0)
+               return buf->result;
        d_ino = ino;
        if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
                buf->result = -EOVERFLOW;
index 88fb25119899d3fac6dc40fa1f64b1c9178f98b9..24d1b54de8079f8df6ff26924468685449b033e7 100644 (file)
@@ -3,9 +3,13 @@
 config FS_VERITY
        bool "FS Verity (read-only file-based authenticity protection)"
        select CRYPTO
-       # SHA-256 is selected as it's intended to be the default hash algorithm.
+       # SHA-256 is implied as it's intended to be the default hash algorithm.
        # To avoid bloat, other wanted algorithms must be selected explicitly.
-       select CRYPTO_SHA256
+       # Note that CRYPTO_SHA256 denotes the generic C implementation, but
+       # some architectures provided optimized implementations of the same
+       # algorithm that may be used instead. In this case, CRYPTO_SHA256 may
+       # be omitted even if SHA-256 is being used.
+       imply CRYPTO_SHA256
        help
          This option enables fs-verity.  fs-verity is the dm-verity
          mechanism implemented at the file level.  On supported
index 83448e837dedb82fac90f8d52d26620605d01ff1..515c3fb06ab3fc3dba39a3d6ba0de030359db795 100644 (file)
@@ -89,9 +89,9 @@
 #define HV_ACCESS_STATS                                BIT(8)
 #define HV_DEBUGGING                           BIT(11)
 #define HV_CPU_MANAGEMENT                      BIT(12)
+#define HV_ENABLE_EXTENDED_HYPERCALLS          BIT(20)
 #define HV_ISOLATION                           BIT(22)
 
-
 /*
  * TSC page layout.
  */
@@ -159,11 +159,18 @@ struct ms_hyperv_tsc_page {
 #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
 #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
 
+/* Extended hypercalls */
+#define HV_EXT_CALL_QUERY_CAPABILITIES         0x8001
+#define HV_EXT_CALL_MEMORY_HEAT_HINT           0x8003
+
 #define HV_FLUSH_ALL_PROCESSORS                        BIT(0)
 #define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES    BIT(1)
 #define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY      BIT(2)
 #define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT     BIT(3)
 
+/* Extended capability bits */
+#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8)
+
 enum HV_GENERIC_SET_FORMAT {
        HV_GENERIC_SET_SPARSE_4K,
        HV_GENERIC_SET_ALL,
@@ -220,6 +227,41 @@ enum HV_GENERIC_SET_FORMAT {
 #define HV_MESSAGE_PAYLOAD_BYTE_COUNT  (240)
 #define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
 
+/*
+ * Define hypervisor message types. Some of the message types
+ * are x86/x64 specific, but there's no good way to separate
+ * them out into the arch-specific version of hyperv-tlfs.h
+ * because C doesn't provide a way to extend enum types.
+ * Keeping them all in the arch neutral hyperv-tlfs.h seems
+ * the least messy compromise.
+ */
+enum hv_message_type {
+       HVMSG_NONE                      = 0x00000000,
+
+       /* Memory access messages. */
+       HVMSG_UNMAPPED_GPA              = 0x80000000,
+       HVMSG_GPA_INTERCEPT             = 0x80000001,
+
+       /* Timer notification messages. */
+       HVMSG_TIMER_EXPIRED             = 0x80000010,
+
+       /* Error messages. */
+       HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
+       HVMSG_UNRECOVERABLE_EXCEPTION   = 0x80000021,
+       HVMSG_UNSUPPORTED_FEATURE       = 0x80000022,
+
+       /* Trace buffer complete messages. */
+       HVMSG_EVENTLOG_BUFFERCOMPLETE   = 0x80000040,
+
+       /* Platform-specific processor intercept messages. */
+       HVMSG_X64_IOPORT_INTERCEPT      = 0x80010000,
+       HVMSG_X64_MSR_INTERCEPT         = 0x80010001,
+       HVMSG_X64_CPUID_INTERCEPT       = 0x80010002,
+       HVMSG_X64_EXCEPTION_INTERCEPT   = 0x80010003,
+       HVMSG_X64_APIC_EOI              = 0x80010004,
+       HVMSG_X64_LEGACY_FP_ERROR       = 0x80010005
+};
+
 /* Define synthetic interrupt controller message flags. */
 union hv_message_flags {
        __u8 asu8;
@@ -373,8 +415,10 @@ struct hv_guest_mapping_flush {
  *  by the bitwidth of "additional_pages" in union hv_gpa_page_range.
  */
 #define HV_MAX_FLUSH_PAGES (2048)
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB                0
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB                1
 
-/* HvFlushGuestPhysicalAddressList hypercall */
+/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */
 union hv_gpa_page_range {
        u64 address_space;
        struct {
@@ -382,6 +426,12 @@ union hv_gpa_page_range {
                u64 largepage:1;
                u64 basepfn:52;
        } page;
+       struct {
+               u64 reserved:12;
+               u64 page_size:1;
+               u64 reserved1:8;
+               u64 base_large_pfn:43;
+       };
 };
 
 /*
@@ -739,4 +789,20 @@ struct hv_input_unmap_device_interrupt {
 #define HV_SOURCE_SHADOW_NONE               0x0
 #define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE   0x1
 
+/*
+ * The whole argument should fit in a page to be able to pass to the hypervisor
+ * in one hypercall.
+ */
+#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES  \
+       ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \
+               sizeof(union hv_gpa_page_range))
+
+/* HvExtCallMemoryHeatHint hypercall */
+#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD      2
+struct hv_memory_hint {
+       u64 type:2;
+       u64 reserved:62;
+       union hv_gpa_page_range ranges[];
+} __packed;
+
 #endif
index dff58a3db5d5c9f3e5a6b20337ea826c3759f7aa..9a000ba2bb754bbcd4b61381b951c1bb8e17bacf 100644 (file)
@@ -27,7 +27,7 @@
 
 struct ms_hyperv_info {
        u32 features;
-       u32 features_b;
+       u32 priv_high;
        u32 misc_features;
        u32 hints;
        u32 nested_features;
@@ -41,6 +41,53 @@ extern struct ms_hyperv_info ms_hyperv;
 extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
 extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
 
+/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
+static inline int hv_result(u64 status)
+{
+       return status & HV_HYPERCALL_RESULT_MASK;
+}
+
+static inline bool hv_result_success(u64 status)
+{
+       return hv_result(status) == HV_STATUS_SUCCESS;
+}
+
+static inline unsigned int hv_repcomp(u64 status)
+{
+       /* Bits [43:32] of status have 'Reps completed' data. */
+       return (status & HV_HYPERCALL_REP_COMP_MASK) >>
+                        HV_HYPERCALL_REP_COMP_OFFSET;
+}
+
+/*
+ * Rep hypercalls. Callers of these functions are supposed to ensure that
+ * rep_count and varhead_size comply with Hyper-V hypercall definition.
+ */
+static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
+                                     void *input, void *output)
+{
+       u64 control = code;
+       u64 status;
+       u16 rep_comp;
+
+       control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
+       control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
+
+       do {
+               status = hv_do_hypercall(control, input, output);
+               if (!hv_result_success(status))
+                       return status;
+
+               rep_comp = hv_repcomp(status);
+
+               control &= ~HV_HYPERCALL_REP_START_MASK;
+               control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;
+
+               touch_nmi_watchdog();
+       } while (rep_comp < rep_count);
+
+       return status;
+}
 
 /* Generate the guest OS identifier as described in the Hyper-V TLFS */
 static inline  __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
@@ -56,7 +103,6 @@ static inline  __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
        return guest_id;
 }
 
-
 /* Free the message slot and signal end-of-message if required */
 static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
 {
@@ -88,14 +134,14 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
                 * possibly deliver another msg from the
                 * hypervisor
                 */
-               hv_signal_eom();
+               hv_set_register(HV_REGISTER_EOM, 0);
        }
 }
 
-int hv_setup_vmbus_irq(int irq, void (*handler)(void));
-void hv_remove_vmbus_irq(void);
-void hv_enable_vmbus_irq(void);
-void hv_disable_vmbus_irq(void);
+void hv_setup_vmbus_handler(void (*handler)(void));
+void hv_remove_vmbus_handler(void);
+void hv_setup_stimer0_handler(void (*handler)(void));
+void hv_remove_stimer0_handler(void);
 
 void hv_setup_kexec_handler(void (*handler)(void));
 void hv_remove_kexec_handler(void);
@@ -103,6 +149,7 @@ void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
 void hv_remove_crash_handler(void);
 
 extern int vmbus_interrupt;
+extern int vmbus_irq;
 
 #if IS_ENABLED(CONFIG_HYPERV)
 /*
@@ -117,6 +164,10 @@ extern u32 hv_max_vp_index;
 /* Sentinel value for an uninitialized entry in hv_vp_index array */
 #define VP_INVAL       U32_MAX
 
+void *hv_alloc_hyperv_page(void);
+void *hv_alloc_hyperv_zeroed_page(void);
+void hv_free_hyperv_page(unsigned long addr);
+
 /**
  * hv_cpu_number_to_vp_number() - Map CPU to VP.
  * @cpu_number: CPU number in Linux terms
@@ -169,21 +220,16 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
 }
 
 void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
-void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
 bool hv_is_hyperv_initialized(void);
 bool hv_is_hibernation_supported(void);
 enum hv_isolation_type hv_get_isolation_type(void);
 bool hv_is_isolation_supported(void);
 void hyperv_cleanup(void);
+bool hv_query_ext_cap(u64 cap_query);
 #else /* CONFIG_HYPERV */
 static inline bool hv_is_hyperv_initialized(void) { return false; }
 static inline bool hv_is_hibernation_supported(void) { return false; }
 static inline void hyperv_cleanup(void) {}
 #endif /* CONFIG_HYPERV */
 
-#if IS_ENABLED(CONFIG_HYPERV)
-extern int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
-extern void hv_remove_stimer0_irq(int irq);
-#endif
-
 #endif
index 34eef083c9882fafbd3bf5cc25a0dd5fdb38b06d..b6774aa5a4b865fa4b0b32c20d7315bf3cf96ea4 100644 (file)
@@ -21,8 +21,7 @@
 #define HV_MIN_DELTA_TICKS 1
 
 /* Routines called by the VMbus driver */
-extern int hv_stimer_alloc(void);
-extern void hv_stimer_free(void);
+extern int hv_stimer_alloc(bool have_percpu_irqs);
 extern int hv_stimer_cleanup(unsigned int cpu);
 extern void hv_stimer_legacy_init(unsigned int cpu, int sint);
 extern void hv_stimer_legacy_cleanup(unsigned int cpu);
index fcde59c65a81b2a9ff6204ae79416a520ee1f057..cb3d6b1c655decb8bf092c7a97b73897fe1a79b8 100644 (file)
@@ -165,6 +165,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
  * crypto_free_acomp() -- free ACOMPRESS tfm handle
  *
  * @tfm:       ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
  */
 static inline void crypto_free_acomp(struct crypto_acomp *tfm)
 {
index fcc12c593ef8b4a59ca9e20e6e88588e84a62bc2..e728469c4cccb3aa56835b042ee83373377acf24 100644 (file)
@@ -185,6 +185,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
 /**
  * crypto_free_aead() - zeroize and free aead handle
  * @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
  */
 static inline void crypto_free_aead(struct crypto_aead *tfm)
 {
index 1d3aa252cabafba10df19f4128d3718c9a106cc2..5764b46bd1ec18165b70e7b88003145e34da0dac 100644 (file)
@@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
  * crypto_free_akcipher() - free AKCIPHER tfm handle
  *
  * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
  */
 static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
 {
index 3a1c72fdb7cf5b468c881a1c2149c0afcdf282e2..dabaee6987186b9890deb81c721cecbc171708a3 100644 (file)
@@ -47,13 +47,18 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
                hchacha_block_generic(state, out, nrounds);
 }
 
-void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
-static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
+static inline void chacha_init_consts(u32 *state)
 {
        state[0]  = 0x61707865; /* "expa" */
        state[1]  = 0x3320646e; /* "nd 3" */
        state[2]  = 0x79622d32; /* "2-by" */
        state[3]  = 0x6b206574; /* "te k" */
+}
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
+static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
+{
+       chacha_init_consts(state);
        state[4]  = key[0];
        state[5]  = key[1];
        state[6]  = key[2];
diff --git a/include/crypto/ecc_curve.h b/include/crypto/ecc_curve.h
new file mode 100644 (file)
index 0000000..7096478
--- /dev/null
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 HiSilicon */
+
+#ifndef _CRYTO_ECC_CURVE_H
+#define _CRYTO_ECC_CURVE_H
+
+#include <linux/types.h>
+
+/**
+ * struct ecc_point - elliptic curve point in affine coordinates
+ *
+ * @x:         X coordinate in vli form.
+ * @y:         Y coordinate in vli form.
+ * @ndigits:   Length of vlis in u64 qwords.
+ */
+struct ecc_point {
+       u64 *x;
+       u64 *y;
+       u8 ndigits;
+};
+
+/**
+ * struct ecc_curve - definition of elliptic curve
+ *
+ * @name:      Short name of the curve.
+ * @g:         Generator point of the curve.
+ * @p:         Prime number, if Barrett's reduction is used for this curve
+ *             pre-calculated value 'mu' is appended to the @p after ndigits.
+ *             Use of Barrett's reduction is heuristically determined in
+ *             vli_mmod_fast().
+ * @n:         Order of the curve group.
+ * @a:         Curve parameter a.
+ * @b:         Curve parameter b.
+ */
+struct ecc_curve {
+       char *name;
+       struct ecc_point g;
+       u64 *p;
+       u64 *n;
+       u64 *a;
+       u64 *b;
+};
+
+/**
+ * ecc_get_curve() - get elliptic curve;
+ * @curve_id:           Curves IDs:
+ *                      defined in 'include/crypto/ecdh.h';
+ *
+ * Returns the curve on success, NULL otherwise
+ */
+const struct ecc_curve *ecc_get_curve(unsigned int curve_id);
+
+/**
+ * ecc_get_curve25519() - get curve25519 curve;
+ *
+ * Returns curve25519
+ */
+const struct ecc_curve *ecc_get_curve25519(void);
+
+#endif
index a5b805b5526d2c6a5304b0ddb10c4fba1fe22a3b..a9f98078d29cf61a081f5a6c05ab2447b095b83b 100644 (file)
 /* Curves IDs */
 #define ECC_CURVE_NIST_P192    0x0001
 #define ECC_CURVE_NIST_P256    0x0002
+#define ECC_CURVE_NIST_P384    0x0003
 
 /**
  * struct ecdh - define an ECDH private key
  *
- * @curve_id:  ECC curve the key is based on.
  * @key:       Private ECDH key
  * @key_size:  Size of the private ECDH key
  */
 struct ecdh {
-       unsigned short curve_id;
        char *key;
        unsigned short key_size;
 };
index 13f8a6a54ca878f8761d7d95376be283de5698f3..b2bc1e46e86a766cd19de68150c6870a40dcb0de 100644 (file)
@@ -281,6 +281,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
 /**
  * crypto_free_ahash() - zeroize and free the ahash handle
  * @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
  */
 static inline void crypto_free_ahash(struct crypto_ahash *tfm)
 {
@@ -724,6 +726,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
 /**
  * crypto_free_shash() - zeroize and free the message digest handle
  * @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
  */
 static inline void crypto_free_shash(struct crypto_shash *tfm)
 {
index 064e52ca5248019fd1a2b02fd3ab30fcf4f7010f..196aa769f296867df68b245122e86a9364bed73b 100644 (file)
@@ -18,7 +18,8 @@
  * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
  */
 
-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
+void poly1305_core_setkey(struct poly1305_core_key *key,
+                         const u8 raw_key[POLY1305_BLOCK_SIZE]);
 static inline void poly1305_core_init(struct poly1305_state *state)
 {
        *state = (struct poly1305_state){};
index 88b591215d5c8f5872374391c7aa34c2838da3db..cccceadc164b9e6041aa8afa07adf13dbe0dbbc0 100644 (file)
@@ -154,6 +154,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
  * crypto_free_kpp() - free KPP tfm handle
  *
  * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
  */
 static inline void crypto_free_kpp(struct crypto_kpp *tfm)
 {
index f1f67fc749cf4edc3e7a5a0c17a2e6bd1cb19ee4..090692ec3bc7342da3bed3a812a4003c5f395ceb 100644 (file)
@@ -58,8 +58,10 @@ struct poly1305_desc_ctx {
        };
 };
 
-void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
-void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key);
+void poly1305_init_arch(struct poly1305_desc_ctx *desc,
+                       const u8 key[POLY1305_KEY_SIZE]);
+void poly1305_init_generic(struct poly1305_desc_ctx *desc,
+                          const u8 key[POLY1305_KEY_SIZE]);
 
 static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
 {
index 8b4b844b4eef8841b3790c6c2c0e08512e94fdb8..17bb3673d3c1700359515c3d79f7d0da531908f8 100644 (file)
@@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
 /**
  * crypto_free_rng() - zeroize and free RNG handle
  * @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
  */
 static inline void crypto_free_rng(struct crypto_rng *tfm)
 {
index 6a733b171a5d0ae5ca8edd8abfc74addadedc58f..ef0fc9ed4342e6dca5f48efb2e705877c576d7f1 100644 (file)
@@ -196,6 +196,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm(
 /**
  * crypto_free_skcipher() - zeroize and free cipher handle
  * @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
  */
 static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
 {
index dc9345440ebefa359e1bcabe484fcff88b36e479..10528de7b3ef20b2457ad72df5f89b185069dbea 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Constant for device tree bindings for Turris Mox module configuration bus
  *
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
  */
 
 #ifndef _DT_BINDINGS_BUS_MOXTET_H
diff --git a/include/dt-bindings/interconnect/qcom,sdm660.h b/include/dt-bindings/interconnect/qcom,sdm660.h
new file mode 100644 (file)
index 0000000..62e8d86
--- /dev/null
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* SDM660 interconnect IDs */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDM660_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM660_H
+
+/* A2NOC */
+#define MASTER_IPA                     0
+#define MASTER_CNOC_A2NOC              1
+#define MASTER_SDCC_1                  2
+#define MASTER_SDCC_2                  3
+#define MASTER_BLSP_1                  4
+#define MASTER_BLSP_2                  5
+#define MASTER_UFS                     6
+#define MASTER_USB_HS                  7
+#define MASTER_USB3                    8
+#define MASTER_CRYPTO_C0               9
+#define SLAVE_A2NOC_SNOC               10
+
+/* BIMC */
+#define MASTER_GNOC_BIMC               0
+#define MASTER_OXILI                   1
+#define MASTER_MNOC_BIMC               2
+#define MASTER_SNOC_BIMC               3
+#define MASTER_PIMEM                   4
+#define SLAVE_EBI                      5
+#define SLAVE_HMSS_L3                  6
+#define SLAVE_BIMC_SNOC                        7
+
+/* CNOC */
+#define MASTER_SNOC_CNOC               0
+#define MASTER_QDSS_DAP                        1
+#define SLAVE_CNOC_A2NOC               2
+#define SLAVE_MPM                      3
+#define SLAVE_PMIC_ARB                 4
+#define SLAVE_TLMM_NORTH               5
+#define SLAVE_TCSR                     6
+#define SLAVE_PIMEM_CFG                        7
+#define SLAVE_IMEM_CFG                 8
+#define SLAVE_MESSAGE_RAM              9
+#define SLAVE_GLM                      10
+#define SLAVE_BIMC_CFG                 11
+#define SLAVE_PRNG                     12
+#define SLAVE_SPDM                     13
+#define SLAVE_QDSS_CFG                 14
+#define SLAVE_CNOC_MNOC_CFG            15
+#define SLAVE_SNOC_CFG                 16
+#define SLAVE_QM_CFG                   17
+#define SLAVE_CLK_CTL                  18
+#define SLAVE_MSS_CFG                  19
+#define SLAVE_TLMM_SOUTH               20
+#define SLAVE_UFS_CFG                  21
+#define SLAVE_A2NOC_CFG                        22
+#define SLAVE_A2NOC_SMMU_CFG           23
+#define SLAVE_GPUSS_CFG                        24
+#define SLAVE_AHB2PHY                  25
+#define SLAVE_BLSP_1                   26
+#define SLAVE_SDCC_1                   27
+#define SLAVE_SDCC_2                   28
+#define SLAVE_TLMM_CENTER              29
+#define SLAVE_BLSP_2                   30
+#define SLAVE_PDM                      31
+#define SLAVE_CNOC_MNOC_MMSS_CFG       32
+#define SLAVE_USB_HS                   33
+#define SLAVE_USB3_0                   34
+#define SLAVE_SRVC_CNOC                        35
+
+/* GNOC */
+#define MASTER_APSS_PROC               0
+#define SLAVE_GNOC_BIMC                        1
+#define SLAVE_GNOC_SNOC                        2
+
+/* MNOC */
+#define MASTER_CPP                     0
+#define MASTER_JPEG                    1
+#define MASTER_MDP_P0                  2
+#define MASTER_MDP_P1                  3
+#define MASTER_VENUS                   4
+#define MASTER_VFE                     5
+#define SLAVE_MNOC_BIMC                        6
+#define MASTER_CNOC_MNOC_MMSS_CFG      7
+#define MASTER_CNOC_MNOC_CFG           8
+#define SLAVE_CAMERA_CFG               9
+#define SLAVE_CAMERA_THROTTLE_CFG      10
+#define SLAVE_MISC_CFG                 11
+#define SLAVE_VENUS_THROTTLE_CFG       12
+#define SLAVE_VENUS_CFG                        13
+#define SLAVE_MMSS_CLK_XPU_CFG         14
+#define SLAVE_MMSS_CLK_CFG             15
+#define SLAVE_MNOC_MPU_CFG             16
+#define SLAVE_DISPLAY_CFG              17
+#define SLAVE_CSI_PHY_CFG              18
+#define SLAVE_DISPLAY_THROTTLE_CFG     19
+#define SLAVE_SMMU_CFG                 20
+#define SLAVE_SRVC_MNOC                        21
+
+/* SNOC */
+#define MASTER_QDSS_ETR                        0
+#define MASTER_QDSS_BAM                        1
+#define MASTER_SNOC_CFG                        2
+#define MASTER_BIMC_SNOC               3
+#define MASTER_A2NOC_SNOC              4
+#define MASTER_GNOC_SNOC               5
+#define SLAVE_HMSS                     6
+#define SLAVE_LPASS                    7
+#define SLAVE_WLAN                     8
+#define SLAVE_CDSP                     9
+#define SLAVE_IPA                      10
+#define SLAVE_SNOC_BIMC                        11
+#define SLAVE_SNOC_CNOC                        12
+#define SLAVE_IMEM                     13
+#define SLAVE_PIMEM                    14
+#define SLAVE_QDSS_STM                 15
+#define SLAVE_SRVC_SNOC                        16
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8350.h b/include/dt-bindings/interconnect/qcom,sm8350.h
new file mode 100644 (file)
index 0000000..c7f7ed3
--- /dev/null
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm SM8350 interconnect IDs
+ *
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8350_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8350_H
+
+#define MASTER_QSPI_0                  0
+#define MASTER_QUP_1                   1
+#define MASTER_A1NOC_CFG               2
+#define MASTER_SDCC_4                  3
+#define MASTER_UFS_MEM                 4
+#define MASTER_USB3_0                  5
+#define MASTER_USB3_1                  6
+#define SLAVE_A1NOC_SNOC               7
+#define SLAVE_SERVICE_A1NOC            8
+
+#define MASTER_QDSS_BAM                        0
+#define MASTER_QUP_0                   1
+#define MASTER_QUP_2                   2
+#define MASTER_A2NOC_CFG               3
+#define MASTER_CRYPTO                  4
+#define MASTER_IPA                     5
+#define MASTER_PCIE_0                  6
+#define MASTER_PCIE_1                  7
+#define MASTER_QDSS_ETR                        8
+#define MASTER_SDCC_2                  9
+#define MASTER_UFS_CARD                        10
+#define SLAVE_A2NOC_SNOC               11
+#define SLAVE_ANOC_PCIE_GEM_NOC                12
+#define SLAVE_SERVICE_A2NOC            13
+
+#define MASTER_GEM_NOC_CNOC            0
+#define MASTER_GEM_NOC_PCIE_SNOC       1
+#define MASTER_QDSS_DAP                        2
+#define SLAVE_AHB2PHY_SOUTH            3
+#define SLAVE_AHB2PHY_NORTH            4
+#define SLAVE_AOSS                     5
+#define SLAVE_APPSS                    6
+#define SLAVE_CAMERA_CFG               7
+#define SLAVE_CLK_CTL                  8
+#define SLAVE_CDSP_CFG                 9
+#define SLAVE_RBCPR_CX_CFG             10
+#define SLAVE_RBCPR_MMCX_CFG           11
+#define SLAVE_RBCPR_MX_CFG             12
+#define SLAVE_CRYPTO_0_CFG             13
+#define SLAVE_CX_RDPM                  14
+#define SLAVE_DCC_CFG                  15
+#define SLAVE_DISPLAY_CFG              16
+#define SLAVE_GFX3D_CFG                        17
+#define SLAVE_HWKM                     18
+#define SLAVE_IMEM_CFG                 19
+#define SLAVE_IPA_CFG                  20
+#define SLAVE_IPC_ROUTER_CFG           21
+#define SLAVE_LPASS                    22
+#define SLAVE_CNOC_MSS                 23
+#define SLAVE_MX_RDPM                  24
+#define SLAVE_PCIE_0_CFG               25
+#define SLAVE_PCIE_1_CFG               26
+#define SLAVE_PDM                      27
+#define SLAVE_PIMEM_CFG                        28
+#define SLAVE_PKA_WRAPPER_CFG          29
+#define SLAVE_PMU_WRAPPER_CFG          30
+#define SLAVE_QDSS_CFG                 31
+#define SLAVE_QSPI_0                   32
+#define SLAVE_QUP_0                    33
+#define SLAVE_QUP_1                    34
+#define SLAVE_QUP_2                    35
+#define SLAVE_SDCC_2                   36
+#define SLAVE_SDCC_4                   37
+#define SLAVE_SECURITY                 38
+#define SLAVE_SPSS_CFG                 39
+#define SLAVE_TCSR                     40
+#define SLAVE_TLMM                     41
+#define SLAVE_UFS_CARD_CFG             42
+#define SLAVE_UFS_MEM_CFG              43
+#define SLAVE_USB3_0                   44
+#define SLAVE_USB3_1                   45
+#define SLAVE_VENUS_CFG                        46
+#define SLAVE_VSENSE_CTRL_CFG          47
+#define SLAVE_A1NOC_CFG                        48
+#define SLAVE_A2NOC_CFG                        49
+#define SLAVE_DDRSS_CFG                        50
+#define SLAVE_CNOC_MNOC_CFG            51
+#define SLAVE_SNOC_CFG                 52
+#define SLAVE_BOOT_IMEM                        53
+#define SLAVE_IMEM                     54
+#define SLAVE_PIMEM                    55
+#define SLAVE_SERVICE_CNOC             56
+#define SLAVE_PCIE_0                   57
+#define SLAVE_PCIE_1                   58
+#define SLAVE_QDSS_STM                 59
+#define SLAVE_TCU                      60
+
+#define MASTER_CNOC_DC_NOC             0
+#define SLAVE_LLCC_CFG                 1
+#define SLAVE_GEM_NOC_CFG              2
+
+#define MASTER_GPU_TCU                 0
+#define MASTER_SYS_TCU                 1
+#define MASTER_APPSS_PROC              2
+#define MASTER_COMPUTE_NOC             3
+#define MASTER_GEM_NOC_CFG             4
+#define MASTER_GFX3D                   5
+#define MASTER_MNOC_HF_MEM_NOC         6
+#define MASTER_MNOC_SF_MEM_NOC         7
+#define MASTER_ANOC_PCIE_GEM_NOC       8
+#define MASTER_SNOC_GC_MEM_NOC         9
+#define MASTER_SNOC_SF_MEM_NOC         10
+#define SLAVE_MSS_PROC_MS_MPU_CFG      11
+#define SLAVE_MCDMA_MS_MPU_CFG         12
+#define SLAVE_GEM_NOC_CNOC             13
+#define SLAVE_LLCC                     14
+#define SLAVE_MEM_NOC_PCIE_SNOC                15
+#define SLAVE_SERVICE_GEM_NOC_1                16
+#define SLAVE_SERVICE_GEM_NOC_2                17
+#define SLAVE_SERVICE_GEM_NOC          18
+#define MASTER_MNOC_HF_MEM_NOC_DISP    19
+#define MASTER_MNOC_SF_MEM_NOC_DISP    20
+#define SLAVE_LLCC_DISP                        21
+
+#define MASTER_CNOC_LPASS_AG_NOC       0
+#define SLAVE_LPASS_CORE_CFG           1
+#define SLAVE_LPASS_LPI_CFG            2
+#define SLAVE_LPASS_MPU_CFG            3
+#define SLAVE_LPASS_TOP_CFG            4
+#define SLAVE_SERVICES_LPASS_AML_NOC   5
+#define SLAVE_SERVICE_LPASS_AG_NOC     6
+
+#define MASTER_LLCC                    0
+#define SLAVE_EBI1                     1
+#define MASTER_LLCC_DISP               2
+#define SLAVE_EBI1_DISP                        3
+
+#define MASTER_CAMNOC_HF               0
+#define MASTER_CAMNOC_ICP              1
+#define MASTER_CAMNOC_SF               2
+#define MASTER_CNOC_MNOC_CFG           3
+#define MASTER_VIDEO_P0                        4
+#define MASTER_VIDEO_P1                        5
+#define MASTER_VIDEO_PROC              6
+#define MASTER_MDP0                    7
+#define MASTER_MDP1                    8
+#define MASTER_ROTATOR                 9
+#define SLAVE_MNOC_HF_MEM_NOC          10
+#define SLAVE_MNOC_SF_MEM_NOC          11
+#define SLAVE_SERVICE_MNOC             12
+#define MASTER_MDP0_DISP               13
+#define MASTER_MDP1_DISP               14
+#define MASTER_ROTATOR_DISP            15
+#define SLAVE_MNOC_HF_MEM_NOC_DISP     16
+#define SLAVE_MNOC_SF_MEM_NOC_DISP     17
+
+#define MASTER_CDSP_NOC_CFG            0
+#define MASTER_CDSP_PROC               1
+#define SLAVE_CDSP_MEM_NOC             2
+#define SLAVE_SERVICE_NSP_NOC          3
+
+#define MASTER_A1NOC_SNOC              0
+#define MASTER_A2NOC_SNOC              1
+#define MASTER_SNOC_CFG                        2
+#define MASTER_PIMEM                   3
+#define MASTER_GIC                     4
+#define SLAVE_SNOC_GEM_NOC_GC          5
+#define SLAVE_SNOC_GEM_NOC_SF          6
+#define SLAVE_SERVICE_SNOC             7
+
+#endif
index 9047ec6bd3cf82dceffd67e3d2d2bfc318e6a55b..d417b9268b162dfc2dfccbc58ca6eb70f7897821 100644 (file)
@@ -90,4 +90,9 @@
 #define J7200_SERDES0_LANE3_USB                        0x2
 #define J7200_SERDES0_LANE3_IP4_UNUSED         0x3
 
+/* AM64 */
+
+#define AM64_SERDES0_LANE0_PCIE0               0x0
+#define AM64_SERDES0_LANE0_USB                 0x1
+
 #endif /* _DT_BINDINGS_MUX_TI_SERDES */
diff --git a/include/dt-bindings/phy/phy-cadence-torrent.h b/include/dt-bindings/phy/phy-cadence-torrent.h
deleted file mode 100644 (file)
index e387b6a..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides constants for Cadence Torrent SERDES.
- */
-
-#ifndef _DT_BINDINGS_TORRENT_SERDES_H
-#define _DT_BINDINGS_TORRENT_SERDES_H
-
-#define TORRENT_SERDES_NO_SSC          0
-#define TORRENT_SERDES_EXTERNAL_SSC    1
-#define TORRENT_SERDES_INTERNAL_SSC    2
-
-#endif /* _DT_BINDINGS_TORRENT_SERDES_H */
diff --git a/include/dt-bindings/phy/phy-cadence.h b/include/dt-bindings/phy/phy-cadence.h
new file mode 100644 (file)
index 0000000..4652bcb
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for Cadence SERDES.
+ */
+
+#ifndef _DT_BINDINGS_CADENCE_SERDES_H
+#define _DT_BINDINGS_CADENCE_SERDES_H
+
+/* Torrent */
+#define TORRENT_SERDES_NO_SSC          0
+#define TORRENT_SERDES_EXTERNAL_SSC    1
+#define TORRENT_SERDES_INTERNAL_SSC    2
+
+#define CDNS_TORRENT_REFCLK_DRIVER      0
+
+/* Sierra */
+#define CDNS_SIERRA_PLL_CMNLC          0
+#define CDNS_SIERRA_PLL_CMNLC1         1
+
+#endif /* _DT_BINDINGS_CADENCE_SERDES_H */
diff --git a/include/dt-bindings/phy/phy-ti.h b/include/dt-bindings/phy/phy-ti.h
new file mode 100644 (file)
index 0000000..ad955d3
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for TI SERDES.
+ */
+
+#ifndef _DT_BINDINGS_TI_SERDES
+#define _DT_BINDINGS_TI_SERDES
+
+/* Clock index for output clocks from WIZ */
+
+/* MUX Clocks */
+#define TI_WIZ_PLL0_REFCLK     0
+#define TI_WIZ_PLL1_REFCLK     1
+#define TI_WIZ_REFCLK_DIG      2
+
+/* Reserve index here for future additions */
+
+/* MISC Clocks */
+#define TI_WIZ_PHY_EN_REFCLK   16
+
+#endif /* _DT_BINDINGS_TI_SERDES */
index a29d3ff2e7e8dc59c83ef041179bda0b461a16d7..c432fdb8547f909231a8c69445c55b4b4cc4fb6b 100644 (file)
@@ -72,6 +72,12 @@ const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key)
        return key->payload.data[asym_key_ids];
 }
 
+static inline
+const struct public_key *asymmetric_key_public_key(const struct key *key)
+{
+       return key->payload.data[asym_crypto];
+}
+
 extern struct key *find_asymmetric_key(struct key *keyring,
                                       const struct asymmetric_key_id *id_0,
                                       const struct asymmetric_key_id *id_1,
index fb8b07daa9d15519370e1731b8cf4c726bf83a17..875e002a41804726f712948e5ed9acb1c7402706 100644 (file)
@@ -31,6 +31,7 @@ extern int restrict_link_by_builtin_and_secondary_trusted(
 #define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted
 #endif
 
+extern struct pkcs7_message *pkcs7;
 #ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING
 extern int mark_hash_blacklisted(const char *hash);
 extern int is_hash_blacklisted(const u8 *hash, size_t hash_len,
@@ -49,6 +50,20 @@ static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len)
 }
 #endif
 
+#ifdef CONFIG_SYSTEM_REVOCATION_LIST
+extern int add_key_to_revocation_list(const char *data, size_t size);
+extern int is_key_on_revocation_list(struct pkcs7_message *pkcs7);
+#else
+static inline int add_key_to_revocation_list(const char *data, size_t size)
+{
+       return 0;
+}
+static inline int is_key_on_revocation_list(struct pkcs7_message *pkcs7)
+{
+       return -ENOKEY;
+}
+#endif
+
 #ifdef CONFIG_IMA_BLACKLIST_KEYRING
 extern struct key *ima_blacklist_keyring;
 
index a94c03a61d8f9b3c57cddd158737ed4298d29ffe..d89fa2579ac0569864508ef4d175669f22314d35 100644 (file)
 #include <linux/rcupdate.h>
 #include <linux/tpm.h>
 
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "trusted_key: " fmt
+
 #define MIN_KEY_SIZE                   32
 #define MAX_KEY_SIZE                   128
 #define MAX_BLOB_SIZE                  512
@@ -22,6 +28,7 @@ struct trusted_key_payload {
        unsigned int key_len;
        unsigned int blob_len;
        unsigned char migratable;
+       unsigned char old_format;
        unsigned char key[MAX_KEY_SIZE + 1];
        unsigned char blob[MAX_BLOB_SIZE];
 };
@@ -30,6 +37,7 @@ struct trusted_key_options {
        uint16_t keytype;
        uint32_t keyhandle;
        unsigned char keyauth[TPM_DIGEST_SIZE];
+       uint32_t blobauth_len;
        unsigned char blobauth[TPM_DIGEST_SIZE];
        uint32_t pcrinfo_len;
        unsigned char pcrinfo[MAX_PCRINFO_SIZE];
@@ -40,6 +48,53 @@ struct trusted_key_options {
        uint32_t policyhandle;
 };
 
+struct trusted_key_ops {
+       /*
+        * flag to indicate if trusted key implementation supports migration
+        * or not.
+        */
+       unsigned char migratable;
+
+       /* Initialize key interface. */
+       int (*init)(void);
+
+       /* Seal a key. */
+       int (*seal)(struct trusted_key_payload *p, char *datablob);
+
+       /* Unseal a key. */
+       int (*unseal)(struct trusted_key_payload *p, char *datablob);
+
+       /* Get a randomized key. */
+       int (*get_random)(unsigned char *key, size_t key_len);
+
+       /* Exit key interface. */
+       void (*exit)(void);
+};
+
+struct trusted_key_source {
+       char *name;
+       struct trusted_key_ops *ops;
+};
+
 extern struct key_type key_type_trusted;
 
+#define TRUSTED_DEBUG 0
+
+#if TRUSTED_DEBUG
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+       pr_info("key_len %d\n", p->key_len);
+       print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE,
+                      16, 1, p->key, p->key_len, 0);
+       pr_info("bloblen %d\n", p->blob_len);
+       print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE,
+                      16, 1, p->blob, p->blob_len, 0);
+       pr_info("migratable %d\n", p->migratable);
+}
+#else
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+}
+#endif
+
 #endif /* _KEYS_TRUSTED_TYPE_H */
diff --git a/include/keys/trusted_tee.h b/include/keys/trusted_tee.h
new file mode 100644 (file)
index 0000000..151be25
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ *
+ * Author:
+ * Sumit Garg <sumit.garg@linaro.org>
+ */
+
+#ifndef __TEE_TRUSTED_KEY_H
+#define __TEE_TRUSTED_KEY_H
+
+#include <keys/trusted-type.h>
+
+extern struct trusted_key_ops trusted_key_tee_ops;
+
+#endif
index a56d8e1298f27e76d3c299de8b5d3870b77535d4..7769b726863ab10b5b160366d7fa918784491995 100644 (file)
@@ -16,6 +16,8 @@
 #define LOAD32N(buffer, offset)        (*(uint32_t *)&buffer[offset])
 #define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset]))
 
+extern struct trusted_key_ops trusted_key_tpm_ops;
+
 struct osapsess {
        uint32_t handle;
        unsigned char secret[SHA1_DIGEST_SIZE];
@@ -52,30 +54,19 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
 #if TPM_DEBUG
 static inline void dump_options(struct trusted_key_options *o)
 {
-       pr_info("trusted_key: sealing key type %d\n", o->keytype);
-       pr_info("trusted_key: sealing key handle %0X\n", o->keyhandle);
-       pr_info("trusted_key: pcrlock %d\n", o->pcrlock);
-       pr_info("trusted_key: pcrinfo %d\n", o->pcrinfo_len);
+       pr_info("sealing key type %d\n", o->keytype);
+       pr_info("sealing key handle %0X\n", o->keyhandle);
+       pr_info("pcrlock %d\n", o->pcrlock);
+       pr_info("pcrinfo %d\n", o->pcrinfo_len);
        print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE,
                       16, 1, o->pcrinfo, o->pcrinfo_len, 0);
 }
 
-static inline void dump_payload(struct trusted_key_payload *p)
-{
-       pr_info("trusted_key: key_len %d\n", p->key_len);
-       print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE,
-                      16, 1, p->key, p->key_len, 0);
-       pr_info("trusted_key: bloblen %d\n", p->blob_len);
-       print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE,
-                      16, 1, p->blob, p->blob_len, 0);
-       pr_info("trusted_key: migratable %d\n", p->migratable);
-}
-
 static inline void dump_sess(struct osapsess *s)
 {
        print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE,
                       16, 1, &s->handle, 4, 0);
-       pr_info("trusted-key: secret:\n");
+       pr_info("secret:\n");
        print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
                       16, 1, &s->secret, SHA1_DIGEST_SIZE, 0);
        pr_info("trusted-key: enonce:\n");
@@ -87,7 +78,7 @@ static inline void dump_tpm_buf(unsigned char *buf)
 {
        int len;
 
-       pr_info("\ntrusted-key: tpm buffer\n");
+       pr_info("\ntpm buffer\n");
        len = LOAD32(buf, TPM_SIZE_OFFSET);
        print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0);
 }
@@ -96,10 +87,6 @@ static inline void dump_options(struct trusted_key_options *o)
 {
 }
 
-static inline void dump_payload(struct trusted_key_payload *p)
-{
-}
-
 static inline void dump_sess(struct osapsess *s)
 {
 }
index 57bb54f6767a3979af385e5d0e84fda7e3b937ec..ef4bd705eb6570200dbc6011ab5387ba1429883b 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * rWTM BIU Mailbox driver for Armada 37xx
  *
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
  */
 
 #ifndef _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_
diff --git a/include/linux/asn1_encoder.h b/include/linux/asn1_encoder.h
new file mode 100644 (file)
index 0000000..08cd0c2
--- /dev/null
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_ASN1_ENCODER_H
+#define _LINUX_ASN1_ENCODER_H
+
+#include <linux/types.h>
+#include <linux/asn1.h>
+#include <linux/asn1_ber_bytecode.h>
+#include <linux/bug.h>
+
+#define asn1_oid_len(oid) (sizeof(oid)/sizeof(u32))
+unsigned char *
+asn1_encode_integer(unsigned char *data, const unsigned char *end_data,
+                   s64 integer);
+unsigned char *
+asn1_encode_oid(unsigned char *data, const unsigned char *end_data,
+               u32 oid[], int oid_len);
+unsigned char *
+asn1_encode_tag(unsigned char *data, const unsigned char *end_data,
+               u32 tag, const unsigned char *string, int len);
+unsigned char *
+asn1_encode_octet_string(unsigned char *data,
+                        const unsigned char *end_data,
+                        const unsigned char *string, u32 len);
+unsigned char *
+asn1_encode_sequence(unsigned char *data, const unsigned char *end_data,
+                    const unsigned char *seq, int len);
+unsigned char *
+asn1_encode_boolean(unsigned char *data, const unsigned char *end_data,
+                   bool val);
+
+#endif
index 40bad71865ea762869e917d2174aa2ec6cb31286..532bcbfc471616f01a08a8356fc954f91c36c257 100644 (file)
@@ -476,7 +476,6 @@ struct virtchnl_rss_key {
        u16 vsi_id;
        u16 key_len;
        u8 key[1];         /* RSS hash key, packed bytes */
-       u8 pad[1];
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
@@ -485,7 +484,6 @@ struct virtchnl_rss_lut {
        u16 vsi_id;
        u16 lut_entries;
        u8 lut[1];        /* RSS lookup table */
-       u8 pad[1];
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
index a19519f4241dce4f18b8c338648a70af75392994..eed86eb0a1dea08a4f813537b198639730e87d45 100644 (file)
@@ -4,7 +4,7 @@
 
 #include <linux/preempt.h>
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS)
 extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
 #else
 static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
@@ -32,4 +32,10 @@ static inline void local_bh_enable(void)
        __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
 }
 
+#ifdef CONFIG_PREEMPT_RT
+extern bool local_bh_blocked(void);
+#else
+static inline bool local_bh_blocked(void) { return false; }
+#endif
+
 #endif /* _LINUX_BH_H */
index 3625f019767dfd5f3f2dd40b23f170ccdb9b8768..fdac0534ce79d9b69fd0f5aa40452010f0cb24f0 100644 (file)
@@ -40,6 +40,7 @@ struct bpf_local_storage;
 struct bpf_local_storage_map;
 struct kobject;
 struct mem_cgroup;
+struct module;
 
 extern struct idr btf_idr;
 extern spinlock_t btf_idr_lock;
@@ -623,6 +624,7 @@ struct bpf_trampoline {
        /* Executable image of trampoline */
        struct bpf_tramp_image *cur_image;
        u64 selector;
+       struct module *mod;
 };
 
 struct bpf_attach_target_info {
index 86d143db65231649af215c30dfc24814f0d38349..a247b089ca78d753ba038f0e110361c738adfa8a 100644 (file)
@@ -70,7 +70,7 @@ struct module;
  * @mark_unstable:     Optional function to inform the clocksource driver that
  *                     the watchdog marked the clocksource unstable
  * @tick_stable:        Optional function called periodically from the watchdog
- *                     code to provide stable syncrhonization points
+ *                     code to provide stable synchronization points
  * @wd_list:           List head to enqueue into the watchdog list (internal)
  * @cs_last:           Last clocksource value for clocksource watchdog
  * @wd_last:           Last watchdog value corresponding to @cs_last
index f14adb882338194cc7f127f9ba7ad3288c084370..3d4442397bf90d26013d5317a0703716b7adb955 100644 (file)
@@ -135,6 +135,7 @@ enum cpuhp_state {
        CPUHP_AP_RISCV_TIMER_STARTING,
        CPUHP_AP_CLINT_TIMER_STARTING,
        CPUHP_AP_CSKY_TIMER_STARTING,
+       CPUHP_AP_TI_GP_TIMER_STARTING,
        CPUHP_AP_HYPERV_TIMER_STARTING,
        CPUHP_AP_KVM_STARTING,
        CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
@@ -175,6 +176,8 @@ enum cpuhp_state {
        CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
        CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
        CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+       CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+       CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
        CPUHP_AP_PERF_ARM_L2X0_ONLINE,
        CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
        CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
index ba660731bd258279d8c9b5513a90015b22fad3e0..38a2071cf77685a3b8c520f4d4eafb50ee080669 100644 (file)
@@ -49,7 +49,7 @@ struct dev_iommu;
 /**
  * struct subsys_interface - interfaces to device functions
  * @name:       name of the device function
- * @subsys:     subsytem of the devices to attach to
+ * @subsys:     subsystem of the devices to attach to
  * @node:       the list of functions registered at the subsystem
  * @add_dev:    device hookup to device function handler
  * @remove_dev: device hookup to device function handler
@@ -439,6 +439,9 @@ struct dev_links_info {
  * @state_synced: The hardware state of this device has been synced to match
  *               the software state of this device by calling the driver/bus
  *               sync_state() callback.
+ * @can_match: The device has matched with a driver at least once or it is in
+ *             a bus (like AMBA) which can't check for matching drivers until
+ *             other devices probe successfully.
  * @dma_coherent: this particular device is dma coherent, even if the
  *             architecture supports non-coherent devices.
  * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
@@ -545,6 +548,7 @@ struct device {
        bool                    offline:1;
        bool                    of_node_reused:1;
        bool                    state_synced:1;
+       bool                    can_match:1;
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
     defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
     defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
diff --git a/include/linux/devm-helpers.h b/include/linux/devm-helpers.h
new file mode 100644 (file)
index 0000000..f40f777
--- /dev/null
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __LINUX_DEVM_HELPERS_H
+#define __LINUX_DEVM_HELPERS_H
+
+/*
+ * Functions which do automatically cancel operations or release resources upon
+ * driver detach.
+ *
+ * These should be helpful to avoid mixing the manual and devm-based resource
+ * management which can be source of annoying, rarely occurring,
+ * hard-to-reproduce bugs.
+ *
+ * Please take into account that devm based cancellation may be performed some
+ * time after the remove() has run.
+ *
+ * Thus mixing devm and manual resource management can easily cause problems
+ * when unwinding operations with dependencies. IRQ scheduling a work in a queue
+ * is a typical example where IRQs are often devm-managed and WQs are manually
+ * cleaned at remove(). If IRQs are not manually freed at remove() (and this is
+ * often the case when we use devm for IRQs) we have a period of time after
+ * remove() - and before devm managed IRQs are freed - where new IRQ may fire
+ * and schedule a work item which won't be cancelled because remove() was
+ * already run.
+ */
+
+#include <linux/device.h>
+#include <linux/workqueue.h>
+
+static inline void devm_delayed_work_drop(void *res)
+{
+       cancel_delayed_work_sync(res);
+}
+
+/**
+ * devm_delayed_work_autocancel - Resource-managed delayed work allocation
+ * @dev:       Device which lifetime work is bound to
+ * @w:         Work item to be queued
+ * @worker:    Worker function
+ *
+ * Initialize delayed work which is automatically cancelled when driver is
+ * detached. A few drivers need delayed work which must be cancelled before
+ * driver is detached to avoid accessing removed resources.
+ * devm_delayed_work_autocancel() can be used to omit the explicit
+ * cancellation when the driver is detached.
+ */
+static inline int devm_delayed_work_autocancel(struct device *dev,
+                                              struct delayed_work *w,
+                                              work_func_t worker)
+{
+       INIT_DELAYED_WORK(w, worker);
+       return devm_add_action(dev, devm_delayed_work_drop, w);
+}
+
+#endif
index 883acef895bc4d222c86075974cc55fc23119e02..2e2b8d6140ed47a743c88d03464b9a4d0fc9bcde 100644 (file)
@@ -360,7 +360,7 @@ void syscall_exit_to_user_mode_work(struct pt_regs *regs);
  *
  * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
  * exit_to_user_mode(). This function is preferred unless there is a
- * compelling architectural reason to use the seperate functions.
+ * compelling architectural reason to use the separate functions.
  */
 void syscall_exit_to_user_mode(struct pt_regs *regs);
 
@@ -381,7 +381,7 @@ void irqentry_enter_from_user_mode(struct pt_regs *regs);
  * irqentry_exit_to_user_mode - Interrupt exit work
  * @regs:      Pointer to current's pt_regs
  *
- * Invoked with interrupts disbled and fully valid regs. Returns with all
+ * Invoked with interrupts disabled and fully valid regs. Returns with all
  * work handled, interrupts disabled such that the caller can immediately
  * switch to user mode. Called from architecture specific interrupt
  * handling code.
index ec4cd3921c67d9c40367021b9947b5011f3265f2..cdca84e6dd6b0f1f4c3b10db596afb6e45609044 100644 (file)
@@ -87,9 +87,7 @@ u32 ethtool_op_get_link(struct net_device *dev);
 int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
 
 
-/**
- * struct ethtool_link_ext_state_info - link extended state and substate.
- */
+/* Link extended state and substate. */
 struct ethtool_link_ext_state_info {
        enum ethtool_link_ext_state link_ext_state;
        union {
@@ -129,7 +127,6 @@ struct ethtool_link_ksettings {
                __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
        } link_modes;
        u32     lanes;
-       enum ethtool_link_mode_bit_indices link_mode;
 };
 
 /**
@@ -292,6 +289,9 @@ struct ethtool_pause_stats {
  *     do not attach ext_substate attribute to netlink message). If link_ext_state
  *     and link_ext_substate are unknown, return -ENODATA. If not implemented,
  *     link_ext_state and link_ext_substate will not be sent to userspace.
+ * @get_eeprom_len: Read range of EEPROM addresses for validation of
+ *     @get_eeprom and @set_eeprom requests.
+ *     Returns 0 if device does not support EEPROM access.
  * @get_eeprom: Read data from the device EEPROM.
  *     Should fill in the magic field.  Don't need to check len for zero
  *     or wraparound.  Fill in the data argument with the eeprom values
@@ -384,6 +384,8 @@ struct ethtool_pause_stats {
  * @get_module_eeprom: Get the eeprom information from the plug-in module
  * @get_eee: Get Energy-Efficient (EEE) supported and status.
  * @set_eee: Set EEE status (enable/disable) as well as LPI timers.
+ * @get_tunable: Read the value of a driver / device tunable.
+ * @set_tunable: Set the value of a driver / device tunable.
  * @get_per_queue_coalesce: Get interrupt coalescing parameters per queue.
  *     It must check that the given queue number is valid. If neither a RX nor
  *     a TX queue has this number, return -EINVAL. If only a RX queue or a TX
@@ -547,8 +549,8 @@ struct phy_tdr_config;
  * @get_sset_count: Get number of strings that @get_strings will write.
  * @get_strings: Return a set of strings that describe the requested objects
  * @get_stats: Return extended statistics about the PHY device.
- * @start_cable_test - Start a cable test
- * @start_cable_test_tdr - Start a Time Domain Reflectometry cable test
+ * @start_cable_test: Start a cable test
+ * @start_cable_test_tdr: Start a Time Domain Reflectometry cable test
  *
  * All operations are optional (i.e. the function pointer may be set to %NULL)
  * and callers must take this into account. Callers must hold the RTNL lock.
@@ -571,4 +573,12 @@ struct ethtool_phy_ops {
  */
 void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops);
 
+/*
+ * ethtool_params_from_link_mode - Derive link parameters from a given link mode
+ * @link_ksettings: Link parameters to be derived from the link mode
+ * @link_mode: Link mode
+ */
+void
+ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
+                             enum ethtool_link_mode_bit_indices link_mode);
 #endif /* _LINUX_ETHTOOL_H */
index 7c9d6a2d7e90905ffa267df45a2e4ec040c29a76..69bc86ea382c2d9e503831f9e6b5bffe798519e0 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/ftrace_irq.h>
+#include <linux/sched.h>
 #include <linux/vtime.h>
 #include <asm/hardirq.h>
 
index f1d74dcf0353f6e32f1bc8c6b7ed26fcc527120c..9c2373a1cb2d14f14c663005839226ea59a1e448 100644 (file)
@@ -234,6 +234,7 @@ static inline u32 hv_get_avail_to_write_percent(
  * 5 . 0  (Newer Windows 10)
  * 5 . 1  (Windows 10 RS4)
  * 5 . 2  (Windows Server 2019, RS5)
+ * 5 . 3  (Windows Server 2022)
  */
 
 #define VERSION_WS2008  ((0 << 16) | (13))
@@ -245,6 +246,7 @@ static inline u32 hv_get_avail_to_write_percent(
 #define VERSION_WIN10_V5 ((5 << 16) | (0))
 #define VERSION_WIN10_V5_1 ((5 << 16) | (1))
 #define VERSION_WIN10_V5_2 ((5 << 16) | (2))
+#define VERSION_WIN10_V5_3 ((5 << 16) | (3))
 
 /* Make maximum size of pipe payload of 16K */
 #define MAX_PIPE_DATA_PAYLOAD          (sizeof(u8) * 16384)
@@ -284,7 +286,7 @@ struct vmbus_channel_offer {
 
                /*
                 * Pipes:
-                * The following sructure is an integrated pipe protocol, which
+                * The following structure is an integrated pipe protocol, which
                 * is implemented on top of standard user-defined data. Pipe
                 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
                 * use.
@@ -475,6 +477,7 @@ enum vmbus_channel_message_type {
        CHANNELMSG_TL_CONNECT_REQUEST           = 21,
        CHANNELMSG_MODIFYCHANNEL                = 22,
        CHANNELMSG_TL_CONNECT_RESULT            = 23,
+       CHANNELMSG_MODIFYCHANNEL_RESPONSE       = 24,
        CHANNELMSG_COUNT
 };
 
@@ -588,6 +591,13 @@ struct vmbus_channel_open_result {
        u32 status;
 } __packed;
 
+/* Modify Channel Result parameters */
+struct vmbus_channel_modifychannel_response {
+       struct vmbus_channel_message_header header;
+       u32 child_relid;
+       u32 status;
+} __packed;
+
 /* Close channel parameters; */
 struct vmbus_channel_close_channel {
        struct vmbus_channel_message_header header;
@@ -720,6 +730,7 @@ struct vmbus_channel_msginfo {
                struct vmbus_channel_gpadl_torndown gpadl_torndown;
                struct vmbus_channel_gpadl_created gpadl_created;
                struct vmbus_channel_version_response version_response;
+               struct vmbus_channel_modifychannel_response modify_response;
        } response;
 
        u32 msgsize;
@@ -883,11 +894,11 @@ struct vmbus_channel {
         * Support for sub-channels. For high performance devices,
         * it will be useful to have multiple sub-channels to support
         * a scalable communication infrastructure with the host.
-        * The support for sub-channels is implemented as an extention
+        * The support for sub-channels is implemented as an extension
         * to the current infrastructure.
         * The initial offer is considered the primary channel and this
         * offer message will indicate if the host supports sub-channels.
-        * The guest is free to ask for sub-channels to be offerred and can
+        * The guest is free to ask for sub-channels to be offered and can
         * open these sub-channels as a normal "primary" channel. However,
         * all sub-channels will have the same type and instance guids as the
         * primary channel. Requests sent on a given channel will result in a
@@ -951,7 +962,7 @@ struct vmbus_channel {
         * Clearly, these optimizations improve throughput at the expense of
         * latency. Furthermore, since the channel is shared for both
         * control and data messages, control messages currently suffer
-        * unnecessary latency adversley impacting performance and boot
+        * unnecessary latency adversely impacting performance and boot
         * time. To fix this issue, permit tagging the channel as being
         * in "low latency" mode. In this mode, we will bypass the monitor
         * mechanism.
@@ -1594,7 +1605,7 @@ extern __u32 vmbus_proto_version;
 
 int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
                                  const guid_t *shv_host_servie_id);
-int vmbus_send_modifychannel(u32 child_relid, u32 target_vp);
+int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
 void vmbus_set_event(struct vmbus_channel *channel);
 
 /* Get the start of the ring buffer. */
index 76f1161a441a4e771b0d282a8bed2892a8dce474..4777850a6dc7cdafdaabc95dbac8746559e92094 100644 (file)
@@ -658,26 +658,21 @@ enum
        TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
 };
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
        return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
-static inline void tasklet_unlock(struct tasklet_struct *t)
-{
-       smp_mb__before_atomic();
-       clear_bit(TASKLET_STATE_RUN, &(t)->state);
-}
+void tasklet_unlock(struct tasklet_struct *t);
+void tasklet_unlock_wait(struct tasklet_struct *t);
+void tasklet_unlock_spin_wait(struct tasklet_struct *t);
 
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
-       while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
 #else
-#define tasklet_trylock(t) 1
-#define tasklet_unlock_wait(t) do { } while (0)
-#define tasklet_unlock(t) do { } while (0)
+static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
+static inline void tasklet_unlock(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
 #endif
 
 extern void __tasklet_schedule(struct tasklet_struct *t);
@@ -702,6 +697,17 @@ static inline void tasklet_disable_nosync(struct tasklet_struct *t)
        smp_mb__after_atomic();
 }
 
+/*
+ * Do not use in new code. Disabling tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
+{
+       tasklet_disable_nosync(t);
+       tasklet_unlock_spin_wait(t);
+       smp_mb();
+}
+
 static inline void tasklet_disable(struct tasklet_struct *t)
 {
        tasklet_disable_nosync(t);
@@ -716,7 +722,6 @@ static inline void tasklet_enable(struct tasklet_struct *t)
 }
 
 extern void tasklet_kill(struct tasklet_struct *t);
-extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
 extern void tasklet_init(struct tasklet_struct *t,
                         void (*func)(unsigned long), unsigned long data);
 extern void tasklet_setup(struct tasklet_struct *t,
index 2efde6a79b7ee7095aad7d7123e6cf4f61b97189..31b347c9f8dd01300dfd02cd7662d40c514e7e5d 100644 (file)
@@ -116,7 +116,7 @@ enum {
  * IRQ_SET_MASK_NOCPY  - OK, chip did update irq_common_data.affinity
  * IRQ_SET_MASK_OK_DONE        - Same as IRQ_SET_MASK_OK for core. Special code to
  *                       support stacked irqchips, which indicates skipping
- *                       all descendent irqchips.
+ *                       all descendant irqchips.
  */
 enum {
        IRQ_SET_MASK_OK = 0,
@@ -302,7 +302,7 @@ static inline bool irqd_is_level_type(struct irq_data *d)
 
 /*
  * Must only be called of irqchip.irq_set_affinity() or low level
- * hieararchy domain allocation functions.
+ * hierarchy domain allocation functions.
  */
 static inline void irqd_set_single_target(struct irq_data *d)
 {
@@ -1258,11 +1258,13 @@ int __init set_handle_irq(void (*handle_irq)(struct pt_regs *));
  */
 extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
 #else
+#ifndef set_handle_irq
 #define set_handle_irq(handle_irq)             \
        do {                                    \
                (void)handle_irq;               \
                WARN_ON(1);                     \
        } while (0)
 #endif
+#endif
 
 #endif /* _LINUX_IRQ_H */
index 943c3411ca1012510fec3c22b7c139f2c7dbe05a..2c63375bbd43f41d3c5019ab4f4f3583196b522f 100644 (file)
@@ -145,4 +145,6 @@ int its_init_v4(struct irq_domain *domain,
                const struct irq_domain_ops *vpe_ops,
                const struct irq_domain_ops *sgi_ops);
 
+bool gic_cpuif_has_vsgi(void);
+
 #endif
index 891b323266dfc0277e1b2a69694871fb830ed6ce..df465125078552b097e38ccf26e9e4b0249e6970 100644 (file)
@@ -32,7 +32,7 @@ struct pt_regs;
  * @last_unhandled:    aging timer for unhandled count
  * @irqs_unhandled:    stats field for spurious unhandled interrupts
  * @threads_handled:   stats field for deferred spurious detection of threaded handlers
- * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers
+ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
  * @lock:              locking for SMP
  * @affinity_hint:     hint to user space for preferred irq affinity
  * @affinity_notify:   context for notification of affinity changes
index 33cacc8af26dab4f582d986054c4164d02d73d6c..7a1dd7b969b6eb408bd718044fe3b808038463d6 100644 (file)
@@ -415,15 +415,6 @@ static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
 extern unsigned int irq_find_mapping(struct irq_domain *host,
                                     irq_hw_number_t hwirq);
 extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
-extern int irq_create_strict_mappings(struct irq_domain *domain,
-                                     unsigned int irq_base,
-                                     irq_hw_number_t hwirq_base, int count);
-
-static inline int irq_create_identity_mapping(struct irq_domain *host,
-                                             irq_hw_number_t hwirq)
-{
-       return irq_create_strict_mappings(host, hwirq, hwirq, 1);
-}
 
 extern const struct irq_domain_ops irq_domain_simple_ops;
 
index d92691262f51a6ef5c0a2efa4df6ab4668b6f920..05f5554d860f5cd44e80885a084da7dfd60f93fe 100644 (file)
@@ -382,6 +382,21 @@ struct static_key_false {
                [0 ... (count) - 1] = STATIC_KEY_FALSE_INIT,    \
        }
 
+#define _DEFINE_STATIC_KEY_1(name)     DEFINE_STATIC_KEY_TRUE(name)
+#define _DEFINE_STATIC_KEY_0(name)     DEFINE_STATIC_KEY_FALSE(name)
+#define DEFINE_STATIC_KEY_MAYBE(cfg, name)                     \
+       __PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name)
+
+#define _DEFINE_STATIC_KEY_RO_1(name)  DEFINE_STATIC_KEY_TRUE_RO(name)
+#define _DEFINE_STATIC_KEY_RO_0(name)  DEFINE_STATIC_KEY_FALSE_RO(name)
+#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name)                  \
+       __PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name)
+
+#define _DECLARE_STATIC_KEY_1(name)    DECLARE_STATIC_KEY_TRUE(name)
+#define _DECLARE_STATIC_KEY_0(name)    DECLARE_STATIC_KEY_FALSE(name)
+#define DECLARE_STATIC_KEY_MAYBE(cfg, name)                    \
+       __PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name)
+
 extern bool ____wrong_branch_error(void);
 
 #define static_key_enabled(x)                                                  \
@@ -482,6 +497,10 @@ extern bool ____wrong_branch_error(void);
 
 #endif /* CONFIG_JUMP_LABEL */
 
+#define static_branch_maybe(config, x)                                 \
+       (IS_ENABLED(config) ? static_branch_likely(x)                   \
+                           : static_branch_unlikely(x))
+
 /*
  * Advanced usage; refcount, branch is enabled when: count != 0
  */
index b91732bd05d781975a1da533170ae71cf2a31d08..d53ea3c047bcde816c4837616ff410b8217d8e05 100644 (file)
@@ -330,7 +330,7 @@ static inline bool kasan_check_byte(const void *address)
 
 #endif /* CONFIG_KASAN */
 
-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
 void kasan_unpoison_task_stack(struct task_struct *task);
 #else
 static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
@@ -376,6 +376,12 @@ static inline void *kasan_reset_tag(const void *addr)
 
 #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/
 
+#ifdef CONFIG_KASAN_HW_TAGS
+
+void kasan_report_async(void);
+
+#endif /* CONFIG_KASAN_HW_TAGS */
+
 #ifdef CONFIG_KASAN_SW_TAGS
 void __init kasan_init_sw_tags(void);
 #else
index 52b1610eae68bf3d074600977f19cd5ae970ae72..c544b70dfbd26777e4d7577e152d9ededde84a6b 100644 (file)
 /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */
 #define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
 
-/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
+/* These Ethernet switch families contain embedded PHYs, but they do
  * not have a model ID. So the switch driver traps reads to the ID2
  * register and returns the switch family ID
  */
-#define MARVELL_PHY_ID_88E6390         0x01410f90
+#define MARVELL_PHY_ID_88E6341_FAMILY  0x01410f41
+#define MARVELL_PHY_ID_88E6390_FAMILY  0x01410f90
 
 #define MARVELL_PHY_FAMILY_ID(id)      ((id) >> 4)
 
index d26acc8b21cd796e2aa8cc278022cb3811509acf..944aa3aa30355fe33fba8133cc8e0a2ad1653c9b 100644 (file)
@@ -117,6 +117,7 @@ struct mhi_link_info {
  * @MHI_EE_WFW: WLAN firmware mode
  * @MHI_EE_PTHRU: Passthrough
  * @MHI_EE_EDL: Embedded downloader
+ * @MHI_EE_FP: Flash Programmer Environment
  */
 enum mhi_ee_type {
        MHI_EE_PBL,
@@ -126,7 +127,8 @@ enum mhi_ee_type {
        MHI_EE_WFW,
        MHI_EE_PTHRU,
        MHI_EE_EDL,
-       MHI_EE_MAX_SUPPORTED = MHI_EE_EDL,
+       MHI_EE_FP,
+       MHI_EE_MAX_SUPPORTED = MHI_EE_FP,
        MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
        MHI_EE_NOT_SUPPORTED,
        MHI_EE_MAX,
@@ -203,7 +205,7 @@ enum mhi_db_brst_mode {
  * @num: The number assigned to this channel
  * @num_elements: The number of elements that can be queued to this channel
  * @local_elements: The local ring length of the channel
- * @event_ring: The event rung index that services this channel
+ * @event_ring: The event ring index that services this channel
  * @dir: Direction that data may flow on this channel
  * @type: Channel type
  * @ee_mask: Execution Environment mask for this channel
@@ -296,7 +298,7 @@ struct mhi_controller_config {
  * @wake_db: MHI WAKE doorbell register address
  * @iova_start: IOMMU starting address for data (required)
  * @iova_stop: IOMMU stop address for data (required)
- * @fw_image: Firmware image name for normal booting (required)
+ * @fw_image: Firmware image name for normal booting (optional)
  * @edl_image: Firmware image name for emergency download mode (optional)
  * @rddm_size: RAM dump size that host should allocate for debugging purpose
  * @sbl_size: SBL image size downloaded through BHIe (optional)
@@ -352,7 +354,6 @@ struct mhi_controller_config {
  * @index: Index of the MHI controller instance
  * @bounce_buf: Use of bounce buffer
  * @fbc_download: MHI host needs to do complete image transfer (optional)
- * @pre_init: MHI host needs to do pre-initialization before power up
  * @wake_set: Device wakeup set flag
  * @irq_flags: irq flags passed to request_irq (optional)
  *
@@ -445,7 +446,6 @@ struct mhi_controller {
        int index;
        bool bounce_buf;
        bool fbc_download;
-       bool pre_init;
        bool wake_set;
        unsigned long irq_flags;
 };
@@ -712,13 +712,27 @@ int mhi_device_get_sync(struct mhi_device *mhi_dev);
 void mhi_device_put(struct mhi_device *mhi_dev);
 
 /**
- * mhi_prepare_for_transfer - Setup channel for data transfer
+ * mhi_prepare_for_transfer - Setup UL and DL channels for data transfer.
+ *                            Allocate and initialize the channel context and
+ *                            also issue the START channel command to both
+ *                            channels. Channels can be started only if both
+ *                            host and device execution environments match and
+ *                            channels are in a DISABLED state.
  * @mhi_dev: Device associated with the channels
  */
 int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
 
 /**
- * mhi_unprepare_from_transfer - Unprepare the channels
+ * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
+ *                               Issue the RESET channel command and let the
+ *                               device clean-up the context so no incoming
+ *                               transfers are seen on the host. Free memory
+ *                               associated with the context on host. If device
+ *                               is unresponsive, only perform a host side
+ *                               clean-up. Channels can be reset only if both
+ *                               host and device execution environments match
+ *                               and channels are in an ENABLED, STOPPED or
+ *                               SUSPENDED state.
  * @mhi_dev: Device associated with the channels
  */
 void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
index df5d91c8b2d42b71343d5e268bff95989adfe77f..9c68b2da14c637ce74020d909c1a9ea0b963c9a0 100644 (file)
@@ -437,11 +437,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
        u8         reserved_at_60[0x18];
        u8         log_max_ft_num[0x8];
 
-       u8         reserved_at_80[0x18];
+       u8         reserved_at_80[0x10];
+       u8         log_max_flow_counter[0x8];
        u8         log_max_destination[0x8];
 
-       u8         log_max_flow_counter[0x8];
-       u8         reserved_at_a8[0x10];
+       u8         reserved_at_a0[0x18];
        u8         log_max_flow[0x8];
 
        u8         reserved_at_c0[0x40];
@@ -8835,6 +8835,8 @@ struct mlx5_ifc_pplm_reg_bits {
 
        u8         fec_override_admin_100g_2x[0x10];
        u8         fec_override_admin_50g_1x[0x10];
+
+       u8         reserved_at_140[0x140];
 };
 
 struct mlx5_ifc_ppcnt_reg_bits {
@@ -10198,7 +10200,7 @@ struct mlx5_ifc_pbmc_reg_bits {
 
        struct mlx5_ifc_bufferx_reg_bits buffer[10];
 
-       u8         reserved_at_2e0[0x40];
+       u8         reserved_at_2e0[0x80];
 };
 
 struct mlx5_ifc_qtct_reg_bits {
index 8ba434287387b7d67fb9ce2f2f37024f41ec9894..616dcaf08d992cbef273742ecce36a593df706c2 100644 (file)
@@ -2904,18 +2904,20 @@ static inline void kernel_poison_pages(struct page *page, int numpages) { }
 static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
 #endif
 
-DECLARE_STATIC_KEY_FALSE(init_on_alloc);
+DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
 static inline bool want_init_on_alloc(gfp_t flags)
 {
-       if (static_branch_unlikely(&init_on_alloc))
+       if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
+                               &init_on_alloc))
                return true;
        return flags & __GFP_ZERO;
 }
 
-DECLARE_STATIC_KEY_FALSE(init_on_free);
+DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
 static inline bool want_init_on_free(void)
 {
-       return static_branch_unlikely(&init_on_free);
+       return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
+                                  &init_on_free);
 }
 
 extern bool _debug_pagealloc_enabled_early;
index 490db6886dcc8c9d80cc65b0bcc419a4aee3cc15..79184948fab4705d47e2fa47a8c7595f77fc43f3 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Turris Mox module configuration bus driver
  *
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
  */
 
 #ifndef __LINUX_MOXTET_H
index cec526c8043d5be4cef245e475989a2d65e7a6ec..ee9ad76afbba9c3401a8d3b128406b91c6c6e6b3 100644 (file)
@@ -11,6 +11,7 @@
 
 enum nvdimm_event {
        NVDIMM_REVALIDATE_POISON,
+       NVDIMM_REVALIDATE_REGION,
 };
 
 enum nvdimm_claim_class {
index 7d3537c40ec95a0bdb4b57f47d0f3b03bd62a2e2..26a13294318cf7c159418af93d733883ea0a31bd 100644 (file)
@@ -52,8 +52,9 @@ extern void *arpt_alloc_initial_table(const struct xt_table *);
 int arpt_register_table(struct net *net, const struct xt_table *table,
                        const struct arpt_replace *repl,
                        const struct nf_hook_ops *ops, struct xt_table **res);
-void arpt_unregister_table(struct net *net, struct xt_table *table,
-                          const struct nf_hook_ops *ops);
+void arpt_unregister_table(struct net *net, struct xt_table *table);
+void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                                   const struct nf_hook_ops *ops);
 extern unsigned int arpt_do_table(struct sk_buff *skb,
                                  const struct nf_hook_state *state,
                                  struct xt_table *table);
index 2f5c4e6ecd8a4c52ac7627bbd059cff79b87e55c..3a956145a25cb6f4b18e7336a9a80089ee242f4a 100644 (file)
@@ -110,8 +110,9 @@ extern int ebt_register_table(struct net *net,
                              const struct ebt_table *table,
                              const struct nf_hook_ops *ops,
                              struct ebt_table **res);
-extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
-                                const struct nf_hook_ops *);
+extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
+void ebt_unregister_table_pre_exit(struct net *net, const char *tablename,
+                                  const struct nf_hook_ops *ops);
 extern unsigned int ebt_do_table(struct sk_buff *skb,
                                 const struct nf_hook_state *state,
                                 struct ebt_table *table);
index 052293f4cbdb0a64d95884aee645778e9a63c511..923dada24eb4163b2e5fb7f9be88ae78db2c34ca 100644 (file)
@@ -65,6 +65,10 @@ int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val);
 int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val);
 int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val);
 int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val);
+int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
+                                   u32 *val);
+int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
+                                   u64 *val);
 
 /* direct nvmem device read/write interface */
 struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
index 4462ed2c18cddb355ed5579059cadc6ec436b0d2..461b7aa587ba5321332f2e572fe1b623aa504326 100644 (file)
 enum OID {
        OID_id_dsa_with_sha1,           /* 1.2.840.10030.4.3 */
        OID_id_dsa,                     /* 1.2.840.10040.4.1 */
-       OID_id_ecdsa_with_sha1,         /* 1.2.840.10045.4.1 */
        OID_id_ecPublicKey,             /* 1.2.840.10045.2.1 */
+       OID_id_prime192v1,              /* 1.2.840.10045.3.1.1 */
+       OID_id_prime256v1,              /* 1.2.840.10045.3.1.7 */
+       OID_id_ecdsa_with_sha1,         /* 1.2.840.10045.4.1 */
+       OID_id_ecdsa_with_sha224,       /* 1.2.840.10045.4.3.1 */
+       OID_id_ecdsa_with_sha256,       /* 1.2.840.10045.4.3.2 */
+       OID_id_ecdsa_with_sha384,       /* 1.2.840.10045.4.3.3 */
+       OID_id_ecdsa_with_sha512,       /* 1.2.840.10045.4.3.4 */
 
        /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */
        OID_rsaEncryption,              /* 1.2.840.113549.1.1.1 */
@@ -58,6 +64,7 @@ enum OID {
 
        OID_certAuthInfoAccess,         /* 1.3.6.1.5.5.7.1.1 */
        OID_sha1,                       /* 1.3.14.3.2.26 */
+       OID_id_ansip384r1,              /* 1.3.132.0.34 */
        OID_sha256,                     /* 2.16.840.1.101.3.4.2.1 */
        OID_sha384,                     /* 2.16.840.1.101.3.4.2.2 */
        OID_sha512,                     /* 2.16.840.1.101.3.4.2.3 */
@@ -113,10 +120,16 @@ enum OID {
        OID_SM2_with_SM3,               /* 1.2.156.10197.1.501 */
        OID_sm3WithRSAEncryption,       /* 1.2.156.10197.1.504 */
 
+       /* TCG defined OIDS for TPM based keys */
+       OID_TPMLoadableKey,             /* 2.23.133.10.1.3 */
+       OID_TPMImportableKey,           /* 2.23.133.10.1.4 */
+       OID_TPMSealedData,              /* 2.23.133.10.1.5 */
+
        OID__NR
 };
 
 extern enum OID look_up_OID(const void *data, size_t datasize);
+extern int parse_OID(const void *data, size_t datasize, enum OID *oid);
 extern int sprint_oid(const void *, size_t, char *, size_t);
 extern int sprint_OID(enum OID, char *, size_t);
 
index e435bdb0bab355eabc94def12674bc51a7076bda..0ed434d02196bbec2395d6b227e43a1922016804 100644 (file)
@@ -44,6 +44,12 @@ enum phy_mode {
        PHY_MODE_DP
 };
 
+enum phy_media {
+       PHY_MEDIA_DEFAULT,
+       PHY_MEDIA_SR,
+       PHY_MEDIA_DAC,
+};
+
 /**
  * union phy_configure_opts - Opaque generic phy configuration
  *
@@ -64,6 +70,8 @@ union phy_configure_opts {
  * @power_on: powering on the phy
  * @power_off: powering off the phy
  * @set_mode: set the mode of the phy
+ * @set_media: set the media type of the phy (optional)
+ * @set_speed: set the speed of the phy (optional)
  * @reset: resetting the phy
  * @calibrate: calibrate the phy
  * @release: ops to be performed while the consumer relinquishes the PHY
@@ -75,6 +83,8 @@ struct phy_ops {
        int     (*power_on)(struct phy *phy);
        int     (*power_off)(struct phy *phy);
        int     (*set_mode)(struct phy *phy, enum phy_mode mode, int submode);
+       int     (*set_media)(struct phy *phy, enum phy_media media);
+       int     (*set_speed)(struct phy *phy, int speed);
 
        /**
         * @configure:
@@ -215,6 +225,8 @@ int phy_power_off(struct phy *phy);
 int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode);
 #define phy_set_mode(phy, mode) \
        phy_set_mode_ext(phy, mode, 0)
+int phy_set_media(struct phy *phy, enum phy_media media);
+int phy_set_speed(struct phy *phy, int speed);
 int phy_configure(struct phy *phy, union phy_configure_opts *opts);
 int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
                 union phy_configure_opts *opts);
@@ -344,6 +356,20 @@ static inline int phy_set_mode_ext(struct phy *phy, enum phy_mode mode,
 #define phy_set_mode(phy, mode) \
        phy_set_mode_ext(phy, mode, 0)
 
+static inline int phy_set_media(struct phy *phy, enum phy_media media)
+{
+       if (!phy)
+               return 0;
+       return -ENODEV;
+}
+
+static inline int phy_set_speed(struct phy *phy, int speed)
+{
+       if (!phy)
+               return 0;
+       return -ENODEV;
+}
+
 static inline enum phy_mode phy_get_mode(struct phy *phy)
 {
        return PHY_MODE_INVALID;
index 8b30b14b47d3f14866caf83f4cabf0a51ffba7e8..f377817ce75c1033bc53acf97f67f109c5311698 100644 (file)
@@ -85,6 +85,7 @@
  * omap2+ specific GPIO registers
  */
 #define OMAP24XX_GPIO_REVISION         0x0000
+#define OMAP24XX_GPIO_SYSCONFIG                0x0010
 #define OMAP24XX_GPIO_IRQSTATUS1       0x0018
 #define OMAP24XX_GPIO_IRQSTATUS2       0x0028
 #define OMAP24XX_GPIO_IRQENABLE2       0x002c
 #define OMAP24XX_GPIO_SETDATAOUT       0x0094
 
 #define OMAP4_GPIO_REVISION            0x0000
+#define OMAP4_GPIO_SYSCONFIG           0x0010
 #define OMAP4_GPIO_EOI                 0x0020
 #define OMAP4_GPIO_IRQSTATUSRAW0       0x0024
 #define OMAP4_GPIO_IRQSTATUSRAW1       0x0028
 #ifndef __ASSEMBLER__
 struct omap_gpio_reg_offs {
        u16 revision;
+       u16 sysconfig;
        u16 direction;
        u16 datain;
        u16 dataout;
index 3f23f6e430bfa514d7866a941413ea0d274587f7..cd81e060863c95bbb048468db2020aeb3a312aea 100644 (file)
@@ -359,4 +359,7 @@ static inline int is_sh_early_platform_device(struct platform_device *pdev)
 }
 #endif /* CONFIG_SUPERH */
 
+/* For now only SuperH uses it */
+void early_platform_cleanup(void);
+
 #endif /* _PLATFORM_DEVICE_H_ */
diff --git a/include/linux/pps-gpio.h b/include/linux/pps-gpio.h
deleted file mode 100644 (file)
index 7bf4990..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * pps-gpio.h -- PPS client for GPIOs
- *
- * Copyright (C) 2011 James Nuss <jamesnuss@nanometrics.ca>
- */
-
-#ifndef _PPS_GPIO_H
-#define _PPS_GPIO_H
-
-struct pps_gpio_platform_data {
-       struct gpio_desc *gpio_pin;
-       struct gpio_desc *echo_pin;
-       bool assert_falling_edge;
-       bool capture_clear;
-       unsigned int echo_active_ms;
-};
-
-#endif /* _PPS_GPIO_H */
index 69cc8b64aa3a0e3b957fff471b15adcd1dbaa5fe..9881eac0698fd8eb25044b47071d54c11813aa67 100644 (file)
 
 #define nmi_count()    (preempt_count() & NMI_MASK)
 #define hardirq_count()        (preempt_count() & HARDIRQ_MASK)
-#define softirq_count()        (preempt_count() & SOFTIRQ_MASK)
+#ifdef CONFIG_PREEMPT_RT
+# define softirq_count()       (current->softirq_disable_cnt & SOFTIRQ_MASK)
+#else
+# define softirq_count()       (preempt_count() & SOFTIRQ_MASK)
+#endif
 #define irq_count()    (nmi_count() | hardirq_count() | softirq_count())
 
 /*
index dd4687b562393120ebd029ecc2fb977c9535f0c3..0d876316e61d5df0f574e1b007f7f6f5fab42302 100644 (file)
@@ -254,6 +254,13 @@ struct software_node_ref_args {
        u64 args[NR_FWNODE_REFERENCE_ARGS];
 };
 
+#define SOFTWARE_NODE_REFERENCE(_ref_, ...)                    \
+(const struct software_node_ref_args) {                                \
+       .node = _ref_,                                          \
+       .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \
+       .args = { __VA_ARGS__ },                                \
+}
+
 /**
  * struct property_entry - "Built-in" device property representation.
  * @name: Name of the property.
@@ -362,11 +369,7 @@ struct property_entry {
        .name = _name_,                                                 \
        .length = sizeof(struct software_node_ref_args),                \
        .type = DEV_PROP_REF,                                           \
-       { .pointer = &(const struct software_node_ref_args) {           \
-               .node = _ref_,                                          \
-               .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \
-               .args = { __VA_ARGS__ },                                \
-       } },                                                            \
+       { .pointer = &SOFTWARE_NODE_REFERENCE(_ref_, ##__VA_ARGS__), }, \
 }
 
 struct property_entry *
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
new file mode 100644 (file)
index 0000000..fd80fab
--- /dev/null
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_RANDOMIZE_KSTACK_H
+#define _LINUX_RANDOMIZE_KSTACK_H
+
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <linux/percpu-defs.h>
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
+                        randomize_kstack_offset);
+DECLARE_PER_CPU(u32, kstack_offset);
+
+/*
+ * Do not use this anywhere else in the kernel. This is used here because
+ * it provides an arch-agnostic way to grow the stack with correct
+ * alignment. Also, since this use is being explicitly masked to a max of
+ * 10 bits, stack-clash style attacks are unlikely. For more details see
+ * "VLAs" in Documentation/process/deprecated.rst
+ */
+void *__builtin_alloca(size_t size);
+/*
+ * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
+ * "VLA" from being unbounded (see above). 10 bits leaves enough room for
+ * per-arch offset masks to reduce entropy (by removing higher bits, since
+ * high entropy may overly constrain usable stack space), and for
+ * compiler/arch-specific stack alignment to remove the lower bits.
+ */
+#define KSTACK_OFFSET_MAX(x)   ((x) & 0x3FF)
+
+/*
+ * These macros must be used during syscall entry when interrupts and
+ * preempt are disabled, and after user registers have been stored to
+ * the stack.
+ */
+#define add_random_kstack_offset() do {                                        \
+       if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
+                               &randomize_kstack_offset)) {            \
+               u32 offset = raw_cpu_read(kstack_offset);               \
+               u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset));  \
+               /* Keep allocation even after "ptr" loses scope. */     \
+               asm volatile("" : "=o"(*ptr) :: "memory");              \
+       }                                                               \
+} while (0)
+
+#define choose_random_kstack_offset(rand) do {                         \
+       if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
+                               &randomize_kstack_offset)) {            \
+               u32 offset = raw_cpu_read(kstack_offset);               \
+               offset ^= (rand);                                       \
+               raw_cpu_write(kstack_offset, offset);                   \
+       }                                                               \
+} while (0)
+
+#endif
index bd04f722714f65dea4791076d3f63feb5e16e3c9..6d855ef091ba55e798457f4099594f8f7ea90289 100644 (file)
@@ -334,7 +334,8 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define rcu_sleep_check()                                              \
        do {                                                            \
                rcu_preempt_sleep_check();                              \
-               RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),        \
+               if (!IS_ENABLED(CONFIG_PREEMPT_RT))                     \
+                   RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),    \
                                 "Illegal context switch in RCU-bh read-side critical section"); \
                RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),     \
                                 "Illegal context switch in RCU-sched read-side critical section"); \
index ef00bb22164cd60291aa4b3c9761a26e865ef505..743a613c9cf3e85f3d61cabf0e49a523931d803a 100644 (file)
@@ -1044,6 +1044,9 @@ struct task_struct {
        int                             softirq_context;
        int                             irq_config;
 #endif
+#ifdef CONFIG_PREEMPT_RT
+       int                             softirq_disable_cnt;
+#endif
 
 #ifdef CONFIG_LOCKDEP
 # define MAX_LOCK_DEPTH                        48UL
index 8edbbf5f2f9325d120b67a661f2a33bbfec861a3..822c048934e3f6689e4e950e7139ad55f8934124 100644 (file)
@@ -349,8 +349,13 @@ static inline void sk_psock_update_proto(struct sock *sk,
 static inline void sk_psock_restore_proto(struct sock *sk,
                                          struct sk_psock *psock)
 {
-       sk->sk_prot->unhash = psock->saved_unhash;
        if (inet_csk_has_ulp(sk)) {
+               /* TLS does not have an unhash proto in SW cases, but we need
+                * to ensure we stop using the sock_map unhash routine because
+                * the associated psock is being removed. So use the original
+                * unhash handler.
+                */
+               WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
                tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
        } else {
                sk->sk_write_space = psock->saved_write_space;
index d08039d65825efadd7dd443bbfa0033f1c1d961b..ced07f8fde8701fddbd814b10d8e5eb645744a77 100644 (file)
@@ -125,6 +125,12 @@ enum sdw_dpn_grouping {
        SDW_BLK_GRP_CNT_4 = 3,
 };
 
+/* block packing mode enum */
+enum sdw_dpn_pkg_mode {
+       SDW_BLK_PKG_PER_PORT = 0,
+       SDW_BLK_PKG_PER_CHANNEL = 1
+};
+
 /**
  * enum sdw_stream_type: data stream type
  *
@@ -405,6 +411,7 @@ struct sdw_slave_prop {
  * command
  * @mclk_freq: clock reference passed to SoundWire Master, in Hz.
  * @hw_disabled: if true, the Master is not functional, typically due to pin-mux
+ * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification
  */
 struct sdw_master_prop {
        u32 revision;
@@ -421,8 +428,29 @@ struct sdw_master_prop {
        u32 err_threshold;
        u32 mclk_freq;
        bool hw_disabled;
+       u64 quirks;
 };
 
+/* Definitions for Master quirks */
+
+/*
+ * In a number of platforms bus clashes are reported after a hardware
+ * reset but without any explanations or evidence of a real problem.
+ * The following quirk will discard all initial bus clash interrupts
+ * but will leave the detection on should real bus clashes happen
+ */
+#define SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH  BIT(0)
+
+/*
+ * Some Slave devices have known issues with incorrect parity errors
+ * reported after a hardware reset. However during integration unexplained
+ * parity errors can be reported by Slave devices, possibly due to electrical
+ * issues at the Master level.
+ * The following quirk will discard all initial parity errors but will leave
+ * the detection on should real parity errors happen.
+ */
+#define SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY BIT(1)
+
 int sdw_master_read_prop(struct sdw_bus *bus);
 int sdw_slave_read_prop(struct sdw_slave *slave);
 
@@ -614,6 +642,7 @@ struct sdw_slave_ops {
  * @debugfs: Slave debugfs
  * @node: node for bus list
  * @port_ready: Port ready completion flag for each Slave port
+ * @m_port_map: static Master port map for each Slave port
  * @dev_num: Current Device Number, values can be 0 or dev_num_sticky
  * @dev_num_sticky: one-time static Device Number assigned by Bus
  * @probed: boolean tracking driver state
@@ -645,6 +674,7 @@ struct sdw_slave {
 #endif
        struct list_head node;
        struct completion port_ready[SDW_MAX_PORTS];
+       unsigned int m_port_map[SDW_MAX_PORTS];
        enum sdw_clk_stop_mode curr_clk_stop_mode;
        u16 dev_num;
        u16 dev_num_sticky;
@@ -804,6 +834,7 @@ struct sdw_defer {
 /**
  * struct sdw_master_ops - Master driver ops
  * @read_prop: Read Master properties
+ * @override_adr: Override value read from firmware (quirk for buggy firmware)
  * @xfer_msg: Transfer message callback
  * @xfer_msg_defer: Defer version of transfer message callback
  * @reset_page_addr: Reset the SCP page address registers
@@ -813,7 +844,8 @@ struct sdw_defer {
  */
 struct sdw_master_ops {
        int (*read_prop)(struct sdw_bus *bus);
-
+       u64 (*override_adr)
+                       (struct sdw_bus *bus, u64 addr);
        enum sdw_command_response (*xfer_msg)
                        (struct sdw_bus *bus, struct sdw_msg *msg);
        enum sdw_command_response (*xfer_msg_defer)
@@ -1009,5 +1041,7 @@ int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value);
 int sdw_read_no_pm(struct sdw_slave *slave, u32 addr);
 int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
 int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id);
+void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id);
 
 #endif /* __SOUNDWIRE_H */
index 50e2df30b0aa316d056953710852c2b4d8caed59..9edecb494e9e2d5414367598238f7a5396dfc679 100644 (file)
@@ -52,8 +52,27 @@ typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr);
  */
 void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
                     struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * arch_stack_walk_reliable - Architecture specific function to walk the
+ *                           stack reliably
+ *
+ * @consume_entry:     Callback which is invoked by the architecture code for
+ *                     each entry.
+ * @cookie:            Caller supplied pointer which is handed back to
+ *                     @consume_entry
+ * @task:              Pointer to a task struct, can be NULL
+ *
+ * This function returns an error if it detects any unreliable
+ * features of the stack. Otherwise it guarantees that the stack
+ * trace is reliable.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is
+ * inactive and its stack is pinned.
+ */
 int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie,
                             struct task_struct *task);
+
 void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
                          const struct pt_regs *regs);
 
index 85ecc789f4ffdd8f302849d38594e16748271e47..e01b61ab86b10ffb5a435ae1562b2159cdab8909 100644 (file)
@@ -20,6 +20,7 @@
  *   static_call(name)(args...);
  *   static_call_cond(name)(args...);
  *   static_call_update(name, func);
+ *   static_call_query(name);
  *
  * Usage example:
  *
  *
  *   which will include the required value tests to avoid NULL-pointer
  *   dereferences.
+ *
+ *   To query which function is currently set to be called, use:
+ *
+ *   func = static_call_query(name);
  */
 
 #include <linux/types.h>
@@ -118,6 +123,8 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool
                             STATIC_CALL_TRAMP_ADDR(name), func);       \
 })
 
+#define static_call_query(name) (READ_ONCE(STATIC_CALL_KEY(name).func))
+
 #ifdef CONFIG_HAVE_STATIC_CALL_INLINE
 
 extern int __init static_call_init(void);
@@ -128,16 +135,6 @@ struct static_call_mod {
        struct static_call_site *sites;
 };
 
-struct static_call_key {
-       void *func;
-       union {
-               /* bit 0: 0 = mods, 1 = sites */
-               unsigned long type;
-               struct static_call_mod *mods;
-               struct static_call_site *sites;
-       };
-};
-
 /* For finding the key associated with a trampoline */
 struct static_call_tramp_key {
        s32 tramp;
@@ -187,10 +184,6 @@ extern long __static_call_return0(void);
 
 static inline int static_call_init(void) { return 0; }
 
-struct static_call_key {
-       void *func;
-};
-
 #define __DEFINE_STATIC_CALL(name, _func, _func_init)                  \
        DECLARE_STATIC_CALL(name, _func);                               \
        struct static_call_key STATIC_CALL_KEY(name) = {                \
@@ -205,6 +198,7 @@ struct static_call_key {
        };                                                              \
        ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
 
+
 #define static_call_cond(name) (void)__static_call(name)
 
 static inline
@@ -243,10 +237,6 @@ static inline long __static_call_return0(void)
 
 static inline int static_call_init(void) { return 0; }
 
-struct static_call_key {
-       void *func;
-};
-
 static inline long __static_call_return0(void)
 {
        return 0;
index ae5662d368b98f7bceb3d065e2e832507b32f671..5a00b8b2cf9fcceb1fa247a4da5fe2f330f378ce 100644 (file)
@@ -58,11 +58,25 @@ struct static_call_site {
        __raw_static_call(name);                                        \
 })
 
+struct static_call_key {
+       void *func;
+       union {
+               /* bit 0: 0 = mods, 1 = sites */
+               unsigned long type;
+               struct static_call_mod *mods;
+               struct static_call_site *sites;
+       };
+};
+
 #else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
 
 #define __STATIC_CALL_ADDRESSABLE(name)
 #define __static_call(name)    __raw_static_call(name)
 
+struct static_call_key {
+       void *func;
+};
+
 #endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
 
 #ifdef MODULE
@@ -77,6 +91,10 @@ struct static_call_site {
 
 #else
 
+struct static_call_key {
+       void *func;
+};
+
 #define static_call(name)                                              \
        ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
 
index f4b1ba8873849f8244d3a35c64da39cf28348c14..0806796eabcbff3f3f19fe93934a9deb15371a0e 100644 (file)
@@ -344,16 +344,16 @@ struct ssam_request_spec_md {
  * request has been fully completed. The required transport buffer will be
  * allocated on the stack.
  *
- * The generated function is defined as ``int name(struct ssam_controller
- * *ctrl)``, returning the status of the request, which is zero on success and
- * negative on failure. The ``ctrl`` parameter is the controller via which the
- * request is being sent.
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl)``, returning the status of the request, which is
+ * zero on success and negative on failure. The ``ctrl`` parameter is the
+ * controller via which the request is being sent.
  *
  * Refer to ssam_request_sync_onstack() for more details on the behavior of
  * the generated function.
  */
 #define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...)                              \
-       int name(struct ssam_controller *ctrl)                                  \
+       static int name(struct ssam_controller *ctrl)                           \
        {                                                                       \
                struct ssam_request_spec s = (struct ssam_request_spec)spec;    \
                struct ssam_request rqst;                                       \
@@ -383,17 +383,17 @@ struct ssam_request_spec_md {
  * returning once the request has been fully completed. The required transport
  * buffer will be allocated on the stack.
  *
- * The generated function is defined as ``int name(struct ssam_controller
- * *ctrl, const atype *arg)``, returning the status of the request, which is
- * zero on success and negative on failure. The ``ctrl`` parameter is the
- * controller via which the request is sent. The request argument is specified
- * via the ``arg`` pointer.
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, const atype *arg)``, returning the status of the
+ * request, which is zero on success and negative on failure. The ``ctrl``
+ * parameter is the controller via which the request is sent. The request
+ * argument is specified via the ``arg`` pointer.
  *
  * Refer to ssam_request_sync_onstack() for more details on the behavior of
  * the generated function.
  */
 #define SSAM_DEFINE_SYNC_REQUEST_W(name, atype, spec...)                       \
-       int name(struct ssam_controller *ctrl, const atype *arg)                \
+       static int name(struct ssam_controller *ctrl, const atype *arg)         \
        {                                                                       \
                struct ssam_request_spec s = (struct ssam_request_spec)spec;    \
                struct ssam_request rqst;                                       \
@@ -424,17 +424,17 @@ struct ssam_request_spec_md {
  * request itself, returning once the request has been fully completed. The
  * required transport buffer will be allocated on the stack.
  *
- * The generated function is defined as ``int name(struct ssam_controller
- * *ctrl, rtype *ret)``, returning the status of the request, which is zero on
- * success and negative on failure. The ``ctrl`` parameter is the controller
- * via which the request is sent. The request's return value is written to the
- * memory pointed to by the ``ret`` parameter.
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, rtype *ret)``, returning the status of the request,
+ * which is zero on success and negative on failure. The ``ctrl`` parameter is
+ * the controller via which the request is sent. The request's return value is
+ * written to the memory pointed to by the ``ret`` parameter.
  *
  * Refer to ssam_request_sync_onstack() for more details on the behavior of
  * the generated function.
  */
 #define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...)                       \
-       int name(struct ssam_controller *ctrl, rtype *ret)                      \
+       static int name(struct ssam_controller *ctrl, rtype *ret)               \
        {                                                                       \
                struct ssam_request_spec s = (struct ssam_request_spec)spec;    \
                struct ssam_request rqst;                                       \
@@ -483,17 +483,17 @@ struct ssam_request_spec_md {
  * returning once the request has been fully completed. The required transport
  * buffer will be allocated on the stack.
  *
- * The generated function is defined as ``int name(struct ssam_controller
- * *ctrl, u8 tid, u8 iid)``, returning the status of the request, which is
- * zero on success and negative on failure. The ``ctrl`` parameter is the
- * controller via which the request is sent, ``tid`` the target ID for the
- * request, and ``iid`` the instance ID.
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, u8 tid, u8 iid)``, returning the status of the
+ * request, which is zero on success and negative on failure. The ``ctrl``
+ * parameter is the controller via which the request is sent, ``tid`` the
+ * target ID for the request, and ``iid`` the instance ID.
  *
  * Refer to ssam_request_sync_onstack() for more details on the behavior of
  * the generated function.
  */
 #define SSAM_DEFINE_SYNC_REQUEST_MD_N(name, spec...)                           \
-       int name(struct ssam_controller *ctrl, u8 tid, u8 iid)                  \
+       static int name(struct ssam_controller *ctrl, u8 tid, u8 iid)           \
        {                                                                       \
                struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
                struct ssam_request rqst;                                       \
@@ -524,18 +524,18 @@ struct ssam_request_spec_md {
  * the request itself, returning once the request has been fully completed.
  * The required transport buffer will be allocated on the stack.
  *
- * The generated function is defined as ``int name(struct ssam_controller
- * *ctrl, u8 tid, u8 iid, const atype *arg)``, returning the status of the
- * request, which is zero on success and negative on failure. The ``ctrl``
- * parameter is the controller via which the request is sent, ``tid`` the
- * target ID for the request, and ``iid`` the instance ID. The request argument
- * is specified via the ``arg`` pointer.
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg)``, returning the
+ * status of the request, which is zero on success and negative on failure.
+ * The ``ctrl`` parameter is the controller via which the request is sent,
+ * ``tid`` the target ID for the request, and ``iid`` the instance ID. The
+ * request argument is specified via the ``arg`` pointer.
  *
  * Refer to ssam_request_sync_onstack() for more details on the behavior of
  * the generated function.
  */
 #define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, atype, spec...)                    \
-       int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg)\
+       static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg) \
        {                                                                       \
                struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
                struct ssam_request rqst;                                       \
@@ -567,18 +567,18 @@ struct ssam_request_spec_md {
  * execution of the request itself, returning once the request has been fully
  * completed. The required transport buffer will be allocated on the stack.
  *
- * The generated function is defined as ``int name(struct ssam_controller
- * *ctrl, u8 tid, u8 iid, rtype *ret)``, returning the status of the request,
- * which is zero on success and negative on failure. The ``ctrl`` parameter is
- * the controller via which the request is sent, ``tid`` the target ID for the
- * request, and ``iid`` the instance ID. The request's return value is written
- * to the memory pointed to by the ``ret`` parameter.
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret)``, returning the status
+ * of the request, which is zero on success and negative on failure. The
+ * ``ctrl`` parameter is the controller via which the request is sent, ``tid``
+ * the target ID for the request, and ``iid`` the instance ID. The request's
+ * return value is written to the memory pointed to by the ``ret`` parameter.
  *
  * Refer to ssam_request_sync_onstack() for more details on the behavior of
  * the generated function.
  */
 #define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...)                    \
-       int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret)      \
+       static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret) \
        {                                                                       \
                struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
                struct ssam_request rqst;                                       \
index 02f3e06c0a604739a54635d23ec358af37109c35..4441ad667c3f9426200d7f56fee2f9d9693f6fb2 100644 (file)
@@ -336,17 +336,18 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d);
  * request has been fully completed. The required transport buffer will be
  * allocated on the stack.
  *
- * The generated function is defined as ``int name(struct ssam_device *sdev)``,
- * returning the status of the request, which is zero on success and negative
- * on failure. The ``sdev`` parameter specifies both the target device of the
- * request and by association the controller via which the request is sent.
+ * The generated function is defined as ``static int name(struct ssam_device
+ * *sdev)``, returning the status of the request, which is zero on success and
+ * negative on failure. The ``sdev`` parameter specifies both the target
+ * device of the request and by association the controller via which the
+ * request is sent.
  *
  * Refer to ssam_request_sync_onstack() for more details on the behavior of
  * the generated function.
  */
 #define SSAM_DEFINE_SYNC_REQUEST_CL_N(name, spec...)                   \
        SSAM_DEFINE_SYNC_REQUEST_MD_N(__raw_##name, spec)               \
-       int name(struct ssam_device *sdev)                              \
+       static int name(struct ssam_device *sdev)                       \
        {                                                               \
                return __raw_##name(sdev->ctrl, sdev->uid.target,       \
                                    sdev->uid.instance);                \
@@ -368,19 +369,19 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d);
  * itself, returning once the request has been fully completed. The required
  * transport buffer will be allocated on the stack.
  *
- * The generated function is defined as ``int name(struct ssam_device *sdev,
- * const atype *arg)``, returning the status of the request, which is zero on
- * success and negative on failure. The ``sdev`` parameter specifies both the
- * target device of the request and by association the controller via which
- * the request is sent. The request's argument is specified via the ``arg``
- * pointer.
+ * The generated function is defined as ``static int name(struct ssam_device
+ * *sdev, const atype *arg)``, returning the status of the request, which is
+ * zero on success and negative on failure. The ``sdev`` parameter specifies
+ * both the target device of the request and by association the controller via
+ * which the request is sent. The request's argument is specified via the
+ * ``arg`` pointer.
  *
  * Refer to ssam_request_sync_onstack() for more details on the behavior of
  * the generated function.
  */
 #define SSAM_DEFINE_SYNC_REQUEST_CL_W(name, atype, spec...)            \
        SSAM_DEFINE_SYNC_REQUEST_MD_W(__raw_##name, atype, spec)        \
-       int name(struct ssam_device *sdev, const atype *arg)            \
+       static int name(struct ssam_device *sdev, const atype *arg)     \
        {                                                               \
                return __raw_##name(sdev->ctrl, sdev->uid.target,       \
                                    sdev->uid.instance, arg);           \
@@ -402,8 +403,8 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d);
  * itself, returning once the request has been fully completed. The required
  * transport buffer will be allocated on the stack.
  *
- * The generated function is defined as ``int name(struct ssam_device *sdev,
- * rtype *ret)``, returning the status of the request, which is zero on
+ * The generated function is defined as ``static int name(struct ssam_device
+ * *sdev, rtype *ret)``, returning the status of the request, which is zero on
  * success and negative on failure. The ``sdev`` parameter specifies both the
  * target device of the request and by association the controller via which
  * the request is sent. The request's return value is written to the memory
@@ -414,7 +415,7 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d);
  */
 #define SSAM_DEFINE_SYNC_REQUEST_CL_R(name, rtype, spec...)            \
        SSAM_DEFINE_SYNC_REQUEST_MD_R(__raw_##name, rtype, spec)        \
-       int name(struct ssam_device *sdev, rtype *ret)                  \
+       static int name(struct ssam_device *sdev, rtype *ret)           \
        {                                                               \
                return __raw_##name(sdev->ctrl, sdev->uid.target,       \
                                    sdev->uid.instance, ret);           \
index 754b74a2167feb7f1b12430882e7ad02faaa198e..c6540ceea14303151317627d3216d9f86a549341 100644 (file)
@@ -124,7 +124,7 @@ extern u64 timecounter_read(struct timecounter *tc);
  * This allows conversion of cycle counter values which were generated
  * in the past.
  */
-extern u64 timecounter_cyc2time(struct timecounter *tc,
+extern u64 timecounter_cyc2time(const struct timecounter *tc,
                                u64 cycle_tstamp);
 
 #endif
index 9c2e54faf9b71c677aa3de55bdc11334737f79d1..059b18eb1f1fab1c3af72b9369273a2782ba641d 100644 (file)
 
 /*
  * kernel variables
- * Note: maximum error = NTP synch distance = dispersion + delay / 2;
+ * Note: maximum error = NTP sync distance = dispersion + delay / 2;
  * estimated error = NTP dispersion.
  */
 extern unsigned long tick_usec;                /* USER_HZ period (usec) */
index 543aa3b1dedc0766fe52b956b3149e724f51bdaf..aa11fe323c56b5d86a7d66095a826cf0e4cf1c57 100644 (file)
@@ -305,6 +305,8 @@ struct tpm_buf {
 };
 
 enum tpm2_object_attributes {
+       TPM2_OA_FIXED_TPM               = BIT(1),
+       TPM2_OA_FIXED_PARENT            = BIT(4),
        TPM2_OA_USER_WITH_AUTH          = BIT(6),
 };
 
index 64cf8ebdc4ec9434af97ea8a68ff801021378fed..f6c5f784be5abac74f32d322adc655bb6e3886e0 100644 (file)
@@ -63,6 +63,9 @@ struct user_namespace {
        kgid_t                  group;
        struct ns_common        ns;
        unsigned long           flags;
+       /* parent_could_setfcap: true if the creator of this ns had CAP_SETFCAP
+        * in its effective capability set at the child ns creation time. */
+       bool                    parent_could_setfcap;
 
 #ifdef CONFIG_KEYS
        /* List of joinable keyrings in this namespace.  Modification access of
index 6b5fcfa1e5553576b0e853ae31a2df655c04204b..b465f8f3e554f27ced45c35f54f113cf6dce1f07 100644 (file)
@@ -62,15 +62,21 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                        return -EINVAL;
        }
 
+       skb_reset_mac_header(skb);
+
        if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
-               u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
-               u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+               u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
+               u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+               u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
+
+               if (!pskb_may_pull(skb, needed))
+                       return -EINVAL;
 
                if (!skb_partial_csum_set(skb, start, off))
                        return -EINVAL;
 
                p_off = skb_transport_offset(skb) + thlen;
-               if (p_off > skb_headlen(skb))
+               if (!pskb_may_pull(skb, p_off))
                        return -EINVAL;
        } else {
                /* gso packets without NEEDS_CSUM do not set transport_offset.
@@ -100,14 +106,14 @@ retry:
                        }
 
                        p_off = keys.control.thoff + thlen;
-                       if (p_off > skb_headlen(skb) ||
+                       if (!pskb_may_pull(skb, p_off) ||
                            keys.basic.ip_proto != ip_proto)
                                return -EINVAL;
 
                        skb_set_transport_header(skb, keys.control.thoff);
                } else if (gso_type) {
                        p_off = thlen;
-                       if (p_off > skb_headlen(skb))
+                       if (!pskb_may_pull(skb, p_off))
                                return -EINVAL;
                }
        }
index 8ef7e7faea1e2d438dc607051ba1cdefc1fc310a..2cb3913c1f5061bdaa2d2bcca5d9950c23a00a40 100644 (file)
@@ -37,7 +37,7 @@ struct wmi_driver {
        const struct wmi_device_id *id_table;
 
        int (*probe)(struct wmi_device *wdev, const void *context);
-       int (*remove)(struct wmi_device *wdev);
+       void (*remove)(struct wmi_device *wdev);
        void (*notify)(struct wmi_device *device, union acpi_object *data);
        long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd,
                                struct wmi_ioctl_buffer *arg);
index 2bf3092ae7eccecedc310f2deb3851d1823570cb..086b291e9530b225ba0fa151e856ff0638a07502 100644 (file)
@@ -170,12 +170,7 @@ void tcf_idr_insert_many(struct tc_action *actions[]);
 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
                        struct tc_action **a, int bind);
-int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
-
-static inline int tcf_idr_release(struct tc_action *a, bool bind)
-{
-       return __tcf_idr_release(a, bind, false);
-}
+int tcf_idr_release(struct tc_action *a, bool bind);
 
 int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
 int tcf_unregister_action(struct tc_action_ops *a,
@@ -185,7 +180,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
                    int nr_actions, struct tcf_result *res);
 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
                    struct nlattr *est, char *name, int ovr, int bind,
-                   struct tc_action *actions[], size_t *attr_size,
+                   struct tc_action *actions[], int init_res[], size_t *attr_size,
                    bool rtnl_held, struct netlink_ext_ack *extack);
 struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
                                         bool rtnl_held,
@@ -193,7 +188,8 @@ struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind,
-                                   struct tc_action_ops *ops, bool rtnl_held,
+                                   struct tc_action_ops *a_o, int *init_res,
+                                   bool rtnl_held,
                                    struct netlink_ext_ack *extack);
 int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
                    int ref, bool terse);
index 59f45b1e9dac06386fdff3eb78016f2da15d4f67..e816b6a3ef2b0ef28ce9ffc0a6bbb8e6b419f567 100644 (file)
@@ -72,7 +72,9 @@ struct netns_xfrm {
 #if IS_ENABLED(CONFIG_IPV6)
        struct dst_ops          xfrm6_dst_ops;
 #endif
-       spinlock_t xfrm_state_lock;
+       spinlock_t              xfrm_state_lock;
+       seqcount_spinlock_t     xfrm_state_hash_generation;
+
        spinlock_t xfrm_policy_lock;
        struct mutex xfrm_cfg_mutex;
 };
index 0b39eff1d50aebc43784ebec3fe615fec0bf995c..be11dbd26492094e8b477339c66b3a098e06c39f 100644 (file)
@@ -171,9 +171,9 @@ static inline void red_set_vars(struct red_vars *v)
 static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
                                    u8 Scell_log, u8 *stab)
 {
-       if (fls(qth_min) + Wlog > 32)
+       if (fls(qth_min) + Wlog >= 32)
                return false;
-       if (fls(qth_max) + Wlog > 32)
+       if (fls(qth_max) + Wlog >= 32)
                return false;
        if (Scell_log >= 32)
                return false;
index 4da61c950e931a424d60c6af5b7400e80d6bb904..479f60ef54c0475c8233c6520e85b32619b7d34d 100644 (file)
@@ -147,8 +147,8 @@ struct rtnl_af_ops {
        int                     (*validate_link_af)(const struct net_device *dev,
                                                    const struct nlattr *attr);
        int                     (*set_link_af)(struct net_device *dev,
-                                              const struct nlattr *attr);
-
+                                              const struct nlattr *attr,
+                                              struct netlink_ext_ack *extack);
        int                     (*fill_stats_af)(struct sk_buff *skb,
                                                 const struct net_device *dev);
        size_t                  (*get_stats_af_size)(const struct net_device *dev);
index 0b6266fd6bf6f4496b09dd170869ff4db38dfeb9..8487f58da36d21335f690edd2194986c3d4fed23 100644 (file)
@@ -934,9 +934,13 @@ static inline void sk_acceptq_added(struct sock *sk)
        WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
 }
 
+/* Note: If you think the test should be:
+ *     return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
+ * Then please take a look at commit 64a146513f8f ("[NET]: Revert incorrect accept queue backlog changes.")
+ */
 static inline bool sk_acceptq_is_full(const struct sock *sk)
 {
-       return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
+       return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
 }
 
 /*
@@ -2221,6 +2225,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
        sk_mem_charge(sk, skb->truesize);
 }
 
+static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+{
+       if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
+               skb_orphan(skb);
+               skb->destructor = sock_efree;
+               skb->sk = sk;
+       }
+}
+
 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires);
 
index b2a06f10b62ce11fb2618b3b4c6001d8a2e757b6..c58a6d4eb61033d222dd22c2242e321f286b3d93 100644 (file)
@@ -1097,7 +1097,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
                return __xfrm_policy_check(sk, ndir, skb, family);
 
        return  (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
-               (skb_dst(skb)->flags & DST_NOPOLICY) ||
+               (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
                __xfrm_policy_check(sk, ndir, skb, family);
 }
 
@@ -1557,7 +1557,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
 int xfrm_trans_queue(struct sk_buff *skb,
                     int (*finish)(struct net *, struct sock *,
                                   struct sk_buff *));
-int xfrm_output_resume(struct sk_buff *skb, int err);
+int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
 int xfrm_output(struct sock *sk, struct sk_buff *skb);
 
 #if IS_ENABLED(CONFIG_NET_PKTGEN)
index 9570a10cb949b5792c4290ba8e82a077ac655069..3d7b432ca5f311cfb16d30bb9ebb82875ebe3f20 100644 (file)
@@ -85,28 +85,6 @@ TRACE_EVENT(credit_entropy_bits,
                  __entry->entropy_count, (void *)__entry->IP)
 );
 
-TRACE_EVENT(push_to_pool,
-       TP_PROTO(const char *pool_name, int pool_bits, int input_bits),
-
-       TP_ARGS(pool_name, pool_bits, input_bits),
-
-       TP_STRUCT__entry(
-               __field( const char *,  pool_name               )
-               __field(          int,  pool_bits               )
-               __field(          int,  input_bits              )
-       ),
-
-       TP_fast_assign(
-               __entry->pool_name      = pool_name;
-               __entry->pool_bits      = pool_bits;
-               __entry->input_bits     = input_bits;
-       ),
-
-       TP_printk("%s: pool_bits %d input_pool_bits %d",
-                 __entry->pool_name, __entry->pool_bits,
-                 __entry->input_bits)
-);
-
 TRACE_EVENT(debit_entropy,
        TP_PROTO(const char *pool_name, int debit_bits),
 
@@ -161,35 +139,6 @@ TRACE_EVENT(add_disk_randomness,
                  MINOR(__entry->dev), __entry->input_bits)
 );
 
-TRACE_EVENT(xfer_secondary_pool,
-       TP_PROTO(const char *pool_name, int xfer_bits, int request_bits,
-                int pool_entropy, int input_entropy),
-
-       TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy,
-               input_entropy),
-
-       TP_STRUCT__entry(
-               __field( const char *,  pool_name               )
-               __field(          int,  xfer_bits               )
-               __field(          int,  request_bits            )
-               __field(          int,  pool_entropy            )
-               __field(          int,  input_entropy           )
-       ),
-
-       TP_fast_assign(
-               __entry->pool_name      = pool_name;
-               __entry->xfer_bits      = xfer_bits;
-               __entry->request_bits   = request_bits;
-               __entry->pool_entropy   = pool_entropy;
-               __entry->input_entropy  = input_entropy;
-       ),
-
-       TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d "
-                 "input_entropy %d", __entry->pool_name, __entry->xfer_bits,
-                 __entry->request_bits, __entry->pool_entropy,
-                 __entry->input_entropy)
-);
-
 DECLARE_EVENT_CLASS(random__get_random_bytes,
        TP_PROTO(int nbytes, unsigned long IP),
 
@@ -253,38 +202,6 @@ DEFINE_EVENT(random__extract_entropy, extract_entropy,
        TP_ARGS(pool_name, nbytes, entropy_count, IP)
 );
 
-DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
-       TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
-                unsigned long IP),
-
-       TP_ARGS(pool_name, nbytes, entropy_count, IP)
-);
-
-TRACE_EVENT(random_read,
-       TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left),
-
-       TP_ARGS(got_bits, need_bits, pool_left, input_left),
-
-       TP_STRUCT__entry(
-               __field(          int,  got_bits                )
-               __field(          int,  need_bits               )
-               __field(          int,  pool_left               )
-               __field(          int,  input_left              )
-       ),
-
-       TP_fast_assign(
-               __entry->got_bits       = got_bits;
-               __entry->need_bits      = need_bits;
-               __entry->pool_left      = pool_left;
-               __entry->input_left     = input_left;
-       ),
-
-       TP_printk("got_bits %d still_needed_bits %d "
-                 "blocking_pool_entropy_left %d input_entropy_left %d",
-                 __entry->got_bits, __entry->got_bits, __entry->pool_left,
-                 __entry->input_left)
-);
-
 TRACE_EVENT(urandom_read,
        TP_PROTO(int got_bits, int pool_left, int input_left),
 
index ec84ad10656834d690089c0556110ad09005c2b1..20e435fe657a1ffe82a685d24fb4fadff59324e6 100644 (file)
@@ -217,6 +217,18 @@ struct binder_node_info_for_ref {
        __u32            reserved3;
 };
 
+struct binder_freeze_info {
+       __u32            pid;
+       __u32            enable;
+       __u32            timeout_ms;
+};
+
+struct binder_frozen_status_info {
+       __u32            pid;
+       __u32            sync_recv;
+       __u32            async_recv;
+};
+
 #define BINDER_WRITE_READ              _IOWR('b', 1, struct binder_write_read)
 #define BINDER_SET_IDLE_TIMEOUT                _IOW('b', 3, __s64)
 #define BINDER_SET_MAX_THREADS         _IOW('b', 5, __u32)
@@ -227,6 +239,9 @@ struct binder_node_info_for_ref {
 #define BINDER_GET_NODE_DEBUG_INFO     _IOWR('b', 11, struct binder_node_debug_info)
 #define BINDER_GET_NODE_INFO_FOR_REF   _IOWR('b', 12, struct binder_node_info_for_ref)
 #define BINDER_SET_CONTEXT_MGR_EXT     _IOW('b', 13, struct flat_binder_object)
+#define BINDER_FREEZE                  _IOW('b', 14, struct binder_freeze_info)
+#define BINDER_GET_FROZEN_INFO         _IOWR('b', 15, struct binder_frozen_status_info)
+#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION    _IOW('b', 16, __u32)
 
 /*
  * NOTE: Two special error codes you should check for when calling
@@ -408,6 +423,19 @@ enum binder_driver_return_protocol {
         * The last transaction (either a bcTRANSACTION or
         * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
         */
+
+       BR_FROZEN_REPLY = _IO('r', 18),
+       /*
+        * The target of the last transaction (either a bcTRANSACTION or
+        * a bcATTEMPT_ACQUIRE) is frozen.  No parameters.
+        */
+
+       BR_ONEWAY_SPAM_SUSPECT = _IO('r', 19),
+       /*
+        * Current process sent too many oneway calls to target, and the last
+        * asynchronous transaction makes the allocated async buffer size exceed
+        * the detection threshold.  No parameters.
+        */
 };
 
 enum binder_driver_command_protocol {
index f75238ac6dced4cbc59fb6a4d1fb1fe47113c14c..c7535352fef646937a923bddf1d9656e9edcb982 100644 (file)
@@ -113,7 +113,7 @@ struct can_frame {
                 */
                __u8 len;
                __u8 can_dlc; /* deprecated */
-       };
+       } __attribute__((packed)); /* disable padding added in some ABIs */
        __u8 __pad; /* padding */
        __u8 __res0; /* reserved / padding */
        __u8 len8_dlc; /* optional DLC for 8 byte payload length (9 .. 15) */
index c6ca330341471d3f54da948d1484c2919eec1f76..2ddb4226cd231105b9b8d1ed1a530b65a01f7405 100644 (file)
@@ -335,7 +335,8 @@ struct vfs_ns_cap_data {
 
 #define CAP_AUDIT_CONTROL    30
 
-/* Set or remove capabilities on files */
+/* Set or remove capabilities on files.
+   Map uid=0 into a child user namespace. */
 
 #define CAP_SETFCAP         31
 
index 30f68b42eeb53f58b7140b519f07bc623cf43ba4..61bf4774b8f2a13c6517c8df8f1db7717cfa79f5 100644 (file)
@@ -426,6 +426,7 @@ typedef struct elf64_shdr {
 #define NT_ARM_PACA_KEYS       0x407   /* ARM pointer authentication address keys */
 #define NT_ARM_PACG_KEYS       0x408   /* ARM pointer authentication generic key */
 #define NT_ARM_TAGGED_ADDR_CTRL        0x409   /* arm64 tagged address control (prctl()) */
+#define NT_ARM_PAC_ENABLED_KEYS        0x40a   /* arm64 ptr auth enabled keys (prctl()) */
 #define NT_ARC_V2      0x600           /* ARCv2 accumulator/extra registers */
 #define NT_VMCOREDD    0x700           /* Vmcore Device Dump Note */
 #define NT_MIPS_DSP    0x800           /* MIPS DSP ASE registers */
index cde753bb2093562d4dd837821d98362e2c9659e0..5afea692a3f7cf7a124fc8658e960ad3349aa055 100644 (file)
  * have the same layout for 32-bit and 64-bit userland.
  */
 
+/* Note on reserved space.
+ * Reserved fields must not be accessed directly by user space because
+ * they may be replaced by a different field in the future. They must
+ * be initialized to zero before making the request, e.g. via memset
+ * of the entire structure or implicitly by not being set in a structure
+ * initializer.
+ */
+
 /**
  * struct ethtool_cmd - DEPRECATED, link control and status
  * This structure is DEPRECATED, please use struct ethtool_link_settings.
@@ -67,6 +75,7 @@
  *     and other link features that the link partner advertised
  *     through autonegotiation; 0 if unknown or not applicable.
  *     Read-only.
+ * @reserved: Reserved for future use; see the note on reserved space.
  *
  * The link speed in Mbps is split between @speed and @speed_hi.  Use
  * the ethtool_cmd_speed() and ethtool_cmd_speed_set() functions to
@@ -155,6 +164,7 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
  * @bus_info: Device bus address.  This should match the dev_name()
  *     string for the underlying bus device, if there is one.  May be
  *     an empty string.
+ * @reserved2: Reserved for future use; see the note on reserved space.
  * @n_priv_flags: Number of flags valid for %ETHTOOL_GPFLAGS and
  *     %ETHTOOL_SPFLAGS commands; also the number of strings in the
  *     %ETH_SS_PRIV_FLAGS set
@@ -356,6 +366,7 @@ struct ethtool_eeprom {
  * @tx_lpi_timer: Time in microseconds the interface delays prior to asserting
  *     its tx lpi (after reaching 'idle' state). Effective only when eee
  *     was negotiated and tx_lpi_enabled was set.
+ * @reserved: Reserved for future use; see the note on reserved space.
  */
 struct ethtool_eee {
        __u32   cmd;
@@ -374,6 +385,7 @@ struct ethtool_eee {
  * @cmd: %ETHTOOL_GMODULEINFO
  * @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx
  * @eeprom_len: Length of the eeprom
+ * @reserved: Reserved for future use; see the note on reserved space.
  *
  * This structure is used to return the information to
  * properly size memory for a subsequent call to %ETHTOOL_GMODULEEEPROM.
@@ -579,9 +591,7 @@ struct ethtool_pauseparam {
        __u32   tx_pause;
 };
 
-/**
- * enum ethtool_link_ext_state - link extended state
- */
+/* Link extended state */
 enum ethtool_link_ext_state {
        ETHTOOL_LINK_EXT_STATE_AUTONEG,
        ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
@@ -595,10 +605,7 @@ enum ethtool_link_ext_state {
        ETHTOOL_LINK_EXT_STATE_OVERHEAT,
 };
 
-/**
- * enum ethtool_link_ext_substate_autoneg - more information in addition to
- * ETHTOOL_LINK_EXT_STATE_AUTONEG.
- */
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */
 enum ethtool_link_ext_substate_autoneg {
        ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1,
        ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED,
@@ -608,9 +615,7 @@ enum ethtool_link_ext_substate_autoneg {
        ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD,
 };
 
-/**
- * enum ethtool_link_ext_substate_link_training - more information in addition to
- * ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
  */
 enum ethtool_link_ext_substate_link_training {
        ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1,
@@ -619,9 +624,7 @@ enum ethtool_link_ext_substate_link_training {
        ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT,
 };
 
-/**
- * enum ethtool_link_ext_substate_logical_mismatch - more information in addition
- * to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
  */
 enum ethtool_link_ext_substate_link_logical_mismatch {
        ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1,
@@ -631,19 +634,14 @@ enum ethtool_link_ext_substate_link_logical_mismatch {
        ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED,
 };
 
-/**
- * enum ethtool_link_ext_substate_bad_signal_integrity - more information in
- * addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
  */
 enum ethtool_link_ext_substate_bad_signal_integrity {
        ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1,
        ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE,
 };
 
-/**
- * enum ethtool_link_ext_substate_cable_issue - more information in
- * addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE.
- */
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE. */
 enum ethtool_link_ext_substate_cable_issue {
        ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1,
        ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE,
@@ -661,6 +659,7 @@ enum ethtool_link_ext_substate_cable_issue {
  *     now deprecated
  * @ETH_SS_FEATURES: Device feature names
  * @ETH_SS_RSS_HASH_FUNCS: RSS hush function names
+ * @ETH_SS_TUNABLES: tunable names
  * @ETH_SS_PHY_STATS: Statistic names, for use with %ETHTOOL_GPHYSTATS
  * @ETH_SS_PHY_TUNABLES: PHY tunable names
  * @ETH_SS_LINK_MODES: link mode names
@@ -670,6 +669,8 @@ enum ethtool_link_ext_substate_cable_issue {
  * @ETH_SS_TS_TX_TYPES: timestamping Tx types
  * @ETH_SS_TS_RX_FILTERS: timestamping Rx filters
  * @ETH_SS_UDP_TUNNEL_TYPES: UDP tunnel types
+ *
+ * @ETH_SS_COUNT: number of defined string sets
  */
 enum ethtool_stringset {
        ETH_SS_TEST             = 0,
@@ -715,6 +716,7 @@ struct ethtool_gstrings {
 /**
  * struct ethtool_sset_info - string set information
  * @cmd: Command number = %ETHTOOL_GSSET_INFO
+ * @reserved: Reserved for future use; see the note on reserved space.
  * @sset_mask: On entry, a bitmask of string sets to query, with bits
  *     numbered according to &enum ethtool_stringset.  On return, a
  *     bitmask of those string sets queried that are supported.
@@ -759,6 +761,7 @@ enum ethtool_test_flags {
  * @flags: A bitmask of flags from &enum ethtool_test_flags.  Some
  *     flags may be set by the user on entry; others may be set by
  *     the driver on return.
+ * @reserved: Reserved for future use; see the note on reserved space.
  * @len: On return, the number of test results
  * @data: Array of test results
  *
@@ -959,6 +962,7 @@ union ethtool_flow_union {
  * @vlan_etype: VLAN EtherType
  * @vlan_tci: VLAN tag control information
  * @data: user defined data
+ * @padding: Reserved for future use; see the note on reserved space.
  *
  * Note, @vlan_etype, @vlan_tci, and @data are only valid if %FLOW_EXT
  * is set in &struct ethtool_rx_flow_spec @flow_type.
@@ -1134,7 +1138,8 @@ struct ethtool_rxfh_indir {
  *     hardware hash key.
  * @hfunc: Defines the current RSS hash function used by HW (or to be set to).
  *     Valid values are one of the %ETH_RSS_HASH_*.
- * @rsvd:      Reserved for future extensions.
+ * @rsvd8: Reserved for future use; see the note on reserved space.
+ * @rsvd32: Reserved for future use; see the note on reserved space.
  * @rss_config: RX ring/queue index for each hash value i.e., indirection table
  *     of @indir_size __u32 elements, followed by hash key of @key_size
  *     bytes.
@@ -1302,7 +1307,9 @@ struct ethtool_sfeatures {
  * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
  * @phc_index: device index of the associated PHC, or -1 if there is none
  * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @tx_reserved: Reserved for future use; see the note on reserved space.
  * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ * @rx_reserved: Reserved for future use; see the note on reserved space.
  *
  * The bits in the 'tx_types' and 'rx_filters' fields correspond to
  * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
@@ -1958,6 +1965,11 @@ enum ethtool_reset_flags {
  *     autonegotiation; 0 if unknown or not applicable.  Read-only.
  * @transceiver: Used to distinguish different possible PHY types,
  *     reported consistently by PHYLIB.  Read-only.
+ * @master_slave_cfg: Master/slave port mode.
+ * @master_slave_state: Master/slave port state.
+ * @reserved: Reserved for future use; see the note on reserved space.
+ * @reserved1: Reserved for future use; see the note on reserved space.
+ * @link_mode_masks: Variable length bitmaps.
  *
  * If autonegotiation is disabled, the speed and @duplex represent the
  * fixed link mode and are writable if the driver supports multiple
index 236d437947bc9d6d70bd0386ba90571755d31908..e33997b4d750e9ca474306cadbf637b2975f9594 100644 (file)
@@ -247,8 +247,8 @@ struct dsa_completion_record {
                        uint32_t        rsvd2:8;
                };
 
-               uint16_t        delta_rec_size;
-               uint16_t        crc_val;
+               uint32_t        delta_rec_size;
+               uint32_t        crc_val;
 
                /* DIF check & strip */
                struct {
index 8b02088f96e3733e3ba08b5f856a6705339ed696..04c8b55812e74733c14106a231d13146f41b4a00 100644 (file)
@@ -1,20 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
 /*
  * Copyright (c) 2005 Henk Vergonet <Henk.Vergonet@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #ifndef MAP_TO_7SEGMENT_H
index 667f1aed091c23c45494264a9c6940ed5ff623ef..18a9f59dc067f333d3a1c86343439917606cb36b 100644 (file)
@@ -255,4 +255,8 @@ struct prctl_mm_map {
 # define SYSCALL_DISPATCH_FILTER_ALLOW 0
 # define SYSCALL_DISPATCH_FILTER_BLOCK 1
 
+/* Set/get enabled arm64 pointer authentication keys */
+#define PR_PAC_SET_ENABLED_KEYS                60
+#define PR_PAC_GET_ENABLED_KEYS                61
+
 #endif /* _LINUX_PRCTL_H */
index 03e8af87b364c08b7db6c3bb8dec8fdd7ebbe15a..9b77cfc42efa3bca9d66511a572c727524cfadba 100644 (file)
@@ -86,34 +86,90 @@ enum rfkill_hard_block_reasons {
  * @op: operation code
  * @hard: hard state (0/1)
  * @soft: soft state (0/1)
+ *
+ * Structure used for userspace communication on /dev/rfkill,
+ * used for events from the kernel and control to the kernel.
+ */
+struct rfkill_event {
+       __u32 idx;
+       __u8  type;
+       __u8  op;
+       __u8  soft;
+       __u8  hard;
+} __attribute__((packed));
+
+/**
+ * struct rfkill_event_ext - events for userspace on /dev/rfkill
+ * @idx: index of dev rfkill
+ * @type: type of the rfkill struct
+ * @op: operation code
+ * @hard: hard state (0/1)
+ * @soft: soft state (0/1)
  * @hard_block_reasons: valid if hard is set. One or several reasons from
  *     &enum rfkill_hard_block_reasons.
  *
  * Structure used for userspace communication on /dev/rfkill,
  * used for events from the kernel and control to the kernel.
+ *
+ * See the extensibility docs below.
  */
-struct rfkill_event {
+struct rfkill_event_ext {
        __u32 idx;
        __u8  type;
        __u8  op;
        __u8  soft;
        __u8  hard;
+
+       /*
+        * older kernels will accept/send only up to this point,
+        * and if extended further up to any chunk marked below
+        */
+
        __u8  hard_block_reasons;
 } __attribute__((packed));
 
-/*
- * We are planning to be backward and forward compatible with changes
- * to the event struct, by adding new, optional, members at the end.
- * When reading an event (whether the kernel from userspace or vice
- * versa) we need to accept anything that's at least as large as the
- * version 1 event size, but might be able to accept other sizes in
- * the future.
+/**
+ * DOC: Extensibility
+ *
+ * Originally, we had planned to allow backward and forward compatible
+ * changes by just adding fields at the end of the structure that are
+ * then not reported on older kernels on read(), and not written to by
+ * older kernels on write(), with the kernel reporting the size it did
+ * accept as the result.
+ *
+ * This would have allowed userspace to detect on read() and write()
+ * which kernel structure version it was dealing with, and if was just
+ * recompiled it would have gotten the new fields, but obviously not
+ * accessed them, but things should've continued to work.
+ *
+ * Unfortunately, while actually exercising this mechanism to add the
+ * hard block reasons field, we found that userspace (notably systemd)
+ * did all kinds of fun things not in line with this scheme:
+ *
+ * 1. treat the (expected) short writes as an error;
+ * 2. ask to read sizeof(struct rfkill_event) but then compare the
+ *    actual return value to RFKILL_EVENT_SIZE_V1 and treat any
+ *    mismatch as an error.
+ *
+ * As a consequence, just recompiling with a new struct version caused
+ * things to no longer work correctly on old and new kernels.
+ *
+ * Hence, we've rolled back &struct rfkill_event to the original version
+ * and added &struct rfkill_event_ext. This effectively reverts to the
+ * old behaviour for all userspace, unless it explicitly opts in to the
+ * rules outlined here by using the new &struct rfkill_event_ext.
+ *
+ * Userspace using &struct rfkill_event_ext must adhere to the following
+ * rules
  *
- * One exception is the kernel -- we already have two event sizes in
- * that we've made the 'hard' member optional since our only option
- * is to ignore it anyway.
+ * 1. accept short writes, optionally using them to detect that it's
+ *    running on an older kernel;
+ * 2. accept short reads, knowing that this means it's running on an
+ *    older kernel;
+ * 3. treat reads that are as long as requested as acceptable, not
+ *    checking against RFKILL_EVENT_SIZE_V1 or such.
  */
-#define RFKILL_EVENT_SIZE_V1   8
+#define RFKILL_EVENT_SIZE_V1   sizeof(struct rfkill_event)
 
 /* ioctl for turning off rfkill-input (if present) */
 #define RFKILL_IOC_MAGIC       'R'
diff --git a/include/uapi/linux/surface_aggregator/dtx.h b/include/uapi/linux/surface_aggregator/dtx.h
new file mode 100644 (file)
index 0000000..0833aab
--- /dev/null
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Surface DTX (clipboard detachment system driver) user-space interface.
+ *
+ * Definitions, structs, and IOCTLs for the /dev/surface/dtx misc device. This
+ * device allows user-space to control the clipboard detachment process on
+ * Surface Book series devices.
+ *
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H
+#define _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* Status/error categories */
+#define SDTX_CATEGORY_STATUS           0x0000
+#define SDTX_CATEGORY_RUNTIME_ERROR    0x1000
+#define SDTX_CATEGORY_HARDWARE_ERROR   0x2000
+#define SDTX_CATEGORY_UNKNOWN          0xf000
+
+#define SDTX_CATEGORY_MASK             0xf000
+#define SDTX_CATEGORY(value)           ((value) & SDTX_CATEGORY_MASK)
+
+#define SDTX_STATUS(code)              ((code) | SDTX_CATEGORY_STATUS)
+#define SDTX_ERR_RT(code)              ((code) | SDTX_CATEGORY_RUNTIME_ERROR)
+#define SDTX_ERR_HW(code)              ((code) | SDTX_CATEGORY_HARDWARE_ERROR)
+#define SDTX_UNKNOWN(code)             ((code) | SDTX_CATEGORY_UNKNOWN)
+
+#define SDTX_SUCCESS(value)            (SDTX_CATEGORY(value) == SDTX_CATEGORY_STATUS)
+
+/* Latch status values */
+#define SDTX_LATCH_CLOSED              SDTX_STATUS(0x00)
+#define SDTX_LATCH_OPENED              SDTX_STATUS(0x01)
+
+/* Base state values */
+#define SDTX_BASE_DETACHED             SDTX_STATUS(0x00)
+#define SDTX_BASE_ATTACHED             SDTX_STATUS(0x01)
+
+/* Runtime errors (non-critical) */
+#define SDTX_DETACH_NOT_FEASIBLE       SDTX_ERR_RT(0x01)
+#define SDTX_DETACH_TIMEDOUT           SDTX_ERR_RT(0x02)
+
+/* Hardware errors (critical) */
+#define SDTX_ERR_FAILED_TO_OPEN                SDTX_ERR_HW(0x01)
+#define SDTX_ERR_FAILED_TO_REMAIN_OPEN SDTX_ERR_HW(0x02)
+#define SDTX_ERR_FAILED_TO_CLOSE       SDTX_ERR_HW(0x03)
+
+/* Base types */
+#define SDTX_DEVICE_TYPE_HID           0x0100
+#define SDTX_DEVICE_TYPE_SSH           0x0200
+
+#define SDTX_DEVICE_TYPE_MASK          0x0f00
+#define SDTX_DEVICE_TYPE(value)                ((value) & SDTX_DEVICE_TYPE_MASK)
+
+#define SDTX_BASE_TYPE_HID(id)         ((id) | SDTX_DEVICE_TYPE_HID)
+#define SDTX_BASE_TYPE_SSH(id)         ((id) | SDTX_DEVICE_TYPE_SSH)
+
+/**
+ * enum sdtx_device_mode - Mode describing how (and if) the clipboard is
+ * attached to the base of the device.
+ * @SDTX_DEVICE_MODE_TABLET: The clipboard is detached from the base and the
+ *                           device operates as tablet.
+ * @SDTX_DEVICE_MODE_LAPTOP: The clipboard is attached normally to the base
+ *                           and the device operates as laptop.
+ * @SDTX_DEVICE_MODE_STUDIO: The clipboard is attached to the base in reverse.
+ *                           The device operates as tablet with keyboard and
+ *                           touchpad deactivated, however, the base battery
+ *                           and, if present in the specific device model, dGPU
+ *                           are available to the system.
+ */
+enum sdtx_device_mode {
+       SDTX_DEVICE_MODE_TABLET         = 0x00,
+       SDTX_DEVICE_MODE_LAPTOP         = 0x01,
+       SDTX_DEVICE_MODE_STUDIO         = 0x02,
+};
+
+/**
+ * struct sdtx_event - Event provided by reading from the DTX device file.
+ * @length: Length of the event payload, in bytes.
+ * @code:   Event code, detailing what type of event this is.
+ * @data:   Payload of the event, containing @length bytes.
+ *
+ * See &enum sdtx_event_code for currently valid event codes.
+ */
+struct sdtx_event {
+       __u16 length;
+       __u16 code;
+       __u8 data[];
+} __attribute__((__packed__));
+
+/**
+ * enum sdtx_event_code - Code describing the type of an event.
+ * @SDTX_EVENT_REQUEST:         Detachment request event type.
+ * @SDTX_EVENT_CANCEL:          Cancel detachment process event type.
+ * @SDTX_EVENT_BASE_CONNECTION: Base/clipboard connection change event type.
+ * @SDTX_EVENT_LATCH_STATUS:    Latch status change event type.
+ * @SDTX_EVENT_DEVICE_MODE:     Device mode change event type.
+ *
+ * Used in &struct sdtx_event to describe the type of the event. Further event
+ * codes are reserved for future use. Any event parser should be able to
+ * gracefully handle unknown events, i.e. by simply skipping them.
+ *
+ * Consult the DTX user-space interface documentation for details regarding
+ * the individual event types.
+ */
+enum sdtx_event_code {
+       SDTX_EVENT_REQUEST              = 1,
+       SDTX_EVENT_CANCEL               = 2,
+       SDTX_EVENT_BASE_CONNECTION      = 3,
+       SDTX_EVENT_LATCH_STATUS         = 4,
+       SDTX_EVENT_DEVICE_MODE          = 5,
+};
+
+/**
+ * struct sdtx_base_info - Describes if and what type of base is connected.
+ * @state:   The state of the connection. Valid values are %SDTX_BASE_DETACHED,
+ *           %SDTX_BASE_ATTACHED, and %SDTX_DETACH_NOT_FEASIBLE (in case a base
+ *           is attached but low clipboard battery prevents detachment). Other
+ *           values are currently reserved.
+ * @base_id: The type of base connected. Zero if no base is connected.
+ */
+struct sdtx_base_info {
+       __u16 state;
+       __u16 base_id;
+} __attribute__((__packed__));
+
+/* IOCTLs */
+#define SDTX_IOCTL_EVENTS_ENABLE       _IO(0xa5, 0x21)
+#define SDTX_IOCTL_EVENTS_DISABLE      _IO(0xa5, 0x22)
+
+#define SDTX_IOCTL_LATCH_LOCK          _IO(0xa5, 0x23)
+#define SDTX_IOCTL_LATCH_UNLOCK                _IO(0xa5, 0x24)
+
+#define SDTX_IOCTL_LATCH_REQUEST       _IO(0xa5, 0x25)
+#define SDTX_IOCTL_LATCH_CONFIRM       _IO(0xa5, 0x26)
+#define SDTX_IOCTL_LATCH_HEARTBEAT     _IO(0xa5, 0x27)
+#define SDTX_IOCTL_LATCH_CANCEL                _IO(0xa5, 0x28)
+
+#define SDTX_IOCTL_GET_BASE_INFO       _IOR(0xa5, 0x29, struct sdtx_base_info)
+#define SDTX_IOCTL_GET_DEVICE_MODE     _IOR(0xa5, 0x2a, __u16)
+#define SDTX_IOCTL_GET_LATCH_STATUS    _IOR(0xa5, 0x2b, __u16)
+
+#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H */
index 5a86b521a4506e6c4dffbbd448b59bee9df4cd95..d3e017b5f0dba52cd767160b34fef7fde7bbf2e6 100644 (file)
@@ -297,6 +297,7 @@ enum hl_device_status {
 #define HL_INFO_SYNC_MANAGER           14
 #define HL_INFO_TOTAL_ENERGY           15
 #define HL_INFO_PLL_FREQUENCY          16
+#define HL_INFO_POWER                  17
 
 #define HL_INFO_VERSION_MAX_LEN        128
 #define HL_INFO_CARD_NAME_MAX_LEN      16
@@ -410,6 +411,14 @@ struct hl_pll_frequency_info {
        __u16 output[HL_PLL_NUM_OUTPUTS];
 };
 
+/**
+ * struct hl_power_info - power information
+ * @power: power consumption
+ */
+struct hl_power_info {
+       __u64 power;
+};
+
 /**
  * struct hl_info_sync_manager - sync manager information
  * @first_available_sync_object: first available sob
@@ -621,6 +630,7 @@ struct hl_cs_chunk {
 #define HL_CS_FLAGS_STAGED_SUBMISSION          0x40
 #define HL_CS_FLAGS_STAGED_SUBMISSION_FIRST    0x80
 #define HL_CS_FLAGS_STAGED_SUBMISSION_LAST     0x100
+#define HL_CS_FLAGS_CUSTOM_TIMEOUT             0x200
 
 #define HL_CS_STATUS_SUCCESS           0
 
@@ -634,17 +644,10 @@ struct hl_cs_in {
        /* holds address of array of hl_cs_chunk for execution phase */
        __u64 chunks_execute;
 
-       union {
-               /* this holds address of array of hl_cs_chunk for store phase -
-                * Currently not in use
-                */
-               __u64 chunks_store;
-
-               /* Sequence number of a staged submission CS
-                * valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set
-                */
-               __u64 seq;
-       };
+       /* Sequence number of a staged submission CS
+        * valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set
+        */
+       __u64 seq;
 
        /* Number of chunks in restore phase array. Maximum number is
         * HL_MAX_JOBS_PER_CS
@@ -656,8 +659,10 @@ struct hl_cs_in {
         */
        __u32 num_chunks_execute;
 
-       /* Number of chunks in restore phase array - Currently not in use */
-       __u32 num_chunks_store;
+       /* timeout in seconds - valid only if HL_CS_FLAGS_CUSTOM_TIMEOUT
+        * is set
+        */
+       __u32 timeout;
 
        /* HL_CS_FLAGS_* */
        __u32 cs_flags;
@@ -682,14 +687,46 @@ union hl_cs_args {
        struct hl_cs_out out;
 };
 
+#define HL_WAIT_CS_FLAGS_INTERRUPT     0x2
+#define HL_WAIT_CS_FLAGS_INTERRUPT_MASK 0xFFF00000
+
 struct hl_wait_cs_in {
-       /* Command submission sequence number */
-       __u64 seq;
-       /* Absolute timeout to wait in microseconds */
-       __u64 timeout_us;
+       union {
+               struct {
+                       /* Command submission sequence number */
+                       __u64 seq;
+                       /* Absolute timeout to wait for command submission
+                        * in microseconds
+                        */
+                       __u64 timeout_us;
+               };
+
+               struct {
+                       /* User address for completion comparison.
+                        * upon interrupt, driver will compare the value pointed
+                        * by this address with the supplied target value.
+                        * in order not to perform any comparison, set address
+                        * to all 1s.
+                        * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT is set
+                        */
+                       __u64 addr;
+                       /* Target value for completion comparison */
+                       __u32 target;
+                       /* Absolute timeout to wait for interrupt
+                        * in microseconds
+                        */
+                       __u32 interrupt_timeout_us;
+               };
+       };
+
        /* Context ID - Currently not in use */
        __u32 ctx_id;
-       __u32 pad;
+       /* HL_WAIT_CS_FLAGS_*
+        * If HL_WAIT_CS_FLAGS_INTERRUPT is set, this field should include
+        * interrupt id according to HL_WAIT_CS_FLAGS_INTERRUPT_MASK, in order
+        * not to specify an interrupt id ,set mask to all 1s.
+        */
+       __u32 flags;
 };
 
 #define HL_WAIT_CS_STATUS_COMPLETED    0
@@ -999,8 +1036,8 @@ struct hl_debug_args {
  * Each JOB will be enqueued on a specific queue, according to the user's input.
  * There can be more then one JOB per queue.
  *
- * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
- * a second set is for "execution" phase and a third set is for "store" phase.
+ * The CS IOCTL will receive two sets of JOBS. One set is for "restore" phase
+ * and a second set is for "execution" phase.
  * The JOBS on the "restore" phase are enqueued only after context-switch
  * (or if its the first CS for this context). The user can also order the
  * driver to run the "restore" phase explicitly
index 6435f0bcb556cda8bc801619bfdd15dceed33976..1faef5ff87ef908102468f8f062929752bdd0c07 100644 (file)
@@ -16,6 +16,7 @@ struct hisi_qp_ctx {
 
 #define HISI_QM_API_VER_BASE "hisi_qm_v1"
 #define HISI_QM_API_VER2_BASE "hisi_qm_v2"
+#define HISI_QM_API_VER3_BASE "hisi_qm_v3"
 
 /* UACCE_CMD_QM_SET_QP_CTX: Set qp algorithm type */
 #define UACCE_CMD_QM_SET_QP_CTX        _IOWR('H', 10, struct hisi_qp_ctx)
index 4ddd7dc4a61e2c9b2470bf58349559dc4196f938..b1e11863144d1580cc9399f719d7e41228fc5805 100644 (file)
 #include <xen/xen.h>
 #include <linux/acpi.h>
 
-#define ACPI_MEMORY_DEVICE_CLASS        "memory"
-#define ACPI_MEMORY_DEVICE_HID          "PNP0C80"
-#define ACPI_MEMORY_DEVICE_NAME         "Hotplug Mem Device"
-
-int xen_stub_memory_device_init(void);
-void xen_stub_memory_device_exit(void);
-
-#define ACPI_PROCESSOR_CLASS            "processor"
-#define ACPI_PROCESSOR_DEVICE_HID       "ACPI0007"
-#define ACPI_PROCESSOR_DEVICE_NAME      "Processor"
-
-int xen_stub_processor_init(void);
-void xen_stub_processor_exit(void);
-
-void xen_pcpu_hotplug_sync(void);
-int xen_pcpu_id(uint32_t acpi_id);
-
-static inline int xen_acpi_get_pxm(acpi_handle h)
-{
-       unsigned long long pxm;
-       acpi_status status;
-       acpi_handle handle;
-       acpi_handle phandle = h;
-
-       do {
-               handle = phandle;
-               status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
-               if (ACPI_SUCCESS(status))
-                       return pxm;
-               status = acpi_get_parent(handle, &phandle);
-       } while (ACPI_SUCCESS(status));
-
-       return -ENXIO;
-}
-
 int xen_acpi_notify_hypervisor_sleep(u8 sleep_state,
                                     u32 pm1a_cnt, u32 pm1b_cnd);
 int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state,
diff --git a/include/xen/arm/swiotlb-xen.h b/include/xen/arm/swiotlb-xen.h
new file mode 100644 (file)
index 0000000..2994fe6
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_SWIOTLB_XEN_H
+#define _ASM_ARM_SWIOTLB_XEN_H
+
+extern int xen_swiotlb_detect(void);
+
+#endif /* _ASM_ARM_SWIOTLB_XEN_H */
index 6d1384abfbdf96edf21665022b9ef1eb9a5fe7d1..5a7bdefa06a8654919826adcf1637afa7fc8f18d 100644 (file)
  */
 #define XENFEAT_linux_rsdp_unrestricted   15
 
+/*
+ * A direct-mapped (or 1:1 mapped) domain is a domain for which its
+ * local pages have gfn == mfn. If a domain is direct-mapped,
+ * XENFEAT_direct_mapped is set; otherwise XENFEAT_not_direct_mapped
+ * is set.
+ *
+ * If neither flag is set (e.g. older Xen releases) the assumptions are:
+ * - not auto_translated domains (x86 only) are always direct-mapped
+ * - on x86, auto_translated domains are not direct-mapped
+ * - on ARM, Dom0 is direct-mapped, DomUs are not
+ */
+#define XENFEAT_not_direct_mapped         16
+#define XENFEAT_direct_mapped             17
+
 #define XENFEAT_NR_SUBMAPS 1
 
 #endif /* __XEN_PUBLIC_FEATURES_H__ */
index d5eaf9d682b804be2d563e2a284ae292436e9980..dbc4a4b785f68d999f977b3f5ddaa6a058297a14 100644 (file)
@@ -3,6 +3,7 @@
 #define __LINUX_SWIOTLB_XEN_H
 
 #include <linux/swiotlb.h>
+#include <asm/xen/swiotlb-xen.h>
 
 void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
                          size_t size, enum dma_data_direction dir);
index 53b278845b886e1aacd643f6bd7e8902080f525a..f498aac26e8cbedf1178a39cee3cc043c8409729 100644 (file)
@@ -844,6 +844,29 @@ static void __init mm_init(void)
        pti_init();
 }
 
+#ifdef CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+DEFINE_STATIC_KEY_MAYBE_RO(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
+                          randomize_kstack_offset);
+DEFINE_PER_CPU(u32, kstack_offset);
+
+static int __init early_randomize_kstack_offset(char *buf)
+{
+       int ret;
+       bool bool_result;
+
+       ret = kstrtobool(buf, &bool_result);
+       if (ret)
+               return ret;
+
+       if (bool_result)
+               static_branch_enable(&randomize_kstack_offset);
+       else
+               static_branch_disable(&randomize_kstack_offset);
+       return 0;
+}
+early_param("randomize_kstack_offset", early_randomize_kstack_offset);
+#endif
+
 void __init __weak arch_call_rest_init(void)
 {
        rest_init();
index 3acc7e0b69169fd0566762363e15f95f946c2741..faa54d58972c3e9ca83bf8b19552317be7202449 100644 (file)
@@ -84,7 +84,7 @@ static const char *const bpf_atomic_alu_string[16] = {
        [BPF_ADD >> 4]  = "add",
        [BPF_AND >> 4]  = "and",
        [BPF_OR >> 4]  = "or",
-       [BPF_XOR >> 4]  = "or",
+       [BPF_XOR >> 4]  = "xor",
 };
 
 static const char *const bpf_ldst_string[] = {
index 1576ff331ee4491817bb09b3628e02225b73320b..d2de2abec35b6ea5d41caceebc640612f09c8a47 100644 (file)
@@ -543,11 +543,11 @@ int bpf_obj_get_user(const char __user *pathname, int flags)
                return PTR_ERR(raw);
 
        if (type == BPF_TYPE_PROG)
-               ret = bpf_prog_new_fd(raw);
+               ret = (f_flags != O_RDWR) ? -EINVAL : bpf_prog_new_fd(raw);
        else if (type == BPF_TYPE_MAP)
                ret = bpf_map_new_fd(raw, f_flags);
        else if (type == BPF_TYPE_LINK)
-               ret = bpf_link_new_fd(raw);
+               ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
        else
                return -ENOENT;
 
index be35bfb7fb13f31b921b39bc48fa78303bc7f66d..6fbc2abe9c91611df5240286ce4514cb606473fe 100644 (file)
@@ -517,9 +517,17 @@ const struct bpf_func_proto bpf_get_stack_proto = {
 BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
           u32, size, u64, flags)
 {
-       struct pt_regs *regs = task_pt_regs(task);
+       struct pt_regs *regs;
+       long res;
 
-       return __bpf_get_stack(regs, task, NULL, buf, size, flags);
+       if (!try_get_task_stack(task))
+               return -EFAULT;
+
+       regs = task_pt_regs(task);
+       res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
+       put_task_stack(task);
+
+       return res;
 }
 
 BTF_ID_LIST_SINGLE(bpf_get_task_stack_btf_ids, struct, task_struct)
index 1f3a4be4b175f3d8e05489bb022df91b4bef83f4..4aa8b52adf25f18e53f0954479aee9e100326417 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/btf.h>
 #include <linux/rcupdate_trace.h>
 #include <linux/rcupdate_wait.h>
+#include <linux/module.h>
 
 /* dummy _ops. The verifier will operate on target program's ops. */
 const struct bpf_verifier_ops bpf_extension_verifier_ops = {
@@ -87,6 +88,26 @@ out:
        return tr;
 }
 
+static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
+{
+       struct module *mod;
+       int err = 0;
+
+       preempt_disable();
+       mod = __module_text_address((unsigned long) tr->func.addr);
+       if (mod && !try_module_get(mod))
+               err = -ENOENT;
+       preempt_enable();
+       tr->mod = mod;
+       return err;
+}
+
+static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
+{
+       module_put(tr->mod);
+       tr->mod = NULL;
+}
+
 static int is_ftrace_location(void *ip)
 {
        long addr;
@@ -108,6 +129,9 @@ static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
                ret = unregister_ftrace_direct((long)ip, (long)old_addr);
        else
                ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
+
+       if (!ret)
+               bpf_trampoline_module_put(tr);
        return ret;
 }
 
@@ -134,10 +158,16 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
                return ret;
        tr->func.ftrace_managed = ret;
 
+       if (bpf_trampoline_module_get(tr))
+               return -ENOENT;
+
        if (tr->func.ftrace_managed)
                ret = register_ftrace_direct((long)ip, (long)new_addr);
        else
                ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
+
+       if (ret)
+               bpf_trampoline_module_put(tr);
        return ret;
 }
 
index 44e4ec1640f1da8d6d30a654baa4d79bfb5cbac4..0399ac092b3639d712e516d78a1b8859852b9cc9 100644 (file)
@@ -5856,40 +5856,51 @@ static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
        return &env->insn_aux_data[env->insn_idx];
 }
 
+enum {
+       REASON_BOUNDS   = -1,
+       REASON_TYPE     = -2,
+       REASON_PATHS    = -3,
+       REASON_LIMIT    = -4,
+       REASON_STACK    = -5,
+};
+
 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
-                             u32 *ptr_limit, u8 opcode, bool off_is_neg)
+                             const struct bpf_reg_state *off_reg,
+                             u32 *alu_limit, u8 opcode)
 {
+       bool off_is_neg = off_reg->smin_value < 0;
        bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
                            (opcode == BPF_SUB && !off_is_neg);
-       u32 off, max;
+       u32 max = 0, ptr_limit = 0;
+
+       if (!tnum_is_const(off_reg->var_off) &&
+           (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+               return REASON_BOUNDS;
 
        switch (ptr_reg->type) {
        case PTR_TO_STACK:
                /* Offset 0 is out-of-bounds, but acceptable start for the
-                * left direction, see BPF_REG_FP.
+                * left direction, see BPF_REG_FP. Also, unknown scalar
+                * offset where we would need to deal with min/max bounds is
+                * currently prohibited for unprivileged.
                 */
                max = MAX_BPF_STACK + mask_to_left;
-               /* Indirect variable offset stack access is prohibited in
-                * unprivileged mode so it's not handled here.
-                */
-               off = ptr_reg->off + ptr_reg->var_off.value;
-               if (mask_to_left)
-                       *ptr_limit = MAX_BPF_STACK + off;
-               else
-                       *ptr_limit = -off - 1;
-               return *ptr_limit >= max ? -ERANGE : 0;
+               ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
+               break;
        case PTR_TO_MAP_VALUE:
                max = ptr_reg->map_ptr->value_size;
-               if (mask_to_left) {
-                       *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
-               } else {
-                       off = ptr_reg->smin_value + ptr_reg->off;
-                       *ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
-               }
-               return *ptr_limit >= max ? -ERANGE : 0;
+               ptr_limit = (mask_to_left ?
+                            ptr_reg->smin_value :
+                            ptr_reg->umax_value) + ptr_reg->off;
+               break;
        default:
-               return -EINVAL;
+               return REASON_TYPE;
        }
+
+       if (ptr_limit >= max)
+               return REASON_LIMIT;
+       *alu_limit = ptr_limit;
+       return 0;
 }
 
 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
@@ -5907,7 +5918,7 @@ static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
        if (aux->alu_state &&
            (aux->alu_state != alu_state ||
             aux->alu_limit != alu_limit))
-               return -EACCES;
+               return REASON_PATHS;
 
        /* Corresponding fixup done in fixup_bpf_calls(). */
        aux->alu_state = alu_state;
@@ -5926,14 +5937,22 @@ static int sanitize_val_alu(struct bpf_verifier_env *env,
        return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
 }
 
+static bool sanitize_needed(u8 opcode)
+{
+       return opcode == BPF_ADD || opcode == BPF_SUB;
+}
+
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
                            struct bpf_insn *insn,
                            const struct bpf_reg_state *ptr_reg,
+                           const struct bpf_reg_state *off_reg,
                            struct bpf_reg_state *dst_reg,
-                           bool off_is_neg)
+                           struct bpf_insn_aux_data *tmp_aux,
+                           const bool commit_window)
 {
+       struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
        struct bpf_verifier_state *vstate = env->cur_state;
-       struct bpf_insn_aux_data *aux = cur_aux(env);
+       bool off_is_neg = off_reg->smin_value < 0;
        bool ptr_is_dst_reg = ptr_reg == dst_reg;
        u8 opcode = BPF_OP(insn->code);
        u32 alu_state, alu_limit;
@@ -5951,18 +5970,33 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
        if (vstate->speculative)
                goto do_sim;
 
-       alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
-       alu_state |= ptr_is_dst_reg ?
-                    BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
-
-       err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg);
+       err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
        if (err < 0)
                return err;
 
+       if (commit_window) {
+               /* In commit phase we narrow the masking window based on
+                * the observed pointer move after the simulated operation.
+                */
+               alu_state = tmp_aux->alu_state;
+               alu_limit = abs(tmp_aux->alu_limit - alu_limit);
+       } else {
+               alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+               alu_state |= ptr_is_dst_reg ?
+                            BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+       }
+
        err = update_alu_sanitation_state(aux, alu_state, alu_limit);
        if (err < 0)
                return err;
 do_sim:
+       /* If we're in commit phase, we're done here given we already
+        * pushed the truncated dst_reg into the speculative verification
+        * stack.
+        */
+       if (commit_window)
+               return 0;
+
        /* Simulate and find potential out-of-bounds access under
         * speculative execution from truncation as a result of
         * masking when off was not within expected range. If off
@@ -5979,7 +6013,46 @@ do_sim:
        ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
        if (!ptr_is_dst_reg && ret)
                *dst_reg = tmp;
-       return !ret ? -EFAULT : 0;
+       return !ret ? REASON_STACK : 0;
+}
+
+static int sanitize_err(struct bpf_verifier_env *env,
+                       const struct bpf_insn *insn, int reason,
+                       const struct bpf_reg_state *off_reg,
+                       const struct bpf_reg_state *dst_reg)
+{
+       static const char *err = "pointer arithmetic with it prohibited for !root";
+       const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
+       u32 dst = insn->dst_reg, src = insn->src_reg;
+
+       switch (reason) {
+       case REASON_BOUNDS:
+               verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
+                       off_reg == dst_reg ? dst : src, err);
+               break;
+       case REASON_TYPE:
+               verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
+                       off_reg == dst_reg ? src : dst, err);
+               break;
+       case REASON_PATHS:
+               verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
+                       dst, op, err);
+               break;
+       case REASON_LIMIT:
+               verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
+                       dst, op, err);
+               break;
+       case REASON_STACK:
+               verbose(env, "R%d could not be pushed for speculative verification, %s\n",
+                       dst, err);
+               break;
+       default:
+               verbose(env, "verifier internal error: unknown reason (%d)\n",
+                       reason);
+               break;
+       }
+
+       return -EACCES;
 }
 
 /* check that stack access falls within stack limits and that 'reg' doesn't
@@ -6016,6 +6089,37 @@ static int check_stack_access_for_ptr_arithmetic(
        return 0;
 }
 
+static int sanitize_check_bounds(struct bpf_verifier_env *env,
+                                const struct bpf_insn *insn,
+                                const struct bpf_reg_state *dst_reg)
+{
+       u32 dst = insn->dst_reg;
+
+       /* For unprivileged we require that resulting offset must be in bounds
+        * in order to be able to sanitize access later on.
+        */
+       if (env->bypass_spec_v1)
+               return 0;
+
+       switch (dst_reg->type) {
+       case PTR_TO_STACK:
+               if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
+                                       dst_reg->off + dst_reg->var_off.value))
+                       return -EACCES;
+               break;
+       case PTR_TO_MAP_VALUE:
+               if (check_map_access(env, dst, dst_reg->off, 1, false)) {
+                       verbose(env, "R%d pointer arithmetic of map value goes out of range, "
+                               "prohibited for !root\n", dst);
+                       return -EACCES;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
 
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
@@ -6035,8 +6139,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
            smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
        u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
            umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
-       u32 dst = insn->dst_reg, src = insn->src_reg;
+       struct bpf_insn_aux_data tmp_aux = {};
        u8 opcode = BPF_OP(insn->code);
+       u32 dst = insn->dst_reg;
        int ret;
 
        dst_reg = &regs[dst];
@@ -6084,13 +6189,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                verbose(env, "R%d pointer arithmetic on %s prohibited\n",
                        dst, reg_type_str[ptr_reg->type]);
                return -EACCES;
-       case PTR_TO_MAP_VALUE:
-               if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
-                       verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
-                               off_reg == dst_reg ? dst : src);
-                       return -EACCES;
-               }
-               fallthrough;
        default:
                break;
        }
@@ -6108,13 +6206,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        /* pointer types do not carry 32-bit bounds at the moment. */
        __mark_reg32_unbounded(dst_reg);
 
+       if (sanitize_needed(opcode)) {
+               ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
+                                      &tmp_aux, false);
+               if (ret < 0)
+                       return sanitize_err(env, insn, ret, off_reg, dst_reg);
+       }
+
        switch (opcode) {
        case BPF_ADD:
-               ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
-               if (ret < 0) {
-                       verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst);
-                       return ret;
-               }
                /* We can take a fixed offset as long as it doesn't overflow
                 * the s32 'off' field
                 */
@@ -6165,11 +6265,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                }
                break;
        case BPF_SUB:
-               ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
-               if (ret < 0) {
-                       verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst);
-                       return ret;
-               }
                if (dst_reg == off_reg) {
                        /* scalar -= pointer.  Creates an unknown scalar */
                        verbose(env, "R%d tried to subtract pointer from scalar\n",
@@ -6250,21 +6345,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        __reg_deduce_bounds(dst_reg);
        __reg_bound_offset(dst_reg);
 
-       /* For unprivileged we require that resulting offset must be in bounds
-        * in order to be able to sanitize access later on.
-        */
-       if (!env->bypass_spec_v1) {
-               if (dst_reg->type == PTR_TO_MAP_VALUE &&
-                   check_map_access(env, dst, dst_reg->off, 1, false)) {
-                       verbose(env, "R%d pointer arithmetic of map value goes out of range, "
-                               "prohibited for !root\n", dst);
-                       return -EACCES;
-               } else if (dst_reg->type == PTR_TO_STACK &&
-                          check_stack_access_for_ptr_arithmetic(
-                                  env, dst, dst_reg, dst_reg->off +
-                                  dst_reg->var_off.value)) {
-                       return -EACCES;
-               }
+       if (sanitize_check_bounds(env, insn, dst_reg) < 0)
+               return -EACCES;
+       if (sanitize_needed(opcode)) {
+               ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
+                                      &tmp_aux, true);
+               if (ret < 0)
+                       return sanitize_err(env, insn, ret, off_reg, dst_reg);
        }
 
        return 0;
@@ -6858,9 +6945,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        s32 s32_min_val, s32_max_val;
        u32 u32_min_val, u32_max_val;
        u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
-       u32 dst = insn->dst_reg;
-       int ret;
        bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
+       int ret;
 
        smin_val = src_reg.smin_value;
        smax_val = src_reg.smax_value;
@@ -6902,6 +6988,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                return 0;
        }
 
+       if (sanitize_needed(opcode)) {
+               ret = sanitize_val_alu(env, insn);
+               if (ret < 0)
+                       return sanitize_err(env, insn, ret, NULL, NULL);
+       }
+
        /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
         * There are two classes of instructions: The first class we track both
         * alu32 and alu64 sign/unsigned bounds independently this provides the
@@ -6918,21 +7010,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
         */
        switch (opcode) {
        case BPF_ADD:
-               ret = sanitize_val_alu(env, insn);
-               if (ret < 0) {
-                       verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
-                       return ret;
-               }
                scalar32_min_max_add(dst_reg, &src_reg);
                scalar_min_max_add(dst_reg, &src_reg);
                dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
                break;
        case BPF_SUB:
-               ret = sanitize_val_alu(env, insn);
-               if (ret < 0) {
-                       verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
-                       return ret;
-               }
                scalar32_min_max_sub(dst_reg, &src_reg);
                scalar_min_max_sub(dst_reg, &src_reg);
                dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
@@ -12158,6 +12240,11 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
        u32 btf_id, member_idx;
        const char *mname;
 
+       if (!prog->gpl_compatible) {
+               verbose(env, "struct ops programs must have a GPL compatible license\n");
+               return -EINVAL;
+       }
+
        btf_id = prog->aux->attach_btf_id;
        st_ops = bpf_struct_ops_find(btf_id);
        if (!st_ops) {
index 8442e5c9cfa269a6b0fab833a26c09d3970ca889..a0b3b04fb5965995ea991f8aa6e375c62e8d9c2d 100644 (file)
@@ -341,7 +341,7 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
         * Checking for rcu_is_watching() here would prevent the nesting
         * interrupt to invoke rcu_irq_enter(). If that nested interrupt is
         * the tick then rcu_flavor_sched_clock_irq() would wrongfully
-        * assume that it is the first interupt and eventually claim
+        * assume that it is the first interrupt and eventually claim
         * quiescent state and end grace periods prematurely.
         *
         * Unconditionally invoke rcu_irq_enter() so RCU state stays
@@ -422,7 +422,7 @@ noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
 
                instrumentation_begin();
                if (IS_ENABLED(CONFIG_PREEMPTION)) {
-#ifdef CONFIG_PREEMT_DYNAMIC
+#ifdef CONFIG_PREEMPT_DYNAMIC
                        static_call(irqentry_exit_cond_resched)();
 #else
                        irqentry_exit_cond_resched();
index 8743150db2acc527f6b253131388281530c68b3e..c466c7fbdece557de01f448b4584bd563fa2ee09 100644 (file)
@@ -70,7 +70,9 @@ struct gcov_fn_info {
 
        u32 ident;
        u32 checksum;
+#if CONFIG_CLANG_VERSION < 110000
        u8 use_extra_checksum;
+#endif
        u32 cfg_checksum;
 
        u32 num_counters;
@@ -145,10 +147,8 @@ void llvm_gcda_emit_function(u32 ident, const char *function_name,
 
        list_add_tail(&info->head, &current_info->functions);
 }
-EXPORT_SYMBOL(llvm_gcda_emit_function);
 #else
-void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
-               u8 use_extra_checksum, u32 cfg_checksum)
+void llvm_gcda_emit_function(u32 ident, u32 func_checksum, u32 cfg_checksum)
 {
        struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
 
@@ -158,12 +158,11 @@ void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
        INIT_LIST_HEAD(&info->head);
        info->ident = ident;
        info->checksum = func_checksum;
-       info->use_extra_checksum = use_extra_checksum;
        info->cfg_checksum = cfg_checksum;
        list_add_tail(&info->head, &current_info->functions);
 }
-EXPORT_SYMBOL(llvm_gcda_emit_function);
 #endif
+EXPORT_SYMBOL(llvm_gcda_emit_function);
 
 void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)
 {
@@ -293,11 +292,16 @@ int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2)
                !list_is_last(&fn_ptr2->head, &info2->functions)) {
                if (fn_ptr1->checksum != fn_ptr2->checksum)
                        return false;
+#if CONFIG_CLANG_VERSION < 110000
                if (fn_ptr1->use_extra_checksum != fn_ptr2->use_extra_checksum)
                        return false;
                if (fn_ptr1->use_extra_checksum &&
                        fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum)
                        return false;
+#else
+               if (fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum)
+                       return false;
+#endif
                fn_ptr1 = list_next_entry(fn_ptr1, head);
                fn_ptr2 = list_next_entry(fn_ptr2, head);
        }
@@ -529,17 +533,22 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info)
 
        list_for_each_entry(fi_ptr, &info->functions, head) {
                u32 i;
-               u32 len = 2;
-
-               if (fi_ptr->use_extra_checksum)
-                       len++;
 
                pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION);
-               pos += store_gcov_u32(buffer, pos, len);
+#if CONFIG_CLANG_VERSION < 110000
+               pos += store_gcov_u32(buffer, pos,
+                       fi_ptr->use_extra_checksum ? 3 : 2);
+#else
+               pos += store_gcov_u32(buffer, pos, 3);
+#endif
                pos += store_gcov_u32(buffer, pos, fi_ptr->ident);
                pos += store_gcov_u32(buffer, pos, fi_ptr->checksum);
+#if CONFIG_CLANG_VERSION < 110000
                if (fi_ptr->use_extra_checksum)
                        pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
+#else
+               pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
+#endif
 
                pos += store_gcov_u32(buffer, pos, GCOV_TAG_COUNTER_BASE);
                pos += store_gcov_u32(buffer, pos, fi_ptr->num_counters * 2);
index 6d89e33fe3aa5463a042ab0989d907635d967aaa..8cc8e571328704f8dcab22c63da7cf7a5b6f4f45 100644 (file)
@@ -761,7 +761,7 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
  *     handle_edge_irq - edge type IRQ handler
  *     @desc:  the interrupt description structure for this irq
  *
- *     Interrupt occures on the falling and/or rising edge of a hardware
+ *     Interrupt occurs on the falling and/or rising edge of a hardware
  *     signal. The occurrence is latched into the irq controller hardware
  *     and must be acked in order to be reenabled. After the ack another
  *     interrupt can happen on the same source even before the first one
@@ -808,7 +808,7 @@ void handle_edge_irq(struct irq_desc *desc)
                /*
                 * When another irq arrived while we were handling
                 * one, we could have masked the irq.
-                * Renable it, if it was not disabled in meantime.
+                * Reenable it, if it was not disabled in meantime.
                 */
                if (unlikely(desc->istate & IRQS_PENDING)) {
                        if (!irqd_irq_disabled(&desc->irq_data) &&
@@ -1419,7 +1419,7 @@ EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
  * @dest:      The affinity mask to set
  * @force:     Flag to enforce setting (disable online checks)
  *
- * Conditinal, as the underlying parent chip might not implement it.
+ * Conditional, as the underlying parent chip might not implement it.
  */
 int irq_chip_set_affinity_parent(struct irq_data *data,
                                 const struct cpumask *dest, bool force)
@@ -1531,7 +1531,7 @@ EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
 #endif
 
 /**
- * irq_chip_compose_msi_msg - Componse msi message for a irq chip
+ * irq_chip_compose_msi_msg - Compose msi message for a irq chip
  * @data:      Pointer to interrupt specific data
  * @msg:       Pointer to the MSI message
  *
index 0b0cdf206dc44d7151171934344c6d17af26dea1..7fe6cffe7d0df8b3c336a17528ff041812ee9d26 100644 (file)
@@ -13,7 +13,7 @@
 
 /*
  * What should we do if we get a hw irq event on an illegal vector?
- * Each architecture has to answer this themself.
+ * Each architecture has to answer this themselves.
  */
 static void ack_bad(struct irq_data *data)
 {
index 43e3d1be622c58afc1265038c66acd5235dc1294..52f11c791bf8a52c909e47b05334c8c0512c18d4 100644 (file)
@@ -107,7 +107,7 @@ free_descs:
  * @irq:       linux irq number to be destroyed
  * @dest:      cpumask of cpus which should have the IPI removed
  *
- * The IPIs allocated with irq_reserve_ipi() are retuerned to the system
+ * The IPIs allocated with irq_reserve_ipi() are returned to the system
  * destroying all virqs associated with them.
  *
  * Return 0 on success or error code on failure.
index 40880c350b95d484c9c30bf3d3b72f1a56667721..0cd02efa3a742ed77fbcafb49869bf45e82520fa 100644 (file)
@@ -24,10 +24,6 @@ struct irq_sim_irq_ctx {
        struct irq_sim_work_ctx *work_ctx;
 };
 
-struct irq_sim_devres {
-       struct irq_domain       *domain;
-};
-
 static void irq_sim_irqmask(struct irq_data *data)
 {
        struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
@@ -216,11 +212,11 @@ void irq_domain_remove_sim(struct irq_domain *domain)
 }
 EXPORT_SYMBOL_GPL(irq_domain_remove_sim);
 
-static void devm_irq_domain_release_sim(struct device *dev, void *res)
+static void devm_irq_domain_remove_sim(void *data)
 {
-       struct irq_sim_devres *this = res;
+       struct irq_domain *domain = data;
 
-       irq_domain_remove_sim(this->domain);
+       irq_domain_remove_sim(domain);
 }
 
 /**
@@ -238,20 +234,17 @@ struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
                                              struct fwnode_handle *fwnode,
                                              unsigned int num_irqs)
 {
-       struct irq_sim_devres *dr;
+       struct irq_domain *domain;
+       int ret;
 
-       dr = devres_alloc(devm_irq_domain_release_sim,
-                         sizeof(*dr), GFP_KERNEL);
-       if (!dr)
-               return ERR_PTR(-ENOMEM);
+       domain = irq_domain_create_sim(fwnode, num_irqs);
+       if (IS_ERR(domain))
+               return domain;
 
-       dr->domain = irq_domain_create_sim(fwnode, num_irqs);
-       if (IS_ERR(dr->domain)) {
-               devres_free(dr);
-               return dr->domain;
-       }
+       ret = devm_add_action_or_reset(dev, devm_irq_domain_remove_sim, domain);
+       if (ret)
+               return ERR_PTR(ret);
 
-       devres_add(dev, dr);
-       return dr->domain;
+       return domain;
 }
 EXPORT_SYMBOL_GPL(devm_irq_domain_create_sim);
index cc1a09406c6e46eeca6d78c149bdc8d3dc1645e4..4a617d7312a4785454f001a0f2cc381cc58936d6 100644 (file)
@@ -31,7 +31,7 @@ static int __init irq_affinity_setup(char *str)
        cpulist_parse(str, irq_default_affinity);
        /*
         * Set at least the boot cpu. We don't want to end up with
-        * bugreports caused by random comandline masks
+        * bugreports caused by random commandline masks
         */
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
        return 1;
index d10ab1d689d58c39c72b8e615b0e64807f082d7c..f42ef868efd3f229672009486f6443a389f31c18 100644 (file)
@@ -62,7 +62,7 @@ EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
  * @name:      Optional user provided domain name
  * @pa:                Optional user-provided physical address
  *
- * Allocate a struct irqchip_fwid, and return a poiner to the embedded
+ * Allocate a struct irqchip_fwid, and return a pointer to the embedded
  * fwnode_handle (or NULL on failure).
  *
  * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
@@ -665,7 +665,7 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
 
        pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
 
-       /* Look for default domain if nececssary */
+       /* Look for default domain if necessary */
        if (domain == NULL)
                domain = irq_default_domain;
        if (domain == NULL) {
@@ -703,41 +703,6 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
 }
 EXPORT_SYMBOL_GPL(irq_create_mapping_affinity);
 
-/**
- * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
- * @domain: domain owning the interrupt range
- * @irq_base: beginning of linux IRQ range
- * @hwirq_base: beginning of hardware IRQ range
- * @count: Number of interrupts to map
- *
- * This routine is used for allocating and mapping a range of hardware
- * irqs to linux irqs where the linux irq numbers are at pre-defined
- * locations. For use by controllers that already have static mappings
- * to insert in to the domain.
- *
- * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
- * domain insertion.
- *
- * 0 is returned upon success, while any failure to establish a static
- * mapping is treated as an error.
- */
-int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
-                              irq_hw_number_t hwirq_base, int count)
-{
-       struct device_node *of_node;
-       int ret;
-
-       of_node = irq_domain_get_of_node(domain);
-       ret = irq_alloc_descs(irq_base, irq_base, count,
-                             of_node_to_nid(of_node));
-       if (unlikely(ret < 0))
-               return ret;
-
-       irq_domain_associate_many(domain, irq_base, hwirq_base, count);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
-
 static int irq_domain_translate(struct irq_domain *d,
                                struct irq_fwspec *fwspec,
                                irq_hw_number_t *hwirq, unsigned int *type)
@@ -906,7 +871,7 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
 {
        struct irq_data *data;
 
-       /* Look for default domain if nececssary */
+       /* Look for default domain if necessary */
        if (domain == NULL)
                domain = irq_default_domain;
        if (domain == NULL)
@@ -1436,7 +1401,7 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
  * The whole process to setup an IRQ has been split into two steps.
  * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
  * descriptor and required hardware resources. The second step,
- * irq_domain_activate_irq(), is to program hardwares with preallocated
+ * irq_domain_activate_irq(), is to program the hardware with preallocated
  * resources. In this way, it's easier to rollback when failing to
  * allocate resources.
  */
@@ -1694,12 +1659,10 @@ void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
 
 /**
  * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
+ * @domain:    Domain below which interrupts must be allocated
  * @irq_base:  Base IRQ number
  * @nr_irqs:   Number of IRQs to allocate
  * @arg:       Allocation data (arch/domain specific)
- *
- * Check whether the domain has been setup recursive. If not allocate
- * through the parent domain.
  */
 int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
                                 unsigned int irq_base, unsigned int nr_irqs,
@@ -1715,11 +1678,9 @@ EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
 
 /**
  * irq_domain_free_irqs_parent - Free interrupts from parent domain
+ * @domain:    Domain below which interrupts must be freed
  * @irq_base:  Base IRQ number
  * @nr_irqs:   Number of IRQs to free
- *
- * Check whether the domain has been setup recursive. If not free
- * through the parent domain.
  */
 void irq_domain_free_irqs_parent(struct irq_domain *domain,
                                 unsigned int irq_base, unsigned int nr_irqs)
index 49288e941365809ae56958d3a657bc2668f188a4..4c14356543d93bc394c12fbee9aee4418dafeed8 100644 (file)
@@ -179,7 +179,7 @@ bool irq_can_set_affinity_usr(unsigned int irq)
 
 /**
  *     irq_set_thread_affinity - Notify irq threads to adjust affinity
- *     @desc:          irq descriptor which has affitnity changed
+ *     @desc:          irq descriptor which has affinity changed
  *
  *     We just set IRQTF_AFFINITY and delegate the affinity setting
  *     to the interrupt thread itself. We can not call
@@ -326,7 +326,7 @@ static bool irq_set_affinity_deactivated(struct irq_data *data,
         * If the interrupt is not yet activated, just store the affinity
         * mask and do not call the chip driver at all. On activation the
         * driver has to make sure anyway that the interrupt is in a
-        * useable state so startup works.
+        * usable state so startup works.
         */
        if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
            irqd_is_activated(data) || !irqd_affinity_on_activate(data))
@@ -1054,7 +1054,7 @@ again:
         * to IRQS_INPROGRESS and the irq line is masked forever.
         *
         * This also serializes the state of shared oneshot handlers
-        * versus "desc->threads_onehsot |= action->thread_mask;" in
+        * versus "desc->threads_oneshot |= action->thread_mask;" in
         * irq_wake_thread(). See the comment there which explains the
         * serialization.
         */
@@ -1157,7 +1157,7 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 
 /*
  * Interrupts explicitly requested as threaded interrupts want to be
- * preemtible - many of them need to sleep and wait for slow busses to
+ * preemptible - many of them need to sleep and wait for slow busses to
  * complete.
  */
 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
@@ -1913,7 +1913,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
        /* Last action releases resources */
        if (!desc->action) {
                /*
-                * Reaquire bus lock as irq_release_resources() might
+                * Reacquire bus lock as irq_release_resources() might
                 * require it to deallocate resources over the slow bus.
                 */
                chip_bus_lock(desc);
@@ -2753,7 +2753,7 @@ int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
  *     irq_get_irqchip_state - returns the irqchip state of a interrupt.
  *     @irq: Interrupt line that is forwarded to a VM
  *     @which: One of IRQCHIP_STATE_* the caller wants to know about
- *     @state: a pointer to a boolean where the state is to be storeed
+ *     @state: a pointer to a boolean where the state is to be stored
  *
  *     This call snapshots the internal irqchip state of an
  *     interrupt, returning into @state the bit corresponding to
index 651a4ad6d711f82d8192d07a6b7616ef9337651f..578596e41cb60731cdae15038e5ad0df03dd647c 100644 (file)
@@ -337,15 +337,14 @@ void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
  * irq_matrix_reserve - Reserve interrupts
  * @m:         Matrix pointer
  *
- * This is merily a book keeping call. It increments the number of globally
+ * This is merely a book keeping call. It increments the number of globally
  * reserved interrupt bits w/o actually allocating them. This allows to
  * setup interrupt descriptors w/o assigning low level resources to it.
  * The actual allocation happens when the interrupt gets activated.
  */
 void irq_matrix_reserve(struct irq_matrix *m)
 {
-       if (m->global_reserved <= m->global_available &&
-           m->global_reserved + 1 > m->global_available)
+       if (m->global_reserved == m->global_available)
                pr_warn("Interrupt reservation exceeds available resources\n");
 
        m->global_reserved++;
@@ -356,7 +355,7 @@ void irq_matrix_reserve(struct irq_matrix *m)
  * irq_matrix_remove_reserved - Remove interrupt reservation
  * @m:         Matrix pointer
  *
- * This is merily a book keeping call. It decrements the number of globally
+ * This is merely a book keeping call. It decrements the number of globally
  * reserved interrupt bits. This is used to undo irq_matrix_reserve() when the
  * interrupt was never in use and a real vector allocated, which undid the
  * reservation.
@@ -423,7 +422,9 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
        if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
                return;
 
-       clear_bit(bit, cm->alloc_map);
+       if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
+               return;
+
        cm->allocated--;
        if(managed)
                cm->managed_allocated--;
index def48589ea484d44188b8c574cadacfabf21b827..61ca924ef4b4c2d5fff88ba5b85b8993b69698c0 100644 (file)
@@ -7,7 +7,7 @@
 
 /**
  * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
- * @desc:              Interrupt descpriptor to clean up
+ * @desc:              Interrupt descriptor to clean up
  * @force_clear:       If set clear the move pending bit unconditionally.
  *                     If not set, clear it only when the dying CPU is the
  *                     last one in the pending mask.
index b338d622f26e330e0e889c1453fec4b4849eab79..c41965e348b5bbfd737a3de0eb04d462e285cf45 100644 (file)
@@ -5,7 +5,7 @@
  *
  * This file is licensed under GPLv2.
  *
- * This file contains common code to support Message Signalled Interrupt for
+ * This file contains common code to support Message Signaled Interrupts for
  * PCI compatible and non PCI compatible devices.
  */
 #include <linux/types.h>
index 98138788cb04df4b1a6956f6c49bb131a7c94388..7c5cd42df3b93e7b5bb15bbd9978864fbe80debc 100644 (file)
@@ -144,7 +144,7 @@ static ssize_t write_irq_affinity(int type, struct file *file,
        if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
                return -EIO;
 
-       if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
                return -ENOMEM;
 
        if (type)
@@ -238,7 +238,7 @@ static ssize_t default_affinity_write(struct file *file,
        cpumask_var_t new_value;
        int err;
 
-       if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
                return -ENOMEM;
 
        err = cpumask_parse_user(buffer, count, new_value);
index bd1d85c610aab9d3eb3ffdfb38b61aa3ce3d500f..0c46e9fe3a89de94dd32366ae93215f755d426b9 100644 (file)
@@ -128,7 +128,7 @@ int check_irq_resend(struct irq_desc *desc, bool inject)
        if (!try_retrigger(desc))
                err = irq_sw_resend(desc);
 
-       /* If the retrigger was successfull, mark it with the REPLAY bit */
+       /* If the retrigger was successful, mark it with the REPLAY bit */
        if (!err)
                desc->istate |= IRQS_REPLAY;
        return err;
index f865e5f4d3825461bc4e1994eb4da80981b7e3fc..c481d845832573e94fbdda078b38972a241368b2 100644 (file)
@@ -403,6 +403,10 @@ void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
                        desc->irqs_unhandled -= ok;
        }
 
+       if (likely(!desc->irqs_unhandled))
+               return;
+
+       /* Now getting into unhandled irq detection */
        desc->irq_count++;
        if (likely(desc->irq_count < 100000))
                return;
index 773b6105c4aea2843afb3f49313e4dff768af1c0..d309d6fbf5bddc18293b12dcae2d1757b8609119 100644 (file)
@@ -84,7 +84,7 @@ void irq_timings_disable(void)
  * 2. Log interval
  *
  * We saw the irq timings allow to compute the interval of the
- * occurrences for a specific interrupt. We can reasonibly assume the
+ * occurrences for a specific interrupt. We can reasonably assume the
  * longer is the interval, the higher is the error for the next event
  * and we can consider storing those interval values into an array
  * where each slot in the array correspond to an interval at the power
@@ -416,7 +416,7 @@ static u64 __irq_timings_next_event(struct irqt_stat *irqs, int irq, u64 now)
         * Copy the content of the circular buffer into another buffer
         * in order to linearize the buffer instead of dealing with
         * wrapping indexes and shifted array which will be prone to
-        * error and extremelly difficult to debug.
+        * error and extremely difficult to debug.
         */
        for (i = 0; i < count; i++) {
                int index = (start + i) & IRQ_TIMINGS_MASK;
@@ -485,7 +485,7 @@ static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts)
 
        /*
         * The interrupt triggered more than one second apart, that
-        * ends the sequence as predictible for our purpose. In this
+        * ends the sequence as predictable for our purpose. In this
         * case, assume we have the beginning of a sequence and the
         * timestamp is the first value. As it is impossible to
         * predict anything at this point, return.
@@ -514,7 +514,7 @@ static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts)
  *      If more than the array size interrupts happened during the
  *      last busy/idle cycle, the index wrapped up and we have to
  *      begin with the next element in the array which is the last one
- *      in the sequence, otherwise it is a the index 0.
+ *      in the sequence, otherwise it is at the index 0.
  *
  * - have an indication of the interrupts activity on this CPU
  *   (eg. irq/sec)
index c6d0c1dc625324cd8bbbaaf78f1c2709f30dd345..ef28a0b9cf1efa21807d9aafa0e41cfeb4560ed6 100644 (file)
@@ -705,7 +705,7 @@ static void print_lock_name(struct lock_class *class)
 
        printk(KERN_CONT " (");
        __print_lock_name(class);
-       printk(KERN_CONT "){%s}-{%hd:%hd}", usage,
+       printk(KERN_CONT "){%s}-{%d:%d}", usage,
                        class->wait_type_outer ?: class->wait_type_inner,
                        class->wait_type_inner);
 }
@@ -930,7 +930,8 @@ static bool assign_lock_key(struct lockdep_map *lock)
                /* Debug-check: all keys must be persistent! */
                debug_locks_off();
                pr_err("INFO: trying to register non-static key.\n");
-               pr_err("the code is fine but needs lockdep annotation.\n");
+               pr_err("The code is fine but needs lockdep annotation, or maybe\n");
+               pr_err("you didn't initialize this object before use?\n");
                pr_err("turning off the locking correctness validator.\n");
                dump_stack();
                return false;
@@ -1392,7 +1393,7 @@ static int add_lock_to_list(struct lock_class *this,
 /*
  * For good efficiency of modular, we use power of 2
  */
-#define MAX_CIRCULAR_QUEUE_SIZE                4096UL
+#define MAX_CIRCULAR_QUEUE_SIZE                (1UL << CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS)
 #define CQ_MASK                                (MAX_CIRCULAR_QUEUE_SIZE-1)
 
 /*
index de49f9e1c11ba8be13fffcda9159c9c91db16e85..ecb8662e7a4ed52c7935b19d280e79458af6c30b 100644 (file)
@@ -99,16 +99,16 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
 #define MAX_STACK_TRACE_ENTRIES        262144UL
 #define STACK_TRACE_HASH_SIZE  8192
 #else
-#define MAX_LOCKDEP_ENTRIES    32768UL
+#define MAX_LOCKDEP_ENTRIES    (1UL << CONFIG_LOCKDEP_BITS)
 
-#define MAX_LOCKDEP_CHAINS_BITS        16
+#define MAX_LOCKDEP_CHAINS_BITS        CONFIG_LOCKDEP_CHAINS_BITS
 
 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the hash_lock.
  */
-#define MAX_STACK_TRACE_ENTRIES        524288UL
-#define STACK_TRACE_HASH_SIZE  16384
+#define MAX_STACK_TRACE_ENTRIES        (1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS)
+#define STACK_TRACE_HASH_SIZE  (1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS)
 #endif
 
 /*
index 4786dd271b45afa4031c47d58f4c5b9a1679c7a2..b94f3831e963a12576b33f5824251294147b938c 100644 (file)
@@ -60,6 +60,8 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
  */
 void queued_write_lock_slowpath(struct qrwlock *lock)
 {
+       int cnts;
+
        /* Put the writer into the wait queue */
        arch_spin_lock(&lock->wait_lock);
 
@@ -73,9 +75,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 
        /* When no more readers or writers, set the locked flag */
        do {
-               atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
-       } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
-                                       _QW_LOCKED) != _QW_WAITING);
+               cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
+       } while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
 unlock:
        arch_spin_unlock(&lock->wait_lock);
 }
index 6f69a4195d5630bdd9217bfd183579fd117d3976..c2ebddb5e97466a99612adc40a9672249eee9bfc 100644 (file)
@@ -430,7 +430,7 @@ static ssize_t prof_cpu_mask_proc_write(struct file *file,
        cpumask_var_t new_value;
        int err;
 
-       if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
                return -ENOMEM;
 
        err = cpumask_parse_user(buffer, count, new_value);
index 5f611658eeab1be91dda5177d5359892e2dfdff9..2c36a5fad58912a4c1b27b4f5f8b0f3312e82b6d 100644 (file)
@@ -60,7 +60,7 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
        cpu = smp_processor_id();
        delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
        irqtime->irq_start_time += delta;
-       pc = preempt_count() - offset;
+       pc = irq_count() - offset;
 
        /*
         * We do not account for softirq time from ksoftirqd here.
@@ -421,7 +421,7 @@ void vtime_task_switch(struct task_struct *prev)
 
 void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
 {
-       unsigned int pc = preempt_count() - offset;
+       unsigned int pc = irq_count() - offset;
 
        if (pc & HARDIRQ_OFFSET) {
                vtime_account_hardirq(tsk);
index 9908ec4a9bfed905b500e5a0040553acbb986ce1..5a99696da86ac8fdd6d540a59f344f9859f4b530 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
+#include <linux/local_lock.h>
 #include <linux/mm.h>
 #include <linux/notifier.h>
 #include <linux/percpu.h>
@@ -25,6 +26,7 @@
 #include <linux/smpboot.h>
 #include <linux/tick.h>
 #include <linux/irq.h>
+#include <linux/wait_bit.h>
 
 #include <asm/softirq_stack.h>
 
@@ -102,20 +104,204 @@ EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
 #endif
 
 /*
- * preempt_count and SOFTIRQ_OFFSET usage:
- * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
- *   softirq processing.
- * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
+ * SOFTIRQ_OFFSET usage:
+ *
+ * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
+ * to a per CPU counter and to task::softirqs_disabled_cnt.
+ *
+ * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
+ *   processing.
+ *
+ * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
  *   on local_bh_disable or local_bh_enable.
+ *
  * This lets us distinguish between whether we are currently processing
  * softirq and whether we just have bh disabled.
  */
+#ifdef CONFIG_PREEMPT_RT
+
+/*
+ * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
+ * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
+ * softirq disabled section to be preempted.
+ *
+ * The per task counter is used for softirq_count(), in_softirq() and
+ * in_serving_softirqs() because these counts are only valid when the task
+ * holding softirq_ctrl::lock is running.
+ *
+ * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
+ * the task which is in a softirq disabled section is preempted or blocks.
+ */
+struct softirq_ctrl {
+       local_lock_t    lock;
+       int             cnt;
+};
+
+static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
+       .lock   = INIT_LOCAL_LOCK(softirq_ctrl.lock),
+};
+
+/**
+ * local_bh_blocked() - Check for idle whether BH processing is blocked
+ *
+ * Returns false if the per CPU softirq::cnt is 0 otherwise true.
+ *
+ * This is invoked from the idle task to guard against false positive
+ * softirq pending warnings, which would happen when the task which holds
+ * softirq_ctrl::lock was the only running task on the CPU and blocks on
+ * some other lock.
+ */
+bool local_bh_blocked(void)
+{
+       return __this_cpu_read(softirq_ctrl.cnt) != 0;
+}
+
+void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+{
+       unsigned long flags;
+       int newcnt;
+
+       WARN_ON_ONCE(in_hardirq());
+
+       /* First entry of a task into a BH disabled section? */
+       if (!current->softirq_disable_cnt) {
+               if (preemptible()) {
+                       local_lock(&softirq_ctrl.lock);
+                       /* Required to meet the RCU bottomhalf requirements. */
+                       rcu_read_lock();
+               } else {
+                       DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
+               }
+       }
+
+       /*
+        * Track the per CPU softirq disabled state. On RT this is per CPU
+        * state to allow preemption of bottom half disabled sections.
+        */
+       newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
+       /*
+        * Reflect the result in the task state to prevent recursion on the
+        * local lock and to make softirq_count() & al work.
+        */
+       current->softirq_disable_cnt = newcnt;
+
+       if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
+               raw_local_irq_save(flags);
+               lockdep_softirqs_off(ip);
+               raw_local_irq_restore(flags);
+       }
+}
+EXPORT_SYMBOL(__local_bh_disable_ip);
+
+static void __local_bh_enable(unsigned int cnt, bool unlock)
+{
+       unsigned long flags;
+       int newcnt;
+
+       DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
+                           this_cpu_read(softirq_ctrl.cnt));
+
+       if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
+               raw_local_irq_save(flags);
+               lockdep_softirqs_on(_RET_IP_);
+               raw_local_irq_restore(flags);
+       }
+
+       newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
+       current->softirq_disable_cnt = newcnt;
+
+       if (!newcnt && unlock) {
+               rcu_read_unlock();
+               local_unlock(&softirq_ctrl.lock);
+       }
+}
+
+void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+{
+       bool preempt_on = preemptible();
+       unsigned long flags;
+       u32 pending;
+       int curcnt;
+
+       WARN_ON_ONCE(in_irq());
+       lockdep_assert_irqs_enabled();
+
+       local_irq_save(flags);
+       curcnt = __this_cpu_read(softirq_ctrl.cnt);
+
+       /*
+        * If this is not reenabling soft interrupts, no point in trying to
+        * run pending ones.
+        */
+       if (curcnt != cnt)
+               goto out;
+
+       pending = local_softirq_pending();
+       if (!pending || ksoftirqd_running(pending))
+               goto out;
+
+       /*
+        * If this was called from non preemptible context, wake up the
+        * softirq daemon.
+        */
+       if (!preempt_on) {
+               wakeup_softirqd();
+               goto out;
+       }
+
+       /*
+        * Adjust softirq count to SOFTIRQ_OFFSET which makes
+        * in_serving_softirq() become true.
+        */
+       cnt = SOFTIRQ_OFFSET;
+       __local_bh_enable(cnt, false);
+       __do_softirq();
+
+out:
+       __local_bh_enable(cnt, preempt_on);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__local_bh_enable_ip);
+
+/*
+ * Invoked from ksoftirqd_run() outside of the interrupt disabled section
+ * to acquire the per CPU local lock for reentrancy protection.
+ */
+static inline void ksoftirqd_run_begin(void)
+{
+       __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+       local_irq_disable();
+}
+
+/* Counterpart to ksoftirqd_run_begin() */
+static inline void ksoftirqd_run_end(void)
+{
+       __local_bh_enable(SOFTIRQ_OFFSET, true);
+       WARN_ON_ONCE(in_interrupt());
+       local_irq_enable();
+}
+
+static inline void softirq_handle_begin(void) { }
+static inline void softirq_handle_end(void) { }
+
+static inline bool should_wake_ksoftirqd(void)
+{
+       return !this_cpu_read(softirq_ctrl.cnt);
+}
+
+static inline void invoke_softirq(void)
+{
+       if (should_wake_ksoftirqd())
+               wakeup_softirqd();
+}
+
+#else /* CONFIG_PREEMPT_RT */
 
-#ifdef CONFIG_TRACE_IRQFLAGS
 /*
- * This is for softirq.c-internal use, where hardirqs are disabled
+ * This one is for softirq.c-internal use, where hardirqs are disabled
  * legitimately:
  */
+#ifdef CONFIG_TRACE_IRQFLAGS
 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 {
        unsigned long flags;
@@ -206,6 +392,32 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 }
 EXPORT_SYMBOL(__local_bh_enable_ip);
 
+static inline void softirq_handle_begin(void)
+{
+       __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+}
+
+static inline void softirq_handle_end(void)
+{
+       __local_bh_enable(SOFTIRQ_OFFSET);
+       WARN_ON_ONCE(in_interrupt());
+}
+
+static inline void ksoftirqd_run_begin(void)
+{
+       local_irq_disable();
+}
+
+static inline void ksoftirqd_run_end(void)
+{
+       local_irq_enable();
+}
+
+static inline bool should_wake_ksoftirqd(void)
+{
+       return true;
+}
+
 static inline void invoke_softirq(void)
 {
        if (ksoftirqd_running(local_softirq_pending()))
@@ -250,6 +462,8 @@ asmlinkage __visible void do_softirq(void)
        local_irq_restore(flags);
 }
 
+#endif /* !CONFIG_PREEMPT_RT */
+
 /*
  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
  * but break the loop if need_resched() is set or after 2 ms.
@@ -318,7 +532,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 
        pending = local_softirq_pending();
 
-       __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+       softirq_handle_begin();
        in_hardirq = lockdep_softirq_start();
        account_softirq_enter(current);
 
@@ -354,8 +568,10 @@ restart:
                pending >>= softirq_bit;
        }
 
-       if (__this_cpu_read(ksoftirqd) == current)
+       if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
+           __this_cpu_read(ksoftirqd) == current)
                rcu_softirq_qs();
+
        local_irq_disable();
 
        pending = local_softirq_pending();
@@ -369,8 +585,7 @@ restart:
 
        account_softirq_exit(current);
        lockdep_softirq_end(in_hardirq);
-       __local_bh_enable(SOFTIRQ_OFFSET);
-       WARN_ON_ONCE(in_interrupt());
+       softirq_handle_end();
        current_restore_flags(old_flags, PF_MEMALLOC);
 }
 
@@ -465,7 +680,7 @@ inline void raise_softirq_irqoff(unsigned int nr)
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
-       if (!in_interrupt())
+       if (!in_interrupt() && should_wake_ksoftirqd())
                wakeup_softirqd();
 }
 
@@ -531,6 +746,20 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 }
 EXPORT_SYMBOL(__tasklet_hi_schedule);
 
+static bool tasklet_clear_sched(struct tasklet_struct *t)
+{
+       if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
+               wake_up_var(&t->state);
+               return true;
+       }
+
+       WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
+                 t->use_callback ? "callback" : "func",
+                 t->use_callback ? (void *)t->callback : (void *)t->func);
+
+       return false;
+}
+
 static void tasklet_action_common(struct softirq_action *a,
                                  struct tasklet_head *tl_head,
                                  unsigned int softirq_nr)
@@ -550,13 +779,12 @@ static void tasklet_action_common(struct softirq_action *a,
 
                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
-                               if (!test_and_clear_bit(TASKLET_STATE_SCHED,
-                                                       &t->state))
-                                       BUG();
-                               if (t->use_callback)
-                                       t->callback(t);
-                               else
-                                       t->func(t->data);
+                               if (tasklet_clear_sched(t)) {
+                                       if (t->use_callback)
+                                               t->callback(t);
+                                       else
+                                               t->func(t->data);
+                               }
                                tasklet_unlock(t);
                                continue;
                        }
@@ -606,21 +834,62 @@ void tasklet_init(struct tasklet_struct *t,
 }
 EXPORT_SYMBOL(tasklet_init);
 
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+/*
+ * Do not use in new code. Waiting for tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+void tasklet_unlock_spin_wait(struct tasklet_struct *t)
+{
+       while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+               if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+                       /*
+                        * Prevent a live lock when current preempted soft
+                        * interrupt processing or prevents ksoftirqd from
+                        * running. If the tasklet runs on a different CPU
+                        * then this has no effect other than doing the BH
+                        * disable/enable dance for nothing.
+                        */
+                       local_bh_disable();
+                       local_bh_enable();
+               } else {
+                       cpu_relax();
+               }
+       }
+}
+EXPORT_SYMBOL(tasklet_unlock_spin_wait);
+#endif
+
 void tasklet_kill(struct tasklet_struct *t)
 {
        if (in_interrupt())
                pr_notice("Attempt to kill tasklet from interrupt\n");
 
-       while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
-               do {
-                       yield();
-               } while (test_bit(TASKLET_STATE_SCHED, &t->state));
-       }
+       while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+               wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
+
        tasklet_unlock_wait(t);
-       clear_bit(TASKLET_STATE_SCHED, &t->state);
+       tasklet_clear_sched(t);
 }
 EXPORT_SYMBOL(tasklet_kill);
 
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+void tasklet_unlock(struct tasklet_struct *t)
+{
+       smp_mb__before_atomic();
+       clear_bit(TASKLET_STATE_RUN, &t->state);
+       smp_mb__after_atomic();
+       wake_up_var(&t->state);
+}
+EXPORT_SYMBOL_GPL(tasklet_unlock);
+
+void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+       wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
+}
+EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
+#endif
+
 void __init softirq_init(void)
 {
        int cpu;
@@ -643,53 +912,21 @@ static int ksoftirqd_should_run(unsigned int cpu)
 
 static void run_ksoftirqd(unsigned int cpu)
 {
-       local_irq_disable();
+       ksoftirqd_run_begin();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirq on inline stack, as we are not deep
                 * in the task stack here.
                 */
                __do_softirq();
-               local_irq_enable();
+               ksoftirqd_run_end();
                cond_resched();
                return;
        }
-       local_irq_enable();
+       ksoftirqd_run_end();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-/*
- * tasklet_kill_immediate is called to remove a tasklet which can already be
- * scheduled for execution on @cpu.
- *
- * Unlike tasklet_kill, this function removes the tasklet
- * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
- *
- * When this function is called, @cpu must be in the CPU_DEAD state.
- */
-void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
-{
-       struct tasklet_struct **i;
-
-       BUG_ON(cpu_online(cpu));
-       BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
-
-       if (!test_bit(TASKLET_STATE_SCHED, &t->state))
-               return;
-
-       /* CPU is dead, so no lock needed. */
-       for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
-               if (*i == t) {
-                       *i = t->next;
-                       /* If this was the tail element, move the tail ptr */
-                       if (*i == NULL)
-                               per_cpu(tasklet_vec, cpu).tail = i;
-                       return;
-               }
-       }
-       BUG();
-}
-
 static int takeover_tasklets(unsigned int cpu)
 {
        /* CPU is dead, so no lock needed. */
index 2e2e3f378d97f61625c6a936b38e6c86c7037e0d..3d62c9599dc041019d23c1368c014672c51b40fc 100644 (file)
 #ifndef PAC_RESET_KEYS
 # define PAC_RESET_KEYS(a, b)  (-EINVAL)
 #endif
+#ifndef PAC_SET_ENABLED_KEYS
+# define PAC_SET_ENABLED_KEYS(a, b, c) (-EINVAL)
+#endif
+#ifndef PAC_GET_ENABLED_KEYS
+# define PAC_GET_ENABLED_KEYS(a)       (-EINVAL)
+#endif
 #ifndef SET_TAGGED_ADDR_CTRL
 # define SET_TAGGED_ADDR_CTRL(a)       (-EINVAL)
 #endif
@@ -2497,6 +2503,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                        return -EINVAL;
                error = PAC_RESET_KEYS(me, arg2);
                break;
+       case PR_PAC_SET_ENABLED_KEYS:
+               if (arg4 || arg5)
+                       return -EINVAL;
+               error = PAC_SET_ENABLED_KEYS(me, arg2, arg3);
+               break;
+       case PR_PAC_GET_ENABLED_KEYS:
+               if (arg2 || arg3 || arg4 || arg5)
+                       return -EINVAL;
+               error = PAC_GET_ENABLED_KEYS(me);
+               break;
        case PR_SET_TAGGED_ADDR_CTRL:
                if (arg3 || arg4 || arg5)
                        return -EINVAL;
index 4d94e2b5499d894346cac8a772b7cb25141f0988..bea9d08b16988b33a82b7d911c3a174bd4a0bb16 100644 (file)
@@ -2,13 +2,13 @@
 /*
  * Alarmtimer interface
  *
- * This interface provides a timer which is similarto hrtimers,
+ * This interface provides a timer which is similar to hrtimers,
  * but triggers a RTC alarm if the box is suspend.
  *
  * This interface is influenced by the Android RTC Alarm timer
  * interface.
  *
- * Copyright (C) 2010 IBM Corperation
+ * Copyright (C) 2010 IBM Corporation
  *
  * Author: John Stultz <john.stultz@linaro.org>
  */
@@ -811,7 +811,7 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
 /**
  * alarm_timer_nsleep - alarmtimer nanosleep
  * @which_clock: clockid
- * @flags: determins abstime or relative
+ * @flags: determines abstime or relative
  * @tsreq: requested sleep time (abs or rel)
  *
  * Handles clock_nanosleep calls against _ALARM clockids
index cce484a2cc7ca4b710622435c5fb065d0ef2c61d..1d1a61371b5a1f4754df3af10f6ced10f7481942 100644 (file)
@@ -38,7 +38,7 @@
  * calculated mult and shift factors. This guarantees that no 64bit
  * overflow happens when the input value of the conversion is
  * multiplied with the calculated mult factor. Larger ranges may
- * reduce the conversion accuracy by chosing smaller mult and shift
+ * reduce the conversion accuracy by choosing smaller mult and shift
  * factors.
  */
 void
@@ -518,7 +518,7 @@ static void clocksource_suspend_select(bool fallback)
  * the suspend time when resuming system.
  *
  * This function is called late in the suspend process from timekeeping_suspend(),
- * that means processes are freezed, non-boot cpus and interrupts are disabled
+ * that means processes are frozen, non-boot cpus and interrupts are disabled
  * now. It is therefore possible to start the suspend timer without taking the
  * clocksource mutex.
  */
index 5c9d968187ae850e7a1151adffb986bf0543bddb..4a66725b1d4ac0ffbce018f404f26fe1f068f7a4 100644 (file)
@@ -683,7 +683,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
         * T1 is removed, so this code is called and would reprogram
         * the hardware to 5s from now. Any hrtimer_start after that
         * will not reprogram the hardware due to hang_detected being
-        * set. So we'd effectivly block all timers until the T2 event
+        * set. So we'd effectively block all timers until the T2 event
         * fires.
         */
        if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
@@ -1019,7 +1019,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
         * cpu_base->next_timer. This happens when we remove the first
         * timer on a remote cpu. No harm as we never dereference
         * cpu_base->next_timer. So the worst thing what can happen is
-        * an superflous call to hrtimer_force_reprogram() on the
+        * an superfluous call to hrtimer_force_reprogram() on the
         * remote cpu later on if the same timer gets enqueued again.
         */
        if (reprogram && timer == cpu_base->next_timer)
@@ -1212,7 +1212,7 @@ static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
  * The counterpart to hrtimer_cancel_wait_running().
  *
  * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
- * the timer callback to finish. Drop expiry_lock and reaquire it. That
+ * the timer callback to finish. Drop expiry_lock and reacquire it. That
  * allows the waiter to acquire the lock and make progress.
  */
 static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
@@ -1398,7 +1398,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
        int base;
 
        /*
-        * On PREEMPT_RT enabled kernels hrtimers which are not explicitely
+        * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
         * marked for hard interrupt expiry mode are moved into soft
         * interrupt context for latency reasons and because the callbacks
         * can invoke functions which might sleep on RT, e.g. spin_lock().
@@ -1430,7 +1430,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
  * hrtimer_init - initialize a timer to the given clock
  * @timer:     the timer to be initialized
  * @clock_id:  the clock to be used
- * @mode:       The modes which are relevant for intitialization:
+ * @mode:       The modes which are relevant for initialization:
  *              HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
  *              HRTIMER_MODE_REL_SOFT
  *
@@ -1487,7 +1487,7 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
  * insufficient for that.
  *
  * The sequence numbers are required because otherwise we could still observe
- * a false negative if the read side got smeared over multiple consequtive
+ * a false negative if the read side got smeared over multiple consecutive
  * __run_hrtimer() invocations.
  */
 
@@ -1588,7 +1588,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
                         * minimizing wakeups, not running timers at the
                         * earliest interrupt after their soft expiration.
                         * This allows us to avoid using a Priority Search
-                        * Tree, which can answer a stabbing querry for
+                        * Tree, which can answer a stabbing query for
                         * overlapping intervals and instead use the simple
                         * BST we already have.
                         * We don't add extra wakeups by delaying timers that
@@ -1822,7 +1822,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
                                   clockid_t clock_id, enum hrtimer_mode mode)
 {
        /*
-        * On PREEMPT_RT enabled kernels hrtimers which are not explicitely
+        * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
         * marked for hard interrupt expiry mode are moved into soft
         * interrupt context either for latency reasons or because the
         * hrtimer callback takes regular spinlocks or invokes other
@@ -1835,7 +1835,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
         * the same CPU. That causes a latency spike due to the wakeup of
         * a gazillion threads.
         *
-        * OTOH, priviledged real-time user space applications rely on the
+        * OTOH, privileged real-time user space applications rely on the
         * low latency of hard interrupt wakeups. If the current task is in
         * a real-time scheduling class, mark the mode for hard interrupt
         * expiry.
index a5cffe2a177038fae41df257349250034e1cbab5..a492e4da69ba2a88ac176d0a19f27252b920ffec 100644 (file)
@@ -44,7 +44,7 @@ static u64 jiffies_read(struct clocksource *cs)
  * the timer interrupt frequency HZ and it suffers
  * inaccuracies caused by missed or lost timer
  * interrupts and the inability for the timer
- * interrupt hardware to accuratly tick at the
+ * interrupt hardware to accurately tick at the
  * requested HZ value. It is also not recommended
  * for "tick-less" systems.
  */
index 5247afd7f34552b0c4fe0f1421302ad7c1cbc5c1..406dccb79c2b6b5dde3c619e3107afd9925ab7ff 100644 (file)
@@ -544,7 +544,7 @@ static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec,
                                  struct timespec64 *to_set,
                                  const struct timespec64 *now)
 {
-       /* Allowed error in tv_nsec, arbitarily set to 5 jiffies in ns. */
+       /* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */
        const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
        struct timespec64 delay = {.tv_sec = -1,
                                   .tv_nsec = set_offset_nsec};
index 9abe15255bc4ef4a3a01ff548fb119b338651e4a..3bb96a8b49c9bd8888e1aaa1791470fe169f4a8f 100644 (file)
@@ -279,7 +279,7 @@ void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
  * @tsk:       Task for which cputime needs to be started
  * @samples:   Storage for time samples
  *
- * The thread group cputime accouting is avoided when there are no posix
+ * The thread group cputime accounting is avoided when there are no posix
  * CPU timers armed. Before starting a timer it's required to check whether
  * the time accounting is active. If not, a full update of the atomic
  * accounting store needs to be done and the accounting enabled.
@@ -390,7 +390,7 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
        /*
         * If posix timer expiry is handled in task work context then
         * timer::it_lock can be taken without disabling interrupts as all
-        * other locking happens in task context. This requires a seperate
+        * other locking happens in task context. This requires a separate
         * lock class key otherwise regular posix timer expiry would record
         * the lock class being taken in interrupt context and generate a
         * false positive warning.
@@ -1216,7 +1216,7 @@ static void handle_posix_cpu_timers(struct task_struct *tsk)
                check_process_timers(tsk, &firing);
 
                /*
-                * The above timer checks have updated the exipry cache and
+                * The above timer checks have updated the expiry cache and
                 * because nothing can have queued or modified timers after
                 * sighand lock was taken above it is guaranteed to be
                 * consistent. So the next timer interrupt fastpath check
index bf540f5a4115a2bff84ee7c2eb9f1af21385e1cd..dd5697d7347b1daaa0e843dd1e23abc8c547ddfc 100644 (file)
@@ -1191,8 +1191,8 @@ SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
 
        err = do_clock_adjtime(which_clock, &ktx);
 
-       if (err >= 0)
-               err = put_old_timex32(utp, &ktx);
+       if (err >= 0 && put_old_timex32(utp, &ktx))
+               return -EFAULT;
 
        return err;
 }
index 77c63005dc4e03ab22d1f8a5599c8317c9afa5a4..13b11eb62685e451488b6e0da55a131d74d09662 100644 (file)
@@ -21,7 +21,6 @@
 #define DEBUGFS_FILENAME "udelay_test"
 
 static DEFINE_MUTEX(udelay_test_lock);
-static struct dentry *udelay_test_debugfs_file;
 static int udelay_test_usecs;
 static int udelay_test_iterations = DEFAULT_ITERATIONS;
 
@@ -138,8 +137,8 @@ static const struct file_operations udelay_test_debugfs_ops = {
 static int __init udelay_test_init(void)
 {
        mutex_lock(&udelay_test_lock);
-       udelay_test_debugfs_file = debugfs_create_file(DEBUGFS_FILENAME,
-                       S_IRUSR, NULL, NULL, &udelay_test_debugfs_ops);
+       debugfs_create_file(DEBUGFS_FILENAME, S_IRUSR, NULL, NULL,
+                           &udelay_test_debugfs_ops);
        mutex_unlock(&udelay_test_lock);
 
        return 0;
@@ -150,7 +149,7 @@ module_init(udelay_test_init);
 static void __exit udelay_test_exit(void)
 {
        mutex_lock(&udelay_test_lock);
-       debugfs_remove(udelay_test_debugfs_file);
+       debugfs_remove(debugfs_lookup(DEBUGFS_FILENAME, NULL));
        mutex_unlock(&udelay_test_lock);
 }
 
index b5a65e212df2f9a1e4b55df717a7f97652b1ed61..797eb93103ad4d373d2fca50b4baeadf8da10e96 100644 (file)
@@ -53,7 +53,7 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
         * reasons.
         *
         * Each caller tries to arm the hrtimer on its own CPU, but if the
-        * hrtimer callbback function is currently running, then
+        * hrtimer callback function is currently running, then
         * hrtimer_start() cannot move it and the timer stays on the CPU on
         * which it is assigned at the moment.
         *
index 5a23829372c7fc2d4b60814b397a1af9e72298ba..a440552287969e7e87d550e22b3307ab00056387 100644 (file)
@@ -107,6 +107,19 @@ void tick_install_broadcast_device(struct clock_event_device *dev)
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_broadcast_mask))
                tick_broadcast_start_periodic(dev);
+
+       if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+               return;
+
+       /*
+        * If the system already runs in oneshot mode, switch the newly
+        * registered broadcast device to oneshot mode explicitly.
+        */
+       if (tick_broadcast_oneshot_active()) {
+               tick_broadcast_switch_to_oneshot();
+               return;
+       }
+
        /*
         * Inform all cpus about this. We might be in a situation
         * where we did not switch to oneshot mode because the per cpu
@@ -115,8 +128,7 @@ void tick_install_broadcast_device(struct clock_event_device *dev)
         * notification the systems stays stuck in periodic mode
         * forever.
         */
-       if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
-               tick_clock_notify();
+       tick_clock_notify();
 }
 
 /*
@@ -157,7 +169,7 @@ static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
 }
 
 /*
- * Check, if the device is disfunctional and a place holder, which
+ * Check, if the device is dysfunctional and a placeholder, which
  * needs to be handled by the broadcast device.
  */
 int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
@@ -391,7 +403,7 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
                         * - the broadcast device exists
                         * - the broadcast device is not a hrtimer based one
                         * - the broadcast device is in periodic mode to
-                        *   avoid a hickup during switch to oneshot mode
+                        *   avoid a hiccup during switch to oneshot mode
                         */
                        if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
                            tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
index 9d3a22510babb2f18770ecdcf0e473198e92066b..e15bc0ef191282561eaae1e9c91e851d692e54b6 100644 (file)
@@ -348,12 +348,7 @@ void tick_check_new_device(struct clock_event_device *newdev)
        td = &per_cpu(tick_cpu_device, cpu);
        curdev = td->evtdev;
 
-       /* cpu local device ? */
-       if (!tick_check_percpu(curdev, newdev, cpu))
-               goto out_bc;
-
-       /* Preference decision */
-       if (!tick_check_preferred(curdev, newdev))
+       if (!tick_check_replacement(curdev, newdev))
                goto out_bc;
 
        if (!try_module_get(newdev->owner))
index f9745d47425aa452b334165ebc453217af4ed0ec..475ecceda7688cebc11b9afa8f919f70b2cdf23d 100644 (file)
@@ -45,7 +45,7 @@ int tick_program_event(ktime_t expires, int force)
 }
 
 /**
- * tick_resume_onshot - resume oneshot mode
+ * tick_resume_oneshot - resume oneshot mode
  */
 void tick_resume_oneshot(void)
 {
index e10a4af887373bc8a693b750fd27ff1e2460df3f..d34894f3862a8dde80b5d572d97f1481e205f32d 100644 (file)
@@ -751,7 +751,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
         * Aside of that check whether the local timer softirq is
         * pending. If so its a bad idea to call get_next_timer_interrupt()
         * because there is an already expired timer, so it will request
-        * immeditate expiry, which rearms the hardware timer with a
+        * immediate expiry, which rearms the hardware timer with a
         * minimal delta which brings us back to this place
         * immediately. Lather, rinse and repeat...
         */
@@ -973,7 +973,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
        if (unlikely(local_softirq_pending())) {
                static int ratelimit;
 
-               if (ratelimit < 10 &&
+               if (ratelimit < 10 && !local_bh_blocked() &&
                    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
                        pr_warn("NOHZ tick-stop error: Non-RCU local softirq work is pending, handler #%02x!!!\n",
                                (unsigned int) local_softirq_pending());
index 4fb06527cf64fe72da2ddcf550c8cd6de96aa4bc..d952ae393423632e997cc996fad2eaa932c5fc3a 100644 (file)
@@ -29,7 +29,7 @@ enum tick_nohz_mode {
  * @inidle:            Indicator that the CPU is in the tick idle mode
  * @tick_stopped:      Indicator that the idle tick has been stopped
  * @idle_active:       Indicator that the CPU is actively in the tick idle mode;
- *                     it is resetted during irq handling phases.
+ *                     it is reset during irq handling phases.
  * @do_timer_lst:      CPU was the last one doing do_timer before going idle
  * @got_idle_tick:     Tick timer function has run with @inidle set
  * @last_tick:         Store the last tick expiry time when the tick
index 3985b2b32d083e06acfee3c83f952426660dbe9d..29923b20e0e47d2fc6837ef61797e9c7e7584b7c 100644 (file)
@@ -571,7 +571,7 @@ EXPORT_SYMBOL(__usecs_to_jiffies);
 /*
  * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
  * that a remainder subtract here would not do the right thing as the
- * resolution values don't fall on second boundries.  I.e. the line:
+ * resolution values don't fall on second boundaries.  I.e. the line:
  * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
  * Note that due to the small error in the multiplier here, this
  * rounding is incorrect for sufficiently large values of tv_nsec, but
index 85b98e727306f1ecec22076d0a397109dbe60c82..e6285288d76572d192435a1a44babc57bfda8770 100644 (file)
@@ -76,7 +76,7 @@ static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
        return ns;
 }
 
-u64 timecounter_cyc2time(struct timecounter *tc,
+u64 timecounter_cyc2time(const struct timecounter *tc,
                         u64 cycle_tstamp)
 {
        u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
index 6aee5768c86ff7dbd4acd58cc96d4fac949f9437..81fe2a33b80c8daa833e298026b883b8a174f976 100644 (file)
@@ -596,14 +596,14 @@ EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
  * careful cache layout of the timekeeper because the sequence count and
  * struct tk_read_base would then need two cache lines instead of one.
  *
- * Access to the time keeper clock source is disabled accross the innermost
+ * Access to the time keeper clock source is disabled across the innermost
  * steps of suspend/resume. The accessors still work, but the timestamps
  * are frozen until time keeping is resumed which happens very early.
  *
  * For regular suspend/resume there is no observable difference vs. sched
  * clock, but it might affect some of the nasty low level debug printks.
  *
- * OTOH, access to sched clock is not guaranteed accross suspend/resume on
+ * OTOH, access to sched clock is not guaranteed across suspend/resume on
  * all systems either so it depends on the hardware in use.
  *
  * If that turns out to be a real problem then this could be mitigated by
@@ -899,7 +899,7 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
 EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
 
 /**
- * ktime_mono_to_any() - convert mononotic time to any other time
+ * ktime_mono_to_any() - convert monotonic time to any other time
  * @tmono:     time to convert.
  * @offs:      which offset to use
  */
@@ -1427,35 +1427,45 @@ static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
 static int change_clocksource(void *data)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
-       struct clocksource *new, *old;
+       struct clocksource *new, *old = NULL;
        unsigned long flags;
+       bool change = false;
 
        new = (struct clocksource *) data;
 
-       raw_spin_lock_irqsave(&timekeeper_lock, flags);
-       write_seqcount_begin(&tk_core.seq);
-
-       timekeeping_forward_now(tk);
        /*
         * If the cs is in module, get a module reference. Succeeds
         * for built-in code (owner == NULL) as well.
         */
        if (try_module_get(new->owner)) {
-               if (!new->enable || new->enable(new) == 0) {
-                       old = tk->tkr_mono.clock;
-                       tk_setup_internals(tk, new);
-                       if (old->disable)
-                               old->disable(old);
-                       module_put(old->owner);
-               } else {
+               if (!new->enable || new->enable(new) == 0)
+                       change = true;
+               else
                        module_put(new->owner);
-               }
        }
+
+       raw_spin_lock_irqsave(&timekeeper_lock, flags);
+       write_seqcount_begin(&tk_core.seq);
+
+       timekeeping_forward_now(tk);
+
+       if (change) {
+               old = tk->tkr_mono.clock;
+               tk_setup_internals(tk, new);
+       }
+
        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
 
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
+       if (old) {
+               if (old->disable)
+                       old->disable(old);
+
+               module_put(old->owner);
+       }
+
        return 0;
 }
 
@@ -1948,7 +1958,7 @@ static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
         *      xtime_nsec_1 = offset + xtime_nsec_2
         * Which gives us:
         *      xtime_nsec_2 = xtime_nsec_1 - offset
-        * Which simplfies to:
+        * Which simplifies to:
         *      xtime_nsec -= offset
         */
        if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
@@ -2336,7 +2346,7 @@ static int timekeeping_validate_timex(const struct __kernel_timex *txc)
 
                /*
                 * Validate if a timespec/timeval used to inject a time
-                * offset is valid.  Offsets can be postive or negative, so
+                * offset is valid.  Offsets can be positive or negative, so
                 * we don't check tv_sec. The value of the timeval/timespec
                 * is the sum of its fields,but *NOTE*:
                 * The field tv_usec/tv_nsec must always be non-negative and
index f475f1a027c8b36cf339f959c8031a299ba51d8a..d111adf4a0cb4c304fe9a7cbb3fe7b3c4ae2a69b 100644 (file)
@@ -894,7 +894,7 @@ static inline void forward_timer_base(struct timer_base *base)
        /*
         * No need to forward if we are close enough below jiffies.
         * Also while executing timers, base->clk is 1 offset ahead
-        * of jiffies to avoid endless requeuing to current jffies.
+        * of jiffies to avoid endless requeuing to current jiffies.
         */
        if ((long)(jnow - base->clk) < 1)
                return;
@@ -1271,7 +1271,7 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
  * The counterpart to del_timer_wait_running().
  *
  * If there is a waiter for base->expiry_lock, then it was waiting for the
- * timer callback to finish. Drop expiry_lock and reaquire it. That allows
+ * timer callback to finish. Drop expiry_lock and reacquire it. That allows
  * the waiter to acquire the lock and make progress.
  */
 static void timer_sync_wait_running(struct timer_base *base)
index 88e6b8ed6ca5ccd4271b96cc1e5fdb867920e88f..f0d5062d9cbc621a2ab32141b53c2f1f8cba0337 100644 (file)
@@ -108,7 +108,7 @@ void update_vsyscall(struct timekeeper *tk)
 
        /*
         * If the current clocksource is not VDSO capable, then spare the
-        * update of the high reolution parts.
+        * update of the high resolution parts.
         */
        if (clock_mode != VDSO_CLOCKMODE_NONE)
                update_vdso_data(vdata, tk);
index 5c777627212fa81cc3dc4fd70cbba807c62a5acc..915fe8790f045a247df2514c75277b5bfb58ef59 100644 (file)
@@ -3545,7 +3545,11 @@ static char *trace_iter_expand_format(struct trace_iterator *iter)
 {
        char *tmp;
 
-       if (iter->fmt == static_fmt_buf)
+       /*
+        * iter->tr is NULL when used with tp_printk, which makes
+        * this get called where it is not safe to call krealloc().
+        */
+       if (!iter->tr || iter->fmt == static_fmt_buf)
                return NULL;
 
        tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
@@ -3566,7 +3570,7 @@ const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
        if (WARN_ON_ONCE(!fmt))
                return fmt;
 
-       if (iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
+       if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
                return fmt;
 
        p = fmt;
@@ -4828,7 +4832,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
        cpumask_var_t tracing_cpumask_new;
        int err;
 
-       if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
                return -ENOMEM;
 
        err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
@@ -9692,7 +9696,7 @@ void __init early_trace_init(void)
 {
        if (tracepoint_printk) {
                tracepoint_print_iter =
-                       kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
+                       kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
                if (MEM_FAIL(!tracepoint_print_iter,
                             "Failed to allocate trace iterator\n"))
                        tracepoint_printk = 0;
index dc971a68dda4e59913c3af213b2dd4e717b84f99..e57cc0870892cea04cbc939e387056ed8ed96ef2 100644 (file)
@@ -63,8 +63,10 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
                event = p + 1;
                *p = '\0';
        }
-       if (event[0] == '\0')
-               return -EINVAL;
+       if (event[0] == '\0') {
+               ret = -EINVAL;
+               goto out;
+       }
 
        mutex_lock(&event_mutex);
        for_each_dyn_event_safe(pos, n) {
index af612945a4d05edaef7411fb4f683cbe74bb989b..9a4b980d695b8f2f525c92a4b3ef058150448d7c 100644 (file)
@@ -106,6 +106,7 @@ int create_user_ns(struct cred *new)
        if (!ns)
                goto fail_dec;
 
+       ns->parent_could_setfcap = cap_raised(new->cap_effective, CAP_SETFCAP);
        ret = ns_alloc_inum(&ns->ns);
        if (ret)
                goto fail_free;
@@ -841,6 +842,60 @@ static int sort_idmaps(struct uid_gid_map *map)
        return 0;
 }
 
+/**
+ * verify_root_map() - check the uid 0 mapping
+ * @file: idmapping file
+ * @map_ns: user namespace of the target process
+ * @new_map: requested idmap
+ *
+ * If a process requests mapping parent uid 0 into the new ns, verify that the
+ * process writing the map had the CAP_SETFCAP capability as the target process
+ * will be able to write fscaps that are valid in ancestor user namespaces.
+ *
+ * Return: true if the mapping is allowed, false if not.
+ */
+static bool verify_root_map(const struct file *file,
+                           struct user_namespace *map_ns,
+                           struct uid_gid_map *new_map)
+{
+       int idx;
+       const struct user_namespace *file_ns = file->f_cred->user_ns;
+       struct uid_gid_extent *extent0 = NULL;
+
+       for (idx = 0; idx < new_map->nr_extents; idx++) {
+               if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
+                       extent0 = &new_map->extent[idx];
+               else
+                       extent0 = &new_map->forward[idx];
+               if (extent0->lower_first == 0)
+                       break;
+
+               extent0 = NULL;
+       }
+
+       if (!extent0)
+               return true;
+
+       if (map_ns == file_ns) {
+               /* The process unshared its ns and is writing to its own
+                * /proc/self/uid_map.  User already has full capabilites in
+                * the new namespace.  Verify that the parent had CAP_SETFCAP
+                * when it unshared.
+                * */
+               if (!file_ns->parent_could_setfcap)
+                       return false;
+       } else {
+               /* Process p1 is writing to uid_map of p2, who is in a child
+                * user namespace to p1's.  Verify that the opener of the map
+                * file has CAP_SETFCAP against the parent of the new map
+                * namespace */
+               if (!file_ns_capable(file, map_ns->parent, CAP_SETFCAP))
+                       return false;
+       }
+
+       return true;
+}
+
 static ssize_t map_write(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos,
                         int cap_setid,
@@ -848,7 +903,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
                         struct uid_gid_map *parent_map)
 {
        struct seq_file *seq = file->private_data;
-       struct user_namespace *ns = seq->private;
+       struct user_namespace *map_ns = seq->private;
        struct uid_gid_map new_map;
        unsigned idx;
        struct uid_gid_extent extent;
@@ -895,7 +950,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
        /*
         * Adjusting namespace settings requires capabilities on the target.
         */
-       if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
+       if (cap_valid(cap_setid) && !file_ns_capable(file, map_ns, CAP_SYS_ADMIN))
                goto out;
 
        /* Parse the user data */
@@ -965,7 +1020,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 
        ret = -EPERM;
        /* Validate the user is allowed to use user id's mapped to. */
-       if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
+       if (!new_idmap_permitted(file, map_ns, cap_setid, &new_map))
                goto out;
 
        ret = -EPERM;
@@ -1086,6 +1141,10 @@ static bool new_idmap_permitted(const struct file *file,
                                struct uid_gid_map *new_map)
 {
        const struct cred *cred = file->f_cred;
+
+       if (cap_setid == CAP_SETUID && !verify_root_map(file, ns, new_map))
+               return false;
+
        /* Don't allow mappings that would allow anything that wouldn't
         * be allowed without the establishment of unprivileged mappings.
         */
index 71109065bd8ebf4ee946a902321b4cd8934a3d95..107bc38b19450afa728d89abfc6b57696d1dd09f 100644 (file)
@@ -278,9 +278,10 @@ void touch_all_softlockup_watchdogs(void)
         * update as well, the only side effect might be a cycle delay for
         * the softlockup check.
         */
-       for_each_cpu(cpu, &watchdog_allowed_mask)
+       for_each_cpu(cpu, &watchdog_allowed_mask) {
                per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
-       wq_watchdog_touch(-1);
+               wq_watchdog_touch(cpu);
+       }
 }
 
 void touch_softlockup_watchdog_sync(void)
index 0d150da252e81d2781fd7bebc6ad3654222a586d..79f2319543ced3d62ac7773e986d0f0e5c79c6e9 100644 (file)
@@ -1412,7 +1412,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
         */
        lockdep_assert_irqs_disabled();
 
-       debug_work_activate(work);
 
        /* if draining, only works from the same workqueue are allowed */
        if (unlikely(wq->flags & __WQ_DRAINING) &&
@@ -1494,6 +1493,7 @@ retry:
                worklist = &pwq->delayed_works;
        }
 
+       debug_work_activate(work);
        insert_work(pwq, work, worklist, work_flags);
 
 out:
@@ -5787,22 +5787,17 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
                        continue;
 
                /* get the latest of pool and touched timestamps */
+               if (pool->cpu >= 0)
+                       touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
+               else
+                       touched = READ_ONCE(wq_watchdog_touched);
                pool_ts = READ_ONCE(pool->watchdog_ts);
-               touched = READ_ONCE(wq_watchdog_touched);
 
                if (time_after(pool_ts, touched))
                        ts = pool_ts;
                else
                        ts = touched;
 
-               if (pool->cpu >= 0) {
-                       unsigned long cpu_touched =
-                               READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
-                                                 pool->cpu));
-                       if (time_after(cpu_touched, ts))
-                               ts = cpu_touched;
-               }
-
                /* did we stall? */
                if (time_after(jiffies, ts + thresh)) {
                        lockup_detected = true;
@@ -5826,8 +5821,8 @@ notrace void wq_watchdog_touch(int cpu)
 {
        if (cpu >= 0)
                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
-       else
-               wq_watchdog_touched = jiffies;
+
+       wq_watchdog_touched = jiffies;
 }
 
 static void wq_watchdog_set_thresh(unsigned long thresh)
index a38cc61256f1a44e011e7a9f72042900221f9e4d..ac3b30697b2b392af075f81cc83851a18f33f0d3 100644 (file)
@@ -701,3 +701,6 @@ config GENERIC_LIB_DEVMEM_IS_ALLOWED
 config PLDMFW
        bool
        default n
+
+config ASN1_ENCODER
+       tristate
index 2779c29d9981f1d0587005d8ea916fa52b07e574..2c7f46b366f165f56ff2c0b8ae092600a92132e6 100644 (file)
@@ -1363,13 +1363,53 @@ config LOCKDEP
        bool
        depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
        select STACKTRACE
-       select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
+       depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
        select KALLSYMS
        select KALLSYMS_ALL
 
 config LOCKDEP_SMALL
        bool
 
+config LOCKDEP_BITS
+       int "Bitsize for MAX_LOCKDEP_ENTRIES"
+       depends on LOCKDEP && !LOCKDEP_SMALL
+       range 10 30
+       default 15
+       help
+         Try increasing this value if you hit "BUG: MAX_LOCKDEP_ENTRIES too low!" message.
+
+config LOCKDEP_CHAINS_BITS
+       int "Bitsize for MAX_LOCKDEP_CHAINS"
+       depends on LOCKDEP && !LOCKDEP_SMALL
+       range 10 30
+       default 16
+       help
+         Try increasing this value if you hit "BUG: MAX_LOCKDEP_CHAINS too low!" message.
+
+config LOCKDEP_STACK_TRACE_BITS
+       int "Bitsize for MAX_STACK_TRACE_ENTRIES"
+       depends on LOCKDEP && !LOCKDEP_SMALL
+       range 10 30
+       default 19
+       help
+         Try increasing this value if you hit "BUG: MAX_STACK_TRACE_ENTRIES too low!" message.
+
+config LOCKDEP_STACK_TRACE_HASH_BITS
+       int "Bitsize for STACK_TRACE_HASH_SIZE"
+       depends on LOCKDEP && !LOCKDEP_SMALL
+       range 10 30
+       default 14
+       help
+         Try increasing this value if you need large MAX_STACK_TRACE_ENTRIES.
+
+config LOCKDEP_CIRCULAR_QUEUE_BITS
+       int "Bitsize for elements in circular_queue struct"
+       depends on LOCKDEP
+       range 10 30
+       default 12
+       help
+         Try increasing this value if you hit "lockdep bfs error:-1" warning due to __cq_enqueue() failure.
+
 config DEBUG_LOCKDEP
        bool "Lock dependency engine debugging"
        depends on DEBUG_KERNEL && LOCKDEP
@@ -1665,7 +1705,7 @@ config LATENCYTOP
        depends on DEBUG_KERNEL
        depends on STACKTRACE_SUPPORT
        depends on PROC_FS
-       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
+       depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
        select KALLSYMS
        select KALLSYMS_ALL
        select STACKTRACE
@@ -1918,7 +1958,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
        depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
        depends on !X86_64
        select STACKTRACE
-       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
+       depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
        help
          Provide stacktrace filter for fault-injection capabilities
 
index fba9909e31b717606b46263ff8f7c2e50220ea2f..cffc2ebbf185d3f869b5f142440feea0dad8abcd 100644 (file)
@@ -138,9 +138,10 @@ config KASAN_INLINE
 
 endchoice
 
-config KASAN_STACK_ENABLE
+config KASAN_STACK
        bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
        depends on KASAN_GENERIC || KASAN_SW_TAGS
+       default y if CC_IS_GCC
        help
          The LLVM stack address sanitizer has a know problem that
          causes excessive stack usage in a lot of functions, see
@@ -154,12 +155,6 @@ config KASAN_STACK_ENABLE
          CONFIG_COMPILE_TEST.  On gcc it is assumed to always be safe
          to use and enabled by default.
 
-config KASAN_STACK
-       int
-       depends on KASAN_GENERIC || KASAN_SW_TAGS
-       default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
-       default 0
-
 config KASAN_SW_TAGS_IDENTIFY
        bool "Enable memory corruption identification"
        depends on KASAN_SW_TAGS
index b5307d3eec1aaa6327886e034ad3622f3596ff75..e11cfc18b6c0826ffc8bdcf1329dd1ec1aa2ef2e 100644 (file)
@@ -280,6 +280,7 @@ obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
 obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
 
 obj-$(CONFIG_ASN1) += asn1_decoder.o
+obj-$(CONFIG_ASN1_ENCODER) += asn1_encoder.o
 
 obj-$(CONFIG_FONT_SUPPORT) += fonts/
 
diff --git a/lib/asn1_encoder.c b/lib/asn1_encoder.c
new file mode 100644 (file)
index 0000000..41e71aa
--- /dev/null
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Simple encoder primitives for ASN.1 BER/DER/CER
+ *
+ * Copyright (C) 2019 James.Bottomley@HansenPartnership.com
+ */
+
+#include <linux/asn1_encoder.h>
+#include <linux/bug.h>
+#include <linux/string.h>
+#include <linux/module.h>
+
+/**
+ * asn1_encode_integer() - encode positive integer to ASN.1
+ * @data:      pointer to the pointer to the data
+ * @end_data:  end of data pointer, points one beyond last usable byte in @data
+ * @integer:   integer to be encoded
+ *
+ * This is a simplified encoder: it only currently does
+ * positive integers, but it should be simple enough to add the
+ * negative case if a use comes along.
+ */
+unsigned char *
+asn1_encode_integer(unsigned char *data, const unsigned char *end_data,
+                   s64 integer)
+{
+       int data_len = end_data - data;
+       unsigned char *d = &data[2];
+       bool found = false;
+       int i;
+
+       if (WARN(integer < 0,
+                "BUG: integer encode only supports positive integers"))
+               return ERR_PTR(-EINVAL);
+
+       if (IS_ERR(data))
+               return data;
+
+       /* need at least 3 bytes for tag, length and integer encoding */
+       if (data_len < 3)
+               return ERR_PTR(-EINVAL);
+
+       /* remaining length at d (the start of the integer encoding) */
+       data_len -= 2;
+
+       data[0] = _tag(UNIV, PRIM, INT);
+       if (integer == 0) {
+               *d++ = 0;
+               goto out;
+       }
+
+       for (i = sizeof(integer); i > 0 ; i--) {
+               int byte = integer >> (8 * (i - 1));
+
+               if (!found && byte == 0)
+                       continue;
+
+               /*
+                * for a positive number the first byte must have bit
+                * 7 clear in two's complement (otherwise it's a
+                * negative number) so prepend a leading zero if
+                * that's not the case
+                */
+               if (!found && (byte & 0x80)) {
+                       /*
+                        * no check needed here, we already know we
+                        * have len >= 1
+                        */
+                       *d++ = 0;
+                       data_len--;
+               }
+
+               found = true;
+               if (data_len == 0)
+                       return ERR_PTR(-EINVAL);
+
+               *d++ = byte;
+               data_len--;
+       }
+
+ out:
+       data[1] = d - data - 2;
+
+       return d;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_integer);
+
+/* calculate the base 128 digit values setting the top bit of the first octet */
+static int asn1_encode_oid_digit(unsigned char **_data, int *data_len, u32 oid)
+{
+       unsigned char *data = *_data;
+       int start = 7 + 7 + 7 + 7;
+       int ret = 0;
+
+       if (*data_len < 1)
+               return -EINVAL;
+
+       /* quick case */
+       if (oid == 0) {
+               *data++ = 0x80;
+               (*data_len)--;
+               goto out;
+       }
+
+       while (oid >> start == 0)
+               start -= 7;
+
+       while (start > 0 && *data_len > 0) {
+               u8 byte;
+
+               byte = oid >> start;
+               oid = oid - (byte << start);
+               start -= 7;
+               byte |= 0x80;
+               *data++ = byte;
+               (*data_len)--;
+       }
+
+       if (*data_len > 0) {
+               *data++ = oid;
+               (*data_len)--;
+       } else {
+               ret = -EINVAL;
+       }
+
+ out:
+       *_data = data;
+       return ret;
+}
+
+/**
+ * asn1_encode_oid() - encode an oid to ASN.1
+ * @data:      position to begin encoding at
+ * @end_data:  end of data pointer, points one beyond last usable byte in @data
+ * @oid:       array of oids
+ * @oid_len:   length of oid array
+ *
+ * this encodes an OID into ASN.1 when presented as an array of OID values
+ */
+unsigned char *
+asn1_encode_oid(unsigned char *data, const unsigned char *end_data,
+               u32 oid[], int oid_len)
+{
+       int data_len = end_data - data;
+       unsigned char *d = data + 2;
+       int i, ret;
+
+       if (WARN(oid_len < 2, "OID must have at least two elements"))
+               return ERR_PTR(-EINVAL);
+
+       if (WARN(oid_len > 32, "OID is too large"))
+               return ERR_PTR(-EINVAL);
+
+       if (IS_ERR(data))
+               return data;
+
+
+       /* need at least 3 bytes for tag, length and OID encoding */
+       if (data_len < 3)
+               return ERR_PTR(-EINVAL);
+
+       data[0] = _tag(UNIV, PRIM, OID);
+       *d++ = oid[0] * 40 + oid[1];
+
+       data_len -= 3;
+
+       ret = 0;
+
+       for (i = 2; i < oid_len; i++) {
+               ret = asn1_encode_oid_digit(&d, &data_len, oid[i]);
+               if (ret < 0)
+                       return ERR_PTR(ret);
+       }
+
+       data[1] = d - data - 2;
+
+       return d;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_oid);
+
+/**
+ * asn1_encode_length() - encode a length to follow an ASN.1 tag
+ * @data: pointer to encode at
+ * @data_len: pointer to remaining length (adjusted by routine)
+ * @len: length to encode
+ *
+ * This routine can encode lengths up to 65535 using the ASN.1 rules.
+ * It will accept a negative length and place a zero length tag
+ * instead (to keep the ASN.1 valid).  This convention allows other
+ * encoder primitives to accept negative lengths as signalling the
+ * sequence will be re-encoded when the length is known.
+ */
+static int asn1_encode_length(unsigned char **data, int *data_len, int len)
+{
+       if (*data_len < 1)
+               return -EINVAL;
+
+       if (len < 0) {
+               *((*data)++) = 0;
+               (*data_len)--;
+               return 0;
+       }
+
+       if (len <= 0x7f) {
+               *((*data)++) = len;
+               (*data_len)--;
+               return 0;
+       }
+
+       if (*data_len < 2)
+               return -EINVAL;
+
+       if (len <= 0xff) {
+               *((*data)++) = 0x81;
+               *((*data)++) = len & 0xff;
+               *data_len -= 2;
+               return 0;
+       }
+
+       if (*data_len < 3)
+               return -EINVAL;
+
+       if (len <= 0xffff) {
+               *((*data)++) = 0x82;
+               *((*data)++) = (len >> 8) & 0xff;
+               *((*data)++) = len & 0xff;
+               *data_len -= 3;
+               return 0;
+       }
+
+       if (WARN(len > 0xffffff, "ASN.1 length can't be > 0xffffff"))
+               return -EINVAL;
+
+       if (*data_len < 4)
+               return -EINVAL;
+       *((*data)++) = 0x83;
+       *((*data)++) = (len >> 16) & 0xff;
+       *((*data)++) = (len >> 8) & 0xff;
+       *((*data)++) = len & 0xff;
+       *data_len -= 4;
+
+       return 0;
+}
+
+/**
+ * asn1_encode_tag() - add a tag for optional or explicit value
+ * @data:      pointer to place tag at
+ * @end_data:  end of data pointer, points one beyond last usable byte in @data
+ * @tag:       tag to be placed
+ * @string:    the data to be tagged
+ * @len:       the length of the data to be tagged
+ *
+ * Note this currently only handles short form tags < 31.
+ *
+ * Standard usage is to pass in a @tag, @string and @length and the
+ * @string will be ASN.1 encoded with @tag and placed into @data.  If
+ * the encoding would put data past @end_data then an error is
+ * returned, otherwise a pointer to a position one beyond the encoding
+ * is returned.
+ *
+ * To encode in place pass a NULL @string and -1 for @len and the
+ * maximum allowable beginning and end of the data; all this will do
+ * is add the current maximum length and update the data pointer to
+ * the place where the tag contents should be placed is returned.  The
+ * data should be copied in by the calling routine which should then
+ * repeat the prior statement but now with the known length.  In order
+ * to avoid having to keep both before and after pointers, the repeat
+ * expects to be called with @data pointing to where the first encode
+ * returned it and still NULL for @string but the real length in @len.
+ */
+unsigned char *
+asn1_encode_tag(unsigned char *data, const unsigned char *end_data,
+               u32 tag, const unsigned char *string, int len)
+{
+       int data_len = end_data - data;
+       int ret;
+
+       if (WARN(tag > 30, "ASN.1 tag can't be > 30"))
+               return ERR_PTR(-EINVAL);
+
+       if (!string && WARN(len > 127,
+                           "BUG: recode tag is too big (>127)"))
+               return ERR_PTR(-EINVAL);
+
+       if (IS_ERR(data))
+               return data;
+
+       if (!string && len > 0) {
+               /*
+                * we're recoding, so move back to the start of the
+                * tag and install a dummy length because the real
+                * data_len should be NULL
+                */
+               data -= 2;
+               data_len = 2;
+       }
+
+       if (data_len < 2)
+               return ERR_PTR(-EINVAL);
+
+       *(data++) = _tagn(CONT, CONS, tag);
+       data_len--;
+       ret = asn1_encode_length(&data, &data_len, len);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
+       if (!string)
+               return data;
+
+       if (data_len < len)
+               return ERR_PTR(-EINVAL);
+
+       memcpy(data, string, len);
+       data += len;
+
+       return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_tag);
+
+/**
+ * asn1_encode_octet_string() - encode an ASN.1 OCTET STRING
+ * @data:      pointer to encode at
+ * @end_data:  end of data pointer, points one beyond last usable byte in @data
+ * @string:    string to be encoded
+ * @len:       length of string
+ *
+ * Note ASN.1 octet strings may contain zeros, so the length is obligatory.
+ */
+unsigned char *
+asn1_encode_octet_string(unsigned char *data,
+                        const unsigned char *end_data,
+                        const unsigned char *string, u32 len)
+{
+       int data_len = end_data - data;
+       int ret;
+
+       if (IS_ERR(data))
+               return data;
+
+       /* need minimum of 2 bytes for tag and length of zero length string */
+       if (data_len < 2)
+               return ERR_PTR(-EINVAL);
+
+       *(data++) = _tag(UNIV, PRIM, OTS);
+       data_len--;
+
+       ret = asn1_encode_length(&data, &data_len, len);
+       if (ret)
+               return ERR_PTR(ret);
+
+       if (data_len < len)
+               return ERR_PTR(-EINVAL);
+
+       memcpy(data, string, len);
+       data += len;
+
+       return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_octet_string);
+
+/**
+ * asn1_encode_sequence() - wrap a byte stream in an ASN.1 SEQUENCE
+ * @data:      pointer to encode at
+ * @end_data:  end of data pointer, points one beyond last usable byte in @data
+ * @seq:       data to be encoded as a sequence
+ * @len:       length of the data to be encoded as a sequence
+ *
+ * Fill in a sequence.  To encode in place, pass NULL for @seq and -1
+ * for @len; then call again once the length is known (still with NULL
+ * for @seq). In order to avoid having to keep both before and after
+ * pointers, the repeat expects to be called with @data pointing to
+ * where the first encode placed it.
+ */
+unsigned char *
+asn1_encode_sequence(unsigned char *data, const unsigned char *end_data,
+                    const unsigned char *seq, int len)
+{
+       int data_len = end_data - data;
+       int ret;
+
+       if (!seq && WARN(len > 127,
+                        "BUG: recode sequence is too big (>127)"))
+               return ERR_PTR(-EINVAL);
+
+       if (IS_ERR(data))
+               return data;
+
+       if (!seq && len >= 0) {
+               /*
+                * we're recoding, so move back to the start of the
+                * sequence and install a dummy length because the
+                * real length should be NULL
+                */
+               data -= 2;
+               data_len = 2;
+       }
+
+       if (data_len < 2)
+               return ERR_PTR(-EINVAL);
+
+       *(data++) = _tag(UNIV, CONS, SEQ);
+       data_len--;
+
+       ret = asn1_encode_length(&data, &data_len, len);
+       if (ret)
+               return ERR_PTR(ret);
+
+       if (!seq)
+               return data;
+
+       if (data_len < len)
+               return ERR_PTR(-EINVAL);
+
+       memcpy(data, seq, len);
+       data += len;
+
+       return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_sequence);
+
+/**
+ * asn1_encode_boolean() - encode a boolean value to ASN.1
+ * @data:      pointer to encode at
+ * @end_data:  end of data pointer, points one beyond last usable byte in @data
+ * @val:       the boolean true/false value
+ */
+unsigned char *
+asn1_encode_boolean(unsigned char *data, const unsigned char *end_data,
+                   bool val)
+{
+       int data_len = end_data - data;
+
+       if (IS_ERR(data))
+               return data;
+
+       /* booleans are 3 bytes: tag, length == 1 and value == 0 or 1 */
+       if (data_len < 3)
+               return ERR_PTR(-EINVAL);
+
+       *(data++) = _tag(UNIV, PRIM, BOOL);
+       data_len--;
+
+       asn1_encode_length(&data, &data_len, 1);
+
+       if (val)
+               *(data++) = 1;
+       else
+               *(data++) = 0;
+
+       return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_boolean);
+
+MODULE_LICENSE("GPL");
index 4ccbec442469c30a4fed329852fb421e7dd57c20..b748fd3d256e41cb00d9c77fa463d41632769cac 100644 (file)
@@ -64,7 +64,7 @@ static void chacha_permute(u32 *x, int nrounds)
 }
 
 /**
- * chacha_block - generate one keystream block and increment block counter
+ * chacha_block_generic - generate one keystream block and increment block counter
  * @state: input state matrix (16 32-bit words)
  * @stream: output keystream block (64 bytes)
  * @nrounds: number of rounds (20 or 12; 20 is recommended)
@@ -92,7 +92,7 @@ EXPORT_SYMBOL(chacha_block_generic);
 /**
  * hchacha_block_generic - abbreviated ChaCha core, for XChaCha
  * @state: input state matrix (16 32-bit words)
- * @out: output (8 32-bit words)
+ * @stream: output (8 32-bit words)
  * @nrounds: number of rounds (20 or 12; 20 is recommended)
  *
  * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step
index 3cc77d94390b26cdb7b04b5a0a133ec7ab070897..7fb71845cc8464fd8beeb5be2b65b79d1193fcd0 100644 (file)
@@ -10,7 +10,8 @@
 #include <asm/unaligned.h>
 #include <crypto/internal/poly1305.h>
 
-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
+void poly1305_core_setkey(struct poly1305_core_key *key,
+                         const u8 raw_key[POLY1305_BLOCK_SIZE])
 {
        /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
        key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
index 6ae181bb43450cf940d42425965d40c2b9e7dbcb..d34cf4053668958ee48e176092d92d90c7d3c0f8 100644 (file)
@@ -12,7 +12,8 @@
 
 typedef __uint128_t u128;
 
-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
+void poly1305_core_setkey(struct poly1305_core_key *key,
+                         const u8 raw_key[POLY1305_BLOCK_SIZE])
 {
        u64 t0, t1;
 
index 9d2d14df0fee533e2548c44e50573f90d4fa3951..26d87fc3823e83798d052aba09c6a94a59c5fd15 100644 (file)
@@ -12,7 +12,8 @@
 #include <linux/module.h>
 #include <asm/unaligned.h>
 
-void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key)
+void poly1305_init_generic(struct poly1305_desc_ctx *desc,
+                          const u8 key[POLY1305_KEY_SIZE])
 {
        poly1305_core_setkey(&desc->core_r, key);
        desc->s[0] = get_unaligned_le32(key + 16);
index e83628882001bcd8416c600c1be3e7f2c77cbe77..7921193f042439b783ad4d303040067939d9e6d3 100644 (file)
@@ -40,7 +40,7 @@ enum cpio_fields {
 };
 
 /**
- * cpio_data find_cpio_data - Search for files in an uncompressed cpio
+ * find_cpio_data - Search for files in an uncompressed cpio
  * @path:       The directory to search for, including a slash at the end
  * @data:       Pointer to the cpio archive or a header inside
  * @len:        Remaining length of the cpio based on data pointer
@@ -49,7 +49,7 @@ enum cpio_fields {
  *              matching file itself. It can be used to iterate through the cpio
  *              to find all files inside of a directory path.
  *
- * @return:     struct cpio_data containing the address, length and
+ * Return:      &struct cpio_data containing the address, length and
  *              filename (with the directory path cut off) of the found file.
  *              If you search for a filename and not for files in a directory,
  *              pass the absolute path of the filename in the cpio and make sure
index 7998affa45d49a27b8613931ae3eb81499eef926..c87d5b6a8a55a3c02719ebfde3761f948ec2c35b 100644 (file)
@@ -251,12 +251,13 @@ static int kobj_usermode_filter(struct kobject *kobj)
 
 static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
 {
+       int buffer_size = sizeof(env->buf) - env->buflen;
        int len;
 
-       len = strlcpy(&env->buf[env->buflen], subsystem,
-                     sizeof(env->buf) - env->buflen);
-       if (len >= (sizeof(env->buf) - env->buflen)) {
-               WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
+       len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size);
+       if (len >= buffer_size) {
+               pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n",
+                       buffer_size, len);
                return -ENOMEM;
        }
 
index c69ee53d8dded9194a27e80d2b6b7683b8255374..52313acbfa6284a57909ec8967e1abac0a24259b 100644 (file)
@@ -76,6 +76,7 @@ int lc_try_lock(struct lru_cache *lc)
 /**
  * lc_create - prepares to track objects in an active set
  * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
+ * @cache: cache root pointer
  * @max_pending_changes: maximum changes to accumulate until a transaction is required
  * @e_count: number of elements allowed to be active simultaneously
  * @e_size: size of the tracked objects
@@ -627,7 +628,7 @@ void lc_set(struct lru_cache *lc, unsigned int enr, int index)
 }
 
 /**
- * lc_dump - Dump a complete LRU cache to seq in textual form.
+ * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form.
  * @lc: the lru cache to operate on
  * @seq: the &struct seq_file pointer to seq_printf into
  * @utext: user supplied additional "heading" or other info
index f7ad43f28579359cf352f51ecf4be580e9e70444..3dfaa836e7c58ab6337829c85daacdc404beff75 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/bug.h>
+#include <linux/asn1.h>
 #include "oid_registry_data.c"
 
 MODULE_DESCRIPTION("OID Registry");
@@ -92,6 +93,29 @@ enum OID look_up_OID(const void *data, size_t datasize)
 }
 EXPORT_SYMBOL_GPL(look_up_OID);
 
+/**
+ * parse_OID - Parse an OID from a bytestream
+ * @data: Binary representation of the header + OID
+ * @datasize: Size of the binary representation
+ * @oid: Pointer to oid to return result
+ *
+ * Parse an OID from a bytestream that holds the OID in the format
+ * ASN1_OID | length | oid. The length indicator must be equal to datasize - 2.
+ * -EBADMSG is returned if the bytestream is too short.
+ */
+int parse_OID(const void *data, size_t datasize, enum OID *oid)
+{
+       const unsigned char *v = data;
+
+       /* we need 2 bytes of header and at least 1 byte for oid */
+       if (datasize < 3 || v[0] != ASN1_OID || v[1] != datasize - 2)
+               return -EBADMSG;
+
+       *oid = look_up_OID(data + 2, datasize - 2);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(parse_OID);
+
 /*
  * sprint_OID - Print an Object Identifier into a buffer
  * @data: The encoded OID to print
index a11f2f667639725d927566185b5f40b336d5a754..3f8f8d422e62303f14a9cb8cc41d05410ae5d867 100644 (file)
@@ -297,7 +297,7 @@ EXPORT_SYMBOL(parman_destroy);
  * parman_prio_init - initializes a parman priority chunk
  * @parman:    parman instance
  * @prio:      parman prio structure to be initialized
- * @prority:   desired priority of the chunk
+ * @priority:  desired priority of the chunk
  *
  * Note: all locking must be provided by the caller.
  *
@@ -356,7 +356,7 @@ int parman_item_add(struct parman *parman, struct parman_prio *prio,
 EXPORT_SYMBOL(parman_item_add);
 
 /**
- * parman_item_del - deletes parman item
+ * parman_item_remove - deletes parman item
  * @parman:    parman instance
  * @prio:      parman prio instance to delete the item from
  * @item:      parman item instance
index 3a4da11b804d9aaaa148258d3dde8c087c9f237c..b3afafe46fffbca7d7afff3b645b47e1c6faae23 100644 (file)
@@ -166,9 +166,9 @@ static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
 /**
  * radix_tree_find_next_bit - find the next set bit in a memory region
  *
- * @addr: The address to base the search on
- * @size: The bitmap size in bits
- * @offset: The bitnumber to start searching at
+ * @node: where to begin the search
+ * @tag: the tag index
+ * @offset: the bitnumber to start searching at
  *
  * Unrollable variant of find_next_bit() for constant size arrays.
  * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
@@ -461,7 +461,7 @@ out:
 
 /**
  *     radix_tree_shrink    -    shrink radix tree to minimum height
- *     @root           radix tree root
+ *     @root:          radix tree root
  */
 static inline bool radix_tree_shrink(struct radix_tree_root *root)
 {
@@ -691,7 +691,7 @@ static inline int insert_entries(struct radix_tree_node *node,
 }
 
 /**
- *     __radix_tree_insert    -    insert into a radix tree
+ *     radix_tree_insert    -    insert into a radix tree
  *     @root:          radix tree root
  *     @index:         index key
  *     @item:          item to insert
@@ -919,6 +919,7 @@ EXPORT_SYMBOL(radix_tree_replace_slot);
 /**
  * radix_tree_iter_replace - replace item in a slot
  * @root:      radix tree root
+ * @iter:      iterator state
  * @slot:      pointer to slot
  * @item:      new item to store in the slot.
  *
index e5647d147b35059d4cb06f41c375fb62b37ea14e..785e724ce0d8881b780ca4602d2b871d90ff9093 100644 (file)
@@ -69,10 +69,10 @@ static void kasan_test_exit(struct kunit *test)
  * resource named "kasan_data". Do not use this name for KUnit resources
  * outside of KASAN tests.
  *
- * For hardware tag-based KASAN, when a tag fault happens, tag checking is
- * normally auto-disabled. When this happens, this test handler reenables
- * tag checking. As tag checking can be only disabled or enabled per CPU, this
- * handler disables migration (preemption).
+ * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
+ * checking is auto-disabled. When this happens, this test handler reenables
+ * tag checking. As tag checking can be only disabled or enabled per CPU,
+ * this handler disables migration (preemption).
  *
  * Since the compiler doesn't see that the expression can change the fail_data
  * fields, it can reorder or optimize away the accesses to those fields.
@@ -80,7 +80,8 @@ static void kasan_test_exit(struct kunit *test)
  * expression to prevent that.
  */
 #define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {         \
-       if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))                   \
+       if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&                 \
+           !kasan_async_mode_enabled())                        \
                migrate_disable();                              \
        WRITE_ONCE(fail_data.report_expected, true);            \
        WRITE_ONCE(fail_data.report_found, false);              \
@@ -92,12 +93,16 @@ static void kasan_test_exit(struct kunit *test)
        barrier();                                              \
        expression;                                             \
        barrier();                                              \
+       if (kasan_async_mode_enabled())                         \
+               kasan_force_async_fault();                      \
+       barrier();                                              \
        KUNIT_EXPECT_EQ(test,                                   \
                        READ_ONCE(fail_data.report_expected),   \
                        READ_ONCE(fail_data.report_found));     \
-       if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {                 \
+       if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&                 \
+           !kasan_async_mode_enabled()) {                      \
                if (READ_ONCE(fail_data.report_found))          \
-                       kasan_enable_tagging();                 \
+                       kasan_enable_tagging_sync();            \
                migrate_enable();                               \
        }                                                       \
 } while (0)
index eee017ff8980529c1267347fabab0dd2e662df1f..f1017f345d6cce6a395187d687b67dcb2c5cf99d 100644 (file)
@@ -22,7 +22,7 @@ static noinline void __init copy_user_test(void)
        char *kmem;
        char __user *usermem;
        size_t size = 10;
-       int unused;
+       int __maybe_unused unused;
 
        kmem = kmalloc(size, GFP_KERNEL);
        if (!kmem)
index 43700480d897d48eb5dbdb879d9d1c5efddb8d0d..6ce832dc59e7383c4ae8821c94586864e3f5ee91 100644 (file)
@@ -1969,8 +1969,14 @@ unlock:
 put:
                put_page(page);
 next:
-               if (!xa_is_value(page) && PageTransHuge(page))
-                       xas_set(&xas, page->index + thp_nr_pages(page));
+               if (!xa_is_value(page) && PageTransHuge(page)) {
+                       unsigned int nr_pages = thp_nr_pages(page);
+
+                       /* Final THP may cross MAX_LFS_FILESIZE on 32-bit */
+                       xas_set(&xas, page->index + nr_pages);
+                       if (xas.xa_index < nr_pages)
+                               break;
+               }
        }
        rcu_read_unlock();
 
@@ -2672,7 +2678,7 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
                loff_t end, int whence)
 {
        XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
-       pgoff_t max = (end - 1) / PAGE_SIZE;
+       pgoff_t max = (end - 1) >> PAGE_SHIFT;
        bool seek_data = (whence == SEEK_DATA);
        struct page *page;
 
@@ -2681,7 +2687,8 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 
        rcu_read_lock();
        while ((page = find_get_entry(&xas, max, XA_PRESENT))) {
-               loff_t pos = xas.xa_index * PAGE_SIZE;
+               loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
+               unsigned int seek_size;
 
                if (start < pos) {
                        if (!seek_data)
@@ -2689,25 +2696,25 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
                        start = pos;
                }
 
-               pos += seek_page_size(&xas, page);
+               seek_size = seek_page_size(&xas, page);
+               pos = round_up(pos + 1, seek_size);
                start = page_seek_hole_data(&xas, mapping, page, start, pos,
                                seek_data);
                if (start < pos)
                        goto unlock;
+               if (start >= end)
+                       break;
+               if (seek_size > PAGE_SIZE)
+                       xas_set(&xas, pos >> PAGE_SHIFT);
                if (!xa_is_value(page))
                        put_page(page);
        }
-       rcu_read_unlock();
-
        if (seek_data)
-               return -ENXIO;
-       goto out;
-
+               start = -ENXIO;
 unlock:
        rcu_read_unlock();
-       if (!xa_is_value(page))
+       if (page && !xa_is_value(page))
                put_page(page);
-out:
        if (start > end)
                return end;
        return start;
index e40579624f107811b3755851c3d9d66cc35dcc4d..ef7d2da9f03ff190ea795dd277cfa5c9106e9d97 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1535,6 +1535,10 @@ struct page *get_dump_page(unsigned long addr)
                                      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
        if (locked)
                mmap_read_unlock(mm);
+
+       if (ret == 1 && is_page_poisoned(page))
+               return NULL;
+
        return (ret == 1) ? page : NULL;
 }
 #endif /* CONFIG_ELF_CORE */
index 1432feec62df09038a04f7b4348041f5c169ae42..cb3c5e0a7799f50cb7778dc977a5060a4cac7f87 100644 (file)
@@ -97,6 +97,26 @@ static inline void set_page_refcounted(struct page *page)
        set_page_count(page, 1);
 }
 
+/*
+ * When the kernel touches a user page, that page may already have been marked
+ * hardware-poisoned while still being mapped in user space. If the kernel can
+ * guarantee data integrity and successful operation without this page, it is
+ * better to check the poison status first and avoid touching it — preferable
+ * to not panic; coredumping on a process fatal signal is an example of such a
+ * scenario. Conversely, if the kernel cannot guarantee data integrity, it is
+ * better not to call this function: let the kernel touch the poisoned page and
+ * panic.
+ */
+static inline bool is_page_poisoned(struct page *page)
+{
+       if (PageHWPoison(page))
+               return true;
+       else if (PageHuge(page) && PageHWPoison(compound_head(page)))
+               return true;
+
+       return false;
+}
+
 extern unsigned long highest_memmap_pfn;
 
 /*
index b5e08d4cefecc0effdef84544f94357a2963341c..7b53291dafa17523a622d4b03fb873adcbb6f916 100644 (file)
@@ -63,7 +63,7 @@ void __kasan_unpoison_range(const void *address, size_t size)
        kasan_unpoison(address, size);
 }
 
-#if CONFIG_KASAN_STACK
+#ifdef CONFIG_KASAN_STACK
 /* Unpoison the entire stack for a task. */
 void kasan_unpoison_task_stack(struct task_struct *task)
 {
index 2aad21fda156e4d79e5198a856b1e5da293de46f..4004388b4e4bc35d471fdf4809d46d3473be8a53 100644 (file)
@@ -25,6 +25,12 @@ enum kasan_arg {
        KASAN_ARG_ON,
 };
 
+enum kasan_arg_mode {
+       KASAN_ARG_MODE_DEFAULT,
+       KASAN_ARG_MODE_SYNC,
+       KASAN_ARG_MODE_ASYNC,
+};
+
 enum kasan_arg_stacktrace {
        KASAN_ARG_STACKTRACE_DEFAULT,
        KASAN_ARG_STACKTRACE_OFF,
@@ -38,6 +44,7 @@ enum kasan_arg_fault {
 };
 
 static enum kasan_arg kasan_arg __ro_after_init;
+static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
 static enum kasan_arg_stacktrace kasan_arg_stacktrace __ro_after_init;
 static enum kasan_arg_fault kasan_arg_fault __ro_after_init;
 
@@ -45,6 +52,10 @@ static enum kasan_arg_fault kasan_arg_fault __ro_after_init;
 DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
 EXPORT_SYMBOL(kasan_flag_enabled);
 
+/* Whether the asynchronous mode is enabled. */
+bool kasan_flag_async __ro_after_init;
+EXPORT_SYMBOL_GPL(kasan_flag_async);
+
 /* Whether to collect alloc/free stack traces. */
 DEFINE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
 
@@ -68,6 +79,23 @@ static int __init early_kasan_flag(char *arg)
 }
 early_param("kasan", early_kasan_flag);
 
+/* kasan.mode=sync/async */
+static int __init early_kasan_mode(char *arg)
+{
+       if (!arg)
+               return -EINVAL;
+
+       if (!strcmp(arg, "sync"))
+               kasan_arg_mode = KASAN_ARG_MODE_SYNC;
+       else if (!strcmp(arg, "async"))
+               kasan_arg_mode = KASAN_ARG_MODE_ASYNC;
+       else
+               return -EINVAL;
+
+       return 0;
+}
+early_param("kasan.mode", early_kasan_mode);
+
 /* kasan.stacktrace=off/on */
 static int __init early_kasan_flag_stacktrace(char *arg)
 {
@@ -115,7 +143,15 @@ void kasan_init_hw_tags_cpu(void)
                return;
 
        hw_init_tags(KASAN_TAG_MAX);
-       hw_enable_tagging();
+
+       /*
+        * Enable async mode only when explicitly requested through
+        * the command line.
+        */
+       if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
+               hw_enable_tagging_async();
+       else
+               hw_enable_tagging_sync();
 }
 
 /* kasan_init_hw_tags() is called once on boot CPU. */
@@ -132,6 +168,22 @@ void __init kasan_init_hw_tags(void)
        /* Enable KASAN. */
        static_branch_enable(&kasan_flag_enabled);
 
+       switch (kasan_arg_mode) {
+       case KASAN_ARG_MODE_DEFAULT:
+               /*
+                * Default to sync mode.
+                * Do nothing, kasan_flag_async keeps its default value.
+                */
+               break;
+       case KASAN_ARG_MODE_SYNC:
+               /* Do nothing, kasan_flag_async keeps its default value. */
+               break;
+       case KASAN_ARG_MODE_ASYNC:
+               /* Async mode enabled. */
+               kasan_flag_async = true;
+               break;
+       }
+
        switch (kasan_arg_stacktrace) {
        case KASAN_ARG_STACKTRACE_DEFAULT:
                /* Default to enabling stack trace collection. */
@@ -194,10 +246,16 @@ void kasan_set_tagging_report_once(bool state)
 }
 EXPORT_SYMBOL_GPL(kasan_set_tagging_report_once);
 
-void kasan_enable_tagging(void)
+void kasan_enable_tagging_sync(void)
+{
+       hw_enable_tagging_sync();
+}
+EXPORT_SYMBOL_GPL(kasan_enable_tagging_sync);
+
+void kasan_force_async_fault(void)
 {
-       hw_enable_tagging();
+       hw_force_async_tag_fault();
 }
-EXPORT_SYMBOL_GPL(kasan_enable_tagging);
+EXPORT_SYMBOL_GPL(kasan_force_async_fault);
 
 #endif
index 8c55634d6edd92fbab6d0c7030b819364d3f1ced..c1581e8a9b8e0046c2c9fed647505224ce297922 100644 (file)
@@ -7,20 +7,37 @@
 #include <linux/stackdepot.h>
 
 #ifdef CONFIG_KASAN_HW_TAGS
+
 #include <linux/static_key.h>
+
 DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
+extern bool kasan_flag_async __ro_after_init;
+
 static inline bool kasan_stack_collection_enabled(void)
 {
        return static_branch_unlikely(&kasan_flag_stacktrace);
 }
+
+static inline bool kasan_async_mode_enabled(void)
+{
+       return kasan_flag_async;
+}
 #else
+
 static inline bool kasan_stack_collection_enabled(void)
 {
        return true;
 }
+
+static inline bool kasan_async_mode_enabled(void)
+{
+       return false;
+}
+
 #endif
 
 extern bool kasan_flag_panic __ro_after_init;
+extern bool kasan_flag_async __ro_after_init;
 
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 #define KASAN_GRANULE_SIZE     (1UL << KASAN_SHADOW_SCALE_SHIFT)
@@ -231,7 +248,7 @@ void *kasan_find_first_bad_addr(void *addr, size_t size);
 const char *kasan_get_bug_type(struct kasan_access_info *info);
 void kasan_metadata_fetch_row(char *buffer, void *row);
 
-#if defined(CONFIG_KASAN_GENERIC) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN_GENERIC) && defined(CONFIG_KASAN_STACK)
 void kasan_print_address_stack_frame(const void *addr);
 #else
 static inline void kasan_print_address_stack_frame(const void *addr) { }
@@ -275,8 +292,11 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 
 #ifdef CONFIG_KASAN_HW_TAGS
 
-#ifndef arch_enable_tagging
-#define arch_enable_tagging()
+#ifndef arch_enable_tagging_sync
+#define arch_enable_tagging_sync()
+#endif
+#ifndef arch_enable_tagging_async
+#define arch_enable_tagging_async()
 #endif
 #ifndef arch_init_tags
 #define arch_init_tags(max_tag)
@@ -284,6 +304,9 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 #ifndef arch_set_tagging_report_once
 #define arch_set_tagging_report_once(state)
 #endif
+#ifndef arch_force_async_tag_fault
+#define arch_force_async_tag_fault()
+#endif
 #ifndef arch_get_random_tag
 #define arch_get_random_tag()  (0xFF)
 #endif
@@ -294,16 +317,19 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 #define arch_set_mem_tag_range(addr, size, tag) ((void *)(addr))
 #endif
 
-#define hw_enable_tagging()                    arch_enable_tagging()
+#define hw_enable_tagging_sync()               arch_enable_tagging_sync()
+#define hw_enable_tagging_async()              arch_enable_tagging_async()
 #define hw_init_tags(max_tag)                  arch_init_tags(max_tag)
 #define hw_set_tagging_report_once(state)      arch_set_tagging_report_once(state)
+#define hw_force_async_tag_fault()             arch_force_async_tag_fault()
 #define hw_get_random_tag()                    arch_get_random_tag()
 #define hw_get_mem_tag(addr)                   arch_get_mem_tag(addr)
 #define hw_set_mem_tag_range(addr, size, tag)  arch_set_mem_tag_range((addr), (size), (tag))
 
 #else /* CONFIG_KASAN_HW_TAGS */
 
-#define hw_enable_tagging()
+#define hw_enable_tagging_sync()
+#define hw_enable_tagging_async()
 #define hw_set_tagging_report_once(state)
 
 #endif /* CONFIG_KASAN_HW_TAGS */
@@ -311,12 +337,14 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 #if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
 void kasan_set_tagging_report_once(bool state);
-void kasan_enable_tagging(void);
+void kasan_enable_tagging_sync(void);
+void kasan_force_async_fault(void);
 
 #else /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
 
 static inline void kasan_set_tagging_report_once(bool state) { }
-static inline void kasan_enable_tagging(void) { }
+static inline void kasan_enable_tagging_sync(void) { }
+static inline void kasan_force_async_fault(void) { }
 
 #endif /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
 
index 87b271206163b8682f818276158812a1c01fe3eb..14bd51ea234836101bb596044b778a59e0b5171e 100644 (file)
@@ -87,7 +87,8 @@ static void start_report(unsigned long *flags)
 
 static void end_report(unsigned long *flags, unsigned long addr)
 {
-       trace_error_report_end(ERROR_DETECTOR_KASAN, addr);
+       if (!kasan_async_mode_enabled())
+               trace_error_report_end(ERROR_DETECTOR_KASAN, addr);
        pr_err("==================================================================\n");
        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
        spin_unlock_irqrestore(&report_lock, *flags);
@@ -360,6 +361,25 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
        end_report(&flags, (unsigned long)object);
 }
 
+#ifdef CONFIG_KASAN_HW_TAGS
+void kasan_report_async(void)
+{
+       unsigned long flags;
+
+#if IS_ENABLED(CONFIG_KUNIT)
+       if (current->kunit_test)
+               kasan_update_kunit_status(current->kunit_test);
+#endif /* IS_ENABLED(CONFIG_KUNIT) */
+
+       start_report(&flags);
+       pr_err("BUG: KASAN: invalid-access\n");
+       pr_err("Asynchronous mode enabled: no access details available\n");
+       pr_err("\n");
+       dump_stack();
+       end_report(&flags, 0);
+}
+#endif /* CONFIG_KASAN_HW_TAGS */
+
 static void __kasan_report(unsigned long addr, size_t size, bool is_write,
                                unsigned long ip)
 {
index 41f3745851444ffd0d9a5e95c589aabb6289b95b..de732bc341c5c725f9792abe02df20d6d261e6a0 100644 (file)
@@ -128,7 +128,7 @@ void kasan_metadata_fetch_row(char *buffer, void *row)
        memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
 }
 
-#if CONFIG_KASAN_STACK
+#ifdef CONFIG_KASAN_STACK
 static bool __must_check tokenize_frame_descr(const char **frame_descr,
                                              char *token, size_t max_tok_len,
                                              unsigned long *value)
index b59054ef2e107e8856c2289cb4be234797eb2dac..b890854ec761657a8a67bd43eadf25d4ab88f8a0 100644 (file)
@@ -165,10 +165,12 @@ static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
                return 0;
        }
 
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
        /* Huge pud */
        walk->action = ACTION_CONTINUE;
        if (pud_trans_huge(pudval) || pud_devmap(pudval))
                WARN_ON(pud_write(pudval) || pud_dirty(pudval));
+#endif
 
        return 0;
 }
index 3f287599a7a30173b89035ace52169da3bf1b096..1d96a21acb2f3ba43281f708af0b55c084726394 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -93,6 +93,12 @@ static void unmap_region(struct mm_struct *mm,
  * MAP_PRIVATE r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
  *             w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
  *             x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
+ *
+ * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
+ * MAP_PRIVATE (with Enhanced PAN supported):
+ *                                                             r: (no) no
+ *                                                             w: (no) no
+ *                                                             x: (yes) yes
  */
 pgprot_t protection_map[16] __ro_after_init = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
index 0dc7149b0c6153776320a7e9a0e7357ca18bfa95..1b9837419bf9cd11e1d452fa7624866975a38b6f 100644 (file)
@@ -249,16 +249,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_flush_mmu_free(tlb);
 }
 
-/**
- * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
- * @tlb: the mmu_gather structure to initialize
- * @mm: the mm_struct of the target address space
- * @fullmm: @mm is without users and we're going to destroy the full address
- *         space (exit/execve)
- *
- * Called to initialize an (on-stack) mmu_gather structure for page-table
- * tear-down from @mm.
- */
 static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                             bool fullmm)
 {
@@ -283,11 +273,30 @@ static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
        inc_tlb_flush_pending(tlb->mm);
 }
 
+/**
+ * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
+ * @tlb: the mmu_gather structure to initialize
+ * @mm: the mm_struct of the target address space
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for page-table
+ * tear-down from @mm.
+ */
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
 {
        __tlb_gather_mmu(tlb, mm, false);
 }
 
+/**
+ * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
+ * @tlb: the mmu_gather structure to initialize
+ * @mm: the mm_struct of the target address space
+ *
+ * In this case, @mm is without users and we're going to destroy the
+ * full address space (exit/execve).
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for page-table
+ * tear-down from @mm.
+ */
 void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
 {
        __tlb_gather_mmu(tlb, mm, true);
index 9efaf430cfd3758c123f051c5dddf5fd2e345260..fa1cf18bac978452b838d3ad61d84e4aae8706e9 100644 (file)
@@ -170,7 +170,7 @@ static bool oom_unkillable_task(struct task_struct *p)
        return false;
 }
 
-/**
+/*
  * Check whether unreclaimable slab amount is greater than
  * all user memory(LRU pages).
  * dump_unreclaimable_slab() could help in the case that
index cfc72873961d9e7a1e499ed50784f9f5651cf7d0..e2f19bf948dbe67fd1618dfdbc76340cc00184c3 100644 (file)
@@ -167,10 +167,10 @@ unsigned long totalcma_pages __read_mostly;
 
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
-DEFINE_STATIC_KEY_FALSE(init_on_alloc);
+DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
 EXPORT_SYMBOL(init_on_alloc);
 
-DEFINE_STATIC_KEY_FALSE(init_on_free);
+DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
 EXPORT_SYMBOL(init_on_free);
 
 static bool _init_on_alloc_enabled_early __read_mostly
index 65cdf844c8adbbe2ad24b562df3479f4c3f77459..655dc58956043983b47cf7732bfb84dcb52c9fea 100644 (file)
@@ -77,12 +77,14 @@ static void unpoison_page(struct page *page)
        void *addr;
 
        addr = kmap_atomic(page);
+       kasan_disable_current();
        /*
         * Page poisoning when enabled poisons each and every page
         * that is freed to buddy. Thus no extra check is done to
         * see if a page was poisoned.
         */
-       check_poison_mem(addr, PAGE_SIZE);
+       check_poison_mem(kasan_reset_tag(addr), PAGE_SIZE);
+       kasan_enable_current();
        kunmap_atomic(addr);
 }
 
index 18b768ac7dcae26326cab9417a976274e1efb8b9..095d7eaa0db426541a5b1ee10cba8eede97dffdc 100644 (file)
@@ -87,7 +87,7 @@ extern spinlock_t pcpu_lock;
 
 extern struct list_head *pcpu_chunk_lists;
 extern int pcpu_nr_slots;
-extern int pcpu_nr_empty_pop_pages;
+extern int pcpu_nr_empty_pop_pages[];
 
 extern struct pcpu_chunk *pcpu_first_chunk;
 extern struct pcpu_chunk *pcpu_reserved_chunk;
index c8400a2adbc2b226ba01b39087d6b40610d12f42..f6026dbcdf6b3beca15ae315dcb858b6361849da 100644 (file)
@@ -145,6 +145,7 @@ static int percpu_stats_show(struct seq_file *m, void *v)
        int slot, max_nr_alloc;
        int *buffer;
        enum pcpu_chunk_type type;
+       int nr_empty_pop_pages;
 
 alloc_buffer:
        spin_lock_irq(&pcpu_lock);
@@ -165,7 +166,11 @@ alloc_buffer:
                goto alloc_buffer;
        }
 
-#define PL(X) \
+       nr_empty_pop_pages = 0;
+       for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
+               nr_empty_pop_pages += pcpu_nr_empty_pop_pages[type];
+
+#define PL(X)                                                          \
        seq_printf(m, "  %-20s: %12lld\n", #X, (long long int)pcpu_stats_ai.X)
 
        seq_printf(m,
@@ -196,7 +201,7 @@ alloc_buffer:
        PU(nr_max_chunks);
        PU(min_alloc_size);
        PU(max_alloc_size);
-       P("empty_pop_pages", pcpu_nr_empty_pop_pages);
+       P("empty_pop_pages", nr_empty_pop_pages);
        seq_putc(m, '\n');
 
 #undef PU
index 6596a0a4286e7de91ca5dca1c1366884a32ebc22..23308113a5ff0b72b96f49b04887d2b05f19e405 100644 (file)
@@ -173,10 +173,10 @@ struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
 static LIST_HEAD(pcpu_map_extend_chunks);
 
 /*
- * The number of empty populated pages, protected by pcpu_lock.  The
- * reserved chunk doesn't contribute to the count.
+ * The number of empty populated pages by chunk type, protected by pcpu_lock.
+ * The reserved chunk doesn't contribute to the count.
  */
-int pcpu_nr_empty_pop_pages;
+int pcpu_nr_empty_pop_pages[PCPU_NR_CHUNK_TYPES];
 
 /*
  * The number of populated pages in use by the allocator, protected by
@@ -556,7 +556,7 @@ static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
 {
        chunk->nr_empty_pop_pages += nr;
        if (chunk != pcpu_reserved_chunk)
-               pcpu_nr_empty_pop_pages += nr;
+               pcpu_nr_empty_pop_pages[pcpu_chunk_type(chunk)] += nr;
 }
 
 /*
@@ -1832,7 +1832,7 @@ area_found:
                mutex_unlock(&pcpu_alloc_mutex);
        }
 
-       if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
+       if (pcpu_nr_empty_pop_pages[type] < PCPU_EMPTY_POP_PAGES_LOW)
                pcpu_schedule_balance_work();
 
        /* clear the areas and return address relative to base address */
@@ -2000,7 +2000,7 @@ retry_pop:
                pcpu_atomic_alloc_failed = false;
        } else {
                nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
-                                 pcpu_nr_empty_pop_pages,
+                                 pcpu_nr_empty_pop_pages[type],
                                  0, PCPU_EMPTY_POP_PAGES_HIGH);
        }
 
@@ -2580,7 +2580,7 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
        /* link the first chunk in */
        pcpu_first_chunk = chunk;
-       pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
+       pcpu_nr_empty_pop_pages[PCPU_CHUNK_ROOT] = pcpu_first_chunk->nr_empty_pop_pages;
        pcpu_chunk_relocate(pcpu_first_chunk, -1);
 
        /* include all regions of the first chunk */
index 4354c1422d57c962fccfd065910d164d48e93bba..da751448d0e4ef219717294882df9bfb33a66174 100644 (file)
@@ -111,7 +111,7 @@ static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
                            unsigned long next, struct mm_walk *walk)
 {
        struct ptdump_state *st = walk->private;
-       pte_t val = READ_ONCE(*pte);
+       pte_t val = ptep_get(pte);
 
        if (st->effective_prot)
                st->effective_prot(st, 4, pte_val(val));
index 9c2e145a747affff680fef96931ad2c7718502a7..c13c33b247e8747c4bed412edc8776ef53e8973c 100644 (file)
@@ -147,8 +147,8 @@ void __meminit __shuffle_zone(struct zone *z)
        spin_unlock_irqrestore(&z->lock, flags);
 }
 
-/**
- * shuffle_free_memory - reduce the predictability of the page allocator
+/*
+ * __shuffle_free_memory - reduce the predictability of the page allocator
  * @pgdat: node page data
  */
 void __meminit __shuffle_free_memory(pg_data_t *pgdat)
index 076582f58f687548174b6d29dc5b2ea2f6d9c03e..774c7221efdc59ec050d0bea3097fc3b2fcf60ac 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -601,7 +601,8 @@ static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
 
 static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
 {
-       if (static_branch_unlikely(&init_on_alloc)) {
+       if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
+                               &init_on_alloc)) {
                if (c->ctor)
                        return false;
                if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
@@ -613,7 +614,8 @@ static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
 
 static inline bool slab_want_init_on_free(struct kmem_cache *c)
 {
-       if (static_branch_unlikely(&init_on_free))
+       if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
+                               &init_on_free))
                return !(c->ctor ||
                         (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
        return false;
index f8761281aab00735941f061cabf766316d292cb3..434b4f0429092c80a9426d557adf5e00434de4ea 100644 (file)
@@ -890,6 +890,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
        hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
                tt_vlan->vid = htons(vlan->vid);
                tt_vlan->crc = htonl(vlan->tt.crc);
+               tt_vlan->reserved = 0;
 
                tt_vlan++;
        }
@@ -973,6 +974,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
 
                tt_vlan->vid = htons(vlan->vid);
                tt_vlan->crc = htonl(vlan->tt.crc);
+               tt_vlan->reserved = 0;
 
                tt_vlan++;
        }
index 3226fe02e8754c0670838c330ddff343d8894419..989401f116e915ecdc84bb3a0923f4e72ffa1f4c 100644 (file)
@@ -126,8 +126,6 @@ int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32])
        int err;
        struct ecdh p = {0};
 
-       p.curve_id = ECC_CURVE_NIST_P256;
-
        if (private_key) {
                tmp = kmalloc(32, GFP_KERNEL);
                if (!tmp)
index e55976db4403e7ad742d2721f6b8d9a3d91160bc..805ce546b8133d5f102d75602ea9a52f1d98f08c 100644 (file)
@@ -272,12 +272,16 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
 {
        int ret;
 
-       if (!test_bit(HCI_UP, &hdev->flags))
-               return -ENETDOWN;
-
        /* Serialize all requests */
        hci_req_sync_lock(hdev);
-       ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
+       /* check the state after obtaing the lock to protect the HCI_UP
+        * against any races from hci_dev_do_close when the controller
+        * gets removed.
+        */
+       if (test_bit(HCI_UP, &hdev->flags))
+               ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
+       else
+               ret = -ENETDOWN;
        hci_req_sync_unlock(hdev);
 
        return ret;
index f71c6fa65fb35afabdf99d63bb795670abfe56ee..f49604d44b8778057acaeb2f2424a623f043547d 100644 (file)
@@ -205,7 +205,7 @@ static int __init test_ecdh(void)
 
        calltime = ktime_get();
 
-       tfm = crypto_alloc_kpp("ecdh", 0, 0);
+       tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
        if (IS_ERR(tfm)) {
                BT_ERR("Unable to create ECDH crypto context");
                err = PTR_ERR(tfm);
index b0c1ee110eff92195bac6b7003193a887e1ca196..21e445993f39c4e6ef2d208125f9a6020c9b2f48 100644 (file)
@@ -1386,7 +1386,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
                goto zfree_smp;
        }
 
-       smp->tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
+       smp->tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
        if (IS_ERR(smp->tfm_ecdh)) {
                BT_ERR("Unable to create ECDH crypto context");
                goto free_shash;
@@ -3281,7 +3281,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
                return ERR_CAST(tfm_cmac);
        }
 
-       tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
+       tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
        if (IS_ERR(tfm_ecdh)) {
                BT_ERR("Unable to create ECDH crypto context");
                crypto_free_shash(tfm_cmac);
@@ -3806,7 +3806,7 @@ int __init bt_selftest_smp(void)
                return PTR_ERR(tfm_cmac);
        }
 
-       tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
+       tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
        if (IS_ERR(tfm_ecdh)) {
                BT_ERR("Unable to create ECDH crypto context");
                crypto_free_shash(tfm_cmac);
index 66e7af16549436a4c0f92172038d4c4bef82c592..32bc2821027f37d8445afcecd3b368a127d50ecc 100644 (file)
@@ -105,14 +105,20 @@ static int __net_init broute_net_init(struct net *net)
                                  &net->xt.broute_table);
 }
 
+static void __net_exit broute_net_pre_exit(struct net *net)
+{
+       ebt_unregister_table_pre_exit(net, "broute", &ebt_ops_broute);
+}
+
 static void __net_exit broute_net_exit(struct net *net)
 {
-       ebt_unregister_table(net, net->xt.broute_table, &ebt_ops_broute);
+       ebt_unregister_table(net, net->xt.broute_table);
 }
 
 static struct pernet_operations broute_net_ops = {
        .init = broute_net_init,
        .exit = broute_net_exit,
+       .pre_exit = broute_net_pre_exit,
 };
 
 static int __init ebtable_broute_init(void)
index 78cb9b21022d0acc115c3f4aa0007579dbccf7cd..bcf982e12f16b9a47205cc512dc5b487b6ea3ff8 100644 (file)
@@ -99,14 +99,20 @@ static int __net_init frame_filter_net_init(struct net *net)
                                  &net->xt.frame_filter);
 }
 
+static void __net_exit frame_filter_net_pre_exit(struct net *net)
+{
+       ebt_unregister_table_pre_exit(net, "filter", ebt_ops_filter);
+}
+
 static void __net_exit frame_filter_net_exit(struct net *net)
 {
-       ebt_unregister_table(net, net->xt.frame_filter, ebt_ops_filter);
+       ebt_unregister_table(net, net->xt.frame_filter);
 }
 
 static struct pernet_operations frame_filter_net_ops = {
        .init = frame_filter_net_init,
        .exit = frame_filter_net_exit,
+       .pre_exit = frame_filter_net_pre_exit,
 };
 
 static int __init ebtable_filter_init(void)
index 0888936ef8537e6ee00c5c400760a412652b386f..0d092773f81618e9bcb2a4a297674ac1db33e21b 100644 (file)
@@ -99,14 +99,20 @@ static int __net_init frame_nat_net_init(struct net *net)
                                  &net->xt.frame_nat);
 }
 
+static void __net_exit frame_nat_net_pre_exit(struct net *net)
+{
+       ebt_unregister_table_pre_exit(net, "nat", ebt_ops_nat);
+}
+
 static void __net_exit frame_nat_net_exit(struct net *net)
 {
-       ebt_unregister_table(net, net->xt.frame_nat, ebt_ops_nat);
+       ebt_unregister_table(net, net->xt.frame_nat);
 }
 
 static struct pernet_operations frame_nat_net_ops = {
        .init = frame_nat_net_init,
        .exit = frame_nat_net_exit,
+       .pre_exit = frame_nat_net_pre_exit,
 };
 
 static int __init ebtable_nat_init(void)
index ebe33b60efd6b064d374b12439cf7e81abddcf50..d481ff24a150161f6b5b0c24ca5bcf0b7e3d54e8 100644 (file)
@@ -1232,10 +1232,34 @@ out:
        return ret;
 }
 
-void ebt_unregister_table(struct net *net, struct ebt_table *table,
-                         const struct nf_hook_ops *ops)
+static struct ebt_table *__ebt_find_table(struct net *net, const char *name)
+{
+       struct ebt_table *t;
+
+       mutex_lock(&ebt_mutex);
+
+       list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
+               if (strcmp(t->name, name) == 0) {
+                       mutex_unlock(&ebt_mutex);
+                       return t;
+               }
+       }
+
+       mutex_unlock(&ebt_mutex);
+       return NULL;
+}
+
+void ebt_unregister_table_pre_exit(struct net *net, const char *name, const struct nf_hook_ops *ops)
+{
+       struct ebt_table *table = __ebt_find_table(net, name);
+
+       if (table)
+               nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+EXPORT_SYMBOL(ebt_unregister_table_pre_exit);
+
+void ebt_unregister_table(struct net *net, struct ebt_table *table)
 {
-       nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
        __ebt_unregister_table(net, table);
 }
 
index 0e5c37be4a2bd0a53afef34ab1805c18d768c72b..909b9e684e04305c19593278c30d93ec8660b8ee 100644 (file)
@@ -86,6 +86,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
 MODULE_ALIAS("can-proto-2");
 
+#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
+
 /*
  * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
  * 64 bit aligned so the offset has to be multiples of 8 which is ensured
@@ -1292,7 +1294,7 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
                /* no bound device as default => check msg_name */
                DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
 
-               if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+               if (msg->msg_namelen < BCM_MIN_NAMELEN)
                        return -EINVAL;
 
                if (addr->can_family != AF_CAN)
@@ -1534,7 +1536,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
        struct net *net = sock_net(sk);
        int ret = 0;
 
-       if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+       if (len < BCM_MIN_NAMELEN)
                return -EINVAL;
 
        lock_sock(sk);
@@ -1616,8 +1618,8 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (msg->msg_name) {
-               __sockaddr_check_size(sizeof(struct sockaddr_can));
-               msg->msg_namelen = sizeof(struct sockaddr_can);
+               __sockaddr_check_size(BCM_MIN_NAMELEN);
+               msg->msg_namelen = BCM_MIN_NAMELEN;
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }
 
index 15ea1234d45730232941a1bbbe441466f865af46..9f94ad3caee92938a81e0fb22e850ec97d73b9c9 100644 (file)
@@ -77,6 +77,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
 MODULE_ALIAS("can-proto-6");
 
+#define ISOTP_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp)
+
 #define SINGLE_MASK(id) (((id) & CAN_EFF_FLAG) ? \
                         (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
                         (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
@@ -986,7 +988,8 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        sock_recv_timestamp(msg, sk, skb);
 
        if (msg->msg_name) {
-               msg->msg_namelen = sizeof(struct sockaddr_can);
+               __sockaddr_check_size(ISOTP_MIN_NAMELEN);
+               msg->msg_namelen = ISOTP_MIN_NAMELEN;
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }
 
@@ -1056,7 +1059,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
        int notify_enetdown = 0;
        int do_rx_reg = 1;
 
-       if (len < CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp))
+       if (len < ISOTP_MIN_NAMELEN)
                return -EINVAL;
 
        /* do not register frame reception for functional addressing */
@@ -1152,13 +1155,13 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
        if (peer)
                return -EOPNOTSUPP;
 
-       memset(addr, 0, sizeof(*addr));
+       memset(addr, 0, ISOTP_MIN_NAMELEN);
        addr->can_family = AF_CAN;
        addr->can_ifindex = so->ifindex;
        addr->can_addr.tp.rx_id = so->rxid;
        addr->can_addr.tp.tx_id = so->txid;
 
-       return sizeof(*addr);
+       return ISOTP_MIN_NAMELEN;
 }
 
 static int isotp_setsockopt(struct socket *sock, int level, int optname,
index 37b47a39a3edcc22cb3923fed23a2d6cb6ef0b96..139d9471ddcf44754a2806e4b47d4c275690089f 100644 (file)
@@ -60,6 +60,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
 MODULE_ALIAS("can-proto-1");
 
+#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
+
 #define MASK_ALL 0
 
 /* A raw socket has a list of can_filters attached to it, each receiving
@@ -394,7 +396,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
        int err = 0;
        int notify_enetdown = 0;
 
-       if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+       if (len < RAW_MIN_NAMELEN)
                return -EINVAL;
        if (addr->can_family != AF_CAN)
                return -EINVAL;
@@ -475,11 +477,11 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
        if (peer)
                return -EOPNOTSUPP;
 
-       memset(addr, 0, sizeof(*addr));
+       memset(addr, 0, RAW_MIN_NAMELEN);
        addr->can_family  = AF_CAN;
        addr->can_ifindex = ro->ifindex;
 
-       return sizeof(*addr);
+       return RAW_MIN_NAMELEN;
 }
 
 static int raw_setsockopt(struct socket *sock, int level, int optname,
@@ -739,7 +741,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        if (msg->msg_name) {
                DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
 
-               if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+               if (msg->msg_namelen < RAW_MIN_NAMELEN)
                        return -EINVAL;
 
                if (addr->can_family != AF_CAN)
@@ -832,8 +834,8 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (msg->msg_name) {
-               __sockaddr_check_size(sizeof(struct sockaddr_can));
-               msg->msg_namelen = sizeof(struct sockaddr_can);
+               __sockaddr_check_size(RAW_MIN_NAMELEN);
+               msg->msg_namelen = RAW_MIN_NAMELEN;
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }
 
index 0f72ff5d34ba0072b7fcb1588ecdef6b169a7733..1f79b9aa9a3f2392fddd1401f95ad098b5e03204 100644 (file)
@@ -5924,7 +5924,8 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
        NAPI_GRO_CB(skb)->frag0_len = 0;
 
        if (!skb_headlen(skb) && pinfo->nr_frags &&
-           !PageHighMem(skb_frag_page(frag0))) {
+           !PageHighMem(skb_frag_page(frag0)) &&
+           (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
                NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
                NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
                                                    skb_frag_size(frag0),
@@ -6992,7 +6993,7 @@ static int napi_thread_wait(struct napi_struct *napi)
 
        set_current_state(TASK_INTERRUPTIBLE);
 
-       while (!kthread_should_stop() && !napi_disable_pending(napi)) {
+       while (!kthread_should_stop()) {
                /* Testing SCHED_THREADED bit here to make sure the current
                 * kthread owns this napi and could poll on this napi.
                 * Testing SCHED bit is not enough because SCHED bit might be
@@ -7010,6 +7011,7 @@ static int napi_thread_wait(struct napi_struct *napi)
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
+
        return -1;
 }
 
index e2982b3970b88dfa671c7d7da23bd55154f8988c..8379719d1dcef1bb1ea0b673d3dcf1030380439c 100644 (file)
@@ -1379,7 +1379,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
                         * we can reinject the packet there.
                         */
                        n2 = NULL;
-                       if (dst) {
+                       if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
                                n2 = dst_neigh_lookup_skb(dst, skb);
                                if (n2)
                                        n1 = n2;
index 1bdcb33fb561994472a7dde6b9a3c992a300bb87..3485b16a7ff324b2825ad07920e98c88305d336e 100644 (file)
@@ -2863,7 +2863,7 @@ static int do_setlink(const struct sk_buff *skb,
 
                        BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
 
-                       err = af_ops->set_link_af(dev, af);
+                       err = af_ops->set_link_af(dev, af, extack);
                        if (err < 0) {
                                rcu_read_unlock();
                                goto errout;
index 1261512d680735a0b3a7d8201544d53e5162b6a1..5def3a2e85be9c597238266fd57a96a21af49e9e 100644 (file)
@@ -488,6 +488,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
        if (unlikely(!msg))
                return -EAGAIN;
        sk_msg_init(msg);
+       skb_set_owner_r(skb, sk);
        return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
 }
 
@@ -790,7 +791,6 @@ static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int
 {
        switch (verdict) {
        case __SK_REDIRECT:
-               skb_set_owner_r(skb, sk);
                sk_psock_skb_redirect(skb);
                break;
        case __SK_PASS:
@@ -808,10 +808,6 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
        rcu_read_lock();
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
-               /* We skip full set_owner_r here because if we do a SK_PASS
-                * or SK_DROP we can skip skb memory accounting and use the
-                * TLS context.
-                */
                skb->sk = psock->sk;
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
@@ -880,12 +876,13 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
                kfree_skb(skb);
                goto out;
        }
-       skb_set_owner_r(skb, sk);
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
+               skb->sk = sk;
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+               skb->sk = NULL;
        }
        sk_psock_verdict_apply(psock, skb, ret);
 out:
@@ -956,12 +953,13 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
                kfree_skb(skb);
                goto out;
        }
-       skb_set_owner_r(skb, sk);
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
+               skb->sk = sk;
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+               skb->sk = NULL;
        }
        sk_psock_verdict_apply(psock, skb, ret);
 out:
index cc31b601ae103387fdcdc83713f31b726ec25fc7..5ec90f99e102895417adb6422c00a6943fd09182 100644 (file)
@@ -2132,16 +2132,10 @@ void skb_orphan_partial(struct sk_buff *skb)
        if (skb_is_tcp_pure_ack(skb))
                return;
 
-       if (can_skb_orphan_partial(skb)) {
-               struct sock *sk = skb->sk;
-
-               if (refcount_inc_not_zero(&sk->sk_refcnt)) {
-                       WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
-                       skb->destructor = sock_efree;
-               }
-       } else {
+       if (can_skb_orphan_partial(skb))
+               skb_set_owner_sk_safe(skb, skb->sk);
+       else
                skb_orphan(skb);
-       }
 }
 EXPORT_SYMBOL(skb_orphan_partial);
 
index 05354976c1fcfd34dd8c2142a54befa8770017bf..858276e72c6893111ee4ab5d161cb235b286bfc1 100644 (file)
@@ -350,7 +350,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
                /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
                xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
                page = virt_to_head_page(data);
-               napi_direct &= !xdp_return_frame_no_direct();
+               if (napi_direct && xdp_return_frame_no_direct())
+                       napi_direct = false;
                page_pool_put_full_page(xa->page_pool, page, napi_direct);
                rcu_read_unlock();
                break;
index d142eb2b288b3d9a28934b5a10f43ae5c363d961..3c3e56a1f34d1fbf45b8fdb8b133b3a7c72cef9a 100644 (file)
@@ -795,8 +795,14 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
 
        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_port_setup(dp);
-               if (err)
+               if (err) {
+                       dsa_port_devlink_teardown(dp);
+                       dp->type = DSA_PORT_TYPE_UNUSED;
+                       err = dsa_port_devlink_setup(dp);
+                       if (err)
+                               goto teardown;
                        continue;
+               }
        }
 
        return 0;
index 4b5da89dc27a25cc37787646690c0b9dcee8fb80..32963276452f84499b8eb481bc1b3b1560c57a5a 100644 (file)
@@ -107,7 +107,7 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
        bool unset_vlan_filtering = br_vlan_enabled(info->br);
        struct dsa_switch_tree *dst = ds->dst;
        struct netlink_ext_ack extack = {0};
-       int err, i;
+       int err, port;
 
        if (dst->index == info->tree_index && ds->index == info->sw_index &&
            ds->ops->port_bridge_join)
@@ -124,13 +124,16 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
         * it. That is a good thing, because that lets us handle it and also
         * handle the case where the switch's vlan_filtering setting is global
         * (not per port). When that happens, the correct moment to trigger the
-        * vlan_filtering callback is only when the last port left this bridge.
+        * vlan_filtering callback is only when the last port leaves the last
+        * VLAN-aware bridge.
         */
        if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
-               for (i = 0; i < ds->num_ports; i++) {
-                       if (i == info->port)
-                               continue;
-                       if (dsa_to_port(ds, i)->bridge_dev == info->br) {
+               for (port = 0; port < ds->num_ports; port++) {
+                       struct net_device *bridge_dev;
+
+                       bridge_dev = dsa_to_port(ds, port)->bridge_dev;
+
+                       if (bridge_dev && br_vlan_enabled(bridge_dev)) {
                                unset_vlan_filtering = false;
                                break;
                        }
index c6a383dfd6c2b4d484d4d60708973c9386da244e..f9dcbad84788b5417ff6ccf571be81231f863b4f 100644 (file)
@@ -273,6 +273,7 @@ const struct link_mode_info link_mode_params[] = {
        __DEFINE_LINK_MODE_PARAMS(10000, KR, Full),
        [ETHTOOL_LINK_MODE_10000baseR_FEC_BIT] = {
                .speed  = SPEED_10000,
+               .lanes  = 1,
                .duplex = DUPLEX_FULL,
        },
        __DEFINE_LINK_MODE_PARAMS(20000, MLD2, Full),
@@ -562,3 +563,19 @@ void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops)
        rtnl_unlock();
 }
 EXPORT_SYMBOL_GPL(ethtool_set_ethtool_phy_ops);
+
+void
+ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
+                             enum ethtool_link_mode_bit_indices link_mode)
+{
+       const struct link_mode_info *link_info;
+
+       if (WARN_ON_ONCE(link_mode >= __ETHTOOL_LINK_MODE_MASK_NBITS))
+               return;
+
+       link_info = &link_mode_params[link_mode];
+       link_ksettings->base.speed = link_info->speed;
+       link_ksettings->lanes = link_info->lanes;
+       link_ksettings->base.duplex = link_info->duplex;
+}
+EXPORT_SYMBOL_GPL(ethtool_params_from_link_mode);
index 901b7de941abdbf22184bc0de8775ad4a212d0cc..e10bfcc0785317f3b19e2ee2d0513c60a495151b 100644 (file)
@@ -169,8 +169,8 @@ int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info)
        ethnl_update_bool32(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
        ethnl_update_bool32(&eee.tx_lpi_enabled,
                            tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod);
-       ethnl_update_bool32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
-                           &mod);
+       ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
+                        &mod);
        ret = 0;
        if (!mod)
                goto out_ops;
index 24783b71c58494f6b205659f7ef90cea506562f1..771688e1b0da913d0428d68ba0229a37f750df3e 100644 (file)
@@ -426,29 +426,13 @@ struct ethtool_link_usettings {
 int __ethtool_get_link_ksettings(struct net_device *dev,
                                 struct ethtool_link_ksettings *link_ksettings)
 {
-       const struct link_mode_info *link_info;
-       int err;
-
        ASSERT_RTNL();
 
        if (!dev->ethtool_ops->get_link_ksettings)
                return -EOPNOTSUPP;
 
        memset(link_ksettings, 0, sizeof(*link_ksettings));
-
-       link_ksettings->link_mode = -1;
-       err = dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
-       if (err)
-               return err;
-
-       if (link_ksettings->link_mode != -1) {
-               link_info = &link_mode_params[link_ksettings->link_mode];
-               link_ksettings->base.speed = link_info->speed;
-               link_ksettings->lanes = link_info->lanes;
-               link_ksettings->base.duplex = link_info->duplex;
-       }
-
-       return 0;
+       return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
 }
 EXPORT_SYMBOL(__ethtool_get_link_ksettings);
 
index 6eabd58d81bfeda36bb73c43add6cd6944d9cdea..cde9f3169ae5dc0322468ae783ccffd880ade271 100644 (file)
@@ -36,9 +36,9 @@ static inline int ethnl_strz_size(const char *s)
 
 /**
  * ethnl_put_strz() - put string attribute with fixed size string
- * @skb:     skb with the message
- * @attrype: attribute type
- * @s:       ETH_GSTRING_LEN sized string (may not be null terminated)
+ * @skb:      skb with the message
+ * @attrtype: attribute type
+ * @s:        ETH_GSTRING_LEN sized string (may not be null terminated)
  *
  * Puts an attribute with null terminated string from @s into the message.
  *
index 09998dc5c185fc890c2ca705c6febc503a97a16f..d4ac02718b72adccc5913b281650e1bcb0c1434c 100644 (file)
@@ -38,16 +38,16 @@ static int pause_prepare_data(const struct ethnl_req_info *req_base,
        if (!dev->ethtool_ops->get_pauseparam)
                return -EOPNOTSUPP;
 
+       ethtool_stats_init((u64 *)&data->pausestat,
+                          sizeof(data->pausestat) / 8);
+
        ret = ethnl_ops_begin(dev);
        if (ret < 0)
                return ret;
        dev->ethtool_ops->get_pauseparam(dev, &data->pauseparam);
        if (req_base->flags & ETHTOOL_FLAG_STATS &&
-           dev->ethtool_ops->get_pause_stats) {
-               ethtool_stats_init((u64 *)&data->pausestat,
-                                  sizeof(data->pausestat) / 8);
+           dev->ethtool_ops->get_pause_stats)
                dev->ethtool_ops->get_pause_stats(dev, &data->pausestat);
-       }
        ethnl_ops_complete(dev);
 
        return 0;
index 7444ec6e298e49603e767406f3a858dbc787c7a1..bfcdc75fc01e65136a29370a79bac6ba181d5faa 100644 (file)
@@ -217,6 +217,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
        if (master) {
                skb->dev = master->dev;
+               skb_reset_mac_header(skb);
                hsr_forward_skb(skb, master);
        } else {
                atomic_long_inc(&dev->tx_dropped);
index ed82a470b6e154be28d7e53be57019bccd4a964d..b218e4594009ca5e690e21f1bba04dba3a5ace05 100644 (file)
@@ -555,12 +555,6 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
 {
        struct hsr_frame_info frame;
 
-       if (skb_mac_header(skb) != skb->data) {
-               WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
-                         __FILE__, __LINE__, port->dev->name);
-               goto out_drop;
-       }
-
        if (fill_frame_info(&frame, skb, port) < 0)
                goto out_drop;
 
index 9c640d670ffeb5dd879f042d8d8f8df4c68202ee..0c1b0770c59ea303e4334156f1d203b1d2f16fc3 100644 (file)
@@ -551,9 +551,7 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
        desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
 
        if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
-               if (!info->attrs[IEEE802154_ATTR_PAN_ID] &&
-                   !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] ||
-                     info->attrs[IEEE802154_ATTR_HW_ADDR]))
+               if (!info->attrs[IEEE802154_ATTR_PAN_ID])
                        return -EINVAL;
 
                desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
@@ -562,6 +560,9 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
                        desc->device_addr.mode = IEEE802154_ADDR_SHORT;
                        desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
                } else {
+                       if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
+                               return -EINVAL;
+
                        desc->device_addr.mode = IEEE802154_ADDR_LONG;
                        desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
                }
index 7c5a1aa5adb422445184adadab9993a8f37cbaab..05f6bd89a7dd8e526369db8247a8fd9661cf41f2 100644 (file)
@@ -820,8 +820,13 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
                goto nla_put_failure;
 
 #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               goto out;
+
        if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0)
                goto nla_put_failure;
+
+out:
 #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 
        genlmsg_end(msg, hdr);
@@ -1384,6 +1389,9 @@ static int nl802154_set_llsec_params(struct sk_buff *skb,
        u32 changed = 0;
        int ret;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (info->attrs[NL802154_ATTR_SEC_ENABLED]) {
                u8 enabled;
 
@@ -1490,6 +1498,11 @@ nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
        if (err)
                return err;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+               err = skb->len;
+               goto out_err;
+       }
+
        if (!wpan_dev->netdev) {
                err = -EINVAL;
                goto out_err;
@@ -1544,7 +1557,11 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
        struct ieee802154_llsec_key_id id = { };
        u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
 
-       if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+           nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
                return -EINVAL;
 
        if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] ||
@@ -1592,7 +1609,11 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
        struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
        struct ieee802154_llsec_key_id id;
 
-       if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+           nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
                return -EINVAL;
 
        if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
@@ -1656,6 +1677,11 @@ nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
        if (err)
                return err;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+               err = skb->len;
+               goto out_err;
+       }
+
        if (!wpan_dev->netdev) {
                err = -EINVAL;
                goto out_err;
@@ -1742,6 +1768,9 @@ static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
        struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
        struct ieee802154_llsec_device dev_desc;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
                                          &dev_desc) < 0)
                return -EINVAL;
@@ -1757,7 +1786,11 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
        struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
        __le64 extended_addr;
 
-       if (nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_SEC_DEVICE] ||
+           nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
                return -EINVAL;
 
        if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR])
@@ -1825,6 +1858,11 @@ nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
        if (err)
                return err;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+               err = skb->len;
+               goto out_err;
+       }
+
        if (!wpan_dev->netdev) {
                err = -EINVAL;
                goto out_err;
@@ -1882,6 +1920,9 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info
        struct ieee802154_llsec_device_key key;
        __le64 extended_addr;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
            nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack) < 0)
                return -EINVAL;
@@ -1913,7 +1954,11 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info
        struct ieee802154_llsec_device_key key;
        __le64 extended_addr;
 
-       if (nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
+           nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
                return -EINVAL;
 
        if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
@@ -1986,6 +2031,11 @@ nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
        if (err)
                return err;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+               err = skb->len;
+               goto out_err;
+       }
+
        if (!wpan_dev->netdev) {
                err = -EINVAL;
                goto out_err;
@@ -2070,6 +2120,9 @@ static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
        struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
        struct ieee802154_llsec_seclevel sl;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
                                 &sl) < 0)
                return -EINVAL;
@@ -2085,6 +2138,9 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
        struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
        struct ieee802154_llsec_seclevel sl;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (!info->attrs[NL802154_ATTR_SEC_LEVEL] ||
            llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
                                 &sl) < 0)
@@ -2098,11 +2154,7 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
 #define NL802154_FLAG_NEED_NETDEV      0x02
 #define NL802154_FLAG_NEED_RTNL                0x04
 #define NL802154_FLAG_CHECK_NETDEV_UP  0x08
-#define NL802154_FLAG_NEED_NETDEV_UP   (NL802154_FLAG_NEED_NETDEV |\
-                                        NL802154_FLAG_CHECK_NETDEV_UP)
 #define NL802154_FLAG_NEED_WPAN_DEV    0x10
-#define NL802154_FLAG_NEED_WPAN_DEV_UP (NL802154_FLAG_NEED_WPAN_DEV |\
-                                        NL802154_FLAG_CHECK_NETDEV_UP)
 
 static int nl802154_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
                             struct genl_info *info)
index d99e1be94019d06553243ff654c6d99c00de9397..36ed85bf2ad51cfd80bf374a54aa7e0815872997 100644 (file)
@@ -141,7 +141,7 @@ static void ah_output_done(struct crypto_async_request *base, int err)
        }
 
        kfree(AH_SKB_CB(skb)->tmp);
-       xfrm_output_resume(skb, err);
+       xfrm_output_resume(skb->sk, skb, err);
 }
 
 static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
index 75f67994fc851f78a7b4068457adf085058b8dc6..2e35f68da40a7f8f9698b724efaa6698de3d3733 100644 (file)
@@ -1978,7 +1978,8 @@ static int inet_validate_link_af(const struct net_device *dev,
        return 0;
 }
 
-static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
+static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
+                           struct netlink_ext_ack *extack)
 {
        struct in_device *in_dev = __in_dev_get_rcu(dev);
        struct nlattr *a, *tb[IFLA_INET_MAX+1];
index a3271ec3e1627fb4f6e29da0e0fb1a638fe7e789..4b834bbf95e074d215d32bc6354fe83e2db2dc34 100644 (file)
@@ -279,7 +279,7 @@ static void esp_output_done(struct crypto_async_request *base, int err)
                    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                        esp_output_tail_tcp(x, skb);
                else
-                       xfrm_output_resume(skb, err);
+                       xfrm_output_resume(skb->sk, skb, err);
        }
 }
 
index 601f5fbfc63fbecf115ee31948ea3110af37bc40..33687cf58286b7cf63958b6e0eb004524084e0f2 100644 (file)
@@ -217,10 +217,12 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
 
        if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
             !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
-               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
                 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
-               esp_features = features & ~NETIF_F_CSUM_MASK;
+               esp_features = features & ~(NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
 
        xo->flags |= XFRM_GSO_SEGMENT;
 
@@ -312,8 +314,17 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));
 
-       if (hw_offload)
+       if (hw_offload) {
+               if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
+                       return -ENOMEM;
+
+               xo = xfrm_offload(skb);
+               if (!xo)
+                       return -EINVAL;
+
+               xo->flags |= XFRM_XMIT;
                return 0;
+       }
 
        err = esp_output_tail(x, skb, &esp);
        if (err)
index eb207089ece0b29bbb96dd66544be79133be2ecc..31c6c6d99d5ecf6ff1752c450c46981f8eac7da0 100644 (file)
@@ -218,7 +218,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        }
 
        if (dst->flags & DST_XFRM_QUEUE)
-               goto queued;
+               goto xmit;
 
        if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
                dev->stats.tx_carrier_errors++;
@@ -238,6 +238,8 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        if (skb->len > mtu) {
                skb_dst_update_pmtu_no_confirm(skb, mtu);
                if (skb->protocol == htons(ETH_P_IP)) {
+                       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+                               goto xmit;
                        icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                      htonl(mtu));
                } else {
@@ -251,7 +253,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
                goto tx_error;
        }
 
-queued:
+xmit:
        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
index d1e04d2b5170ec0d054764f2f116ef6d818d03ef..d6d45d820d79adbc9a9c0bf75b36c2b4ebd79ed2 100644 (file)
@@ -1193,6 +1193,8 @@ static int translate_compat_table(struct net *net,
        if (!newinfo)
                goto out_unlock;
 
+       memset(newinfo->entries, 0, size);
+
        newinfo->number = compatr->num_entries;
        for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = compatr->hook_entry[i];
@@ -1539,10 +1541,15 @@ out_free:
        return ret;
 }
 
-void arpt_unregister_table(struct net *net, struct xt_table *table,
-                          const struct nf_hook_ops *ops)
+void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                                   const struct nf_hook_ops *ops)
 {
        nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+EXPORT_SYMBOL(arpt_unregister_table_pre_exit);
+
+void arpt_unregister_table(struct net *net, struct xt_table *table)
+{
        __arpt_unregister_table(net, table);
 }
 
index c216b9ad3bb24d259dede54238ce9c63bae9f8a6..6c300ba5634e2e54055b229ab61989f2370e60d6 100644 (file)
@@ -56,16 +56,24 @@ static int __net_init arptable_filter_table_init(struct net *net)
        return err;
 }
 
+static void __net_exit arptable_filter_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.arptable_filter)
+               arpt_unregister_table_pre_exit(net, net->ipv4.arptable_filter,
+                                              arpfilter_ops);
+}
+
 static void __net_exit arptable_filter_net_exit(struct net *net)
 {
        if (!net->ipv4.arptable_filter)
                return;
-       arpt_unregister_table(net, net->ipv4.arptable_filter, arpfilter_ops);
+       arpt_unregister_table(net, net->ipv4.arptable_filter);
        net->ipv4.arptable_filter = NULL;
 }
 
 static struct pernet_operations arptable_filter_net_ops = {
        .exit = arptable_filter_net_exit,
+       .pre_exit = arptable_filter_net_pre_exit,
 };
 
 static int __init arptable_filter_init(void)
index f15bc21d730164baf6cd2e8bf982c851685ee3c5..f77ea0dbe6562b38a1a10d176857773c3298d6e8 100644 (file)
@@ -1428,6 +1428,8 @@ translate_compat_table(struct net *net,
        if (!newinfo)
                goto out_unlock;
 
+       memset(newinfo->entries, 0, size);
+
        newinfo->number = compatr->num_entries;
        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = compatr->hook_entry[i];
index f55095d3ed1656fdb8badaed7eb58832c5063299..60465f077497ed133d0618799c1fbcbc1b097b11 100644 (file)
@@ -1378,9 +1378,19 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
                if (!table)
                        goto err_alloc;
 
-               /* Update the variables to point into the current struct net */
-               for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++)
-                       table[i].data += (void *)net - (void *)&init_net;
+               for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++) {
+                       if (table[i].data) {
+                               /* Update the variables to point into
+                                * the current struct net
+                                */
+                               table[i].data += (void *)net - (void *)&init_net;
+                       } else {
+                               /* Entries without data pointer are global;
+                                * Make them read-only in non-init_net ns
+                                */
+                               table[i].mode &= ~0222;
+                       }
+               }
        }
 
        net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
index 4a0478b17243aca6eff4a783132546f37c08d524..99d743eb9dc4688c23198c0663a25776b086c2d1 100644 (file)
@@ -2754,6 +2754,10 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
                val = up->gso_size;
                break;
 
+       case UDP_GRO:
+               val = up->gro_enabled;
+               break;
+
        /* The following two cannot be changed on UDP sockets, the return is
         * always 0 (which corresponds to the full checksum coverage of UDP). */
        case UDPLITE_SEND_CSCOV:
index f2337fb756ac73b9fccd70af47dfd99cbe801feb..a9e53f5942faeec5dc023f0bd2904b9b19bab05b 100644 (file)
@@ -5669,7 +5669,8 @@ static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
        return 0;
 }
 
-static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
+static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
+                            struct netlink_ext_ack *extack)
 {
        struct inet6_ifaddr *ifp;
        struct net_device *dev = idev->dev;
@@ -5680,12 +5681,29 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
 
        if (!token)
                return -EINVAL;
-       if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
+
+       if (dev->flags & IFF_LOOPBACK) {
+               NL_SET_ERR_MSG_MOD(extack, "Device is loopback");
                return -EINVAL;
-       if (!ipv6_accept_ra(idev))
+       }
+
+       if (dev->flags & IFF_NOARP) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Device does not do neighbour discovery");
+               return -EINVAL;
+       }
+
+       if (!ipv6_accept_ra(idev)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Router advertisement is disabled on device");
                return -EINVAL;
-       if (idev->cnf.rtr_solicits == 0)
+       }
+
+       if (idev->cnf.rtr_solicits == 0) {
+               NL_SET_ERR_MSG(extack,
+                              "Router solicitation is disabled on device");
                return -EINVAL;
+       }
 
        write_lock_bh(&idev->lock);
 
@@ -5793,7 +5811,8 @@ static int inet6_validate_link_af(const struct net_device *dev,
        return 0;
 }
 
-static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
+static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
+                            struct netlink_ext_ack *extack)
 {
        struct inet6_dev *idev = __in6_dev_get(dev);
        struct nlattr *tb[IFLA_INET6_MAX + 1];
@@ -5806,7 +5825,8 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
                BUG();
 
        if (tb[IFLA_INET6_TOKEN]) {
-               err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
+               err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
+                                       extack);
                if (err)
                        return err;
        }
index 440080da805b5ead265b8ae3e018719b1048a2ae..080ee7f44c649151f7ec7b788a8ced07272823f4 100644 (file)
@@ -316,7 +316,7 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
        }
 
        kfree(AH_SKB_CB(skb)->tmp);
-       xfrm_output_resume(skb, err);
+       xfrm_output_resume(skb->sk, skb, err);
 }
 
 static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
index 153ad103ba74eb0fb61d09bf08bca07feba33ae2..727d791ed5e67a6f9117a7d31d1bcc84b7db663a 100644 (file)
@@ -314,7 +314,7 @@ static void esp_output_done(struct crypto_async_request *base, int err)
                    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                        esp_output_tail_tcp(x, skb);
                else
-                       xfrm_output_resume(skb, err);
+                       xfrm_output_resume(skb->sk, skb, err);
        }
 }
 
index 1ca516fb30e1c29f29c94e90351587ab33b43323..4af56affaafd436fbd35ade87ffd2b7c8e6d4d91 100644 (file)
@@ -254,9 +254,11 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
        skb->encap_hdr_csum = 1;
 
        if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
-               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
-               esp_features = features & ~NETIF_F_CSUM_MASK;
+               esp_features = features & ~(NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
 
        xo->flags |= XFRM_GSO_SEGMENT;
 
@@ -346,8 +348,17 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
 
        ipv6_hdr(skb)->payload_len = htons(len);
 
-       if (hw_offload)
+       if (hw_offload) {
+               if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
+                       return -ENOMEM;
+
+               xo = xfrm_offload(skb);
+               if (!xo)
+                       return -EINVAL;
+
+               xo->flags |= XFRM_XMIT;
                return 0;
+       }
 
        err = esp6_output_tail(x, skb, &esp);
        if (err)
index 3fa0eca5a06f8af1afef3a497f9411e98007b149..42fe7db6bbb37ed09005239680d1356b8bbcd308 100644 (file)
@@ -2244,6 +2244,16 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head
                        t = rtnl_dereference(t->next);
                }
        }
+
+       t = rtnl_dereference(ip6n->tnls_wc[0]);
+       while (t) {
+               /* If dev is in the same netns, it has already
+                * been added to the list by the previous loop.
+                */
+               if (!net_eq(dev_net(t->dev), net))
+                       unregister_netdevice_queue(t->dev, list);
+               t = rtnl_dereference(t->next);
+       }
 }
 
 static int __net_init ip6_tnl_init_net(struct net *net)
index f10e7a72ea6248e5ec1fbdb8b4e1c4e0e874cb96..e0cc32e45880117307e921afebedd5817e2d52ea 100644 (file)
@@ -494,7 +494,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        }
 
        if (dst->flags & DST_XFRM_QUEUE)
-               goto queued;
+               goto xmit;
 
        x = dst->xfrm;
        if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr))
@@ -523,6 +523,8 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
                        icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                } else {
+                       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+                               goto xmit;
                        icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                      htonl(mtu));
                }
@@ -531,7 +533,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                goto tx_err_dst_release;
        }
 
-queued:
+xmit:
        skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
index 2e2119bfcf137348e1ee77dcd360267e2ef47d77..eb2b5404806c649c2fe4cb0c51b4481dace1652b 100644 (file)
@@ -1443,6 +1443,8 @@ translate_compat_table(struct net *net,
        if (!newinfo)
                goto out_unlock;
 
+       memset(newinfo->entries, 0, size);
+
        newinfo->number = compatr->num_entries;
        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = compatr->hook_entry[i];
index 1f56d9aae5892bc958e19be49d8bcbf544c1d318..bf3646b57c686843ed5fa67bea88674c11aafde1 100644 (file)
@@ -298,7 +298,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                 */
                v4addr = LOOPBACK4_IPV6;
                if (!(addr_type & IPV6_ADDR_MULTICAST) &&
-                   !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
+                   !ipv6_can_nonlocal_bind(sock_net(sk), inet)) {
                        err = -EADDRNOTAVAIL;
                        if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
                                           dev, 0)) {
index 1056b0229ffdc676012f7608eec52da0e752a698..373d48073106f7f1b594f4fd3df275c23bf7304a 100644 (file)
@@ -5209,9 +5209,11 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
                 * nexthops have been replaced by first new, the rest should
                 * be added to it.
                 */
-               cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
-                                                    NLM_F_REPLACE);
-               cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
+               if (cfg->fc_nlinfo.nlh) {
+                       cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+                                                            NLM_F_REPLACE);
+                       cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
+               }
                nhn++;
        }
 
index 63ccd9f2dcccf972eebefa8f293fb47090feb8e1..9fdccf0718b59ccb91a8d72341bb3f5cd122f06f 100644 (file)
@@ -1867,9 +1867,9 @@ static void __net_exit sit_destroy_tunnels(struct net *net,
                if (dev->rtnl_link_ops == &sit_link_ops)
                        unregister_netdevice_queue(dev, head);
 
-       for (prio = 1; prio < 4; prio++) {
+       for (prio = 0; prio < 4; prio++) {
                int h;
-               for (h = 0; h < IP6_SIT_HASH_SIZE; h++) {
+               for (h = 0; h < (prio ? IP6_SIT_HASH_SIZE : 1); h++) {
                        struct ip_tunnel *t;
 
                        t = rtnl_dereference(sitn->tunnels[prio][h]);
index 68a0de02b56185f76d43ac549e4f5317f739020c..860bc35383d5fa09b72946b19dd74578845e99e9 100644 (file)
@@ -1788,8 +1788,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
                }
 
                if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-                   sta->sdata->u.vlan.sta)
+                   sta->sdata->u.vlan.sta) {
+                       ieee80211_clear_fast_rx(sta);
                        RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
+               }
 
                if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
                        ieee80211_vif_dec_num_mcast(sta->sdata);
index ce4e3855fec18b801febaa0b23bf233397569196..96f487fc0071353573c44db20642ca39813437fc 100644 (file)
@@ -4707,7 +4707,10 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
                timeout = sta->rx_stats.last_rx;
        timeout += IEEE80211_CONNECTION_IDLE_TIME;
 
-       if (time_is_before_jiffies(timeout)) {
+       /* If timeout is after now, then update timer to fire at
+        * the later date, but do not actually probe at this time.
+        */
+       if (time_is_after_jiffies(timeout)) {
                mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(timeout));
                return;
        }
index 5d06de61047a6e80f27fa581f434dc6d10aac7a0..3b3bcefbf65770226592e8025d7d6ec3e683e0c2 100644 (file)
@@ -3573,7 +3573,7 @@ begin:
            test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
                goto out;
 
-       if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) {
+       if (vif->txqs_stopped[txq->ac]) {
                set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
                goto out;
        }
index 585d33144c33f847a9201e55a7cb9646c3c553cf..55550ead2ced8cbd1273bb4c0a02a0928080565f 100644 (file)
@@ -152,7 +152,7 @@ err_tfm0:
        crypto_free_sync_skcipher(key->tfm0);
 err_tfm:
        for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
-               if (key->tfm[i])
+               if (!IS_ERR_OR_NULL(key->tfm[i]))
                        crypto_free_aead(key->tfm[i]);
 
        kfree_sensitive(key);
index 1590b9d4cde28f0e6acdd04d7bf2a09ad6a0da8a..4bde960e19dc00fb379ef694484f36cefc94fa6a 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/netdevice.h>
 #include <linux/sched/signal.h>
 #include <linux/atomic.h>
-#include <linux/igmp.h>
 #include <net/sock.h>
 #include <net/inet_common.h>
 #include <net/inet_hashtables.h>
@@ -20,7 +19,6 @@
 #include <net/tcp_states.h>
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 #include <net/transp_v6.h>
-#include <net/addrconf.h>
 #endif
 #include <net/mptcp.h>
 #include <net/xfrm.h>
@@ -2878,6 +2876,48 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
        return ret;
 }
 
+static bool mptcp_unsupported(int level, int optname)
+{
+       if (level == SOL_IP) {
+               switch (optname) {
+               case IP_ADD_MEMBERSHIP:
+               case IP_ADD_SOURCE_MEMBERSHIP:
+               case IP_DROP_MEMBERSHIP:
+               case IP_DROP_SOURCE_MEMBERSHIP:
+               case IP_BLOCK_SOURCE:
+               case IP_UNBLOCK_SOURCE:
+               case MCAST_JOIN_GROUP:
+               case MCAST_LEAVE_GROUP:
+               case MCAST_JOIN_SOURCE_GROUP:
+               case MCAST_LEAVE_SOURCE_GROUP:
+               case MCAST_BLOCK_SOURCE:
+               case MCAST_UNBLOCK_SOURCE:
+               case MCAST_MSFILTER:
+                       return true;
+               }
+               return false;
+       }
+       if (level == SOL_IPV6) {
+               switch (optname) {
+               case IPV6_ADDRFORM:
+               case IPV6_ADD_MEMBERSHIP:
+               case IPV6_DROP_MEMBERSHIP:
+               case IPV6_JOIN_ANYCAST:
+               case IPV6_LEAVE_ANYCAST:
+               case MCAST_JOIN_GROUP:
+               case MCAST_LEAVE_GROUP:
+               case MCAST_JOIN_SOURCE_GROUP:
+               case MCAST_LEAVE_SOURCE_GROUP:
+               case MCAST_BLOCK_SOURCE:
+               case MCAST_UNBLOCK_SOURCE:
+               case MCAST_MSFILTER:
+                       return true;
+               }
+               return false;
+       }
+       return false;
+}
+
 static int mptcp_setsockopt(struct sock *sk, int level, int optname,
                            sockptr_t optval, unsigned int optlen)
 {
@@ -2886,6 +2926,9 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
 
        pr_debug("msk=%p", msk);
 
+       if (mptcp_unsupported(level, optname))
+               return -ENOPROTOOPT;
+
        if (level == SOL_SOCKET)
                return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
 
@@ -3419,34 +3462,10 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
        return mask;
 }
 
-static int mptcp_release(struct socket *sock)
-{
-       struct mptcp_subflow_context *subflow;
-       struct sock *sk = sock->sk;
-       struct mptcp_sock *msk;
-
-       if (!sk)
-               return 0;
-
-       lock_sock(sk);
-
-       msk = mptcp_sk(sk);
-
-       mptcp_for_each_subflow(msk, subflow) {
-               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
-               ip_mc_drop_socket(ssk);
-       }
-
-       release_sock(sk);
-
-       return inet_release(sock);
-}
-
 static const struct proto_ops mptcp_stream_ops = {
        .family            = PF_INET,
        .owner             = THIS_MODULE,
-       .release           = mptcp_release,
+       .release           = inet_release,
        .bind              = mptcp_bind,
        .connect           = mptcp_stream_connect,
        .socketpair        = sock_no_socketpair,
@@ -3538,35 +3557,10 @@ void __init mptcp_proto_init(void)
 }
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
-static int mptcp6_release(struct socket *sock)
-{
-       struct mptcp_subflow_context *subflow;
-       struct mptcp_sock *msk;
-       struct sock *sk = sock->sk;
-
-       if (!sk)
-               return 0;
-
-       lock_sock(sk);
-
-       msk = mptcp_sk(sk);
-
-       mptcp_for_each_subflow(msk, subflow) {
-               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
-               ip_mc_drop_socket(ssk);
-               ipv6_sock_mc_close(ssk);
-               ipv6_sock_ac_close(ssk);
-       }
-
-       release_sock(sk);
-       return inet6_release(sock);
-}
-
 static const struct proto_ops mptcp_v6_stream_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
-       .release           = mptcp6_release,
+       .release           = inet6_release,
        .bind              = mptcp_bind,
        .connect           = mptcp_stream_connect,
        .socketpair        = sock_no_socketpair,
index a9cb355324d1ab5a4d33870c13f2a178ec793a81..ffff8da707b8c1c9d8f771072ce39f906cb67a69 100644 (file)
@@ -105,13 +105,20 @@ static void ncsi_channel_monitor(struct timer_list *t)
        monitor_state = nc->monitor.state;
        spin_unlock_irqrestore(&nc->lock, flags);
 
-       if (!enabled || chained) {
-               ncsi_stop_channel_monitor(nc);
-               return;
-       }
+       if (!enabled)
+               return;         /* expected race disabling timer */
+       if (WARN_ON_ONCE(chained))
+               goto bad_state;
+
        if (state != NCSI_CHANNEL_INACTIVE &&
            state != NCSI_CHANNEL_ACTIVE) {
-               ncsi_stop_channel_monitor(nc);
+bad_state:
+               netdev_warn(ndp->ndev.dev,
+                           "Bad NCSI monitor state channel %d 0x%x %s queue\n",
+                           nc->id, state, chained ? "on" : "off");
+               spin_lock_irqsave(&nc->lock, flags);
+               nc->monitor.enabled = false;
+               spin_unlock_irqrestore(&nc->lock, flags);
                return;
        }
 
@@ -136,10 +143,9 @@ static void ncsi_channel_monitor(struct timer_list *t)
                ncsi_report_link(ndp, true);
                ndp->flags |= NCSI_DEV_RESHUFFLE;
 
-               ncsi_stop_channel_monitor(nc);
-
                ncm = &nc->modes[NCSI_MODE_LINK];
                spin_lock_irqsave(&nc->lock, flags);
+               nc->monitor.enabled = false;
                nc->state = NCSI_CHANNEL_INVISIBLE;
                ncm->data[2] &= ~0x1;
                spin_unlock_irqrestore(&nc->lock, flags);
index 0ee702d374b028df2cc8fdaa3fe1cbd1450819db..c6c0cb46566456c0718b77995c173bebd20205c8 100644 (file)
@@ -266,6 +266,7 @@ static const char* l4proto_name(u16 proto)
        case IPPROTO_GRE: return "gre";
        case IPPROTO_SCTP: return "sctp";
        case IPPROTO_UDPLITE: return "udplite";
+       case IPPROTO_ICMPV6: return "icmpv6";
        }
 
        return "unknown";
index 2a6993fa40d78dcccce1e4425ab3cabf6120a899..1c5460e7bce875d2c9cff8bdb6a2c8c5b9ee4f44 100644 (file)
@@ -305,12 +305,12 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
                                     const __be32 *addr, const __be32 *mask)
 {
        struct flow_action_entry *entry;
-       int i;
+       int i, j;
 
-       for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) {
+       for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) {
                entry = flow_action_entry_next(flow_rule);
                flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
-                                   offset + i, &addr[i], mask);
+                                   offset + i, &addr[j], mask);
        }
 }
 
index f57f1a6ba96f6c6479ffa696ecd89d277223302f..589d2f6978d3809592e17ef537abecf44ebe8fda 100644 (file)
@@ -5295,16 +5295,35 @@ err_expr:
        return -ENOMEM;
 }
 
-static void nft_set_elem_expr_setup(const struct nft_set_ext *ext, int i,
-                                   struct nft_expr *expr_array[])
+static int nft_set_elem_expr_setup(struct nft_ctx *ctx,
+                                  const struct nft_set_ext *ext,
+                                  struct nft_expr *expr_array[],
+                                  u32 num_exprs)
 {
        struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext);
-       struct nft_expr *expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
+       struct nft_expr *expr;
+       int i, err;
+
+       for (i = 0; i < num_exprs; i++) {
+               expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
+               err = nft_expr_clone(expr, expr_array[i]);
+               if (err < 0)
+                       goto err_elem_expr_setup;
+
+               elem_expr->size += expr_array[i]->ops->size;
+               nft_expr_destroy(ctx, expr_array[i]);
+               expr_array[i] = NULL;
+       }
+
+       return 0;
+
+err_elem_expr_setup:
+       for (; i < num_exprs; i++) {
+               nft_expr_destroy(ctx, expr_array[i]);
+               expr_array[i] = NULL;
+       }
 
-       memcpy(expr, expr_array[i], expr_array[i]->ops->size);
-       elem_expr->size += expr_array[i]->ops->size;
-       kfree(expr_array[i]);
-       expr_array[i] = NULL;
+       return -ENOMEM;
 }
 
 static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
@@ -5556,12 +5575,15 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                *nft_set_ext_obj(ext) = obj;
                obj->use++;
        }
-       for (i = 0; i < num_exprs; i++)
-               nft_set_elem_expr_setup(ext, i, expr_array);
+       err = nft_set_elem_expr_setup(ctx, ext, expr_array, num_exprs);
+       if (err < 0)
+               goto err_elem_expr;
 
        trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
-       if (trans == NULL)
-               goto err_trans;
+       if (trans == NULL) {
+               err = -ENOMEM;
+               goto err_elem_expr;
+       }
 
        ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
        err = set->ops->insert(ctx->net, set, &elem, &ext2);
@@ -5605,7 +5627,7 @@ err_set_full:
        set->ops->remove(ctx->net, set, &elem);
 err_element_clash:
        kfree(trans);
-err_trans:
+err_elem_expr:
        if (obj)
                obj->use--;
 
index 0e2c315c3b5ed5503b93ea0972d06a111ca6a4ab..82ec27bdf94120f89c8c475f02e56d0d64f9e385 100644 (file)
@@ -76,13 +76,13 @@ static int nft_limit_init(struct nft_limit *limit,
                return -EOVERFLOW;
 
        if (pkts) {
-               tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
+               tokens = div64_u64(limit->nsecs, limit->rate) * limit->burst;
        } else {
                /* The token bucket size limits the number of tokens can be
                 * accumulated. tokens_max specifies the bucket size.
                 * tokens_max = unit * (rate + burst) / rate.
                 */
-               tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
+               tokens = div64_u64(limit->nsecs * (limit->rate + limit->burst),
                                 limit->rate);
        }
 
index 6bd31a7a27fc5856271e8405d638a5aa33441c9a..92e9d4ebc5e8d713a860b176d0adf2b4c998f45e 100644 (file)
@@ -733,7 +733,7 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
 {
        const struct xt_match *match = m->u.kernel.match;
        struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
-       int pad, off = xt_compat_match_offset(match);
+       int off = xt_compat_match_offset(match);
        u_int16_t msize = cm->u.user.match_size;
        char name[sizeof(m->u.user.name)];
 
@@ -743,9 +743,6 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
                match->compat_from_user(m->data, cm->data);
        else
                memcpy(m->data, cm->data, msize - sizeof(*cm));
-       pad = XT_ALIGN(match->matchsize) - match->matchsize;
-       if (pad > 0)
-               memset(m->data + match->matchsize, 0, pad);
 
        msize += off;
        m->u.user.match_size = msize;
@@ -1116,7 +1113,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
 {
        const struct xt_target *target = t->u.kernel.target;
        struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
-       int pad, off = xt_compat_target_offset(target);
+       int off = xt_compat_target_offset(target);
        u_int16_t tsize = ct->u.user.target_size;
        char name[sizeof(t->u.user.name)];
 
@@ -1126,9 +1123,6 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
                target->compat_from_user(t->data, ct->data);
        else
                memcpy(t->data, ct->data, tsize - sizeof(*ct));
-       pad = XT_ALIGN(target->targetsize) - target->targetsize;
-       if (pad > 0)
-               memset(t->data + target->targetsize, 0, pad);
 
        tsize += off;
        t->u.user.target_size = tsize;
index dd488938447f9735daf1fb727c339a9874bab38b..3a62f97acf39d11a18547308a2f375eb5f37abaa 100644 (file)
@@ -1019,7 +1019,6 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        return -EINVAL;
        }
 
-       netlink_lock_table();
        if (nlk->netlink_bind && groups) {
                int group;
 
@@ -1031,13 +1030,14 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        if (!err)
                                continue;
                        netlink_undo_bind(group, groups, sk);
-                       goto unlock;
+                       return err;
                }
        }
 
        /* No need for barriers here as we return to user-space without
         * using any of the bound attributes.
         */
+       netlink_lock_table();
        if (!bound) {
                err = nladdr->nl_pid ?
                        netlink_insert(sk, nladdr->nl_pid) :
index d257ed3b732ae356441b7ea205c79817fd2daad6..a3b46f8888033e191e133eedd0f3ea65df82bd33 100644 (file)
@@ -108,11 +108,13 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
                                          llcp_sock->service_name_len,
                                          GFP_KERNEL);
        if (!llcp_sock->service_name) {
+               nfc_llcp_local_put(llcp_sock->local);
                ret = -ENOMEM;
                goto put_dev;
        }
        llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
        if (llcp_sock->ssap == LLCP_SAP_MAX) {
+               nfc_llcp_local_put(llcp_sock->local);
                kfree(llcp_sock->service_name);
                llcp_sock->service_name = NULL;
                ret = -EADDRINUSE;
@@ -671,6 +673,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
                ret = -EISCONN;
                goto error;
        }
+       if (sk->sk_state == LLCP_CONNECTING) {
+               ret = -EINPROGRESS;
+               goto error;
+       }
 
        dev = nfc_get_device(addr->dev_idx);
        if (dev == NULL) {
@@ -702,6 +708,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
        llcp_sock->local = nfc_llcp_local_get(local);
        llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
        if (llcp_sock->ssap == LLCP_SAP_MAX) {
+               nfc_llcp_local_put(llcp_sock->local);
                ret = -ENOMEM;
                goto put_dev;
        }
@@ -743,9 +750,12 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
 
 sock_unlink:
        nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
+       kfree(llcp_sock->service_name);
+       llcp_sock->service_name = NULL;
 
 sock_llcp_release:
        nfc_llcp_put_ssap(local, llcp_sock->ssap);
+       nfc_llcp_local_put(llcp_sock->local);
 
 put_dev:
        nfc_put_device(dev);
index 71cec03e861249209c1cb3f20ca09be2f300091b..d217bd91176b612b9b155b5ad4598fe0c40f344b 100644 (file)
@@ -2034,10 +2034,10 @@ static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
 static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
                                          struct sk_buff *reply)
 {
-       struct ovs_zone_limit zone_limit;
-
-       zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
-       zone_limit.limit = info->default_limit;
+       struct ovs_zone_limit zone_limit = {
+               .zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE,
+               .limit   = info->default_limit,
+       };
 
        return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
 }
index dfc820ee553a0948cc64f25f5b8f9c5d0061cfd4..1e4fb568fa841d10e82564291c02e0e2d84c9ac4 100644 (file)
@@ -271,7 +271,10 @@ static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
                flow = kzalloc(sizeof(*flow), GFP_KERNEL);
                if (flow) {
                        init_waitqueue_head(&flow->resume_tx);
-                       radix_tree_insert(&node->qrtr_tx_flow, key, flow);
+                       if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
+                               kfree(flow);
+                               flow = NULL;
+                       }
                }
        }
        mutex_unlock(&node->qrtr_tx_lock);
index 071a261fdaabbfbefe2ddc007c162f61e70a1d96..799034e0f513d988334280186cbdf255fbf50eb7 100644 (file)
@@ -347,8 +347,9 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
        rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
        rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
        if (IS_ERR(rm->data.op_sg)) {
+               void *err = ERR_CAST(rm->data.op_sg);
                rds_message_put(rm);
-               return ERR_CAST(rm->data.op_sg);
+               return err;
        }
 
        for (i = 0; i < rm->data.op_nents; ++i) {
index 68d6ef9e59fc47c719ca22d684171ea26691ccbc..ac15a944573f7fdc3258a3fc1df8e7c825b21737 100644 (file)
@@ -69,7 +69,7 @@ struct rfkill {
 
 struct rfkill_int_event {
        struct list_head        list;
-       struct rfkill_event     ev;
+       struct rfkill_event_ext ev;
 };
 
 struct rfkill_data {
@@ -253,7 +253,8 @@ static void rfkill_global_led_trigger_unregister(void)
 }
 #endif /* CONFIG_RFKILL_LEDS */
 
-static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
+static void rfkill_fill_event(struct rfkill_event_ext *ev,
+                             struct rfkill *rfkill,
                              enum rfkill_operation op)
 {
        unsigned long flags;
@@ -1237,7 +1238,7 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *pos)
 {
        struct rfkill *rfkill;
-       struct rfkill_event ev;
+       struct rfkill_event_ext ev;
        int ret;
 
        /* we don't need the 'hard' variable but accept it */
index b919826939e0bd4e3b5fc986f024b3e1f0f5e4c6..f6d5755d669eba66a40b44088b362c8ae6f8d934 100644 (file)
@@ -158,7 +158,7 @@ static int __tcf_action_put(struct tc_action *p, bool bind)
        return 0;
 }
 
-int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
+static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
 {
        int ret = 0;
 
@@ -184,7 +184,18 @@ int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
 
        return ret;
 }
-EXPORT_SYMBOL(__tcf_idr_release);
+
+int tcf_idr_release(struct tc_action *a, bool bind)
+{
+       const struct tc_action_ops *ops = a->ops;
+       int ret;
+
+       ret = __tcf_idr_release(a, bind, false);
+       if (ret == ACT_P_DELETED)
+               module_put(ops->owner);
+       return ret;
+}
+EXPORT_SYMBOL(tcf_idr_release);
 
 static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
 {
@@ -493,6 +504,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
        }
 
        p->idrinfo = idrinfo;
+       __module_get(ops->owner);
        p->ops = ops;
        *a = p;
        return 0;
@@ -992,7 +1004,8 @@ struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind,
-                                   struct tc_action_ops *a_o, bool rtnl_held,
+                                   struct tc_action_ops *a_o, int *init_res,
+                                   bool rtnl_held,
                                    struct netlink_ext_ack *extack)
 {
        struct nla_bitfield32 flags = { 0, 0 };
@@ -1028,6 +1041,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        }
        if (err < 0)
                goto err_out;
+       *init_res = err;
 
        if (!name && tb[TCA_ACT_COOKIE])
                tcf_set_action_cookie(&a->act_cookie, cookie);
@@ -1035,13 +1049,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        if (!name)
                a->hw_stats = hw_stats;
 
-       /* module count goes up only when brand new policy is created
-        * if it exists and is only bound to in a_o->init() then
-        * ACT_P_CREATED is not returned (a zero is).
-        */
-       if (err != ACT_P_CREATED)
-               module_put(a_o->owner);
-
        return a;
 
 err_out:
@@ -1056,7 +1063,7 @@ err_out:
 
 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
                    struct nlattr *est, char *name, int ovr, int bind,
-                   struct tc_action *actions[], size_t *attr_size,
+                   struct tc_action *actions[], int init_res[], size_t *attr_size,
                    bool rtnl_held, struct netlink_ext_ack *extack)
 {
        struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
@@ -1084,7 +1091,8 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
 
        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
                act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
-                                       ops[i - 1], rtnl_held, extack);
+                                       ops[i - 1], &init_res[i - 1], rtnl_held,
+                                       extack);
                if (IS_ERR(act)) {
                        err = PTR_ERR(act);
                        goto err;
@@ -1100,7 +1108,8 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
        tcf_idr_insert_many(actions);
 
        *attr_size = tcf_action_full_attrs_size(sz);
-       return i - 1;
+       err = i - 1;
+       goto err_mod;
 
 err:
        tcf_action_destroy(actions, bind);
@@ -1497,12 +1506,13 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
                          struct netlink_ext_ack *extack)
 {
        size_t attr_size = 0;
-       int loop, ret;
+       int loop, ret, i;
        struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
+       int init_res[TCA_ACT_MAX_PRIO] = {};
 
        for (loop = 0; loop < 10; loop++) {
                ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
-                                     actions, &attr_size, true, extack);
+                                     actions, init_res, &attr_size, true, extack);
                if (ret != -EAGAIN)
                        break;
        }
@@ -1510,8 +1520,12 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
        if (ret < 0)
                return ret;
        ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
-       if (ovr)
-               tcf_action_put_many(actions);
+
+       /* only put existing actions */
+       for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
+               if (init_res[i] == ACT_P_CREATED)
+                       actions[i] = NULL;
+       tcf_action_put_many(actions);
 
        return ret;
 }
index 13341e7fb077c481d66cbb028fc026746e81aa3e..340d5af86e87fec6003b6cf1f5c3d9de03bcaf15 100644 (file)
@@ -646,7 +646,7 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
        struct net_device *dev = block_cb->indr.dev;
        struct Qdisc *sch = block_cb->indr.sch;
        struct netlink_ext_ack extack = {};
-       struct flow_block_offload bo;
+       struct flow_block_offload bo = {};
 
        tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
                               block_cb->indr.binder_type,
@@ -3040,6 +3040,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 {
 #ifdef CONFIG_NET_CLS_ACT
        {
+               int init_res[TCA_ACT_MAX_PRIO] = {};
                struct tc_action *act;
                size_t attr_size = 0;
 
@@ -3051,12 +3052,11 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
                                return PTR_ERR(a_o);
                        act = tcf_action_init_1(net, tp, tb[exts->police],
                                                rate_tlv, "police", ovr,
-                                               TCA_ACT_BIND, a_o, rtnl_held,
-                                               extack);
-                       if (IS_ERR(act)) {
-                               module_put(a_o->owner);
+                                               TCA_ACT_BIND, a_o, init_res,
+                                               rtnl_held, extack);
+                       module_put(a_o->owner);
+                       if (IS_ERR(act))
                                return PTR_ERR(act);
-                       }
 
                        act->type = exts->type = TCA_OLD_COMPAT;
                        exts->actions[0] = act;
@@ -3067,8 +3067,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 
                        err = tcf_action_init(net, tp, tb[exts->action],
                                              rate_tlv, NULL, ovr, TCA_ACT_BIND,
-                                             exts->actions, &attr_size,
-                                             rtnl_held, extack);
+                                             exts->actions, init_res,
+                                             &attr_size, rtnl_held, extack);
                        if (err < 0)
                                return err;
                        exts->nr_actions = err;
index 62e12cb41a3e1c0b2403552eb05128e4d3ac50b6..081c11d5717c4a7ad2d76a45f1fd45c47100a5bf 100644 (file)
@@ -1675,9 +1675,10 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
                                          cl->parent->common.classid,
                                          NULL);
                if (q->offload) {
-                       if (new_q)
+                       if (new_q) {
                                htb_set_lockdep_class_child(new_q);
-                       htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+                               htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+                       }
                }
        }
 
index 2f1f0a3784083088bf9cfeb3b4c84e1391fb9e54..6af6b95bdb6723a3d600c1defae08fd897c7a308 100644 (file)
@@ -134,6 +134,9 @@ teql_destroy(struct Qdisc *sch)
        struct teql_sched_data *dat = qdisc_priv(sch);
        struct teql_master *master = dat->m;
 
+       if (!master)
+               return;
+
        prev = master->slaves;
        if (prev) {
                do {
index c3e89c776e66380dbf518294ba7bf1e9bb5f01c0..bd08807c9e44758b56cdf1cad94dda7184e14fb5 100644 (file)
@@ -664,8 +664,8 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
        if (!(type & IPV6_ADDR_UNICAST))
                return 0;
 
-       return sp->inet.freebind || net->ipv6.sysctl.ip_nonlocal_bind ||
-               ipv6_chk_addr(net, in6, NULL, 0);
+       return ipv6_can_nonlocal_bind(net, &sp->inet) ||
+              ipv6_chk_addr(net, in6, NULL, 0);
 }
 
 /* This function checks if the address is a valid address to be used for
@@ -954,8 +954,7 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
                        net = sock_net(&opt->inet.sk);
                        rcu_read_lock();
                        dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
-                       if (!dev || !(opt->inet.freebind ||
-                                     net->ipv6.sysctl.ip_nonlocal_bind ||
+                       if (!dev || !(ipv6_can_nonlocal_bind(net, &opt->inet) ||
                                      ipv6_chk_addr(net, &addr->v6.sin6_addr,
                                                    dev, 0))) {
                                rcu_read_unlock();
index a710917c5ac73ab823aa9f8d312c20083ca991ce..b9b3d899a611ccf3b998fe7876d407a8b7664d82 100644 (file)
@@ -1520,11 +1520,9 @@ static void sctp_close(struct sock *sk, long timeout)
 
        /* Supposedly, no process has access to the socket, but
         * the net layers still may.
-        * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
-        * held and that should be grabbed before socket lock.
         */
-       spin_lock_bh(&net->sctp.addr_wq_lock);
-       bh_lock_sock_nested(sk);
+       local_bh_disable();
+       bh_lock_sock(sk);
 
        /* Hold the sock, since sk_common_release() will put sock_put()
         * and we have just a little more cleanup.
@@ -1533,7 +1531,7 @@ static void sctp_close(struct sock *sk, long timeout)
        sk_common_release(sk);
 
        bh_unlock_sock(sk);
-       spin_unlock_bh(&net->sctp.addr_wq_lock);
+       local_bh_enable();
 
        sock_put(sk);
 
@@ -4993,9 +4991,6 @@ static int sctp_init_sock(struct sock *sk)
        sk_sockets_allocated_inc(sk);
        sock_prot_inuse_add(net, sk->sk_prot, 1);
 
-       /* Nothing can fail after this block, otherwise
-        * sctp_destroy_sock() will be called without addr_wq_lock held
-        */
        if (net->sctp.default_auto_asconf) {
                spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
                list_add_tail(&sp->auto_asconf_list,
@@ -5030,7 +5025,9 @@ static void sctp_destroy_sock(struct sock *sk)
 
        if (sp->do_auto_asconf) {
                sp->do_auto_asconf = 0;
+               spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
                list_del(&sp->auto_asconf_list);
+               spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
        }
        sctp_endpoint_free(sp->ep);
        local_bh_disable();
index 6bf4550aa1ac1ec588cc881713988cc0a8d10c79..57c6a1a719e243157f91d2b6aa1f9e9e63298a3a 100644 (file)
@@ -154,9 +154,9 @@ struct tipc_media {
  * care of initializing all other fields.
  */
 struct tipc_bearer {
-       void __rcu *media_ptr;                  /* initalized by media */
-       u32 mtu;                                /* initalized by media */
-       struct tipc_media_addr addr;            /* initalized by media */
+       void __rcu *media_ptr;                  /* initialized by media */
+       u32 mtu;                                /* initialized by media */
+       struct tipc_media_addr addr;            /* initialized by media */
        char name[TIPC_MAX_BEARER_NAME];
        struct tipc_media *media;
        struct tipc_media_addr bcast_addr;
index f4fca8f7f63fa5af9706c40c100abdabbb2c5d5c..97710ce36047ce6ec4785676fdea46820a4629fb 100644 (file)
@@ -1941,12 +1941,13 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
                        goto rcv;
                if (tipc_aead_clone(&tmp, aead) < 0)
                        goto rcv;
+               WARN_ON(!refcount_inc_not_zero(&tmp->refcnt));
                if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
                        tipc_aead_free(&tmp->rcu);
                        goto rcv;
                }
                tipc_aead_put(aead);
-               aead = tipc_aead_get(tmp);
+               aead = tmp;
        }
 
        if (unlikely(err)) {
index a129f661bee3117b368bdae6c69aa5db2ec5fa1b..faf6bf55451466b05971af220014cef43600e072 100644 (file)
@@ -89,7 +89,7 @@
  *     - A spin lock to protect the registry of kernel/driver users (reg.c)
  *     - A global spin_lock (tipc_port_lock), which only task is to ensure
  *       consistency where more than one port is involved in an operation,
- *       i.e., whe a port is part of a linked list of ports.
+ *       i.e., when a port is part of a linked list of ports.
  *       There are two such lists; 'port_list', which is used for management,
  *       and 'wait_list', which is used to queue ports during congestion.
  *
index 136338b85504bee716a48cd03c92eb88d016eadf..e0ee83263a39137e6b31468deefd43702777a041 100644 (file)
@@ -1734,7 +1734,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
 }
 
 /* tipc_node_xmit_skb(): send single buffer to destination
- * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
  * messages, which will not be rejected
  * The only exception is datagram messages rerouted after secondary
  * lookup, which are rare and safe to dispose of anyway.
index cebcc104dc70ae6313278e2958e51a04909b956f..022999e0202d71d46b39b6d4785a4aca6a48731a 100644 (file)
@@ -1265,7 +1265,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                spin_lock_bh(&inputq->lock);
                if (skb_peek(arrvq) == skb) {
                        skb_queue_splice_tail_init(&tmpq, inputq);
-                       kfree_skb(__skb_dequeue(arrvq));
+                       __skb_dequeue(arrvq);
                }
                spin_unlock_bh(&inputq->lock);
                __skb_queue_purge(&tmpq);
index 034af85f79d84117cd2e2505cfd0d4a391bacdb3..b1df42e4f1eb9ad637e8190d243434430c28e102 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 
 #include <linux/if.h>
@@ -229,9 +229,13 @@ static int validate_beacon_head(const struct nlattr *attr,
        unsigned int len = nla_len(attr);
        const struct element *elem;
        const struct ieee80211_mgmt *mgmt = (void *)data;
-       bool s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
        unsigned int fixedlen, hdrlen;
+       bool s1g_bcn;
 
+       if (len < offsetofend(typeof(*mgmt), frame_control))
+               goto err;
+
+       s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
        if (s1g_bcn) {
                fixedlen = offsetof(struct ieee80211_ext,
                                    u.s1g_beacon.variable);
@@ -5485,7 +5489,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                        rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP],
                        &params);
                if (err)
-                       return err;
+                       goto out;
        }
 
        nl80211_calculate_ap_params(&params);
index 019952d4fc7db8d78aba6c194a76b09150c1c361..758eb7d2a7068b14267290c526763017007acfae 100644 (file)
@@ -2352,14 +2352,16 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
                return NULL;
 
        if (ext) {
-               struct ieee80211_s1g_bcn_compat_ie *compat;
-               u8 *ie;
+               const struct ieee80211_s1g_bcn_compat_ie *compat;
+               const struct element *elem;
 
-               ie = (void *)cfg80211_find_ie(WLAN_EID_S1G_BCN_COMPAT,
-                                             variable, ielen);
-               if (!ie)
+               elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT,
+                                         variable, ielen);
+               if (!elem)
+                       return NULL;
+               if (elem->datalen < sizeof(*compat))
                        return NULL;
-               compat = (void *)(ie + 2);
+               compat = (void *)elem->data;
                bssid = ext->u.s1g_beacon.sa;
                capability = le16_to_cpu(compat->compat_info);
                beacon_int = le16_to_cpu(compat->beacon_int);
index 07756ca5e3b5960198955db356741be90b9355c5..08a70b4f090cc28b7aed3afe5dd61e4f9542e87b 100644 (file)
@@ -529,7 +529,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
                cfg80211_sme_free(wdev);
        }
 
-       if (WARN_ON(wdev->conn))
+       if (wdev->conn)
                return -EINPROGRESS;
 
        wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL);
index d8e8a11ca845e3603680c61aa0dc3a9b2ecfefe2..a20aec9d73933a5df26c36581c0793f2fd328624 100644 (file)
@@ -216,7 +216,7 @@ static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb,
        case XFRM_MSG_GETSADINFO:
        case XFRM_MSG_GETSPDINFO:
        default:
-               WARN_ONCE(1, "unsupported nlmsg_type %d", nlh_src->nlmsg_type);
+               pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
                return ERR_PTR(-EOPNOTSUPP);
        }
 
@@ -277,7 +277,7 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src)
                return xfrm_nla_cpy(dst, src, nla_len(src));
        default:
                BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID);
-               WARN_ONCE(1, "unsupported nla_type %d", src->nla_type);
+               pr_warn_once("unsupported nla_type %d\n", src->nla_type);
                return -EOPNOTSUPP;
        }
 }
@@ -315,8 +315,10 @@ static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src
        struct sk_buff *new = NULL;
        int err;
 
-       if (WARN_ON_ONCE(type >= ARRAY_SIZE(xfrm_msg_min)))
+       if (type >= ARRAY_SIZE(xfrm_msg_min)) {
+               pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
                return -EOPNOTSUPP;
+       }
 
        if (skb_shinfo(skb)->frag_list == NULL) {
                new = alloc_skb(skb->len + skb_tailroom(skb), GFP_ATOMIC);
@@ -378,6 +380,10 @@ static int xfrm_attr_cpy32(void *dst, size_t *pos, const struct nlattr *src,
        struct nlmsghdr *nlmsg = dst;
        struct nlattr *nla;
 
+       /* xfrm_user_rcv_msg_compat() relies on fact that 32-bit messages
+        * have the same len or shorted than 64-bit ones.
+        * 32-bit translation that is bigger than 64-bit original is unexpected.
+        */
        if (WARN_ON_ONCE(copy_len > payload))
                copy_len = payload;
 
index edf11893dbe81ffcc026a18b8b66f19355c6ff39..6d6917b68856fc7f544961d018a23a2c13bc63d9 100644 (file)
@@ -134,8 +134,6 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                return skb;
        }
 
-       xo->flags |= XFRM_XMIT;
-
        if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
                struct sk_buff *segs;
 
index 495b1f5c979bc31bacdbca3aae4c50f4f7f21541..8831f5a9e99233c8b1b19bd20705c5174f88530b 100644 (file)
@@ -306,6 +306,8 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
                        icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                } else {
+                       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+                               goto xmit;
                        icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                      htonl(mtu));
                }
@@ -314,6 +316,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                return -EMSGSIZE;
        }
 
+xmit:
        xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = tdev;
index a7ab19353313ce1d8b6ce6a4e6f6c1821c8982d0..e4cb0ff4dcf413227676d9adf62c9ac2898d5e9a 100644 (file)
@@ -503,22 +503,22 @@ out:
        return err;
 }
 
-int xfrm_output_resume(struct sk_buff *skb, int err)
+int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
 {
        struct net *net = xs_net(skb_dst(skb)->xfrm);
 
        while (likely((err = xfrm_output_one(skb, err)) == 0)) {
                nf_reset_ct(skb);
 
-               err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
+               err = skb_dst(skb)->ops->local_out(net, sk, skb);
                if (unlikely(err != 1))
                        goto out;
 
                if (!skb_dst(skb)->xfrm)
-                       return dst_output(net, skb->sk, skb);
+                       return dst_output(net, sk, skb);
 
                err = nf_hook(skb_dst(skb)->ops->family,
-                             NF_INET_POST_ROUTING, net, skb->sk, skb,
+                             NF_INET_POST_ROUTING, net, sk, skb,
                              NULL, skb_dst(skb)->dev, xfrm_output2);
                if (unlikely(err != 1))
                        goto out;
@@ -534,7 +534,7 @@ EXPORT_SYMBOL_GPL(xfrm_output_resume);
 
 static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       return xfrm_output_resume(skb, 1);
+       return xfrm_output_resume(sk, skb, 1);
 }
 
 static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -660,6 +660,12 @@ static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err;
 
+       if (x->outer_mode.encap == XFRM_MODE_BEET &&
+           ip_is_fragment(ip_hdr(skb))) {
+               net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
+               return -EAFNOSUPPORT;
+       }
+
        err = xfrm4_tunnel_check_size(skb);
        if (err)
                return err;
@@ -705,8 +711,15 @@ out:
 static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_IPV6)
+       unsigned int ptr = 0;
        int err;
 
+       if (x->outer_mode.encap == XFRM_MODE_BEET &&
+           ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
+               net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
+               return -EAFNOSUPPORT;
+       }
+
        err = xfrm6_tunnel_check_size(skb);
        if (err)
                return err;
index d01ca1a184189bfc00d111cfda1443baee571bda..4496f7efa220017b1c8d8a747e6be3aef65f07b3 100644 (file)
@@ -44,7 +44,6 @@ static void xfrm_state_gc_task(struct work_struct *work);
  */
 
 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
-static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
 static struct kmem_cache *xfrm_state_cache __ro_after_init;
 
 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
@@ -140,7 +139,7 @@ static void xfrm_hash_resize(struct work_struct *work)
        }
 
        spin_lock_bh(&net->xfrm.xfrm_state_lock);
-       write_seqcount_begin(&xfrm_state_hash_generation);
+       write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
 
        nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
        odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
@@ -156,7 +155,7 @@ static void xfrm_hash_resize(struct work_struct *work)
        rcu_assign_pointer(net->xfrm.state_byspi, nspi);
        net->xfrm.state_hmask = nhashmask;
 
-       write_seqcount_end(&xfrm_state_hash_generation);
+       write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 
        osize = (ohashmask + 1) * sizeof(struct hlist_head);
@@ -1063,7 +1062,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
 
        to_put = NULL;
 
-       sequence = read_seqcount_begin(&xfrm_state_hash_generation);
+       sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
 
        rcu_read_lock();
        h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
@@ -1176,7 +1175,7 @@ out:
        if (to_put)
                xfrm_state_put(to_put);
 
-       if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
+       if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
                *err = -EAGAIN;
                if (x) {
                        xfrm_state_put(x);
@@ -2666,6 +2665,8 @@ int __net_init xfrm_state_init(struct net *net)
        net->xfrm.state_num = 0;
        INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
        spin_lock_init(&net->xfrm.xfrm_state_lock);
+       seqcount_spinlock_init(&net->xfrm.xfrm_state_hash_generation,
+                              &net->xfrm.xfrm_state_lock);
        return 0;
 
 out_byspi:
index c36106bce80ee273342558186ca3399b91eb72f1..9adb6d247818f1355e2e478256022cd10afd05f2 100644 (file)
@@ -14,6 +14,7 @@ hostprogs-always-$(CONFIG_ASN1)                               += asn1_compiler
 hostprogs-always-$(CONFIG_MODULE_SIG_FORMAT)           += sign-file
 hostprogs-always-$(CONFIG_SYSTEM_TRUSTED_KEYRING)      += extract-cert
 hostprogs-always-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE)    += insert-sys-cert
+hostprogs-always-$(CONFIG_SYSTEM_REVOCATION_LIST)      += extract-cert
 
 HOSTCFLAGS_sorttable.o = -I$(srctree)/tools/include
 HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
index 1e000cc2e7b4b2477655ce6ba74b4e4fa60b9c69..3d791908ed364ba71f9a589b42a707b72ef3878c 100644 (file)
@@ -2,6 +2,14 @@
 CFLAGS_KASAN_NOSANITIZE := -fno-builtin
 KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
 
+cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
+
+ifdef CONFIG_KASAN_STACK
+       stack_enable := 1
+else
+       stack_enable := 0
+endif
+
 ifdef CONFIG_KASAN_GENERIC
 
 ifdef CONFIG_KASAN_INLINE
@@ -12,8 +20,6 @@ endif
 
 CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
 
-cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
-
 # -fasan-shadow-offset fails without -fsanitize
 CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \
                        -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
@@ -27,7 +33,7 @@ else
        CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
         $(call cc-param,asan-globals=1) \
         $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
-        $(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \
+        $(call cc-param,asan-stack=$(stack_enable)) \
         $(call cc-param,asan-instrument-allocas=1)
 endif
 
@@ -36,14 +42,14 @@ endif # CONFIG_KASAN_GENERIC
 ifdef CONFIG_KASAN_SW_TAGS
 
 ifdef CONFIG_KASAN_INLINE
-    instrumentation_flags := -mllvm -hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET)
+    instrumentation_flags := $(call cc-param,hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET))
 else
-    instrumentation_flags := -mllvm -hwasan-instrument-with-calls=1
+    instrumentation_flags := $(call cc-param,hwasan-instrument-with-calls=1)
 endif
 
 CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
-               -mllvm -hwasan-instrument-stack=$(CONFIG_KASAN_STACK) \
-               -mllvm -hwasan-use-short-granules=0 \
+               $(call cc-param,hwasan-instrument-stack=$(stack_enable)) \
+               $(call cc-param,hwasan-use-short-granules=0) \
                $(instrumentation_flags)
 
 endif # CONFIG_KASAN_SW_TAGS
index cbdb5c83c08f9e45975af2a115d324473ea17935..3e784cf9f401fd480db03e03e7f43ee5d5d8e12b 100755 (executable)
@@ -243,7 +243,7 @@ if __name__ == '__main__':
         # Initialize SPDX data
         spdx = read_spdxdata(repo)
 
-        # Initilize the parser
+        # Initialize the parser
         parser = id_parser(spdx)
 
     except SPDXException as se:
index 269967c4fc1b67e81b403e9925497620974c68f8..a56c36470cb195c30f57faedb8701ef252722dae 100644 (file)
@@ -64,7 +64,7 @@ choice
        config GCC_PLUGIN_STRUCTLEAK_BYREF
                bool "zero-init structs passed by reference (strong)"
                depends on GCC_PLUGINS
-               depends on !(KASAN && KASAN_STACK=1)
+               depends on !(KASAN && KASAN_STACK)
                select GCC_PLUGIN_STRUCTLEAK
                help
                  Zero-initialize any structures on the stack that may
@@ -82,7 +82,7 @@ choice
        config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
                bool "zero-init anything passed by reference (very strong)"
                depends on GCC_PLUGINS
-               depends on !(KASAN && KASAN_STACK=1)
+               depends on !(KASAN && KASAN_STACK)
                select GCC_PLUGIN_STRUCTLEAK
                help
                  Zero-initialize any stack variables that may be passed
index a662024b4c70bc7d10d9b4c8130d822deecf092c..23240d793b074e645047e56169ed7d85f93c12ec 100644 (file)
@@ -84,6 +84,7 @@ int asymmetric_verify(struct key *keyring, const char *sig,
 {
        struct public_key_signature pks;
        struct signature_v2_hdr *hdr = (struct signature_v2_hdr *)sig;
+       const struct public_key *pk;
        struct key *key;
        int ret;
 
@@ -105,23 +106,20 @@ int asymmetric_verify(struct key *keyring, const char *sig,
        memset(&pks, 0, sizeof(pks));
 
        pks.hash_algo = hash_algo_name[hdr->hash_algo];
-       switch (hdr->hash_algo) {
-       case HASH_ALGO_STREEBOG_256:
-       case HASH_ALGO_STREEBOG_512:
-               /* EC-RDSA and Streebog should go together. */
-               pks.pkey_algo = "ecrdsa";
-               pks.encoding = "raw";
-               break;
-       case HASH_ALGO_SM3_256:
-               /* SM2 and SM3 should go together. */
-               pks.pkey_algo = "sm2";
-               pks.encoding = "raw";
-               break;
-       default:
-               pks.pkey_algo = "rsa";
+
+       pk = asymmetric_key_public_key(key);
+       pks.pkey_algo = pk->pkey_algo;
+       if (!strcmp(pk->pkey_algo, "rsa"))
                pks.encoding = "pkcs1";
-               break;
-       }
+       else if (!strncmp(pk->pkey_algo, "ecdsa-", 6))
+               /* edcsa-nist-p192 etc. */
+               pks.encoding = "x962";
+       else if (!strcmp(pk->pkey_algo, "ecrdsa") ||
+                  !strcmp(pk->pkey_algo, "sm2"))
+               pks.encoding = "raw";
+       else
+               return -ENOPKG;
+
        pks.digest = (u8 *)data;
        pks.digest_size = datalen;
        pks.s = hdr->sig;
index c5ba695c10e3a5afd1633fc5bd765c0f1bf7a303..5604bd57c99077d8238cde7b5e6335a13cf16dce 100644 (file)
@@ -55,6 +55,15 @@ static __init void uefi_blacklist_binary(const char *source,
        uefi_blacklist_hash(source, data, len, "bin:", 4);
 }
 
+/*
+ * Add an X509 cert to the revocation list.
+ */
+static __init void uefi_revocation_list_x509(const char *source,
+                                            const void *data, size_t len)
+{
+       add_key_to_revocation_list(data, len);
+}
+
 /*
  * Return the appropriate handler for particular signature list types found in
  * the UEFI db and MokListRT tables.
@@ -76,5 +85,7 @@ __init efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type)
                return uefi_blacklist_x509_tbs;
        if (efi_guidcmp(*sig_type, efi_cert_sha256_guid) == 0)
                return uefi_blacklist_binary;
+       if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
+               return uefi_revocation_list_x509;
        return 0;
 }
index ee4b4c666854f6b40f5664acf20a10bc641c7448..f290f78c3f3012976a15d507cd26f195230a7fd0 100644 (file)
@@ -132,8 +132,9 @@ static int __init load_moklist_certs(void)
 static int __init load_uefi_certs(void)
 {
        efi_guid_t secure_var = EFI_IMAGE_SECURITY_DATABASE_GUID;
-       void *db = NULL, *dbx = NULL;
-       unsigned long dbsize = 0, dbxsize = 0;
+       efi_guid_t mok_var = EFI_SHIM_LOCK_GUID;
+       void *db = NULL, *dbx = NULL, *mokx = NULL;
+       unsigned long dbsize = 0, dbxsize = 0, mokxsize = 0;
        efi_status_t status;
        int rc = 0;
 
@@ -175,6 +176,21 @@ static int __init load_uefi_certs(void)
                kfree(dbx);
        }
 
+       mokx = get_cert_list(L"MokListXRT", &mok_var, &mokxsize, &status);
+       if (!mokx) {
+               if (status == EFI_NOT_FOUND)
+                       pr_debug("mokx variable wasn't found\n");
+               else
+                       pr_info("Couldn't get mokx list\n");
+       } else {
+               rc = parse_efi_signature_list("UEFI:MokListXRT",
+                                             mokx, mokxsize,
+                                             get_handler_for_dbx);
+               if (rc)
+                       pr_err("Couldn't parse mokx signatures %d\n", rc);
+               kfree(mokx);
+       }
+
        /* Load the MokListRT certs */
        rc = load_moklist_certs();
 
index c161642a8484172a9dfb7a2fa9500da523fecfb3..64b81abd087e3d531bd18a82002777caf4d3494b 100644 (file)
@@ -75,6 +75,9 @@ config TRUSTED_KEYS
        select CRYPTO_HMAC
        select CRYPTO_SHA1
        select CRYPTO_HASH_INFO
+       select ASN1_ENCODER
+       select OID_REGISTRY
+       select ASN1
        help
          This option provides support for creating, sealing, and unsealing
          keys in the kernel. Trusted keys are random number symmetric keys,
index 7b73cebbb378a2f2eadffa083ec9d28b44295ee2..feb8b6c3cc79cb6c2235ff90523b1bf5303d09c1 100644 (file)
@@ -4,5 +4,11 @@
 #
 
 obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+trusted-y += trusted_core.o
 trusted-y += trusted_tpm1.o
+
+$(obj)/trusted_tpm2.o: $(obj)/tpm2key.asn1.h
 trusted-y += trusted_tpm2.o
+trusted-y += tpm2key.asn1.o
+
+trusted-$(CONFIG_TEE) += trusted_tee.o
diff --git a/security/keys/trusted-keys/tpm2key.asn1 b/security/keys/trusted-keys/tpm2key.asn1
new file mode 100644 (file)
index 0000000..f57f869
--- /dev/null
@@ -0,0 +1,11 @@
+---
+--- ASN.1 for TPM 2.0 keys
+---
+
+TPMKey ::= SEQUENCE {
+       type            OBJECT IDENTIFIER ({tpm2_key_type}),
+       emptyAuth       [0] EXPLICIT BOOLEAN OPTIONAL,
+       parent          INTEGER ({tpm2_key_parent}),
+       pubkey          OCTET STRING ({tpm2_key_pub}),
+       privkey         OCTET STRING ({tpm2_key_priv})
+       }
diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c
new file mode 100644 (file)
index 0000000..d5c891d
--- /dev/null
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (c) 2019-2021, Linaro Limited
+ *
+ * See Documentation/security/keys/trusted-encrypted.rst
+ */
+
+#include <keys/user-type.h>
+#include <keys/trusted-type.h>
+#include <keys/trusted_tee.h>
+#include <keys/trusted_tpm.h>
+#include <linux/capability.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/key-type.h>
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <linux/static_call.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+static char *trusted_key_source;
+module_param_named(source, trusted_key_source, charp, 0);
+MODULE_PARM_DESC(source, "Select trusted keys source (tpm or tee)");
+
+static const struct trusted_key_source trusted_key_sources[] = {
+#if defined(CONFIG_TCG_TPM)
+       { "tpm", &trusted_key_tpm_ops },
+#endif
+#if defined(CONFIG_TEE)
+       { "tee", &trusted_key_tee_ops },
+#endif
+};
+
+DEFINE_STATIC_CALL_NULL(trusted_key_init, *trusted_key_sources[0].ops->init);
+DEFINE_STATIC_CALL_NULL(trusted_key_seal, *trusted_key_sources[0].ops->seal);
+DEFINE_STATIC_CALL_NULL(trusted_key_unseal,
+                       *trusted_key_sources[0].ops->unseal);
+DEFINE_STATIC_CALL_NULL(trusted_key_get_random,
+                       *trusted_key_sources[0].ops->get_random);
+DEFINE_STATIC_CALL_NULL(trusted_key_exit, *trusted_key_sources[0].ops->exit);
+static unsigned char migratable;
+
+enum {
+       Opt_err,
+       Opt_new, Opt_load, Opt_update,
+};
+
+static const match_table_t key_tokens = {
+       {Opt_new, "new"},
+       {Opt_load, "load"},
+       {Opt_update, "update"},
+       {Opt_err, NULL}
+};
+
+/*
+ * datablob_parse - parse the keyctl data and fill in the
+ *                  payload structure
+ *
+ * On success returns 0, otherwise -EINVAL.
+ */
+static int datablob_parse(char **datablob, struct trusted_key_payload *p)
+{
+       substring_t args[MAX_OPT_ARGS];
+       long keylen;
+       int ret = -EINVAL;
+       int key_cmd;
+       char *c;
+
+       /* main command */
+       c = strsep(datablob, " \t");
+       if (!c)
+               return -EINVAL;
+       key_cmd = match_token(c, key_tokens, args);
+       switch (key_cmd) {
+       case Opt_new:
+               /* first argument is key size */
+               c = strsep(datablob, " \t");
+               if (!c)
+                       return -EINVAL;
+               ret = kstrtol(c, 10, &keylen);
+               if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE)
+                       return -EINVAL;
+               p->key_len = keylen;
+               ret = Opt_new;
+               break;
+       case Opt_load:
+               /* first argument is sealed blob */
+               c = strsep(datablob, " \t");
+               if (!c)
+                       return -EINVAL;
+               p->blob_len = strlen(c) / 2;
+               if (p->blob_len > MAX_BLOB_SIZE)
+                       return -EINVAL;
+               ret = hex2bin(p->blob, c, p->blob_len);
+               if (ret < 0)
+                       return -EINVAL;
+               ret = Opt_load;
+               break;
+       case Opt_update:
+               ret = Opt_update;
+               break;
+       case Opt_err:
+               return -EINVAL;
+       }
+       return ret;
+}
+
+static struct trusted_key_payload *trusted_payload_alloc(struct key *key)
+{
+       struct trusted_key_payload *p = NULL;
+       int ret;
+
+       ret = key_payload_reserve(key, sizeof(*p));
+       if (ret < 0)
+               goto err;
+       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       if (!p)
+               goto err;
+
+       p->migratable = migratable;
+err:
+       return p;
+}
+
+/*
+ * trusted_instantiate - create a new trusted key
+ *
+ * Unseal an existing trusted blob or, for a new key, get a
+ * random key, then seal and create a trusted key-type key,
+ * adding it to the specified keyring.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int trusted_instantiate(struct key *key,
+                              struct key_preparsed_payload *prep)
+{
+       struct trusted_key_payload *payload = NULL;
+       size_t datalen = prep->datalen;
+       char *datablob, *orig_datablob;
+       int ret = 0;
+       int key_cmd;
+       size_t key_len;
+
+       if (datalen <= 0 || datalen > 32767 || !prep->data)
+               return -EINVAL;
+
+       orig_datablob = datablob = kmalloc(datalen + 1, GFP_KERNEL);
+       if (!datablob)
+               return -ENOMEM;
+       memcpy(datablob, prep->data, datalen);
+       datablob[datalen] = '\0';
+
+       payload = trusted_payload_alloc(key);
+       if (!payload) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       key_cmd = datablob_parse(&datablob, payload);
+       if (key_cmd < 0) {
+               ret = key_cmd;
+               goto out;
+       }
+
+       dump_payload(payload);
+
+       switch (key_cmd) {
+       case Opt_load:
+               ret = static_call(trusted_key_unseal)(payload, datablob);
+               dump_payload(payload);
+               if (ret < 0)
+                       pr_info("key_unseal failed (%d)\n", ret);
+               break;
+       case Opt_new:
+               key_len = payload->key_len;
+               ret = static_call(trusted_key_get_random)(payload->key,
+                                                         key_len);
+               if (ret < 0)
+                       goto out;
+
+               if (ret != key_len) {
+                       pr_info("key_create failed (%d)\n", ret);
+                       ret = -EIO;
+                       goto out;
+               }
+
+               ret = static_call(trusted_key_seal)(payload, datablob);
+               if (ret < 0)
+                       pr_info("key_seal failed (%d)\n", ret);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+out:
+       kfree_sensitive(orig_datablob);
+       if (!ret)
+               rcu_assign_keypointer(key, payload);
+       else
+               kfree_sensitive(payload);
+       return ret;
+}
+
+static void trusted_rcu_free(struct rcu_head *rcu)
+{
+       struct trusted_key_payload *p;
+
+       p = container_of(rcu, struct trusted_key_payload, rcu);
+       kfree_sensitive(p);
+}
+
+/*
+ * trusted_update - reseal an existing key with new PCR values
+ */
+static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
+{
+       struct trusted_key_payload *p;
+       struct trusted_key_payload *new_p;
+       size_t datalen = prep->datalen;
+       char *datablob, *orig_datablob;
+       int ret = 0;
+
+       if (key_is_negative(key))
+               return -ENOKEY;
+       p = key->payload.data[0];
+       if (!p->migratable)
+               return -EPERM;
+       if (datalen <= 0 || datalen > 32767 || !prep->data)
+               return -EINVAL;
+
+       orig_datablob = datablob = kmalloc(datalen + 1, GFP_KERNEL);
+       if (!datablob)
+               return -ENOMEM;
+
+       new_p = trusted_payload_alloc(key);
+       if (!new_p) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       memcpy(datablob, prep->data, datalen);
+       datablob[datalen] = '\0';
+       ret = datablob_parse(&datablob, new_p);
+       if (ret != Opt_update) {
+               ret = -EINVAL;
+               kfree_sensitive(new_p);
+               goto out;
+       }
+
+       /* copy old key values, and reseal with new pcrs */
+       new_p->migratable = p->migratable;
+       new_p->key_len = p->key_len;
+       memcpy(new_p->key, p->key, p->key_len);
+       dump_payload(p);
+       dump_payload(new_p);
+
+       ret = static_call(trusted_key_seal)(new_p, datablob);
+       if (ret < 0) {
+               pr_info("key_seal failed (%d)\n", ret);
+               kfree_sensitive(new_p);
+               goto out;
+       }
+
+       rcu_assign_keypointer(key, new_p);
+       call_rcu(&p->rcu, trusted_rcu_free);
+out:
+       kfree_sensitive(orig_datablob);
+       return ret;
+}
+
+/*
+ * trusted_read - copy the sealed blob data to userspace in hex.
+ * On success, return to userspace the trusted key datablob size.
+ */
+static long trusted_read(const struct key *key, char *buffer,
+                        size_t buflen)
+{
+       const struct trusted_key_payload *p;
+       char *bufp;
+       int i;
+
+       p = dereference_key_locked(key);
+       if (!p)
+               return -EINVAL;
+
+       if (buffer && buflen >= 2 * p->blob_len) {
+               bufp = buffer;
+               for (i = 0; i < p->blob_len; i++)
+                       bufp = hex_byte_pack(bufp, p->blob[i]);
+       }
+       return 2 * p->blob_len;
+}
+
+/*
+ * trusted_destroy - clear and free the key's payload
+ */
+static void trusted_destroy(struct key *key)
+{
+       kfree_sensitive(key->payload.data[0]);
+}
+
+struct key_type key_type_trusted = {
+       .name = "trusted",
+       .instantiate = trusted_instantiate,
+       .update = trusted_update,
+       .destroy = trusted_destroy,
+       .describe = user_describe,
+       .read = trusted_read,
+};
+EXPORT_SYMBOL_GPL(key_type_trusted);
+
+static int __init init_trusted(void)
+{
+       int i, ret = 0;
+
+       for (i = 0; i < ARRAY_SIZE(trusted_key_sources); i++) {
+               if (trusted_key_source &&
+                   strncmp(trusted_key_source, trusted_key_sources[i].name,
+                           strlen(trusted_key_sources[i].name)))
+                       continue;
+
+               static_call_update(trusted_key_init,
+                                  trusted_key_sources[i].ops->init);
+               static_call_update(trusted_key_seal,
+                                  trusted_key_sources[i].ops->seal);
+               static_call_update(trusted_key_unseal,
+                                  trusted_key_sources[i].ops->unseal);
+               static_call_update(trusted_key_get_random,
+                                  trusted_key_sources[i].ops->get_random);
+               static_call_update(trusted_key_exit,
+                                  trusted_key_sources[i].ops->exit);
+               migratable = trusted_key_sources[i].ops->migratable;
+
+               ret = static_call(trusted_key_init)();
+               if (!ret)
+                       break;
+       }
+
+       /*
+        * encrypted_keys.ko depends on successful load of this module even if
+        * trusted key implementation is not found.
+        */
+       if (ret == -ENODEV)
+               return 0;
+
+       return ret;
+}
+
+static void __exit cleanup_trusted(void)
+{
+       static_call(trusted_key_exit)();
+}
+
+late_initcall(init_trusted);
+module_exit(cleanup_trusted);
+
+MODULE_LICENSE("GPL");
diff --git a/security/keys/trusted-keys/trusted_tee.c b/security/keys/trusted-keys/trusted_tee.c
new file mode 100644 (file)
index 0000000..2ce66c1
--- /dev/null
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ *
+ * Author:
+ * Sumit Garg <sumit.garg@linaro.org>
+ */
+
+#include <linux/err.h>
+#include <linux/key-type.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+
+#include <keys/trusted_tee.h>
+
+#define DRIVER_NAME "trusted-key-tee"
+
+/*
+ * Get random data for symmetric key
+ *
+ * [out]     memref[0]        Random data
+ */
+#define TA_CMD_GET_RANDOM      0x0
+
+/*
+ * Seal trusted key using hardware unique key
+ *
+ * [in]      memref[0]        Plain key
+ * [out]     memref[1]        Sealed key datablob
+ */
+#define TA_CMD_SEAL            0x1
+
+/*
+ * Unseal trusted key using hardware unique key
+ *
+ * [in]      memref[0]        Sealed key datablob
+ * [out]     memref[1]        Plain key
+ */
+#define TA_CMD_UNSEAL          0x2
+
+/**
+ * struct trusted_key_tee_private - TEE Trusted key private data
+ * @dev:               TEE based Trusted key device.
+ * @ctx:               TEE context handler.
+ * @session_id:                Trusted key TA session identifier.
+ * @shm_pool:          Memory pool shared with TEE device.
+ */
+struct trusted_key_tee_private {
+       struct device *dev;
+       struct tee_context *ctx;
+       u32 session_id;
+       struct tee_shm *shm_pool;
+};
+
+static struct trusted_key_tee_private pvt_data;
+
+/*
+ * Have the TEE seal(encrypt) the symmetric key
+ */
+static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+{
+       int ret;
+       struct tee_ioctl_invoke_arg inv_arg;
+       struct tee_param param[4];
+       struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
+
+       memset(&inv_arg, 0, sizeof(inv_arg));
+       memset(&param, 0, sizeof(param));
+
+       reg_shm_in = tee_shm_register(pvt_data.ctx, (unsigned long)p->key,
+                                     p->key_len, TEE_SHM_DMA_BUF |
+                                     TEE_SHM_KERNEL_MAPPED);
+       if (IS_ERR(reg_shm_in)) {
+               dev_err(pvt_data.dev, "key shm register failed\n");
+               return PTR_ERR(reg_shm_in);
+       }
+
+       reg_shm_out = tee_shm_register(pvt_data.ctx, (unsigned long)p->blob,
+                                      sizeof(p->blob), TEE_SHM_DMA_BUF |
+                                      TEE_SHM_KERNEL_MAPPED);
+       if (IS_ERR(reg_shm_out)) {
+               dev_err(pvt_data.dev, "blob shm register failed\n");
+               ret = PTR_ERR(reg_shm_out);
+               goto out;
+       }
+
+       inv_arg.func = TA_CMD_SEAL;
+       inv_arg.session = pvt_data.session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+       param[0].u.memref.shm = reg_shm_in;
+       param[0].u.memref.size = p->key_len;
+       param[0].u.memref.shm_offs = 0;
+       param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+       param[1].u.memref.shm = reg_shm_out;
+       param[1].u.memref.size = sizeof(p->blob);
+       param[1].u.memref.shm_offs = 0;
+
+       ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+       if ((ret < 0) || (inv_arg.ret != 0)) {
+               dev_err(pvt_data.dev, "TA_CMD_SEAL invoke err: %x\n",
+                       inv_arg.ret);
+               ret = -EFAULT;
+       } else {
+               p->blob_len = param[1].u.memref.size;
+       }
+
+out:
+       if (reg_shm_out)
+               tee_shm_free(reg_shm_out);
+       if (reg_shm_in)
+               tee_shm_free(reg_shm_in);
+
+       return ret;
+}
+
+/*
+ * Have the TEE unseal(decrypt) the symmetric key
+ */
+static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+{
+       int ret;
+       struct tee_ioctl_invoke_arg inv_arg;
+       struct tee_param param[4];
+       struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
+
+       memset(&inv_arg, 0, sizeof(inv_arg));
+       memset(&param, 0, sizeof(param));
+
+       reg_shm_in = tee_shm_register(pvt_data.ctx, (unsigned long)p->blob,
+                                     p->blob_len, TEE_SHM_DMA_BUF |
+                                     TEE_SHM_KERNEL_MAPPED);
+       if (IS_ERR(reg_shm_in)) {
+               dev_err(pvt_data.dev, "blob shm register failed\n");
+               return PTR_ERR(reg_shm_in);
+       }
+
+       reg_shm_out = tee_shm_register(pvt_data.ctx, (unsigned long)p->key,
+                                      sizeof(p->key), TEE_SHM_DMA_BUF |
+                                      TEE_SHM_KERNEL_MAPPED);
+       if (IS_ERR(reg_shm_out)) {
+               dev_err(pvt_data.dev, "key shm register failed\n");
+               ret = PTR_ERR(reg_shm_out);
+               goto out;
+       }
+
+       inv_arg.func = TA_CMD_UNSEAL;
+       inv_arg.session = pvt_data.session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+       param[0].u.memref.shm = reg_shm_in;
+       param[0].u.memref.size = p->blob_len;
+       param[0].u.memref.shm_offs = 0;
+       param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+       param[1].u.memref.shm = reg_shm_out;
+       param[1].u.memref.size = sizeof(p->key);
+       param[1].u.memref.shm_offs = 0;
+
+       ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+       if ((ret < 0) || (inv_arg.ret != 0)) {
+               dev_err(pvt_data.dev, "TA_CMD_UNSEAL invoke err: %x\n",
+                       inv_arg.ret);
+               ret = -EFAULT;
+       } else {
+               p->key_len = param[1].u.memref.size;
+       }
+
+out:
+       if (reg_shm_out)
+               tee_shm_free(reg_shm_out);
+       if (reg_shm_in)
+               tee_shm_free(reg_shm_in);
+
+       return ret;
+}
+
+/*
+ * Have the TEE generate random symmetric key
+ */
+static int trusted_tee_get_random(unsigned char *key, size_t key_len)
+{
+       int ret;
+       struct tee_ioctl_invoke_arg inv_arg;
+       struct tee_param param[4];
+       struct tee_shm *reg_shm = NULL;
+
+       memset(&inv_arg, 0, sizeof(inv_arg));
+       memset(&param, 0, sizeof(param));
+
+       reg_shm = tee_shm_register(pvt_data.ctx, (unsigned long)key, key_len,
+                                  TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
+       if (IS_ERR(reg_shm)) {
+               dev_err(pvt_data.dev, "key shm register failed\n");
+               return PTR_ERR(reg_shm);
+       }
+
+       inv_arg.func = TA_CMD_GET_RANDOM;
+       inv_arg.session = pvt_data.session_id;
+       inv_arg.num_params = 4;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+       param[0].u.memref.shm = reg_shm;
+       param[0].u.memref.size = key_len;
+       param[0].u.memref.shm_offs = 0;
+
+       ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+       if ((ret < 0) || (inv_arg.ret != 0)) {
+               dev_err(pvt_data.dev, "TA_CMD_GET_RANDOM invoke err: %x\n",
+                       inv_arg.ret);
+               ret = -EFAULT;
+       } else {
+               ret = param[0].u.memref.size;
+       }
+
+       tee_shm_free(reg_shm);
+
+       return ret;
+}
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+       if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+               return 1;
+       else
+               return 0;
+}
+
+static int trusted_key_probe(struct device *dev)
+{
+       struct tee_client_device *rng_device = to_tee_client_device(dev);
+       int ret;
+       struct tee_ioctl_open_session_arg sess_arg;
+
+       memset(&sess_arg, 0, sizeof(sess_arg));
+
+       pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
+                                              NULL);
+       if (IS_ERR(pvt_data.ctx))
+               return -ENODEV;
+
+       memcpy(sess_arg.uuid, rng_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+       sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+       sess_arg.num_params = 0;
+
+       ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+       if ((ret < 0) || (sess_arg.ret != 0)) {
+               dev_err(dev, "tee_client_open_session failed, err: %x\n",
+                       sess_arg.ret);
+               ret = -EINVAL;
+               goto out_ctx;
+       }
+       pvt_data.session_id = sess_arg.session;
+
+       ret = register_key_type(&key_type_trusted);
+       if (ret < 0)
+               goto out_sess;
+
+       pvt_data.dev = dev;
+
+       return 0;
+
+out_sess:
+       tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+out_ctx:
+       tee_client_close_context(pvt_data.ctx);
+
+       return ret;
+}
+
+static int trusted_key_remove(struct device *dev)
+{
+       unregister_key_type(&key_type_trusted);
+       tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+       tee_client_close_context(pvt_data.ctx);
+
+       return 0;
+}
+
+static const struct tee_client_device_id trusted_key_id_table[] = {
+       {UUID_INIT(0xf04a0fe7, 0x1f5d, 0x4b9b,
+                  0xab, 0xf7, 0x61, 0x9b, 0x85, 0xb4, 0xce, 0x8c)},
+       {}
+};
+MODULE_DEVICE_TABLE(tee, trusted_key_id_table);
+
+static struct tee_client_driver trusted_key_driver = {
+       .id_table       = trusted_key_id_table,
+       .driver         = {
+               .name           = DRIVER_NAME,
+               .bus            = &tee_bus_type,
+               .probe          = trusted_key_probe,
+               .remove         = trusted_key_remove,
+       },
+};
+
+static int trusted_tee_init(void)
+{
+       return driver_register(&trusted_key_driver.driver);
+}
+
+static void trusted_tee_exit(void)
+{
+       driver_unregister(&trusted_key_driver.driver);
+}
+
+struct trusted_key_ops trusted_key_tee_ops = {
+       .migratable = 0, /* non-migratable */
+       .init = trusted_tee_init,
+       .seal = trusted_tee_seal,
+       .unseal = trusted_tee_unseal,
+       .get_random = trusted_tee_get_random,
+       .exit = trusted_tee_exit,
+};
index 493eb91ed017ff9da39c9fdd47e2a9014ee078c7..4693945508019c277b6a9cf2628b7bdeff25581f 100644 (file)
@@ -1,29 +1,22 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2010 IBM Corporation
- *
- * Author:
- * David Safford <safford@us.ibm.com>
+ * Copyright (c) 2019-2021, Linaro Limited
  *
  * See Documentation/security/keys/trusted-encrypted.rst
  */
 
 #include <crypto/hash_info.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/parser.h>
 #include <linux/string.h>
 #include <linux/err.h>
-#include <keys/user-type.h>
 #include <keys/trusted-type.h>
 #include <linux/key-type.h>
-#include <linux/rcupdate.h>
 #include <linux/crypto.h>
 #include <crypto/hash.h>
 #include <crypto/sha1.h>
-#include <linux/capability.h>
 #include <linux/tpm.h>
 #include <linux/tpm_command.h>
 
@@ -63,7 +56,7 @@ static int TSS_sha1(const unsigned char *data, unsigned int datalen,
 
        sdesc = init_sdesc(hashalg);
        if (IS_ERR(sdesc)) {
-               pr_info("trusted_key: can't alloc %s\n", hash_alg);
+               pr_info("can't alloc %s\n", hash_alg);
                return PTR_ERR(sdesc);
        }
 
@@ -83,7 +76,7 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
 
        sdesc = init_sdesc(hmacalg);
        if (IS_ERR(sdesc)) {
-               pr_info("trusted_key: can't alloc %s\n", hmac_alg);
+               pr_info("can't alloc %s\n", hmac_alg);
                return PTR_ERR(sdesc);
        }
 
@@ -136,7 +129,7 @@ int TSS_authhmac(unsigned char *digest, const unsigned char *key,
 
        sdesc = init_sdesc(hashalg);
        if (IS_ERR(sdesc)) {
-               pr_info("trusted_key: can't alloc %s\n", hash_alg);
+               pr_info("can't alloc %s\n", hash_alg);
                return PTR_ERR(sdesc);
        }
 
@@ -212,7 +205,7 @@ int TSS_checkhmac1(unsigned char *buffer,
 
        sdesc = init_sdesc(hashalg);
        if (IS_ERR(sdesc)) {
-               pr_info("trusted_key: can't alloc %s\n", hash_alg);
+               pr_info("can't alloc %s\n", hash_alg);
                return PTR_ERR(sdesc);
        }
        ret = crypto_shash_init(&sdesc->shash);
@@ -305,7 +298,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
 
        sdesc = init_sdesc(hashalg);
        if (IS_ERR(sdesc)) {
-               pr_info("trusted_key: can't alloc %s\n", hash_alg);
+               pr_info("can't alloc %s\n", hash_alg);
                return PTR_ERR(sdesc);
        }
        ret = crypto_shash_init(&sdesc->shash);
@@ -597,12 +590,12 @@ static int tpm_unseal(struct tpm_buf *tb,
        /* sessions for unsealing key and data */
        ret = oiap(tb, &authhandle1, enonce1);
        if (ret < 0) {
-               pr_info("trusted_key: oiap failed (%d)\n", ret);
+               pr_info("oiap failed (%d)\n", ret);
                return ret;
        }
        ret = oiap(tb, &authhandle2, enonce2);
        if (ret < 0) {
-               pr_info("trusted_key: oiap failed (%d)\n", ret);
+               pr_info("oiap failed (%d)\n", ret);
                return ret;
        }
 
@@ -612,7 +605,7 @@ static int tpm_unseal(struct tpm_buf *tb,
                return ret;
 
        if (ret != TPM_NONCE_SIZE) {
-               pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
+               pr_info("tpm_get_random failed (%d)\n", ret);
                return -EIO;
        }
        ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE,
@@ -641,7 +634,7 @@ static int tpm_unseal(struct tpm_buf *tb,
 
        ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
        if (ret < 0) {
-               pr_info("trusted_key: authhmac failed (%d)\n", ret);
+               pr_info("authhmac failed (%d)\n", ret);
                return ret;
        }
 
@@ -653,7 +646,7 @@ static int tpm_unseal(struct tpm_buf *tb,
                             *datalen, TPM_DATA_OFFSET + sizeof(uint32_t), 0,
                             0);
        if (ret < 0) {
-               pr_info("trusted_key: TSS_checkhmac2 failed (%d)\n", ret);
+               pr_info("TSS_checkhmac2 failed (%d)\n", ret);
                return ret;
        }
        memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen);
@@ -680,7 +673,7 @@ static int key_seal(struct trusted_key_payload *p,
                       p->key, p->key_len + 1, p->blob, &p->blob_len,
                       o->blobauth, o->pcrinfo, o->pcrinfo_len);
        if (ret < 0)
-               pr_info("trusted_key: srkseal failed (%d)\n", ret);
+               pr_info("srkseal failed (%d)\n", ret);
 
        tpm_buf_destroy(&tb);
        return ret;
@@ -702,7 +695,7 @@ static int key_unseal(struct trusted_key_payload *p,
        ret = tpm_unseal(&tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
                         o->blobauth, p->key, &p->key_len);
        if (ret < 0)
-               pr_info("trusted_key: srkunseal failed (%d)\n", ret);
+               pr_info("srkunseal failed (%d)\n", ret);
        else
                /* pull migratable flag out of sealed key */
                p->migratable = p->key[--p->key_len];
@@ -713,7 +706,6 @@ static int key_unseal(struct trusted_key_payload *p,
 
 enum {
        Opt_err,
-       Opt_new, Opt_load, Opt_update,
        Opt_keyhandle, Opt_keyauth, Opt_blobauth,
        Opt_pcrinfo, Opt_pcrlock, Opt_migratable,
        Opt_hash,
@@ -722,9 +714,6 @@ enum {
 };
 
 static const match_table_t key_tokens = {
-       {Opt_new, "new"},
-       {Opt_load, "load"},
-       {Opt_update, "update"},
        {Opt_keyhandle, "keyhandle=%s"},
        {Opt_keyauth, "keyauth=%s"},
        {Opt_blobauth, "blobauth=%s"},
@@ -758,6 +747,9 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
 
        opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1;
 
+       if (!c)
+               return 0;
+
        while ((p = strsep(&c, " \t"))) {
                if (*p == '\0' || *p == ' ' || *p == '\t')
                        continue;
@@ -791,13 +783,33 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
                                return -EINVAL;
                        break;
                case Opt_blobauth:
-                       if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
-                               return -EINVAL;
-                       res = hex2bin(opt->blobauth, args[0].from,
-                                     SHA1_DIGEST_SIZE);
-                       if (res < 0)
-                               return -EINVAL;
+                       /*
+                        * TPM 1.2 authorizations are sha1 hashes passed in as
+                        * hex strings.  TPM 2.0 authorizations are simple
+                        * passwords (although it can take a hash as well)
+                        */
+                       opt->blobauth_len = strlen(args[0].from);
+
+                       if (opt->blobauth_len == 2 * TPM_DIGEST_SIZE) {
+                               res = hex2bin(opt->blobauth, args[0].from,
+                                             TPM_DIGEST_SIZE);
+                               if (res < 0)
+                                       return -EINVAL;
+
+                               opt->blobauth_len = TPM_DIGEST_SIZE;
+                               break;
+                       }
+
+                       if (tpm2 && opt->blobauth_len <= sizeof(opt->blobauth)) {
+                               memcpy(opt->blobauth, args[0].from,
+                                      opt->blobauth_len);
+                               break;
+                       }
+
+                       return -EINVAL;
+
                        break;
+
                case Opt_migratable:
                        if (*args[0].from == '0')
                                pay->migratable = 0;
@@ -822,7 +834,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
                        if (i == HASH_ALGO__LAST)
                                return -EINVAL;
                        if  (!tpm2 && i != HASH_ALGO_SHA1) {
-                               pr_info("trusted_key: TPM 1.x only supports SHA-1.\n");
+                               pr_info("TPM 1.x only supports SHA-1.\n");
                                return -EINVAL;
                        }
                        break;
@@ -851,71 +863,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
        return 0;
 }
 
-/*
- * datablob_parse - parse the keyctl data and fill in the
- *                 payload and options structures
- *
- * On success returns 0, otherwise -EINVAL.
- */
-static int datablob_parse(char *datablob, struct trusted_key_payload *p,
-                         struct trusted_key_options *o)
-{
-       substring_t args[MAX_OPT_ARGS];
-       long keylen;
-       int ret = -EINVAL;
-       int key_cmd;
-       char *c;
-
-       /* main command */
-       c = strsep(&datablob, " \t");
-       if (!c)
-               return -EINVAL;
-       key_cmd = match_token(c, key_tokens, args);
-       switch (key_cmd) {
-       case Opt_new:
-               /* first argument is key size */
-               c = strsep(&datablob, " \t");
-               if (!c)
-                       return -EINVAL;
-               ret = kstrtol(c, 10, &keylen);
-               if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE)
-                       return -EINVAL;
-               p->key_len = keylen;
-               ret = getoptions(datablob, p, o);
-               if (ret < 0)
-                       return ret;
-               ret = Opt_new;
-               break;
-       case Opt_load:
-               /* first argument is sealed blob */
-               c = strsep(&datablob, " \t");
-               if (!c)
-                       return -EINVAL;
-               p->blob_len = strlen(c) / 2;
-               if (p->blob_len > MAX_BLOB_SIZE)
-                       return -EINVAL;
-               ret = hex2bin(p->blob, c, p->blob_len);
-               if (ret < 0)
-                       return -EINVAL;
-               ret = getoptions(datablob, p, o);
-               if (ret < 0)
-                       return ret;
-               ret = Opt_load;
-               break;
-       case Opt_update:
-               /* all arguments are options */
-               ret = getoptions(datablob, p, o);
-               if (ret < 0)
-                       return ret;
-               ret = Opt_update;
-               break;
-       case Opt_err:
-               return -EINVAL;
-               break;
-       }
-       return ret;
-}
-
 static struct trusted_key_options *trusted_options_alloc(void)
 {
        struct trusted_key_options *options;
@@ -936,252 +883,99 @@ static struct trusted_key_options *trusted_options_alloc(void)
        return options;
 }
 
-static struct trusted_key_payload *trusted_payload_alloc(struct key *key)
+static int trusted_tpm_seal(struct trusted_key_payload *p, char *datablob)
 {
-       struct trusted_key_payload *p = NULL;
-       int ret;
-
-       ret = key_payload_reserve(key, sizeof *p);
-       if (ret < 0)
-               return p;
-       p = kzalloc(sizeof *p, GFP_KERNEL);
-       if (p)
-               p->migratable = 1; /* migratable by default */
-       return p;
-}
-
-/*
- * trusted_instantiate - create a new trusted key
- *
- * Unseal an existing trusted blob or, for a new key, get a
- * random key, then seal and create a trusted key-type key,
- * adding it to the specified keyring.
- *
- * On success, return 0. Otherwise return errno.
- */
-static int trusted_instantiate(struct key *key,
-                              struct key_preparsed_payload *prep)
-{
-       struct trusted_key_payload *payload = NULL;
        struct trusted_key_options *options = NULL;
-       size_t datalen = prep->datalen;
-       char *datablob;
        int ret = 0;
-       int key_cmd;
-       size_t key_len;
        int tpm2;
 
        tpm2 = tpm_is_tpm2(chip);
        if (tpm2 < 0)
                return tpm2;
 
-       if (datalen <= 0 || datalen > 32767 || !prep->data)
-               return -EINVAL;
-
-       datablob = kmalloc(datalen + 1, GFP_KERNEL);
-       if (!datablob)
+       options = trusted_options_alloc();
+       if (!options)
                return -ENOMEM;
-       memcpy(datablob, prep->data, datalen);
-       datablob[datalen] = '\0';
 
-       options = trusted_options_alloc();
-       if (!options) {
-               ret = -ENOMEM;
-               goto out;
-       }
-       payload = trusted_payload_alloc(key);
-       if (!payload) {
-               ret = -ENOMEM;
+       ret = getoptions(datablob, p, options);
+       if (ret < 0)
                goto out;
-       }
+       dump_options(options);
 
-       key_cmd = datablob_parse(datablob, payload, options);
-       if (key_cmd < 0) {
-               ret = key_cmd;
+       if (!options->keyhandle && !tpm2) {
+               ret = -EINVAL;
                goto out;
        }
 
-       if (!options->keyhandle) {
-               ret = -EINVAL;
+       if (tpm2)
+               ret = tpm2_seal_trusted(chip, p, options);
+       else
+               ret = key_seal(p, options);
+       if (ret < 0) {
+               pr_info("key_seal failed (%d)\n", ret);
                goto out;
        }
 
-       dump_payload(payload);
-       dump_options(options);
-
-       switch (key_cmd) {
-       case Opt_load:
-               if (tpm2)
-                       ret = tpm2_unseal_trusted(chip, payload, options);
-               else
-                       ret = key_unseal(payload, options);
-               dump_payload(payload);
-               dump_options(options);
-               if (ret < 0)
-                       pr_info("trusted_key: key_unseal failed (%d)\n", ret);
-               break;
-       case Opt_new:
-               key_len = payload->key_len;
-               ret = tpm_get_random(chip, payload->key, key_len);
-               if (ret < 0)
-                       goto out;
-
-               if (ret != key_len) {
-                       pr_info("trusted_key: key_create failed (%d)\n", ret);
-                       ret = -EIO;
+       if (options->pcrlock) {
+               ret = pcrlock(options->pcrlock);
+               if (ret < 0) {
+                       pr_info("pcrlock failed (%d)\n", ret);
                        goto out;
                }
-               if (tpm2)
-                       ret = tpm2_seal_trusted(chip, payload, options);
-               else
-                       ret = key_seal(payload, options);
-               if (ret < 0)
-                       pr_info("trusted_key: key_seal failed (%d)\n", ret);
-               break;
-       default:
-               ret = -EINVAL;
-               goto out;
        }
-       if (!ret && options->pcrlock)
-               ret = pcrlock(options->pcrlock);
 out:
-       kfree_sensitive(datablob);
        kfree_sensitive(options);
-       if (!ret)
-               rcu_assign_keypointer(key, payload);
-       else
-               kfree_sensitive(payload);
        return ret;
 }
 
-static void trusted_rcu_free(struct rcu_head *rcu)
+static int trusted_tpm_unseal(struct trusted_key_payload *p, char *datablob)
 {
-       struct trusted_key_payload *p;
-
-       p = container_of(rcu, struct trusted_key_payload, rcu);
-       kfree_sensitive(p);
-}
-
-/*
- * trusted_update - reseal an existing key with new PCR values
- */
-static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
-{
-       struct trusted_key_payload *p;
-       struct trusted_key_payload *new_p;
-       struct trusted_key_options *new_o;
-       size_t datalen = prep->datalen;
-       char *datablob;
+       struct trusted_key_options *options = NULL;
        int ret = 0;
+       int tpm2;
 
-       if (key_is_negative(key))
-               return -ENOKEY;
-       p = key->payload.data[0];
-       if (!p->migratable)
-               return -EPERM;
-       if (datalen <= 0 || datalen > 32767 || !prep->data)
-               return -EINVAL;
+       tpm2 = tpm_is_tpm2(chip);
+       if (tpm2 < 0)
+               return tpm2;
 
-       datablob = kmalloc(datalen + 1, GFP_KERNEL);
-       if (!datablob)
+       options = trusted_options_alloc();
+       if (!options)
                return -ENOMEM;
-       new_o = trusted_options_alloc();
-       if (!new_o) {
-               ret = -ENOMEM;
-               goto out;
-       }
-       new_p = trusted_payload_alloc(key);
-       if (!new_p) {
-               ret = -ENOMEM;
-               goto out;
-       }
 
-       memcpy(datablob, prep->data, datalen);
-       datablob[datalen] = '\0';
-       ret = datablob_parse(datablob, new_p, new_o);
-       if (ret != Opt_update) {
-               ret = -EINVAL;
-               kfree_sensitive(new_p);
+       ret = getoptions(datablob, p, options);
+       if (ret < 0)
                goto out;
-       }
+       dump_options(options);
 
-       if (!new_o->keyhandle) {
+       if (!options->keyhandle && !tpm2) {
                ret = -EINVAL;
-               kfree_sensitive(new_p);
                goto out;
        }
 
-       /* copy old key values, and reseal with new pcrs */
-       new_p->migratable = p->migratable;
-       new_p->key_len = p->key_len;
-       memcpy(new_p->key, p->key, p->key_len);
-       dump_payload(p);
-       dump_payload(new_p);
+       if (tpm2)
+               ret = tpm2_unseal_trusted(chip, p, options);
+       else
+               ret = key_unseal(p, options);
+       if (ret < 0)
+               pr_info("key_unseal failed (%d)\n", ret);
 
-       ret = key_seal(new_p, new_o);
-       if (ret < 0) {
-               pr_info("trusted_key: key_seal failed (%d)\n", ret);
-               kfree_sensitive(new_p);
-               goto out;
-       }
-       if (new_o->pcrlock) {
-               ret = pcrlock(new_o->pcrlock);
+       if (options->pcrlock) {
+               ret = pcrlock(options->pcrlock);
                if (ret < 0) {
-                       pr_info("trusted_key: pcrlock failed (%d)\n", ret);
-                       kfree_sensitive(new_p);
+                       pr_info("pcrlock failed (%d)\n", ret);
                        goto out;
                }
        }
-       rcu_assign_keypointer(key, new_p);
-       call_rcu(&p->rcu, trusted_rcu_free);
 out:
-       kfree_sensitive(datablob);
-       kfree_sensitive(new_o);
+       kfree_sensitive(options);
        return ret;
 }
 
-/*
- * trusted_read - copy the sealed blob data to userspace in hex.
- * On success, return to userspace the trusted key datablob size.
- */
-static long trusted_read(const struct key *key, char *buffer,
-                        size_t buflen)
-{
-       const struct trusted_key_payload *p;
-       char *bufp;
-       int i;
-
-       p = dereference_key_locked(key);
-       if (!p)
-               return -EINVAL;
-
-       if (buffer && buflen >= 2 * p->blob_len) {
-               bufp = buffer;
-               for (i = 0; i < p->blob_len; i++)
-                       bufp = hex_byte_pack(bufp, p->blob[i]);
-       }
-       return 2 * p->blob_len;
-}
-
-/*
- * trusted_destroy - clear and free the key's payload
- */
-static void trusted_destroy(struct key *key)
+static int trusted_tpm_get_random(unsigned char *key, size_t key_len)
 {
-       kfree_sensitive(key->payload.data[0]);
+       return tpm_get_random(chip, key, key_len);
 }
 
-struct key_type key_type_trusted = {
-       .name = "trusted",
-       .instantiate = trusted_instantiate,
-       .update = trusted_update,
-       .destroy = trusted_destroy,
-       .describe = user_describe,
-       .read = trusted_read,
-};
-
-EXPORT_SYMBOL_GPL(key_type_trusted);
-
 static void trusted_shash_release(void)
 {
        if (hashalg)
@@ -1196,14 +990,14 @@ static int __init trusted_shash_alloc(void)
 
        hmacalg = crypto_alloc_shash(hmac_alg, 0, 0);
        if (IS_ERR(hmacalg)) {
-               pr_info("trusted_key: could not allocate crypto %s\n",
+               pr_info("could not allocate crypto %s\n",
                        hmac_alg);
                return PTR_ERR(hmacalg);
        }
 
        hashalg = crypto_alloc_shash(hash_alg, 0, 0);
        if (IS_ERR(hashalg)) {
-               pr_info("trusted_key: could not allocate crypto %s\n",
+               pr_info("could not allocate crypto %s\n",
                        hash_alg);
                ret = PTR_ERR(hashalg);
                goto hashalg_fail;
@@ -1231,16 +1025,13 @@ static int __init init_digests(void)
        return 0;
 }
 
-static int __init init_trusted(void)
+static int __init trusted_tpm_init(void)
 {
        int ret;
 
-       /* encrypted_keys.ko depends on successful load of this module even if
-        * TPM is not used.
-        */
        chip = tpm_default_chip();
        if (!chip)
-               return 0;
+               return -ENODEV;
 
        ret = init_digests();
        if (ret < 0)
@@ -1261,7 +1052,7 @@ err_put:
        return ret;
 }
 
-static void __exit cleanup_trusted(void)
+static void trusted_tpm_exit(void)
 {
        if (chip) {
                put_device(&chip->dev);
@@ -1271,7 +1062,11 @@ static void __exit cleanup_trusted(void)
        }
 }
 
-late_initcall(init_trusted);
-module_exit(cleanup_trusted);
-
-MODULE_LICENSE("GPL");
+struct trusted_key_ops trusted_key_tpm_ops = {
+       .migratable = 1, /* migratable by default */
+       .init = trusted_tpm_init,
+       .seal = trusted_tpm_seal,
+       .unseal = trusted_tpm_unseal,
+       .get_random = trusted_tpm_get_random,
+       .exit = trusted_tpm_exit,
+};
index e2a0ed5d02f013392d337a1b543eeda1e986ba74..617fabd4d913b81caa5e0537e12477671dbe3a77 100644 (file)
@@ -4,6 +4,8 @@
  * Copyright (C) 2014 Intel Corporation
  */
 
+#include <linux/asn1_encoder.h>
+#include <linux/oid_registry.h>
 #include <linux/string.h>
 #include <linux/err.h>
 #include <linux/tpm.h>
 #include <keys/trusted-type.h>
 #include <keys/trusted_tpm.h>
 
+#include <asm/unaligned.h>
+
+#include "tpm2key.asn1.h"
+
 static struct tpm2_hash tpm2_hash_map[] = {
        {HASH_ALGO_SHA1, TPM_ALG_SHA1},
        {HASH_ALGO_SHA256, TPM_ALG_SHA256},
@@ -20,6 +26,165 @@ static struct tpm2_hash tpm2_hash_map[] = {
        {HASH_ALGO_SM3_256, TPM_ALG_SM3_256},
 };
 
+static u32 tpm2key_oid[] = { 2, 23, 133, 10, 1, 5 };
+
+static int tpm2_key_encode(struct trusted_key_payload *payload,
+                          struct trusted_key_options *options,
+                          u8 *src, u32 len)
+{
+       const int SCRATCH_SIZE = PAGE_SIZE;
+       u8 *scratch = kmalloc(SCRATCH_SIZE, GFP_KERNEL);
+       u8 *work = scratch, *work1;
+       u8 *end_work = scratch + SCRATCH_SIZE;
+       u8 *priv, *pub;
+       u16 priv_len, pub_len;
+
+       priv_len = get_unaligned_be16(src) + 2;
+       priv = src;
+
+       src += priv_len;
+
+       pub_len = get_unaligned_be16(src) + 2;
+       pub = src;
+
+       if (!scratch)
+               return -ENOMEM;
+
+       work = asn1_encode_oid(work, end_work, tpm2key_oid,
+                              asn1_oid_len(tpm2key_oid));
+
+       if (options->blobauth_len == 0) {
+               unsigned char bool[3], *w = bool;
+               /* tag 0 is emptyAuth */
+               w = asn1_encode_boolean(w, w + sizeof(bool), true);
+               if (WARN(IS_ERR(w), "BUG: Boolean failed to encode"))
+                       return PTR_ERR(w);
+               work = asn1_encode_tag(work, end_work, 0, bool, w - bool);
+       }
+
+       /*
+        * Assume both octet strings will encode to a 2 byte definite length
+        *
+        * Note: For a well behaved TPM, this warning should never
+        * trigger, so if it does there's something nefarious going on
+        */
+       if (WARN(work - scratch + pub_len + priv_len + 14 > SCRATCH_SIZE,
+                "BUG: scratch buffer is too small"))
+               return -EINVAL;
+
+       work = asn1_encode_integer(work, end_work, options->keyhandle);
+       work = asn1_encode_octet_string(work, end_work, pub, pub_len);
+       work = asn1_encode_octet_string(work, end_work, priv, priv_len);
+
+       work1 = payload->blob;
+       work1 = asn1_encode_sequence(work1, work1 + sizeof(payload->blob),
+                                    scratch, work - scratch);
+       if (WARN(IS_ERR(work1), "BUG: ASN.1 encoder failed"))
+               return PTR_ERR(work1);
+
+       return work1 - payload->blob;
+}
+
+struct tpm2_key_context {
+       u32 parent;
+       const u8 *pub;
+       u32 pub_len;
+       const u8 *priv;
+       u32 priv_len;
+};
+
+static int tpm2_key_decode(struct trusted_key_payload *payload,
+                          struct trusted_key_options *options,
+                          u8 **buf)
+{
+       int ret;
+       struct tpm2_key_context ctx;
+       u8 *blob;
+
+       memset(&ctx, 0, sizeof(ctx));
+
+       ret = asn1_ber_decoder(&tpm2key_decoder, &ctx, payload->blob,
+                              payload->blob_len);
+       if (ret < 0)
+               return ret;
+
+       if (ctx.priv_len + ctx.pub_len > MAX_BLOB_SIZE)
+               return -EINVAL;
+
+       blob = kmalloc(ctx.priv_len + ctx.pub_len + 4, GFP_KERNEL);
+       if (!blob)
+               return -ENOMEM;
+
+       *buf = blob;
+       options->keyhandle = ctx.parent;
+
+       memcpy(blob, ctx.priv, ctx.priv_len);
+       blob += ctx.priv_len;
+
+       memcpy(blob, ctx.pub, ctx.pub_len);
+
+       return 0;
+}
+
+int tpm2_key_parent(void *context, size_t hdrlen,
+                 unsigned char tag,
+                 const void *value, size_t vlen)
+{
+       struct tpm2_key_context *ctx = context;
+       const u8 *v = value;
+       int i;
+
+       ctx->parent = 0;
+       for (i = 0; i < vlen; i++) {
+               ctx->parent <<= 8;
+               ctx->parent |= v[i];
+       }
+
+       return 0;
+}
+
+int tpm2_key_type(void *context, size_t hdrlen,
+               unsigned char tag,
+               const void *value, size_t vlen)
+{
+       enum OID oid = look_up_OID(value, vlen);
+
+       if (oid != OID_TPMSealedData) {
+               char buffer[50];
+
+               sprint_oid(value, vlen, buffer, sizeof(buffer));
+               pr_debug("OID is \"%s\" which is not TPMSealedData\n",
+                        buffer);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int tpm2_key_pub(void *context, size_t hdrlen,
+              unsigned char tag,
+              const void *value, size_t vlen)
+{
+       struct tpm2_key_context *ctx = context;
+
+       ctx->pub = value;
+       ctx->pub_len = vlen;
+
+       return 0;
+}
+
+int tpm2_key_priv(void *context, size_t hdrlen,
+               unsigned char tag,
+               const void *value, size_t vlen)
+{
+       struct tpm2_key_context *ctx = context;
+
+       ctx->priv = value;
+       ctx->priv_len = vlen;
+
+       return 0;
+}
+
 /**
  * tpm_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
  *
@@ -63,9 +228,10 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
                      struct trusted_key_payload *payload,
                      struct trusted_key_options *options)
 {
-       unsigned int blob_len;
+       int blob_len = 0;
        struct tpm_buf buf;
        u32 hash;
+       u32 flags;
        int i;
        int rc;
 
@@ -79,7 +245,10 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
        if (i == ARRAY_SIZE(tpm2_hash_map))
                return -EINVAL;
 
-       rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
+       if (!options->keyhandle)
+               return -EINVAL;
+
+       rc = tpm_try_get_ops(chip);
        if (rc)
                return rc;
 
@@ -97,29 +266,32 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
                             TPM_DIGEST_SIZE);
 
        /* sensitive */
-       tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
+       tpm_buf_append_u16(&buf, 4 + options->blobauth_len + payload->key_len);
+
+       tpm_buf_append_u16(&buf, options->blobauth_len);
+       if (options->blobauth_len)
+               tpm_buf_append(&buf, options->blobauth, options->blobauth_len);
 
-       tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
-       tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
-       tpm_buf_append_u16(&buf, payload->key_len + 1);
+       tpm_buf_append_u16(&buf, payload->key_len);
        tpm_buf_append(&buf, payload->key, payload->key_len);
-       tpm_buf_append_u8(&buf, payload->migratable);
 
        /* public */
        tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
        tpm_buf_append_u16(&buf, TPM_ALG_KEYEDHASH);
        tpm_buf_append_u16(&buf, hash);
 
+       /* key properties */
+       flags = 0;
+       flags |= options->policydigest_len ? 0 : TPM2_OA_USER_WITH_AUTH;
+       flags |= payload->migratable ? (TPM2_OA_FIXED_TPM |
+                                       TPM2_OA_FIXED_PARENT) : 0;
+       tpm_buf_append_u32(&buf, flags);
+
        /* policy */
-       if (options->policydigest_len) {
-               tpm_buf_append_u32(&buf, 0);
-               tpm_buf_append_u16(&buf, options->policydigest_len);
+       tpm_buf_append_u16(&buf, options->policydigest_len);
+       if (options->policydigest_len)
                tpm_buf_append(&buf, options->policydigest,
                               options->policydigest_len);
-       } else {
-               tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
-               tpm_buf_append_u16(&buf, 0);
-       }
 
        /* public parameters */
        tpm_buf_append_u16(&buf, TPM_ALG_NULL);
@@ -150,8 +322,9 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
                goto out;
        }
 
-       memcpy(payload->blob, &buf.data[TPM_HEADER_SIZE + 4], blob_len);
-       payload->blob_len = blob_len;
+       blob_len = tpm2_key_encode(payload, options,
+                                  &buf.data[TPM_HEADER_SIZE + 4],
+                                  blob_len);
 
 out:
        tpm_buf_destroy(&buf);
@@ -162,6 +335,10 @@ out:
                else
                        rc = -EPERM;
        }
+       if (blob_len < 0)
+               return blob_len;
+
+       payload->blob_len = blob_len;
 
        tpm_put_ops(chip);
        return rc;
@@ -189,13 +366,45 @@ static int tpm2_load_cmd(struct tpm_chip *chip,
        unsigned int private_len;
        unsigned int public_len;
        unsigned int blob_len;
+       u8 *blob, *pub;
        int rc;
+       u32 attrs;
+
+       rc = tpm2_key_decode(payload, options, &blob);
+       if (rc) {
+               /* old form */
+               blob = payload->blob;
+               payload->old_format = 1;
+       }
+
+       /* new format carries keyhandle but old format doesn't */
+       if (!options->keyhandle)
+               return -EINVAL;
 
-       private_len = be16_to_cpup((__be16 *) &payload->blob[0]);
-       if (private_len > (payload->blob_len - 2))
+       /* must be big enough for at least the two be16 size counts */
+       if (payload->blob_len < 4)
+               return -EINVAL;
+
+       private_len = get_unaligned_be16(blob);
+
+       /* must be big enough for following public_len */
+       if (private_len + 2 + 2 > (payload->blob_len))
+               return -E2BIG;
+
+       public_len = get_unaligned_be16(blob + 2 + private_len);
+       if (private_len + 2 + public_len + 2 > payload->blob_len)
                return -E2BIG;
 
-       public_len = be16_to_cpup((__be16 *) &payload->blob[2 + private_len]);
+       pub = blob + 2 + private_len + 2;
+       /* key attributes are always at offset 4 */
+       attrs = get_unaligned_be32(pub + 4);
+
+       if ((attrs & (TPM2_OA_FIXED_TPM | TPM2_OA_FIXED_PARENT)) ==
+           (TPM2_OA_FIXED_TPM | TPM2_OA_FIXED_PARENT))
+               payload->migratable = 0;
+       else
+               payload->migratable = 1;
+
        blob_len = private_len + public_len + 4;
        if (blob_len > payload->blob_len)
                return -E2BIG;
@@ -211,7 +420,7 @@ static int tpm2_load_cmd(struct tpm_chip *chip,
                             options->keyauth /* hmac */,
                             TPM_DIGEST_SIZE);
 
-       tpm_buf_append(&buf, payload->blob, blob_len);
+       tpm_buf_append(&buf, blob, blob_len);
 
        if (buf.flags & TPM_BUF_OVERFLOW) {
                rc = -E2BIG;
@@ -224,6 +433,8 @@ static int tpm2_load_cmd(struct tpm_chip *chip,
                        (__be32 *) &buf.data[TPM_HEADER_SIZE]);
 
 out:
+       if (blob != payload->blob)
+               kfree(blob);
        tpm_buf_destroy(&buf);
 
        if (rc > 0)
@@ -265,7 +476,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
                             NULL /* nonce */, 0,
                             TPM2_SA_CONTINUE_SESSION,
                             options->blobauth /* hmac */,
-                            TPM_DIGEST_SIZE);
+                            options->blobauth_len);
 
        rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
        if (rc > 0)
@@ -274,7 +485,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
        if (!rc) {
                data_len = be16_to_cpup(
                        (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
-               if (data_len < MIN_KEY_SIZE ||  data_len > MAX_KEY_SIZE + 1) {
+               if (data_len < MIN_KEY_SIZE ||  data_len > MAX_KEY_SIZE) {
                        rc = -EFAULT;
                        goto out;
                }
@@ -285,9 +496,19 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
                }
                data = &buf.data[TPM_HEADER_SIZE + 6];
 
-               memcpy(payload->key, data, data_len - 1);
-               payload->key_len = data_len - 1;
-               payload->migratable = data[data_len - 1];
+               if (payload->old_format) {
+                       /* migratable flag is at the end of the key */
+                       memcpy(payload->key, data, data_len - 1);
+                       payload->key_len = data_len - 1;
+                       payload->migratable = data[data_len - 1];
+               } else {
+                       /*
+                        * migratable flag already collected from key
+                        * attributes
+                        */
+                       memcpy(payload->key, data, data_len);
+                       payload->key_len = data_len;
+               }
        }
 
 out:
index 6dcb6aa4db7f0d51bcd525b3defed6324a3f67bf..75df32906055498562fb8df7dd4e632f620222f4 100644 (file)
@@ -109,7 +109,7 @@ static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_dat
        struct avtab_node *prev, *cur, *newnode;
        u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-       if (!h)
+       if (!h || !h->nslot)
                return -EINVAL;
 
        hvalue = avtab_hash(key, h->mask);
@@ -154,7 +154,7 @@ avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datu
        struct avtab_node *prev, *cur;
        u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-       if (!h)
+       if (!h || !h->nslot)
                return NULL;
        hvalue = avtab_hash(key, h->mask);
        for (prev = NULL, cur = h->htable[hvalue];
@@ -184,7 +184,7 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
        struct avtab_node *cur;
        u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-       if (!h)
+       if (!h || !h->nslot)
                return NULL;
 
        hvalue = avtab_hash(key, h->mask);
@@ -220,7 +220,7 @@ avtab_search_node(struct avtab *h, struct avtab_key *key)
        struct avtab_node *cur;
        u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-       if (!h)
+       if (!h || !h->nslot)
                return NULL;
 
        hvalue = avtab_hash(key, h->mask);
@@ -295,6 +295,7 @@ void avtab_destroy(struct avtab *h)
        }
        kvfree(h->htable);
        h->htable = NULL;
+       h->nel = 0;
        h->nslot = 0;
        h->mask = 0;
 }
@@ -303,88 +304,52 @@ void avtab_init(struct avtab *h)
 {
        h->htable = NULL;
        h->nel = 0;
+       h->nslot = 0;
+       h->mask = 0;
 }
 
-int avtab_alloc(struct avtab *h, u32 nrules)
+static int avtab_alloc_common(struct avtab *h, u32 nslot)
 {
-       u32 mask = 0;
-       u32 shift = 0;
-       u32 work = nrules;
-       u32 nslot = 0;
-
-       if (nrules == 0)
-               goto avtab_alloc_out;
-
-       while (work) {
-               work  = work >> 1;
-               shift++;
-       }
-       if (shift > 2)
-               shift = shift - 2;
-       nslot = 1 << shift;
-       if (nslot > MAX_AVTAB_HASH_BUCKETS)
-               nslot = MAX_AVTAB_HASH_BUCKETS;
-       mask = nslot - 1;
+       if (!nslot)
+               return 0;
 
        h->htable = kvcalloc(nslot, sizeof(void *), GFP_KERNEL);
        if (!h->htable)
                return -ENOMEM;
 
- avtab_alloc_out:
-       h->nel = 0;
        h->nslot = nslot;
-       h->mask = mask;
-       pr_debug("SELinux: %d avtab hash slots, %d rules.\n",
-              h->nslot, nrules);
+       h->mask = nslot - 1;
        return 0;
 }
 
-int avtab_duplicate(struct avtab *new, struct avtab *orig)
+int avtab_alloc(struct avtab *h, u32 nrules)
 {
-       int i;
-       struct avtab_node *node, *tmp, *tail;
-
-       memset(new, 0, sizeof(*new));
+       int rc;
+       u32 nslot = 0;
 
-       new->htable = kvcalloc(orig->nslot, sizeof(void *), GFP_KERNEL);
-       if (!new->htable)
-               return -ENOMEM;
-       new->nslot = orig->nslot;
-       new->mask = orig->mask;
-
-       for (i = 0; i < orig->nslot; i++) {
-               tail = NULL;
-               for (node = orig->htable[i]; node; node = node->next) {
-                       tmp = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
-                       if (!tmp)
-                               goto error;
-                       tmp->key = node->key;
-                       if (tmp->key.specified & AVTAB_XPERMS) {
-                               tmp->datum.u.xperms =
-                                       kmem_cache_zalloc(avtab_xperms_cachep,
-                                                       GFP_KERNEL);
-                               if (!tmp->datum.u.xperms) {
-                                       kmem_cache_free(avtab_node_cachep, tmp);
-                                       goto error;
-                               }
-                               tmp->datum.u.xperms = node->datum.u.xperms;
-                       } else
-                               tmp->datum.u.data = node->datum.u.data;
-
-                       if (tail)
-                               tail->next = tmp;
-                       else
-                               new->htable[i] = tmp;
-
-                       tail = tmp;
-                       new->nel++;
+       if (nrules != 0) {
+               u32 shift = 1;
+               u32 work = nrules >> 3;
+               while (work) {
+                       work >>= 1;
+                       shift++;
                }
+               nslot = 1 << shift;
+               if (nslot > MAX_AVTAB_HASH_BUCKETS)
+                       nslot = MAX_AVTAB_HASH_BUCKETS;
+
+               rc = avtab_alloc_common(h, nslot);
+               if (rc)
+                       return rc;
        }
 
+       pr_debug("SELinux: %d avtab hash slots, %d rules.\n", nslot, nrules);
        return 0;
-error:
-       avtab_destroy(new);
-       return -ENOMEM;
+}
+
+int avtab_alloc_dup(struct avtab *new, const struct avtab *orig)
+{
+       return avtab_alloc_common(new, orig->nslot);
 }
 
 void avtab_hash_eval(struct avtab *h, char *tag)
index 4c4445ca9118e7ff4c3624ea9f958f650eb17de4..f2eeb36265d15f9cac85632b4f492949517c2455 100644 (file)
@@ -89,7 +89,7 @@ struct avtab {
 
 void avtab_init(struct avtab *h);
 int avtab_alloc(struct avtab *, u32);
-int avtab_duplicate(struct avtab *new, struct avtab *orig);
+int avtab_alloc_dup(struct avtab *new, const struct avtab *orig);
 struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k);
 void avtab_destroy(struct avtab *h);
 void avtab_hash_eval(struct avtab *h, char *tag);
index 0b32f3ab025e599753bbf5bdb00c4ac784885d7e..1ef74c085f2b0f288ca689097e38ce03be6090c7 100644 (file)
@@ -605,7 +605,6 @@ static int cond_dup_av_list(struct cond_av_list *new,
                        struct cond_av_list *orig,
                        struct avtab *avtab)
 {
-       struct avtab_node *avnode;
        u32 i;
 
        memset(new, 0, sizeof(*new));
@@ -615,10 +614,11 @@ static int cond_dup_av_list(struct cond_av_list *new,
                return -ENOMEM;
 
        for (i = 0; i < orig->len; i++) {
-               avnode = avtab_search_node(avtab, &orig->nodes[i]->key);
-               if (WARN_ON(!avnode))
-                       return -EINVAL;
-               new->nodes[i] = avnode;
+               new->nodes[i] = avtab_insert_nonunique(avtab,
+                                                      &orig->nodes[i]->key,
+                                                      &orig->nodes[i]->datum);
+               if (!new->nodes[i])
+                       return -ENOMEM;
                new->len++;
        }
 
@@ -630,7 +630,7 @@ static int duplicate_policydb_cond_list(struct policydb *newp,
 {
        int rc, i, j;
 
-       rc = avtab_duplicate(&newp->te_cond_avtab, &origp->te_cond_avtab);
+       rc = avtab_alloc_dup(&newp->te_cond_avtab, &origp->te_cond_avtab);
        if (rc)
                return rc;
 
index d91e41d47777b26390b626fd38897548724c4b58..30163314504053e8a97943f1ef8a049cdf3e7e22 100644 (file)
@@ -1552,6 +1552,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
                if (!str)
                        goto out;
        }
+retry:
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -1565,6 +1566,15 @@ static int security_context_to_sid_core(struct selinux_state *state,
        } else if (rc)
                goto out_unlock;
        rc = sidtab_context_to_sid(sidtab, &context, sid);
+       if (rc == -ESTALE) {
+               rcu_read_unlock();
+               if (context.str) {
+                       str = context.str;
+                       context.str = NULL;
+               }
+               context_destroy(&context);
+               goto retry;
+       }
        context_destroy(&context);
 out_unlock:
        rcu_read_unlock();
@@ -1714,7 +1724,7 @@ static int security_compute_sid(struct selinux_state *state,
        struct selinux_policy *policy;
        struct policydb *policydb;
        struct sidtab *sidtab;
-       struct class_datum *cladatum = NULL;
+       struct class_datum *cladatum;
        struct context *scontext, *tcontext, newcontext;
        struct sidtab_entry *sentry, *tentry;
        struct avtab_key avkey;
@@ -1736,6 +1746,8 @@ static int security_compute_sid(struct selinux_state *state,
                goto out;
        }
 
+retry:
+       cladatum = NULL;
        context_init(&newcontext);
 
        rcu_read_lock();
@@ -1880,6 +1892,11 @@ static int security_compute_sid(struct selinux_state *state,
        }
        /* Obtain the sid for the context. */
        rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid);
+       if (rc == -ESTALE) {
+               rcu_read_unlock();
+               context_destroy(&newcontext);
+               goto retry;
+       }
 out_unlock:
        rcu_read_unlock();
        context_destroy(&newcontext);
@@ -2192,6 +2209,7 @@ void selinux_policy_commit(struct selinux_state *state,
                           struct selinux_load_state *load_state)
 {
        struct selinux_policy *oldpolicy, *newpolicy = load_state->policy;
+       unsigned long flags;
        u32 seqno;
 
        oldpolicy = rcu_dereference_protected(state->policy,
@@ -2213,7 +2231,13 @@ void selinux_policy_commit(struct selinux_state *state,
        seqno = newpolicy->latest_granting;
 
        /* Install the new policy. */
-       rcu_assign_pointer(state->policy, newpolicy);
+       if (oldpolicy) {
+               sidtab_freeze_begin(oldpolicy->sidtab, &flags);
+               rcu_assign_pointer(state->policy, newpolicy);
+               sidtab_freeze_end(oldpolicy->sidtab, &flags);
+       } else {
+               rcu_assign_pointer(state->policy, newpolicy);
+       }
 
        /* Load the policycaps from the new policy */
        security_load_policycaps(state, newpolicy);
@@ -2357,13 +2381,15 @@ int security_port_sid(struct selinux_state *state,
        struct policydb *policydb;
        struct sidtab *sidtab;
        struct ocontext *c;
-       int rc = 0;
+       int rc;
 
        if (!selinux_initialized(state)) {
                *out_sid = SECINITSID_PORT;
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2382,6 +2408,10 @@ int security_port_sid(struct selinux_state *state,
                if (!c->sid[0]) {
                        rc = sidtab_context_to_sid(sidtab, &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2408,13 +2438,15 @@ int security_ib_pkey_sid(struct selinux_state *state,
        struct policydb *policydb;
        struct sidtab *sidtab;
        struct ocontext *c;
-       int rc = 0;
+       int rc;
 
        if (!selinux_initialized(state)) {
                *out_sid = SECINITSID_UNLABELED;
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2435,6 +2467,10 @@ int security_ib_pkey_sid(struct selinux_state *state,
                        rc = sidtab_context_to_sid(sidtab,
                                                   &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2460,13 +2496,15 @@ int security_ib_endport_sid(struct selinux_state *state,
        struct policydb *policydb;
        struct sidtab *sidtab;
        struct ocontext *c;
-       int rc = 0;
+       int rc;
 
        if (!selinux_initialized(state)) {
                *out_sid = SECINITSID_UNLABELED;
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2487,6 +2525,10 @@ int security_ib_endport_sid(struct selinux_state *state,
                if (!c->sid[0]) {
                        rc = sidtab_context_to_sid(sidtab, &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2510,7 +2552,7 @@ int security_netif_sid(struct selinux_state *state,
        struct selinux_policy *policy;
        struct policydb *policydb;
        struct sidtab *sidtab;
-       int rc = 0;
+       int rc;
        struct ocontext *c;
 
        if (!selinux_initialized(state)) {
@@ -2518,6 +2560,8 @@ int security_netif_sid(struct selinux_state *state,
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2534,10 +2578,18 @@ int security_netif_sid(struct selinux_state *state,
                if (!c->sid[0] || !c->sid[1]) {
                        rc = sidtab_context_to_sid(sidtab, &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                        rc = sidtab_context_to_sid(sidtab, &c->context[1],
                                                   &c->sid[1]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2587,6 +2639,7 @@ int security_node_sid(struct selinux_state *state,
                return 0;
        }
 
+retry:
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2635,6 +2688,10 @@ int security_node_sid(struct selinux_state *state,
                        rc = sidtab_context_to_sid(sidtab,
                                                   &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2676,18 +2733,24 @@ int security_get_user_sids(struct selinux_state *state,
        struct sidtab *sidtab;
        struct context *fromcon, usercon;
        u32 *mysids = NULL, *mysids2, sid;
-       u32 mynel = 0, maxnel = SIDS_NEL;
+       u32 i, j, mynel, maxnel = SIDS_NEL;
        struct user_datum *user;
        struct role_datum *role;
        struct ebitmap_node *rnode, *tnode;
-       int rc = 0, i, j;
+       int rc;
 
        *sids = NULL;
        *nel = 0;
 
        if (!selinux_initialized(state))
-               goto out;
+               return 0;
+
+       mysids = kcalloc(maxnel, sizeof(*mysids), GFP_KERNEL);
+       if (!mysids)
+               return -ENOMEM;
 
+retry:
+       mynel = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2707,11 +2770,6 @@ int security_get_user_sids(struct selinux_state *state,
 
        usercon.user = user->value;
 
-       rc = -ENOMEM;
-       mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
-       if (!mysids)
-               goto out_unlock;
-
        ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
                role = policydb->role_val_to_struct[i];
                usercon.role = i + 1;
@@ -2723,6 +2781,10 @@ int security_get_user_sids(struct selinux_state *state,
                                continue;
 
                        rc = sidtab_context_to_sid(sidtab, &usercon, &sid);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out_unlock;
                        if (mynel < maxnel) {
@@ -2745,14 +2807,14 @@ out_unlock:
        rcu_read_unlock();
        if (rc || !mynel) {
                kfree(mysids);
-               goto out;
+               return rc;
        }
 
        rc = -ENOMEM;
        mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL);
        if (!mysids2) {
                kfree(mysids);
-               goto out;
+               return rc;
        }
        for (i = 0, j = 0; i < mynel; i++) {
                struct av_decision dummy_avd;
@@ -2765,12 +2827,10 @@ out_unlock:
                        mysids2[j++] = mysids[i];
                cond_resched();
        }
-       rc = 0;
        kfree(mysids);
        *sids = mysids2;
        *nel = j;
-out:
-       return rc;
+       return 0;
 }
 
 /**
@@ -2783,6 +2843,9 @@ out:
  * Obtain a SID to use for a file in a filesystem that
  * cannot support xattr or use a fixed labeling behavior like
  * transition SIDs or task SIDs.
+ *
+ * WARNING: This function may return -ESTALE, indicating that the caller
+ * must retry the operation after re-acquiring the policy pointer!
  */
 static inline int __security_genfs_sid(struct selinux_policy *policy,
                                       const char *fstype,
@@ -2861,11 +2924,13 @@ int security_genfs_sid(struct selinux_state *state,
                return 0;
        }
 
-       rcu_read_lock();
-       policy = rcu_dereference(state->policy);
-       retval = __security_genfs_sid(policy,
-                               fstype, path, orig_sclass, sid);
-       rcu_read_unlock();
+       do {
+               rcu_read_lock();
+               policy = rcu_dereference(state->policy);
+               retval = __security_genfs_sid(policy, fstype, path,
+                                             orig_sclass, sid);
+               rcu_read_unlock();
+       } while (retval == -ESTALE);
        return retval;
 }
 
@@ -2888,7 +2953,7 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
        struct selinux_policy *policy;
        struct policydb *policydb;
        struct sidtab *sidtab;
-       int rc = 0;
+       int rc;
        struct ocontext *c;
        struct superblock_security_struct *sbsec = sb->s_security;
        const char *fstype = sb->s_type->name;
@@ -2899,6 +2964,8 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2916,6 +2983,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
                if (!c->sid[0]) {
                        rc = sidtab_context_to_sid(sidtab, &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2923,6 +2994,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
        } else {
                rc = __security_genfs_sid(policy, fstype, "/",
                                        SECCLASS_DIR, &sbsec->sid);
+               if (rc == -ESTALE) {
+                       rcu_read_unlock();
+                       goto retry;
+               }
                if (rc) {
                        sbsec->behavior = SECURITY_FS_USE_NONE;
                        rc = 0;
@@ -3132,12 +3207,13 @@ int security_sid_mls_copy(struct selinux_state *state,
        u32 len;
        int rc;
 
-       rc = 0;
        if (!selinux_initialized(state)) {
                *new_sid = sid;
-               goto out;
+               return 0;
        }
 
+retry:
+       rc = 0;
        context_init(&newcon);
 
        rcu_read_lock();
@@ -3196,10 +3272,14 @@ int security_sid_mls_copy(struct selinux_state *state,
                }
        }
        rc = sidtab_context_to_sid(sidtab, &newcon, new_sid);
+       if (rc == -ESTALE) {
+               rcu_read_unlock();
+               context_destroy(&newcon);
+               goto retry;
+       }
 out_unlock:
        rcu_read_unlock();
        context_destroy(&newcon);
-out:
        return rc;
 }
 
@@ -3792,6 +3872,8 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -3818,23 +3900,24 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
                                goto out;
                }
                rc = -EIDRM;
-               if (!mls_context_isvalid(policydb, &ctx_new))
-                       goto out_free;
+               if (!mls_context_isvalid(policydb, &ctx_new)) {
+                       ebitmap_destroy(&ctx_new.range.level[0].cat);
+                       goto out;
+               }
 
                rc = sidtab_context_to_sid(sidtab, &ctx_new, sid);
+               ebitmap_destroy(&ctx_new.range.level[0].cat);
+               if (rc == -ESTALE) {
+                       rcu_read_unlock();
+                       goto retry;
+               }
                if (rc)
-                       goto out_free;
+                       goto out;
 
                security_netlbl_cache_add(secattr, *sid);
-
-               ebitmap_destroy(&ctx_new.range.level[0].cat);
        } else
                *sid = SECSID_NULL;
 
-       rcu_read_unlock();
-       return 0;
-out_free:
-       ebitmap_destroy(&ctx_new.range.level[0].cat);
 out:
        rcu_read_unlock();
        return rc;
index 5ee190bd30f53fde45c80d8a21f179371f8350df..656d50b09f7629bc22b884bf4b1fea1039ca243c 100644 (file)
@@ -39,6 +39,7 @@ int sidtab_init(struct sidtab *s)
        for (i = 0; i < SECINITSID_NUM; i++)
                s->isids[i].set = 0;
 
+       s->frozen = false;
        s->count = 0;
        s->convert = NULL;
        hash_init(s->context_to_sid);
@@ -281,6 +282,15 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
        if (*sid)
                goto out_unlock;
 
+       if (unlikely(s->frozen)) {
+               /*
+                * This sidtab is now frozen - tell the caller to abort and
+                * get the new one.
+                */
+               rc = -ESTALE;
+               goto out_unlock;
+       }
+
        count = s->count;
        convert = s->convert;
 
@@ -474,6 +484,17 @@ void sidtab_cancel_convert(struct sidtab *s)
        spin_unlock_irqrestore(&s->lock, flags);
 }
 
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
+{
+       spin_lock_irqsave(&s->lock, *flags);
+       s->frozen = true;
+       s->convert = NULL;
+}
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
+{
+       spin_unlock_irqrestore(&s->lock, *flags);
+}
+
 static void sidtab_destroy_entry(struct sidtab_entry *entry)
 {
        context_destroy(&entry->context);
index 80c744d07ad62280cdcb77939dfe3d81ef2d7790..4eff0e49dcb22e490e2a880fd72099b4274e6ec5 100644 (file)
@@ -86,6 +86,7 @@ struct sidtab {
        u32 count;
        /* access only under spinlock */
        struct sidtab_convert_params *convert;
+       bool frozen;
        spinlock_t lock;
 
 #if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
@@ -125,6 +126,9 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);
 
 void sidtab_cancel_convert(struct sidtab *s);
 
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock);
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock);
+
 int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);
 
 void sidtab_destroy(struct sidtab *s);
index 52637180af3366e0472c9d23e820dbe46c8691d4..80b814b9922a95d3ab13220b61f9439ebe97d8cc 100644 (file)
@@ -1571,6 +1571,14 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
                                        return -ENOMEM;
                                kctl->id.device = dev;
                                kctl->id.subdevice = substr;
+
+                               /* Add the control before copying the id so that
+                                * the numid field of the id is set in the copy.
+                                */
+                               err = snd_ctl_add(card, kctl);
+                               if (err < 0)
+                                       return err;
+
                                switch (idx) {
                                case ACTIVE_IDX:
                                        setup->active_id = kctl->id;
@@ -1587,9 +1595,6 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
                                default:
                                        break;
                                }
-                               err = snd_ctl_add(card, kctl);
-                               if (err < 0)
-                                       return err;
                        }
                }
        }
index c20dad46a7c90314ad879cd9d9e291ca730756eb..dfef9c17e14048dff091b38c676c71befc21c957 100644 (file)
@@ -944,6 +944,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
index 58946d069ee59e068bf8990fdbe3a9cab59632e8..a7544b77d3f7cca63a3492fc895097348701877e 100644 (file)
@@ -3927,6 +3927,15 @@ static void alc271_fixup_dmic(struct hda_codec *codec,
                snd_hda_sequence_write(codec, verbs);
 }
 
+/* Fix the speaker amp after resume, etc */
+static void alc269vb_fixup_aspire_e1_coef(struct hda_codec *codec,
+                                         const struct hda_fixup *fix,
+                                         int action)
+{
+       if (action == HDA_FIXUP_ACT_INIT)
+               alc_update_coef_idx(codec, 0x0d, 0x6000, 0x6000);
+}
+
 static void alc269_fixup_pcm_44k(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action)
 {
@@ -6301,6 +6310,7 @@ enum {
        ALC283_FIXUP_HEADSET_MIC,
        ALC255_FIXUP_MIC_MUTE_LED,
        ALC282_FIXUP_ASPIRE_V5_PINS,
+       ALC269VB_FIXUP_ASPIRE_E1_COEF,
        ALC280_FIXUP_HP_GPIO4,
        ALC286_FIXUP_HP_GPIO_LED,
        ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
@@ -6979,6 +6989,10 @@ static const struct hda_fixup alc269_fixups[] = {
                        { },
                },
        },
+       [ALC269VB_FIXUP_ASPIRE_E1_COEF] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269vb_fixup_aspire_e1_coef,
+       },
        [ALC280_FIXUP_HP_GPIO4] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc280_fixup_hp_gpio4,
@@ -7901,6 +7915,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+       SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
        SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
        SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
@@ -8395,6 +8410,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC283_FIXUP_HEADSET_MIC, .name = "alc283-headset"},
        {.id = ALC255_FIXUP_MIC_MUTE_LED, .name = "alc255-dell-mute"},
        {.id = ALC282_FIXUP_ASPIRE_V5_PINS, .name = "aspire-v5"},
+       {.id = ALC269VB_FIXUP_ASPIRE_E1_COEF, .name = "aspire-e1-coef"},
        {.id = ALC280_FIXUP_HP_GPIO4, .name = "hp-gpio4"},
        {.id = ALC286_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
        {.id = ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, .name = "hp-gpio2-hotkey"},
index 6e634b44829306c87647668bf14f7e4b42f78d92..aa16a237513460bf63e0cc88651b2ff66e4d2f7e 100644 (file)
@@ -1348,8 +1348,10 @@ static int cygnus_ssp_probe(struct platform_device *pdev)
                                        &cygnus_ssp_dai[active_port_count]);
 
                /* negative is err, 0 is active and good, 1 is disabled */
-               if (err < 0)
+               if (err < 0) {
+                       of_node_put(child_node);
                        return err;
+               }
                else if (!err) {
                        dev_dbg(dev, "Activating DAI: %s\n",
                                cygnus_ssp_dai[active_port_count].name);
index 8c04b3b2c9075df41fa38cb2ed503ea39bf74a7f..7878da89d8e092c2d267b9bd6b442c33a6ecbd76 100644 (file)
@@ -3551,7 +3551,7 @@ static int rx_macro_probe(struct platform_device *pdev)
 
        /* set MCLK and NPL rates */
        clk_set_rate(rx->clks[2].clk, MCLK_FREQ);
-       clk_set_rate(rx->clks[3].clk, MCLK_FREQ);
+       clk_set_rate(rx->clks[3].clk, 2 * MCLK_FREQ);
 
        ret = clk_bulk_prepare_enable(RX_NUM_CLKS_MAX, rx->clks);
        if (ret)
index 36d7a6442cdbc512302e4ce4213af90f505e5223..e8c6c738bbaa054f1db333a1f36f0485f2e1dba4 100644 (file)
@@ -1811,7 +1811,7 @@ static int tx_macro_probe(struct platform_device *pdev)
 
        /* set MCLK and NPL rates */
        clk_set_rate(tx->clks[2].clk, MCLK_FREQ);
-       clk_set_rate(tx->clks[3].clk, MCLK_FREQ);
+       clk_set_rate(tx->clks[3].clk, 2 * MCLK_FREQ);
 
        ret = clk_bulk_prepare_enable(TX_NUM_CLKS_MAX, tx->clks);
        if (ret)
index 85f6865019d4a82d9ac924294c32452c86f8d028..ddb6436835d737b14cbc451313af64c12bc57bf9 100644 (file)
@@ -446,6 +446,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
        case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
        case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
        case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+       case MAX98373_R20FF_GLOBAL_SHDN:
        case MAX98373_R21FF_REV_ID:
                return true;
        default:
index d8c47667a9ea2f8c6a67881fb16b28b6a5916ce7..f3a12205cd48486673c706880dab0abf3a3b2f42 100644 (file)
@@ -220,6 +220,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
        case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
        case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
        case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+       case MAX98373_R20FF_GLOBAL_SHDN:
        case MAX98373_R21FF_REV_ID:
        /* SoundWire Control Port Registers */
        case MAX98373_R0040_SCP_INIT_STAT_1 ... MAX98373_R0070_SCP_FRAME_CTLR:
index 746c829312b87029680bf80ba3105278f503a750..1346a98ce8a15fe282e93288db88d7c426035370 100644 (file)
@@ -28,11 +28,13 @@ static int max98373_dac_event(struct snd_soc_dapm_widget *w,
                regmap_update_bits(max98373->regmap,
                        MAX98373_R20FF_GLOBAL_SHDN,
                        MAX98373_GLOBAL_EN_MASK, 1);
+               usleep_range(30000, 31000);
                break;
        case SND_SOC_DAPM_POST_PMD:
                regmap_update_bits(max98373->regmap,
                        MAX98373_R20FF_GLOBAL_SHDN,
                        MAX98373_GLOBAL_EN_MASK, 0);
+               usleep_range(30000, 31000);
                max98373->tdm_mode = false;
                break;
        default:
index df351519a3a6bd4efcef1c5b94ea2dbb0b57020c..cda9cd935d4f3833d517073583060dc339384f30 100644 (file)
@@ -707,7 +707,13 @@ int wm8960_configure_pll(struct snd_soc_component *component, int freq_in,
        best_freq_out = -EINVAL;
        *sysclk_idx = *dac_idx = *bclk_idx = -1;
 
-       for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+       /*
+        * From Datasheet, the PLL performs best when f2 is between
+        * 90MHz and 100MHz, the desired sysclk output is 11.2896MHz
+        * or 12.288MHz, then sysclkdiv = 2 is the best choice.
+        * So search sysclk_divs from 2 to 1 other than from 1 to 2.
+        */
+       for (i = ARRAY_SIZE(sysclk_divs) - 1; i >= 0; --i) {
                if (sysclk_divs[i] == -1)
                        continue;
                for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
index 08056fa0a0fa5ff1f5e18db91569224b7958b33f..a857a624864fc0c05a2acf014d20c71b63380942 100644 (file)
@@ -519,11 +519,13 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
                                   ESAI_SAICR_SYNC, esai_priv->synchronous ?
                                   ESAI_SAICR_SYNC : 0);
 
-               /* Set a default slot number -- 2 */
+               /* Set slots count */
                regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
-                                  ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
+                                  ESAI_xCCR_xDC_MASK,
+                                  ESAI_xCCR_xDC(esai_priv->slots));
                regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
-                                  ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
+                                  ESAI_xCCR_xDC_MASK,
+                                  ESAI_xCCR_xDC(esai_priv->slots));
        }
 
        return 0;
index 9e9b05883557c7a6c352cd932c46b256ee90415c..4124aa2fc2479a7737507788ccf456ac739e4684 100644 (file)
@@ -487,15 +487,15 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
                .stream_name = "Headset Playback",
                .channels_min = SST_STEREO,
                .channels_max = SST_STEREO,
-               .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
-               .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+               .rates = SNDRV_PCM_RATE_48000,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
        },
        .capture = {
                .stream_name = "Headset Capture",
                .channels_min = 1,
                .channels_max = 2,
-               .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
-               .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+               .rates = SNDRV_PCM_RATE_48000,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
        },
 },
 {
@@ -505,8 +505,8 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
                .stream_name = "Deepbuffer Playback",
                .channels_min = SST_STEREO,
                .channels_max = SST_STEREO,
-               .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
-               .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+               .rates = SNDRV_PCM_RATE_48000,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
        },
 },
 {
index 6d8f7d9fd192057ac5b25f245a7c405d30d73928..4a3d522f612b488c64c2179e4f001b716053e2b5 100644 (file)
@@ -399,7 +399,13 @@ int snd_sof_device_shutdown(struct device *dev)
 {
        struct snd_sof_dev *sdev = dev_get_drvdata(dev);
 
-       return snd_sof_shutdown(sdev);
+       if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+               cancel_work_sync(&sdev->probe_work);
+
+       if (sdev->fw_state == SOF_FW_BOOT_COMPLETE)
+               return snd_sof_shutdown(sdev);
+
+       return 0;
 }
 EXPORT_SYMBOL(snd_sof_device_shutdown);
 
index fc29b91b8932bd77252e7961f492ad4efe8c3896..c7ed2b3d6abca24285ff00c88da4c6d00ee47bbd 100644 (file)
@@ -27,9 +27,10 @@ static const struct snd_sof_debugfs_map apl_dsp_debugfs[] = {
 
 /* apollolake ops */
 const struct snd_sof_dsp_ops sof_apl_ops = {
-       /* probe and remove */
+       /* probe/remove/shutdown */
        .probe          = hda_dsp_probe,
        .remove         = hda_dsp_remove,
+       .shutdown       = hda_dsp_shutdown,
 
        /* Register IO */
        .write          = sof_io_write,
index e38db519f38dc9dd523a06a2d7170fe6d04d32ff..821f25fbcf089dc0ca82aeae9fc6cc137d495a08 100644 (file)
@@ -232,9 +232,10 @@ void cnl_ipc_dump(struct snd_sof_dev *sdev)
 
 /* cannonlake ops */
 const struct snd_sof_dsp_ops sof_cnl_ops = {
-       /* probe and remove */
+       /* probe/remove/shutdown */
        .probe          = hda_dsp_probe,
        .remove         = hda_dsp_remove,
+       .shutdown       = hda_dsp_shutdown,
 
        /* Register IO */
        .write          = sof_io_write,
@@ -349,22 +350,6 @@ const struct sof_intel_dsp_desc cnl_chip_info = {
 };
 EXPORT_SYMBOL_NS(cnl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
 
-const struct sof_intel_dsp_desc ehl_chip_info = {
-       /* Elkhartlake */
-       .cores_num = 4,
-       .init_core_mask = 1,
-       .host_managed_cores_mask = BIT(0),
-       .ipc_req = CNL_DSP_REG_HIPCIDR,
-       .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
-       .ipc_ack = CNL_DSP_REG_HIPCIDA,
-       .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
-       .ipc_ctl = CNL_DSP_REG_HIPCCTL,
-       .rom_init_timeout       = 300,
-       .ssp_count = ICL_SSP_COUNT,
-       .ssp_base_offset = CNL_SSP_BASE_OFFSET,
-};
-EXPORT_SYMBOL_NS(ehl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
-
 const struct sof_intel_dsp_desc jsl_chip_info = {
        /* Jasperlake */
        .cores_num = 2,
index c3b757cf01a04fb83d7e4c1fe832dd0ac14e15dc..736a54beca234933ea13aae6bdb1f7c13e2d096a 100644 (file)
@@ -226,10 +226,17 @@ bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
 
        val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
 
-       is_enable = (val & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) &&
-                   (val & HDA_DSP_ADSPCS_SPA_MASK(core_mask)) &&
-                   !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
-                   !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+#define MASK_IS_EQUAL(v, m, field) ({  \
+       u32 _m = field(m);              \
+       ((v) & _m) == _m;               \
+})
+
+       is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
+               MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
+               !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
+               !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+
+#undef MASK_IS_EQUAL
 
        dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
                is_enable, core_mask);
@@ -885,6 +892,12 @@ int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
        return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
 }
 
+int hda_dsp_shutdown(struct snd_sof_dev *sdev)
+{
+       sdev->system_suspend_target = SOF_SUSPEND_S3;
+       return snd_sof_suspend(sdev->dev);
+}
+
 int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
 {
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
index 7c7579daee7f8d429703ef6464698d489bb6c079..ae80725b0e33f7e06131d67645322cc54c7bf9d7 100644 (file)
@@ -517,6 +517,7 @@ int hda_dsp_resume(struct snd_sof_dev *sdev);
 int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev);
 int hda_dsp_runtime_resume(struct snd_sof_dev *sdev);
 int hda_dsp_runtime_idle(struct snd_sof_dev *sdev);
+int hda_dsp_shutdown(struct snd_sof_dev *sdev);
 int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev);
 void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags);
 void hda_ipc_dump(struct snd_sof_dev *sdev);
index e9d5a0a585046a251dcf1d39c2cfccdb5df4c69a..88a74be8a0c141f2b7565fe1fad580cfe920bd3f 100644 (file)
@@ -26,9 +26,10 @@ static const struct snd_sof_debugfs_map icl_dsp_debugfs[] = {
 
 /* Icelake ops */
 const struct snd_sof_dsp_ops sof_icl_ops = {
-       /* probe and remove */
+       /* probe/remove/shutdown */
        .probe          = hda_dsp_probe,
        .remove         = hda_dsp_remove,
+       .shutdown       = hda_dsp_shutdown,
 
        /* Register IO */
        .write          = sof_io_write,
index 4856074711817096c0be3fa0cd0c195ef6ab73d9..38bc353f731307477850e4fc1c58df856a26ab88 100644 (file)
@@ -65,7 +65,7 @@ static const struct sof_dev_desc ehl_desc = {
        .default_tplg_path = "intel/sof-tplg",
        .default_fw_filename = "sof-ehl.ri",
        .nocodec_tplg_filename = "sof-ehl-nocodec.tplg",
-       .ops = &sof_cnl_ops,
+       .ops = &sof_tgl_ops,
 };
 
 static const struct sof_dev_desc adls_desc = {
index 419f05ba192086300f93eb710c795f589374fdbe..54ba1b88ba862eac518728d97dee61729eddbad0 100644 (file)
@@ -25,7 +25,7 @@ const struct snd_sof_dsp_ops sof_tgl_ops = {
        /* probe/remove/shutdown */
        .probe          = hda_dsp_probe,
        .remove         = hda_dsp_remove,
-       .shutdown       = hda_dsp_remove,
+       .shutdown       = hda_dsp_shutdown,
 
        /* Register IO */
        .write          = sof_io_write,
@@ -156,6 +156,22 @@ const struct sof_intel_dsp_desc tglh_chip_info = {
 };
 EXPORT_SYMBOL_NS(tglh_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
 
+const struct sof_intel_dsp_desc ehl_chip_info = {
+       /* Elkhartlake */
+       .cores_num = 4,
+       .init_core_mask = 1,
+       .host_managed_cores_mask = BIT(0),
+       .ipc_req = CNL_DSP_REG_HIPCIDR,
+       .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+       .ipc_ack = CNL_DSP_REG_HIPCIDA,
+       .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+       .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+       .rom_init_timeout       = 300,
+       .ssp_count = ICL_SSP_COUNT,
+       .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL_NS(ehl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
 const struct sof_intel_dsp_desc adls_chip_info = {
        /* Alderlake-S */
        .cores_num = 2,
index 6c13cc84b3fb553f2ab98c2bf4fd17951f2704f7..2173991c13db1b10c3273044cde67ce946156cd1 100644 (file)
@@ -1364,6 +1364,7 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "sun4i-codec";
        card->dapm_widgets      = sun4i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun4i_codec_card_dapm_widgets);
@@ -1396,6 +1397,7 @@ static struct snd_soc_card *sun6i_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "A31 Audio Codec";
        card->dapm_widgets      = sun6i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1449,6 +1451,7 @@ static struct snd_soc_card *sun8i_a23_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "A23 Audio Codec";
        card->dapm_widgets      = sun6i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1487,6 +1490,7 @@ static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "H3 Audio Codec";
        card->dapm_widgets      = sun6i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1525,6 +1529,7 @@ static struct snd_soc_card *sun8i_v3s_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "V3s Audio Codec";
        card->dapm_widgets      = sun6i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
index 4d471d9511a54db552c63951651539b7d0fdd3a2..6fffe56827134385ee70fe7a1718ef46fefc7084 100644 (file)
@@ -39,9 +39,6 @@
  * sequential memory pages only.
  */
 
-/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
-#define ia64_mf()       asm volatile ("mf" ::: "memory")
-
 #define mb()           ia64_mf()
 #define rmb()          mb()
 #define wmb()          mb()
index 546d6ecf0a35b52a58196f07676058e59c301b62..45029354e0a8b42f94e379ecafdf9ba7d50f24bf 100644 (file)
 #define MSR_IA32_APICBASE_ENABLE       (1<<11)
 #define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
 
-#define MSR_IA32_TSCDEADLINE           0x000006e0
-
 #define MSR_IA32_UCODE_WRITE           0x00000079
 #define MSR_IA32_UCODE_REV             0x0000008b
 
diff --git a/tools/arch/x86/kcpuid/Makefile b/tools/arch/x86/kcpuid/Makefile
new file mode 100644 (file)
index 0000000..87b554f
--- /dev/null
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for x86/kcpuid tool
+
+kcpuid : kcpuid.c
+
+CFLAGS = -Wextra
+
+BINDIR ?= /usr/sbin
+
+HWDATADIR ?= /usr/share/misc/
+
+override CFLAGS += -O2 -Wall -I../../../include
+
+%: %.c
+       $(CC) $(CFLAGS) -o $@ $< $(LDFLAGS)
+
+.PHONY : clean
+clean :
+       @rm -f kcpuid
+
+install : kcpuid
+       install -d  $(DESTDIR)$(BINDIR)
+       install -m 755 -p kcpuid $(DESTDIR)$(BINDIR)/kcpuid
+       install -m 444 -p cpuid.csv $(HWDATADIR)/cpuid.csv
diff --git a/tools/arch/x86/kcpuid/cpuid.csv b/tools/arch/x86/kcpuid/cpuid.csv
new file mode 100644 (file)
index 0000000..4f1c4b0
--- /dev/null
@@ -0,0 +1,400 @@
+# The basic row format is:
+# LEAF, SUBLEAF, register_name, bits, short_name, long_description
+
+# Leaf 00H
+         0,    0,  EAX,   31:0, max_basic_leafs, Max input value for supported subleafs
+
+# Leaf 01H
+         1,    0,  EAX,    3:0, stepping, Stepping ID
+         1,    0,  EAX,    7:4, model, Model
+         1,    0,  EAX,   11:8, family, Family ID
+         1,    0,  EAX,  13:12, processor, Processor Type
+         1,    0,  EAX,  19:16, model_ext, Extended Model ID
+         1,    0,  EAX,  27:20, family_ext, Extended Family ID
+
+         1,    0,  EBX,    7:0, brand, Brand Index
+         1,    0,  EBX,   15:8, clflush_size, CLFLUSH line size (value * 8) in bytes
+         1,    0,  EBX,  23:16, max_cpu_id, Maxim number of addressable logic cpu in this package
+         1,    0,  EBX,  31:24, apic_id, Initial APIC ID
+
+         1,    0,  ECX,      0, sse3, Streaming SIMD Extensions 3(SSE3)
+         1,    0,  ECX,      1, pclmulqdq, PCLMULQDQ instruction supported
+         1,    0,  ECX,      2, dtes64, DS area uses 64-bit layout
+         1,    0,  ECX,      3, mwait, MONITOR/MWAIT supported
+         1,    0,  ECX,      4, ds_cpl, CPL Qualified Debug Store which allows for branch message storage qualified by CPL
+         1,    0,  ECX,      5, vmx, Virtual Machine Extensions supported
+         1,    0,  ECX,      6, smx, Safer Mode Extension supported
+         1,    0,  ECX,      7, eist, Enhanced Intel SpeedStep Technology
+         1,    0,  ECX,      8, tm2, Thermal Monitor 2
+         1,    0,  ECX,      9, ssse3, Supplemental Streaming SIMD Extensions 3 (SSSE3)
+         1,    0,  ECX,     10, l1_ctx_id, L1 data cache could be set to either adaptive mode or shared mode (check IA32_MISC_ENABLE bit 24 definition)
+         1,    0,  ECX,     11, sdbg, IA32_DEBUG_INTERFACE MSR for silicon debug supported
+         1,    0,  ECX,     12, fma, FMA extensions using YMM state supported
+         1,    0,  ECX,     13, cmpxchg16b, 'CMPXCHG16B - Compare and Exchange Bytes' supported
+         1,    0,  ECX,     14, xtpr_update, xTPR Update Control supported
+         1,    0,  ECX,     15, pdcm, Perfmon and Debug Capability present
+         1,    0,  ECX,     17, pcid, Process-Context Identifiers feature present
+         1,    0,  ECX,     18, dca, Prefetching data from a memory mapped device supported
+         1,    0,  ECX,     19, sse4_1, SSE4.1 feature present
+         1,    0,  ECX,     20, sse4_2, SSE4.2 feature present
+         1,    0,  ECX,     21, x2apic, x2APIC supported
+         1,    0,  ECX,     22, movbe, MOVBE instruction supported
+         1,    0,  ECX,     23, popcnt, POPCNT instruction supported
+         1,    0,  ECX,     24, tsc_deadline_timer, LAPIC supports one-shot operation using a TSC deadline value
+         1,    0,  ECX,     25, aesni, AESNI instruction supported
+         1,    0,  ECX,     26, xsave, XSAVE/XRSTOR processor extended states (XSETBV/XGETBV/XCR0)
+         1,    0,  ECX,     27, osxsave, OS has set CR4.OSXSAVE bit to enable XSETBV/XGETBV/XCR0
+         1,    0,  ECX,     28, avx, AVX instruction supported
+         1,    0,  ECX,     29, f16c, 16-bit floating-point conversion instruction supported
+         1,    0,  ECX,     30, rdrand, RDRAND instruction supported
+
+         1,    0,  EDX,      0, fpu, x87 FPU on chip
+         1,    0,  EDX,      1, vme, Virtual-8086 Mode Enhancement
+         1,    0,  EDX,      2, de, Debugging Extensions
+         1,    0,  EDX,      3, pse, Page Size Extensions
+         1,    0,  EDX,      4, tsc, Time Stamp Counter
+         1,    0,  EDX,      5, msr, RDMSR and WRMSR Support
+         1,    0,  EDX,      6, pae, Physical Address Extensions
+         1,    0,  EDX,      7, mce, Machine Check Exception
+         1,    0,  EDX,      8, cx8, CMPXCHG8B instr
+         1,    0,  EDX,      9, apic, APIC on Chip
+         1,    0,  EDX,     11, sep, SYSENTER and SYSEXIT instrs
+         1,    0,  EDX,     12, mtrr, Memory Type Range Registers
+         1,    0,  EDX,     13, pge, Page Global Bit
+         1,    0,  EDX,     14, mca, Machine Check Architecture
+         1,    0,  EDX,     15, cmov, Conditional Move Instrs
+         1,    0,  EDX,     16, pat, Page Attribute Table
+         1,    0,  EDX,     17, pse36, 36-Bit Page Size Extension
+         1,    0,  EDX,     18, psn, Processor Serial Number
+         1,    0,  EDX,     19, clflush, CLFLUSH instr
+#         1,    0,  EDX,     20,
+         1,    0,  EDX,     21, ds, Debug Store
+         1,    0,  EDX,     22, acpi, Thermal Monitor and Software Controlled Clock Facilities
+         1,    0,  EDX,     23, mmx, Intel MMX Technology
+         1,    0,  EDX,     24, fxsr, XSAVE and FXRSTOR Instrs
+         1,    0,  EDX,     25, sse, SSE
+         1,    0,  EDX,     26, sse2, SSE2
+         1,    0,  EDX,     27, ss, Self Snoop
+         1,    0,  EDX,     28, hit, Max APIC IDs
+         1,    0,  EDX,     29, tm, Thermal Monitor
+#         1,    0,  EDX,     30,
+         1,    0,  EDX,     31, pbe, Pending Break Enable
+
+# Leaf 02H
+# cache and TLB descriptor info
+
+# Leaf 03H
+# Precessor Serial Number, introduced on Pentium III, not valid for
+# latest models
+
+# Leaf 04H
+# thread/core and cache topology
+         4,    0,  EAX,    4:0, cache_type, Cache type like instr/data or unified
+         4,    0,  EAX,    7:5, cache_level, Cache Level (starts at 1)
+         4,    0,  EAX,      8, cache_self_init, Cache Self Initialization
+         4,    0,  EAX,      9, fully_associate, Fully Associative cache
+#         4,    0,  EAX,  13:10, resvd, resvd
+         4,    0,  EAX,  25:14, max_logical_id, Max number of addressable IDs for logical processors sharing the cache
+         4,    0,  EAX,  31:26, max_phy_id, Max number of addressable IDs for processors in phy package
+
+         4,    0,  EBX,   11:0, cache_linesize, Size of a cache line in bytes
+         4,    0,  EBX,  21:12, cache_partition, Physical Line partitions
+         4,    0,  EBX,  31:22, cache_ways, Ways of associativity
+         4,    0,  ECX,   31:0, cache_sets, Number of Sets - 1
+         4,    0,  EDX,      0, c_wbinvd, 1 means WBINVD/INVD is not ganranteed to act upon lower level caches of non-originating threads sharing this cache
+         4,    0,  EDX,      1, c_incl, Whether cache is inclusive of lower cache level
+         4,    0,  EDX,      2, c_comp_index, Complex Cache Indexing
+
+# Leaf 05H
+# MONITOR/MWAIT
+        5,    0,  EAX,   15:0, min_mon_size, Smallest monitor line size in bytes
+        5,    0,  EBX,   15:0, max_mon_size, Largest monitor line size in bytes
+        5,    0,  ECX,      0, mwait_ext, Enum of Monitor-Mwait extensions supported
+        5,    0,  ECX,      1, mwait_irq_break, Largest monitor line size in bytes
+        5,    0,  EDX,    3:0, c0_sub_stats, Number of C0* sub C-states supported using MWAIT
+        5,    0,  EDX,    7:4, c1_sub_stats, Number of C1* sub C-states supported using MWAIT
+        5,    0,  EDX,   11:8, c2_sub_stats, Number of C2* sub C-states supported using MWAIT
+        5,    0,  EDX,  15:12, c3_sub_stats, Number of C3* sub C-states supported using MWAIT
+        5,    0,  EDX,  19:16, c4_sub_stats, Number of C4* sub C-states supported using MWAIT
+        5,    0,  EDX,  23:20, c5_sub_stats, Number of C5* sub C-states supported using MWAIT
+        5,    0,  EDX,  27:24, c6_sub_stats, Number of C6* sub C-states supported using MWAIT
+        5,    0,  EDX,  31:28, c7_sub_stats, Number of C7* sub C-states supported using MWAIT
+
+# Leaf 06H
+# Thermal & Power Management
+
+        6,    0,  EAX,      0, dig_temp, Digital temperature sensor supported
+        6,    0,  EAX,      1, turbo, Intel Turbo Boost
+        6,    0,  EAX,      2, arat, Always running APIC timer
+#       6,    0,  EAX,      3, resv, Reserved
+        6,    0,  EAX,      4, pln, Power limit notifications supported
+        6,    0,  EAX,      5, ecmd, Clock modulation duty cycle extension supported
+        6,    0,  EAX,      6, ptm, Package thermal management supported
+        6,    0,  EAX,      7, hwp, HWP base register
+        6,    0,  EAX,      8, hwp_notify, HWP notification
+        6,    0,  EAX,      9, hwp_act_window, HWP activity window
+        6,    0,  EAX,     10, hwp_energy, HWP energy performance preference
+        6,    0,  EAX,     11, hwp_pkg_req, HWP package level request
+#       6,    0,  EAX,     12, resv, Reserved
+        6,    0,  EAX,     13, hdc, HDC base registers supported
+        6,    0,  EAX,     14, turbo3, Turbo Boost Max 3.0
+        6,    0,  EAX,     15, hwp_cap, Highest Performance change supported
+        6,    0,  EAX,     16, hwp_peci, HWP PECI override is supported
+        6,    0,  EAX,     17, hwp_flex, Flexible HWP is supported
+        6,    0,  EAX,     18, hwp_fast, Fast access mode for the IA32_HWP_REQUEST MSR is supported
+#       6,    0,  EAX,     19, resv, Reserved
+        6,    0,  EAX,     20, hwp_ignr, Ignoring Idle Logical Processor HWP request is supported
+
+        6,    0,  EBX,    3:0, therm_irq_thresh, Number of Interrupt Thresholds in Digital Thermal Sensor
+        6,    0,  ECX,      0, aperfmperf, Presence of IA32_MPERF and IA32_APERF
+        6,    0,  ECX,      3, energ_bias, Performance-energy bias preference supported
+
+# Leaf 07H
+#      ECX == 0
+# AVX512 refers to https://en.wikipedia.org/wiki/AVX-512
+# XXX: Do we really need to enumerate each and every AVX512 sub features
+
+        7,    0,  EBX,      0, fsgsbase, RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE supported
+        7,    0,  EBX,      1, tsc_adjust, TSC_ADJUST MSR supported
+        7,    0,  EBX,      2, sgx, Software Guard Extensions
+        7,    0,  EBX,      3, bmi1, BMI1
+        7,    0,  EBX,      4, hle, Hardware Lock Elision
+        7,    0,  EBX,      5, avx2, AVX2
+#       7,    0,  EBX,      6, fdp_excp_only, x87 FPU Data Pointer updated only on x87 exceptions
+        7,    0,  EBX,      7, smep, Supervisor-Mode Execution Prevention
+        7,    0,  EBX,      8, bmi2, BMI2
+        7,    0,  EBX,      9, rep_movsb, Enhanced REP MOVSB/STOSB
+        7,    0,  EBX,     10, invpcid, INVPCID instruction
+        7,    0,  EBX,     11, rtm, Restricted Transactional Memory
+        7,    0,  EBX,     12, rdt_m, Intel RDT Monitoring capability
+        7,    0,  EBX,     13, depc_fpu_cs_ds, Deprecates FPU CS and FPU DS
+        7,    0,  EBX,     14, mpx, Memory Protection Extensions
+        7,    0,  EBX,     15, rdt_a, Intel RDT Allocation capability
+        7,    0,  EBX,     16, avx512f, AVX512 Foundation instr
+        7,    0,  EBX,     17, avx512dq, AVX512 Double and Quadword AVX512 instr
+        7,    0,  EBX,     18, rdseed, RDSEED instr
+        7,    0,  EBX,     19, adx, ADX instr
+        7,    0,  EBX,     20, smap, Supervisor Mode Access Prevention
+        7,    0,  EBX,     21, avx512ifma, AVX512 Integer Fused Multiply Add
+#       7,    0,  EBX,     22, resvd, resvd
+        7,    0,  EBX,     23, clflushopt, CLFLUSHOPT instr
+        7,    0,  EBX,     24, clwb, CLWB instr
+        7,    0,  EBX,     25, intel_pt, Intel Processor Trace instr
+        7,    0,  EBX,     26, avx512pf, Prefetch
+        7,    0,  EBX,     27, avx512er, AVX512 Exponent Reciproca instr
+        7,    0,  EBX,     28, avx512cd, AVX512 Conflict Detection instr
+        7,    0,  EBX,     29, sha, Intel Secure Hash Algorithm Extensions instr
+        7,    0,  EBX,     26, avx512bw, AVX512 Byte & Word instr
+        7,    0,  EBX,     28, avx512vl, AVX512 Vector Length Extentions (VL)
+        7,    0,  ECX,      0, prefetchwt1, X
+        7,    0,  ECX,      1, avx512vbmi, AVX512 Vector Byte Manipulation Instructions
+        7,    0,  ECX,      2, umip, User-mode Instruction Prevention
+
+        7,    0,  ECX,      3, pku, Protection Keys for User-mode pages
+        7,    0,  ECX,      4, ospke, CR4 PKE set to enable protection keys
+#       7,    0,  ECX,   16:5, resvd, resvd
+        7,    0,  ECX,  21:17, mawau, The value of MAWAU used by the BNDLDX and BNDSTX instructions in 64-bit mode
+        7,    0,  ECX,     22, rdpid, RDPID and IA32_TSC_AUX
+#       7,    0,  ECX,  29:23, resvd, resvd
+        7,    0,  ECX,     30, sgx_lc, SGX Launch Configuration
+#       7,    0,  ECX,     31, resvd, resvd
+
+# Leaf 08H
+#
+
+
+# Leaf 09H
+# Direct Cache Access (DCA) information
+        9,    0,  ECX,   31:0, dca_cap, The value of IA32_PLATFORM_DCA_CAP
+
+# Leaf 0AH
+# Architectural Performance Monitoring
+#
+# Do we really need to print out the PMU related stuff?
+# Does normal user really care about it?
+#
+       0xA,    0,  EAX,    7:0, pmu_ver, Performance Monitoring Unit version
+       0xA,    0,  EAX,   15:8, pmu_gp_cnt_num, Numer of general-purose PMU counters per logical CPU
+       0xA,    0,  EAX,  23:16, pmu_cnt_bits, Bit wideth of PMU counter
+       0xA,    0,  EAX,  31:24, pmu_ebx_bits, Length of EBX bit vector to enumerate PMU events
+
+       0xA,    0,  EBX,      0, pmu_no_core_cycle_evt, Core cycle event not available
+       0xA,    0,  EBX,      1, pmu_no_instr_ret_evt, Instruction retired event not available
+       0xA,    0,  EBX,      2, pmu_no_ref_cycle_evt, Reference cycles event not available
+       0xA,    0,  EBX,      3, pmu_no_llc_ref_evt, Last-level cache reference event not available
+       0xA,    0,  EBX,      4, pmu_no_llc_mis_evt, Last-level cache misses event not available
+       0xA,    0,  EBX,      5, pmu_no_br_instr_ret_evt, Branch instruction retired event not available
+       0xA,    0,  EBX,      6, pmu_no_br_mispredict_evt, Branch mispredict retired event not available
+
+       0xA,    0,  ECX,    4:0, pmu_fixed_cnt_num, Performance Monitoring Unit version
+       0xA,    0,  ECX,   12:5, pmu_fixed_cnt_bits, Numer of PMU counters per logical CPU
+
+# Leaf 0BH
+# Extended Topology Enumeration Leaf
+#
+
+       0xB,    0,  EAX,    4:0, id_shift, Number of bits to shift right on x2APIC ID to get a unique topology ID of the next level type
+       0xB,    0,  EBX,   15:0, cpu_nr, Number of logical processors at this level type
+       0xB,    0,  ECX,   15:8, lvl_type, 0-Invalid 1-SMT 2-Core
+       0xB,    0,  EDX,   31:0, x2apic_id, x2APIC ID the current logical processor
+
+
+# Leaf 0DH
+# Processor Extended State
+
+       0xD,    0,  EAX,      0, x87, X87 state
+       0xD,    0,  EAX,      1, sse, SSE state
+       0xD,    0,  EAX,      2, avx, AVX state
+       0xD,    0,  EAX,    4:3, mpx, MPX state
+       0xD,    0,  EAX,    7:5, avx512, AVX-512 state
+       0xD,    0,  EAX,      9, pkru, PKRU state
+
+       0xD,    0,  EBX,   31:0, max_sz_xcr0, Maximum size (bytes) required by enabled features in XCR0
+       0xD,    0,  ECX,   31:0, max_sz_xsave, Maximum size (bytes) of the XSAVE/XRSTOR save area
+
+       0xD,    1,  EAX,      0, xsaveopt, XSAVEOPT available
+       0xD,    1,  EAX,      1, xsavec, XSAVEC and compacted form supported
+       0xD,    1,  EAX,      2, xgetbv, XGETBV supported
+       0xD,    1,  EAX,      3, xsaves, XSAVES/XRSTORS and IA32_XSS supported
+
+       0xD,    1,  EBX,   31:0, max_sz_xcr0, Maximum size (bytes) required by enabled features in XCR0
+       0xD,    1,  ECX,      8, pt, PT state
+       0xD,    1,  ECX,      11, cet_usr, CET user state
+       0xD,    1,  ECX,      12, cet_supv, CET supervisor state
+       0xD,    1,  ECX,      13, hdc, HDC state
+       0xD,    1,  ECX,      16, hwp, HWP state
+
+# Leaf 0FH
+# Intel RDT Monitoring
+
+       0xF,    0,  EBX,   31:0, rmid_range, Maximum range (zero-based) of RMID within this physical processor of all types
+       0xF,    0,  EDX,      1, l3c_rdt_mon, L3 Cache RDT Monitoring supported
+
+       0xF,    1,  ECX,   31:0, rmid_range, Maximum range (zero-based) of RMID of this types
+       0xF,    1,  EDX,      0, l3c_ocp_mon, L3 Cache occupancy Monitoring supported
+       0xF,    1,  EDX,      1, l3c_tbw_mon, L3 Cache Total Bandwidth Monitoring supported
+       0xF,    1,  EDX,      2, l3c_lbw_mon, L3 Cache Local Bandwidth Monitoring supported
+
+# Leaf 10H
+# Intel RDT Allocation
+
+      0x10,    0,  EBX,      1, l3c_rdt_alloc, L3 Cache Allocation supported
+      0x10,    0,  EBX,      2, l2c_rdt_alloc, L2 Cache Allocation supported
+      0x10,    0,  EBX,      3, mem_bw_alloc, Memory Bandwidth Allocation supported
+
+
+# Leaf 12H
+# SGX Capability
+#
+# Some detailed SGX features not added yet
+
+      0x12,    0,  EAX,      0, sgx1, L3 Cache Allocation supported
+      0x12,    1,  EAX,      0, sgx2, L3 Cache Allocation supported
+
+
+# Leaf 14H
+# Intel Processor Tracer
+#
+
+# Leaf 15H
+# Time Stamp Counter and Nominal Core Crystal Clock Information
+
+      0x15,    0,  EAX,   31:0, tsc_denominator, The denominator of the TSC/”core crystal clock” ratio
+      0x15,    0,  EBX,   31:0, tsc_numerator, The numerator of the TSC/”core crystal clock” ratio
+      0x15,    0,  ECX,   31:0, nom_freq, Nominal frequency of the core crystal clock in Hz
+
+# Leaf 16H
+# Processor Frequency Information
+
+      0x16,    0,  EAX,   15:0, cpu_base_freq, Processor Base Frequency in MHz
+      0x16,    0,  EBX,   15:0, cpu_max_freq, Maximum Frequency in MHz
+      0x16,    0,  ECX,   15:0, bus_freq, Bus (Reference) Frequency in MHz
+
+# Leaf 17H
+# System-On-Chip Vendor Attribute
+
+      0x17,    0,  EAX,   31:0, max_socid, Maximum input value of supported sub-leaf
+      0x17,    0,  EBX,   15:0, soc_vid, SOC Vendor ID
+      0x17,    0,  EBX,     16, std_vid, SOC Vendor ID is assigned via an industry standard scheme
+      0x17,    0,  ECX,   31:0, soc_pid, SOC Project ID assigned by vendor
+      0x17,    0,  EDX,   31:0, soc_sid, SOC Stepping ID
+
+# Leaf 18H
+# Deterministic Address Translation Parameters
+
+
+# Leaf 19H
+# Key Locker Leaf
+
+
+# Leaf 1AH
+# Hybrid Information
+
+      0x1A,    0,  EAX,  31:24, core_type, 20H-Intel_Atom 40H-Intel_Core
+
+
+# Leaf 1FH
+# V2 Extended Topology - A preferred superset to leaf 0BH
+
+
+# According to SDM
+# 40000000H - 4FFFFFFFH is invalid range
+
+
+# Leaf 80000001H
+# Extended Processor Signature and Feature Bits
+
+0x80000001,    0,  ECX,      0, lahf_lm, LAHF/SAHF available in 64-bit mode
+0x80000001,    0,  ECX,      5, lzcnt, LZCNT
+0x80000001,    0,  ECX,      8, prefetchw, PREFETCHW
+
+0x80000001,    0,  EDX,     11, sysret, SYSCALL/SYSRET supported
+0x80000001,    0,  EDX,     20, exec_dis, Execute Disable Bit available
+0x80000001,    0,  EDX,     26, 1gb_page, 1GB page supported
+0x80000001,    0,  EDX,     27, rdtscp, RDTSCP and IA32_TSC_AUX are available
+#0x80000001,    0,  EDX,     29, 64b, 64b Architecture supported
+
+# Leaf 80000002H/80000003H/80000004H
+# Processor Brand String
+
+# Leaf 80000005H
+# Reserved
+
+# Leaf 80000006H
+# Extended L2 Cache Features
+
+0x80000006,    0,  ECX,    7:0, clsize, Cache Line size in bytes
+0x80000006,    0,  ECX,  15:12, l2c_assoc, L2 Associativity
+0x80000006,    0,  ECX,  31:16, csize, Cache size in 1K units
+
+
+# Leaf 80000007H
+
+0x80000007,    0,  EDX,      8, nonstop_tsc, Invariant TSC available
+
+
+# Leaf 80000008H
+
+0x80000008,    0,  EAX,    7:0, phy_adr_bits, Physical Address Bits
+0x80000008,    0,  EAX,   15:8, lnr_adr_bits, Linear Address Bits
+0x80000007,    0,  EBX,      9, wbnoinvd, WBNOINVD
+
+# 0x8000001E
+# EAX: Extended APIC ID
+0x8000001E,    0, EAX,   31:0, extended_apic_id, Extended APIC ID
+# EBX: Core Identifiers
+0x8000001E,    0, EBX,    7:0, core_id, Identifies the logical core ID
+0x8000001E,    0, EBX,   15:8, threads_per_core, The number of threads per core is threads_per_core + 1
+# ECX: Node Identifiers
+0x8000001E,    0, ECX,    7:0, node_id, Node ID
+0x8000001E,    0, ECX,   10:8, nodes_per_processor, Nodes per processor { 0: 1 node, else reserved }
+
+# 8000001F: AMD Secure Encryption
+0x8000001F,    0, EAX,      0, sme,    Secure Memory Encryption
+0x8000001F,    0, EAX,      1, sev,    Secure Encrypted Virtualization
+0x8000001F,    0, EAX,      2, vmpgflush, VM Page Flush MSR
+0x8000001F,    0, EAX,      3, seves, SEV Encrypted State
+0x8000001F,    0, EBX,    5:0, c-bit, Page table bit number used to enable memory encryption
+0x8000001F,    0, EBX,   11:6, mem_encrypt_physaddr_width, Reduction of physical address space in bits with SME enabled
+0x8000001F,    0, ECX,   31:0, num_encrypted_guests, Maximum ASID value that may be used for an SEV-enabled guest
+0x8000001F,    0, EDX,   31:0, minimum_sev_asid, Minimum ASID value that must be used for an SEV-enabled, SEV-ES-disabled guest
diff --git a/tools/arch/x86/kcpuid/kcpuid.c b/tools/arch/x86/kcpuid/kcpuid.c
new file mode 100644 (file)
index 0000000..dae7551
--- /dev/null
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+char *def_csv = "/usr/share/misc/cpuid.csv";
+char *user_csv;
+
+
+/* Cover both single-bit flag and multiple-bits fields */
+struct bits_desc {
+       /* start and end bits */
+       int start, end;
+       /* 0 or 1 for 1-bit flag */
+       int value;
+       char simp[32];
+       char detail[256];
+};
+
+/* descriptor info for eax/ebx/ecx/edx */
+struct reg_desc {
+       /* number of valid entries */
+       int nr;
+       struct bits_desc descs[32];
+};
+
+enum {
+       R_EAX = 0,
+       R_EBX,
+       R_ECX,
+       R_EDX,
+       NR_REGS
+};
+
+struct subleaf {
+       u32 index;
+       u32 sub;
+       u32 eax, ebx, ecx, edx;
+       struct reg_desc info[NR_REGS];
+};
+
+/* Represent one leaf (basic or extended) */
+struct cpuid_func {
+       /*
+        * Array of subleafs for this func, if there is no subleafs
+        * then the leafs[0] is the main leaf
+        */
+       struct subleaf *leafs;
+       int nr;
+};
+
+struct cpuid_range {
+       /* array of main leafs */
+       struct cpuid_func *funcs;
+       /* number of valid leafs */
+       int nr;
+       bool is_ext;
+};
+
+/*
+ * basic:  basic functions range: [0... ]
+ * ext:    extended functions range: [0x80000000... ]
+ */
+struct cpuid_range *leafs_basic, *leafs_ext;
+
+static int num_leafs;
+static bool is_amd;
+static bool show_details;
+static bool show_raw;
+static bool show_flags_only = true;
+static u32 user_index = 0xFFFFFFFF;
+static u32 user_sub = 0xFFFFFFFF;
+static int flines;
+
+static inline void cpuid(u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
+{
+       /* ecx is often an input as well as an output. */
+       asm volatile("cpuid"
+           : "=a" (*eax),
+             "=b" (*ebx),
+             "=c" (*ecx),
+             "=d" (*edx)
+           : "0" (*eax), "2" (*ecx));
+}
+
+static inline bool has_subleafs(u32 f)
+{
+       if (f == 0x7 || f == 0xd)
+               return true;
+
+       if (is_amd) {
+               if (f == 0x8000001d)
+                       return true;
+               return false;
+       }
+
+       switch (f) {
+       case 0x4:
+       case 0xb:
+       case 0xf:
+       case 0x10:
+       case 0x14:
+       case 0x18:
+       case 0x1f:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static void leaf_print_raw(struct subleaf *leaf)
+{
+       if (has_subleafs(leaf->index)) {
+               if (leaf->sub == 0)
+                       printf("0x%08x: subleafs:\n", leaf->index);
+
+               printf(" %2d: EAX=0x%08x, EBX=0x%08x, ECX=0x%08x, EDX=0x%08x\n",
+                       leaf->sub, leaf->eax, leaf->ebx, leaf->ecx, leaf->edx);
+       } else {
+               printf("0x%08x: EAX=0x%08x, EBX=0x%08x, ECX=0x%08x, EDX=0x%08x\n",
+                       leaf->index, leaf->eax, leaf->ebx, leaf->ecx, leaf->edx);
+       }
+}
+
+/* Return true is the input eax/ebx/ecx/edx are all zero */
+static bool cpuid_store(struct cpuid_range *range, u32 f, int subleaf,
+                       u32 a, u32 b, u32 c, u32 d)
+{
+       struct cpuid_func *func;
+       struct subleaf *leaf;
+       int s = 0;
+
+       if (a == 0 && b == 0 && c == 0 && d == 0)
+               return true;
+
+       /*
+        * Cut off vendor-prefix from CPUID function as we're using it as an
+        * index into ->funcs.
+        */
+       func = &range->funcs[f & 0xffff];
+
+       if (!func->leafs) {
+               func->leafs = malloc(sizeof(struct subleaf));
+               if (!func->leafs)
+                       perror("malloc func leaf");
+
+               func->nr = 1;
+       } else {
+               s = func->nr;
+               func->leafs = realloc(func->leafs, (s + 1) * sizeof(*leaf));
+               if (!func->leafs)
+                       perror("realloc f->leafs");
+
+               func->nr++;
+       }
+
+       leaf = &func->leafs[s];
+
+       leaf->index = f;
+       leaf->sub = subleaf;
+       leaf->eax = a;
+       leaf->ebx = b;
+       leaf->ecx = c;
+       leaf->edx = d;
+
+       return false;
+}
+
+/*
+ * Print the raw register values of every stored leaf/subleaf in one
+ * range (basic or extended), via leaf_print_raw().
+ */
+static void raw_dump_range(struct cpuid_range *range)
+{
+       u32 f;
+       int i;
+
+       printf("%s Leafs :\n", range->is_ext ? "Extended" : "Basic");
+       printf("================\n");
+
+       for (f = 0; (int)f < range->nr; f++) {
+               struct cpuid_func *func = &range->funcs[f];
+               u32 index = f;
+
+               /*
+                * NOTE(review): 'index' is computed (array slot + 0x80000000
+                * for the extended range) but never used afterwards.
+                */
+               if (range->is_ext)
+                       index += 0x80000000;
+
+               /* Skip leaf without valid items */
+               if (!func->nr)
+                       continue;
+
+               /* First item is the main leaf, followed by all subleafs */
+               for (i = 0; i < func->nr; i++)
+                       leaf_print_raw(&func->leafs[i]);
+       }
+}
+
+#define MAX_SUBLEAF_NUM                32
+/*
+ * Enumerate all leafs of one CPUID range (basic when input_eax == 0,
+ * extended when input_eax == 0x80000000), probe subleafs where the leaf
+ * is known to have them, and store every non-zero result.
+ *
+ * Returns a freshly allocated cpuid_range; exits on allocation failure.
+ */
+struct cpuid_range *setup_cpuid_range(u32 input_eax)
+{
+       u32 max_func, idx_func;
+       int subleaf;
+       struct cpuid_range *range;
+       u32 eax, ebx, ecx, edx;
+       u32 f = input_eax;
+       int max_subleaf;
+       bool allzero;
+
+       /* Leaf 0 / 0x80000000 reports the range's maximum leaf in EAX */
+       eax = input_eax;
+       ebx = ecx = edx = 0;
+
+       cpuid(&eax, &ebx, &ecx, &edx);
+       max_func = eax;
+       idx_func = (max_func & 0xffff) + 1;
+
+       range = malloc(sizeof(struct cpuid_range));
+       if (!range) {
+               /* Cannot continue: range is dereferenced just below */
+               perror("malloc range");
+               exit(1);
+       }
+
+       if (input_eax & 0x80000000)
+               range->is_ext = true;
+       else
+               range->is_ext = false;
+
+       range->funcs = malloc(sizeof(struct cpuid_func) * idx_func);
+       if (!range->funcs) {
+               perror("malloc range->funcs");
+               exit(1);
+       }
+
+       range->nr = idx_func;
+       memset(range->funcs, 0, sizeof(struct cpuid_func) * idx_func);
+
+       for (; f <= max_func; f++) {
+               eax = f;
+               subleaf = ecx = 0;
+
+               cpuid(&eax, &ebx, &ecx, &edx);
+               allzero = cpuid_store(range, f, subleaf, eax, ebx, ecx, edx);
+               if (allzero)
+                       continue;
+               num_leafs++;
+
+               if (!has_subleafs(f))
+                       continue;
+
+               max_subleaf = MAX_SUBLEAF_NUM;
+
+               /*
+                * Some can provide the exact number of subleafs,
+                * others have to be tried (0xf)
+                */
+               if (f == 0x7 || f == 0x14 || f == 0x17 || f == 0x18)
+                       max_subleaf = (eax & 0xff) + 1;
+
+               if (f == 0xb)
+                       max_subleaf = 2;
+
+               /* Subleaf 0 was stored above; probe the remaining ones */
+               for (subleaf = 1; subleaf < max_subleaf; subleaf++) {
+                       eax = f;
+                       ecx = subleaf;
+
+                       cpuid(&eax, &ebx, &ecx, &edx);
+                       allzero = cpuid_store(range, f, subleaf,
+                                               eax, ebx, ecx, edx);
+                       if (allzero)
+                               continue;
+                       num_leafs++;
+               }
+
+       }
+
+       return range;
+}
+
+/*
+ * The basic row format for cpuid.csv  is
+ *     LEAF,SUBLEAF,register_name,bits,short name,long description
+ *
+ * like:
+ *     0,    0,  EAX,   31:0, max_basic_leafs,  Max input value for supported subleafs
+ *     1,    0,  ECX,      0, sse3,  Streaming SIMD Extensions 3(SSE3)
+ */
+static int parse_line(char *line)
+{
+       char *str;
+       int i;
+       struct cpuid_range *range;
+       struct cpuid_func *func;
+       struct subleaf *leaf;
+       u32 index;
+       u32 sub;
+       char buffer[512];
+       char *buf;
+       /*
+        * Tokens:
+        *  1. leaf
+        *  2. subleaf
+        *  3. register
+        *  4. bits
+        *  5. short name
+        *  6. long detail
+        */
+       char *tokens[6];
+       struct reg_desc *reg;
+       struct bits_desc *bdesc;
+       int reg_index;
+       char *start, *end;
+
+       /* Skip comments and NULL line */
+       if (line[0] == '#' || line[0] == '\n')
+               return 0;
+
+       /* Work on a bounded, NUL-terminated copy: strtok() mutates it */
+       strncpy(buffer, line, 511);
+       buffer[511] = 0;
+       str = buffer;
+       for (i = 0; i < 5; i++) {
+               tokens[i] = strtok(str, ",");
+               if (!tokens[i])
+                       goto err_exit;
+               str = NULL;
+       }
+       /* Last token runs to end of line so the description may contain ',' */
+       tokens[5] = strtok(str, "\n");
+       if (!tokens[5])
+               goto err_exit;
+
+       /* index/main-leaf */
+       index = strtoull(tokens[0], NULL, 0);
+
+       if (index & 0x80000000)
+               range = leafs_ext;
+       else
+               range = leafs_basic;
+
+       index &= 0x7FFFFFFF;
+       /* Skip line parsing for non-existing indexes */
+       if ((int)index >= range->nr)
+               return -1;
+
+       func = &range->funcs[index];
+
+       /* Return if the index has no valid item on this platform */
+       if (!func->nr)
+               return 0;
+
+       /* subleaf: leafs[] holds func->nr entries, valid range 0..nr-1 */
+       sub = strtoul(tokens[1], NULL, 0);
+       if ((int)sub >= func->nr)
+               return -1;
+
+       leaf = &func->leafs[sub];
+       buf = tokens[2];
+
+       if (strcasestr(buf, "EAX"))
+               reg_index = R_EAX;
+       else if (strcasestr(buf, "EBX"))
+               reg_index = R_EBX;
+       else if (strcasestr(buf, "ECX"))
+               reg_index = R_ECX;
+       else if (strcasestr(buf, "EDX"))
+               reg_index = R_EDX;
+       else
+               goto err_exit;
+
+       reg = &leaf->info[reg_index];
+       bdesc = &reg->descs[reg->nr++];
+
+       /* bit flag or bits field */
+       buf = tokens[3];
+
+       end = strtok(buf, ":");
+       bdesc->end = strtoul(end, NULL, 0);
+       bdesc->start = bdesc->end;
+
+       /* start != NULL means it is bit fields */
+       start = strtok(NULL, ":");
+       if (start)
+               bdesc->start = strtoul(start, NULL, 0);
+
+       /* Bounded copies: csv fields can exceed the descriptor buffers */
+       snprintf(bdesc->simp, sizeof(bdesc->simp), "%s", tokens[4]);
+       snprintf(bdesc->detail, sizeof(bdesc->detail), "%s", tokens[5]);
+       return 0;
+
+err_exit:
+       printf("Warning: wrong line format:\n");
+       printf("\tline[%d]: %s\n", flines, line);
+       return -1;
+}
+
+/* Parse csv file, and construct the array of all leafs and subleafs */
+static void parse_text(void)
+{
+       FILE *file;
+       char *filename, *line = NULL;
+       size_t len = 0;
+       int ret;
+
+       /* Raw mode prints undecoded register values; no csv is needed */
+       if (show_raw)
+               return;
+
+       filename = user_csv ? user_csv : def_csv;
+       file = fopen(filename, "r");
+       if (!file) {
+               /* Fallback to a csv in the same dir */
+               file = fopen("./cpuid.csv", "r");
+       }
+
+       if (!file) {
+               printf("Fail to open '%s'\n", filename);
+               return;
+       }
+
+       while (1) {
+               ret = getline(&line, &len, file);
+               flines++;
+               if (ret > 0)
+                       parse_line(line);
+
+               /*
+                * Stop on EOF and also on a read error: breaking only
+                * on feof() would loop forever after an error, since
+                * getline() keeps returning -1 without setting EOF.
+                */
+               if (ret <= 0)
+                       break;
+       }
+
+       /* getline() allocated the line buffer */
+       free(line);
+       fclose(file);
+}
+
+
+/* Decode every eax/ebx/ecx/edx */
+static void decode_bits(u32 value, struct reg_desc *rdesc)
+{
+       struct bits_desc *bdesc;
+       int start, end, i;
+       u32 mask;
+
+       for (i = 0; i < rdesc->nr; i++) {
+               bdesc = &rdesc->descs[i];
+
+               start = bdesc->start;
+               end = bdesc->end;
+               if (start == end) {
+                       /*
+                        * single bit flag; shift an unsigned 1 so that
+                        * start == 31 is not signed-overflow UB
+                        */
+                       if (value & (1U << start))
+                               printf("\t%-20s %s%s\n",
+                                       bdesc->simp,
+                                       show_details ? "-" : "",
+                                       show_details ? bdesc->detail : ""
+                                       );
+               } else {
+                       /* bit fields */
+                       if (show_flags_only)
+                               continue;
+
+                       /* field may be 32 bits wide: build the mask in 64 bit */
+                       mask = ((u64)1 << (end - start + 1)) - 1;
+                       printf("\t%-20s\t: 0x%-8x\t%s%s\n",
+                                       bdesc->simp,
+                                       (value >> start) & mask,
+                                       show_details ? "-" : "",
+                                       show_details ? bdesc->detail : ""
+                                       );
+               }
+       }
+}
+
+/* Decode and print one subleaf: all four registers, raw dump first if asked */
+static void show_leaf(struct subleaf *leaf)
+{
+       if (!leaf)
+               return;
+
+       /* In raw mode also print the undecoded register values */
+       if (show_raw)
+               leaf_print_raw(leaf);
+
+       decode_bits(leaf->eax, &leaf->info[R_EAX]);
+       decode_bits(leaf->ebx, &leaf->info[R_EBX]);
+       decode_bits(leaf->ecx, &leaf->info[R_ECX]);
+       decode_bits(leaf->edx, &leaf->info[R_EDX]);
+}
+
+/* Print one leaf: the main leaf (index 0) followed by all its subleafs */
+static void show_func(struct cpuid_func *func)
+{
+       int i;
+
+       if (!func)
+               return;
+
+       for (i = 0; i < func->nr; i++)
+               show_leaf(&func->leafs[i]);
+}
+
+/* Print every leaf of one range (basic or extended) */
+static void show_range(struct cpuid_range *range)
+{
+       int i;
+
+       for (i = 0; i < range->nr; i++)
+               show_func(&range->funcs[i]);
+}
+
+/*
+ * Map a user-supplied leaf index to its cpuid_func entry.
+ * Bit 31 selects the extended range; returns NULL (after printing an
+ * error) when the index is outside the enumerated range.
+ */
+static inline struct cpuid_func *index_to_func(u32 index)
+{
+       struct cpuid_range *range;
+
+       range = (index & 0x80000000) ? leafs_ext : leafs_basic;
+       index &= 0x7FFFFFFF;
+
+       if (((index & 0xFFFF) + 1) > (u32)range->nr) {
+               printf("ERR: invalid input index (0x%x)\n", index);
+               return NULL;
+       }
+       /*
+        * NOTE(review): the bounds check masks with 0xFFFF but the array
+        * is indexed with the unmasked value; an index with any of bits
+        * 16-30 set would pass the check yet read out of bounds — confirm
+        * callers never pass one.
+        */
+       return &range->funcs[index];
+}
+
+/*
+ * Top-level output dispatcher: raw dump of everything, a single
+ * user-selected leaf/subleaf, or the full decoded feature listing,
+ * depending on the global option flags.
+ */
+static void show_info(void)
+{
+       struct cpuid_func *func;
+
+       if (show_raw) {
+               /* Show all of the raw output of 'cpuid' instr */
+               raw_dump_range(leafs_basic);
+               raw_dump_range(leafs_ext);
+               return;
+       }
+
+       /* 0xFFFFFFFF is the "not set" sentinel for -l/--leaf */
+       if (user_index != 0xFFFFFFFF) {
+               /* Only show specific leaf/subleaf info */
+               func = index_to_func(user_index);
+               if (!func)
+                       return;
+
+               /* Dump the raw data also */
+               show_raw = true;
+
+               if (user_sub != 0xFFFFFFFF) {
+                       if (user_sub + 1 <= (u32)func->nr) {
+                               show_leaf(&func->leafs[user_sub]);
+                               return;
+                       }
+
+                       /* Bad subleaf: fall through and show the whole leaf */
+                       printf("ERR: invalid input subleaf (0x%x)\n", user_sub);
+               }
+
+               show_func(func);
+               return;
+       }
+
+       printf("CPU features:\n=============\n\n");
+       show_range(leafs_basic);
+       show_range(leafs_ext);
+}
+
+/* Detect the CPU vendor and enumerate both CPUID ranges of this machine */
+static void setup_platform_cpuid(void)
+{
+        u32 eax, ebx, ecx, edx;
+
+       /* Check vendor */
+       eax = ebx = ecx = edx = 0;
+       cpuid(&eax, &ebx, &ecx, &edx);
+
+       /* EBX of leaf 0 holds "Auth" of "AuthenticAMD" ("htuA" read as LE u32) */
+       if (ebx == 0x68747541)
+               is_amd = true;
+
+       /* Setup leafs for the basic and extended range */
+       leafs_basic = setup_cpuid_range(0x0);
+       leafs_ext = setup_cpuid_range(0x80000000);
+}
+
+/* Print command-line usage; option names must match the opts[] table */
+static void usage(void)
+{
+       printf("kcpuid [-abdfhr] [-l leaf] [-s subleaf]\n"
+               "\t-a|--all             Show both bit flags and complex bit fields info\n"
+               "\t-b|--bitflags        Show boolean flags only\n"
+               "\t-d|--detail          Show details of the flag/fields (default)\n"
+               "\t-f|--file            Specify the cpuid csv file\n"
+               "\t-h|--help            Show usage info\n"
+               "\t-l|--leaf=index      Specify the leaf you want to check\n"
+               "\t-r|--raw             Show raw cpuid data\n"
+               "\t-s|--subleaf=sub     Specify the subleaf you want to check\n"
+       );
+}
+
+/* Long-option table; keep in sync with the getopt string in parse_options() */
+static struct option opts[] = {
+       { "all", no_argument, NULL, 'a' },              /* show both bit flags and fields */
+       { "bitflags", no_argument, NULL, 'b' },         /* only show bit flags, default on */
+       { "detail", no_argument, NULL, 'd' },           /* show detail descriptions */
+       { "file", required_argument, NULL, 'f' },       /* use user's cpuid file */
+       { "help", no_argument, NULL, 'h'},              /* show usage */
+       { "leaf", required_argument, NULL, 'l'},        /* only check a specific leaf */
+       { "raw", no_argument, NULL, 'r'},               /* show raw CPUID leaf data */
+       { "subleaf", required_argument, NULL, 's'},     /* check a specific subleaf */
+       { NULL, 0, NULL, 0 }
+};
+
+/*
+ * Parse command-line options into the global show_*/user_* state.
+ * Returns 0 on success, -1 on an unknown option.
+ */
+static int parse_options(int argc, char *argv[])
+{
+       int c;
+
+       while ((c = getopt_long(argc, argv, "abdf:hl:rs:",
+                                       opts, NULL)) != -1)
+               switch (c) {
+               case 'a':
+                       show_flags_only = false;
+                       break;
+               case 'b':
+                       show_flags_only = true;
+                       break;
+               case 'd':
+                       show_details = true;
+                       break;
+               case 'f':
+                       user_csv = optarg;
+                       break;
+               case 'h':
+                       usage();
+                       /* NOTE(review): exit status 1 for --help; 0 is conventional */
+                       exit(1);
+                       break;
+               case 'l':
+                       /* main leaf */
+                       user_index = strtoul(optarg, NULL, 0);
+                       break;
+               case 'r':
+                       show_raw = true;
+                       break;
+               case 's':
+                       /* subleaf */
+                       user_sub = strtoul(optarg, NULL, 0);
+                       break;
+               default:
+                       printf("%s: Invalid option '%c'\n", argv[0], optopt);
+                       return -1;
+       }
+
+       return 0;
+}
+
+/*
+ * Do 4 things in turn:
+ * 1. Parse user options
+ * 2. Parse and store all the CPUID leaf data supported on this platform
+ * 3. Parse the csv file, while skipping leafs which are not available
+ *    on this platform
+ * 4. Print leafs info based on user options
+ */
+int main(int argc, char *argv[])
+{
+       if (parse_options(argc, argv))
+               return -1;
+
+       /* Setup the cpuid leafs of current platform */
+       setup_platform_cpuid();
+
+       /* Read and parse the 'cpuid.csv' */
+       parse_text();
+
+       show_info();
+       return 0;
+}
index c4225ed63565a9aa4bf1170014a0ab99305a288b..1600b17dbb8ab67e88e8fee1a2d83ed7dda5b106 100644 (file)
@@ -128,9 +128,9 @@ def detect_kernel_config():
 
     cfg['nr_nodes'] = prog['nr_online_nodes'].value_()
 
-    if prog.type('struct kmem_cache').members[1][1] == 'flags':
+    if prog.type('struct kmem_cache').members[1].name == 'flags':
         cfg['allocator'] = 'SLUB'
-    elif prog.type('struct kmem_cache').members[1][1] == 'batchcount':
+    elif prog.type('struct kmem_cache').members[1].name == 'batchcount':
         cfg['allocator'] = 'SLAB'
     else:
         err('Can\'t determine the slab allocator')
@@ -193,7 +193,7 @@ def main():
         # look over all slab pages, belonging to non-root memcgs
         # and look for objects belonging to the given memory cgroup
         for page in for_each_slab_page(prog):
-            objcg_vec_raw = page.obj_cgroups.value_()
+            objcg_vec_raw = page.memcg_data.value_()
             if objcg_vec_raw == 0:
                 continue
             cache = page.slab_cache
@@ -202,7 +202,7 @@ def main():
             addr = cache.value_()
             caches[addr] = cache
             # clear the lowest bit to get the true obj_cgroups
-            objcg_vec = Object(prog, page.obj_cgroups.type_,
+            objcg_vec = Object(prog, 'struct obj_cgroup **',
                                value=objcg_vec_raw & ~1)
 
             if addr not in stats:
index ae5662d368b98f7bceb3d065e2e832507b32f671..5a00b8b2cf9fcceb1fa247a4da5fe2f330f378ce 100644 (file)
@@ -58,11 +58,25 @@ struct static_call_site {
        __raw_static_call(name);                                        \
 })
 
+struct static_call_key {
+       void *func;
+       union {
+               /* bit 0: 0 = mods, 1 = sites */
+               unsigned long type;
+               struct static_call_mod *mods;
+               struct static_call_site *sites;
+       };
+};
+
 #else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
 
 #define __STATIC_CALL_ADDRESSABLE(name)
 #define __static_call(name)    __raw_static_call(name)
 
+struct static_call_key {
+       void *func;
+};
+
 #endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
 
 #ifdef MODULE
@@ -77,6 +91,10 @@ struct static_call_site {
 
 #else
 
+struct static_call_key {
+       void *func;
+};
+
 #define static_call(name)                                              \
        ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
 
index 637189ec1ab992e5a5313a8d71c8c499ffb27411..d30439b4b8ab45c7a851891e69ca6ebea007a319 100644 (file)
@@ -9,8 +9,6 @@
 #include "../../../arch/alpha/include/uapi/asm/errno.h"
 #elif defined(__mips__)
 #include "../../../arch/mips/include/uapi/asm/errno.h"
-#elif defined(__ia64__)
-#include "../../../arch/ia64/include/uapi/asm/errno.h"
 #elif defined(__xtensa__)
 #include "../../../arch/xtensa/include/uapi/asm/errno.h"
 #else
index 8caaafe7e312b30eed538e39c2324f4065895754..e7a8d847161f28e265b0a400c5f35e5e67c60b93 100644 (file)
@@ -227,7 +227,7 @@ static int ringbuf_process_ring(struct ring* r)
                        if ((len & BPF_RINGBUF_DISCARD_BIT) == 0) {
                                sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ;
                                err = r->sample_cb(r->ctx, sample, len);
-                               if (err) {
+                               if (err < 0) {
                                        /* update consumer pos and bail out */
                                        smp_store_release(r->consumer_pos,
                                                          cons_pos);
index 526fc35c0b23318621ec9e70169192a776bcb499..007fe5d5943861c2f6f71f8d390360036ebebe44 100644 (file)
@@ -59,6 +59,8 @@ struct xsk_umem {
        int fd;
        int refcount;
        struct list_head ctx_list;
+       bool rx_ring_setup_done;
+       bool tx_ring_setup_done;
 };
 
 struct xsk_ctx {
@@ -743,26 +745,30 @@ static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
        return NULL;
 }
 
-static void xsk_put_ctx(struct xsk_ctx *ctx)
+static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)
 {
        struct xsk_umem *umem = ctx->umem;
        struct xdp_mmap_offsets off;
        int err;
 
-       if (--ctx->refcount == 0) {
-               err = xsk_get_mmap_offsets(umem->fd, &off);
-               if (!err) {
-                       munmap(ctx->fill->ring - off.fr.desc,
-                              off.fr.desc + umem->config.fill_size *
-                              sizeof(__u64));
-                       munmap(ctx->comp->ring - off.cr.desc,
-                              off.cr.desc + umem->config.comp_size *
-                              sizeof(__u64));
-               }
+       if (--ctx->refcount)
+               return;
 
-               list_del(&ctx->list);
-               free(ctx);
-       }
+       if (!unmap)
+               goto out_free;
+
+       err = xsk_get_mmap_offsets(umem->fd, &off);
+       if (err)
+               goto out_free;
+
+       munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
+              sizeof(__u64));
+       munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
+              sizeof(__u64));
+
+out_free:
+       list_del(&ctx->list);
+       free(ctx);
 }
 
 static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
@@ -797,8 +803,6 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
        memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
        ctx->ifname[IFNAMSIZ - 1] = '\0';
 
-       umem->fill_save = NULL;
-       umem->comp_save = NULL;
        ctx->fill = fill;
        ctx->comp = comp;
        list_add(&ctx->list, &umem->ctx_list);
@@ -848,6 +852,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                              struct xsk_ring_cons *comp,
                              const struct xsk_socket_config *usr_config)
 {
+       bool unmap, rx_setup_done = false, tx_setup_done = false;
        void *rx_map = NULL, *tx_map = NULL;
        struct sockaddr_xdp sxdp = {};
        struct xdp_mmap_offsets off;
@@ -858,6 +863,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
        if (!umem || !xsk_ptr || !(rx || tx))
                return -EFAULT;
 
+       unmap = umem->fill_save != fill;
+
        xsk = calloc(1, sizeof(*xsk));
        if (!xsk)
                return -ENOMEM;
@@ -881,6 +888,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                }
        } else {
                xsk->fd = umem->fd;
+               rx_setup_done = umem->rx_ring_setup_done;
+               tx_setup_done = umem->tx_ring_setup_done;
        }
 
        ctx = xsk_get_ctx(umem, ifindex, queue_id);
@@ -899,7 +908,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
        }
        xsk->ctx = ctx;
 
-       if (rx) {
+       if (rx && !rx_setup_done) {
                err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
                                 &xsk->config.rx_size,
                                 sizeof(xsk->config.rx_size));
@@ -907,8 +916,10 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                        err = -errno;
                        goto out_put_ctx;
                }
+               if (xsk->fd == umem->fd)
+                       umem->rx_ring_setup_done = true;
        }
-       if (tx) {
+       if (tx && !tx_setup_done) {
                err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
                                 &xsk->config.tx_size,
                                 sizeof(xsk->config.tx_size));
@@ -916,6 +927,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                        err = -errno;
                        goto out_put_ctx;
                }
+               if (xsk->fd == umem->fd)
+                       umem->rx_ring_setup_done = true;
        }
 
        err = xsk_get_mmap_offsets(xsk->fd, &off);
@@ -994,6 +1007,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
        }
 
        *xsk_ptr = xsk;
+       umem->fill_save = NULL;
+       umem->comp_save = NULL;
        return 0;
 
 out_mmap_tx:
@@ -1005,7 +1020,7 @@ out_mmap_rx:
                munmap(rx_map, off.rx.desc +
                       xsk->config.rx_size * sizeof(struct xdp_desc));
 out_put_ctx:
-       xsk_put_ctx(ctx);
+       xsk_put_ctx(ctx, unmap);
 out_socket:
        if (--umem->refcount)
                close(xsk->fd);
@@ -1019,6 +1034,9 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
                       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
                       const struct xsk_socket_config *usr_config)
 {
+       if (!umem)
+               return -EFAULT;
+
        return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
                                         rx, tx, umem->fill_save,
                                         umem->comp_save, usr_config);
@@ -1068,7 +1086,7 @@ void xsk_socket__delete(struct xsk_socket *xsk)
                }
        }
 
-       xsk_put_ctx(ctx);
+       xsk_put_ctx(ctx, true);
 
        umem->refcount--;
        /* Do not close an fd that also has an associated umem connected
index d49448a1060c9f8e2ffbca9c2e6744f66b022bd1..87cb11a7a3ee9cc4bab7356de3e9848013efca7a 100644 (file)
@@ -289,7 +289,7 @@ static int set_tracing_pid(struct perf_ftrace *ftrace)
 
        for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
                scnprintf(buf, sizeof(buf), "%d",
-                         ftrace->evlist->core.threads->map[i]);
+                         perf_thread_map__pid(ftrace->evlist->core.threads, i));
                if (append_tracing_file("set_ftrace_pid", buf) < 0)
                        return -1;
        }
index 6fe44d97fde5a2be7366c4a3ff39d18b1206b602..ddccc0eb739076dc4ccd18a897318e07448fcae5 100644 (file)
@@ -906,7 +906,7 @@ int cmd_inject(int argc, const char **argv)
        }
 
        data.path = inject.input_name;
-       inject.session = perf_session__new(&data, true, &inject.tool);
+       inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool);
        if (IS_ERR(inject.session))
                return PTR_ERR(inject.session);
 
index 27ee1ea1fe941fed1ec29703e220b69ee4fecbda..9b0614a87831ba13769dc444e1acc398f34d835c 100755 (executable)
@@ -15,7 +15,7 @@ x86_msr_index=${arch_x86_header_dir}/msr-index.h
 
 printf "static const char *x86_MSRs[] = {\n"
 regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MSR_([[:alnum:]][[:alnum:]_]+)[[:space:]]+(0x00000[[:xdigit:]]+)[[:space:]]*.*'
-egrep $regex ${x86_msr_index} | egrep -v 'MSR_(ATOM|P[46]|IA32_(TSCDEADLINE|UCODE_REV)|IDT_FCR4)' | \
+egrep $regex ${x86_msr_index} | egrep -v 'MSR_(ATOM|P[46]|IA32_(TSC_DEADLINE|UCODE_REV)|IDT_FCR4)' | \
        sed -r "s/$regex/\2 \1/g" | sort -n | \
        xargs printf "\t[%s] = \"%s\",\n"
 printf "};\n\n"
index f3ac9d40cebf4cfa0b8cfef7dcc7e23a9449131e..2e5eff4f8f03982eb37d8deb0965dc5a71af67df 100644 (file)
@@ -210,8 +210,10 @@ static int arm_spe_do_get_packet(const unsigned char *buf, size_t len,
 
        if ((hdr & SPE_HEADER0_MASK2) == SPE_HEADER0_EXTENDED) {
                /* 16-bit extended format header */
-               ext_hdr = 1;
+               if (len == 1)
+                       return ARM_SPE_BAD_PACKET;
 
+               ext_hdr = 1;
                hdr = buf[1];
                if (hdr == SPE_HEADER1_ALIGNMENT)
                        return arm_spe_get_alignment(buf, len, packet);
index 5b6ccb90b39712916203d405019460caeac7812a..1b4091a3b508bf2babf2133d0e5e790cb7a6ba79 100644 (file)
@@ -634,7 +634,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
                break;
        }
 
-       if (itr)
+       if (itr && itr->parse_snapshot_options)
                return itr->parse_snapshot_options(itr, opts, str);
 
        pr_err("No AUX area tracing to snapshot\n");
index 423ec69bda6ca50d3b5544ba751d493a9287d2d9..5ecd4f401f324b391d40111f372a714acf615169 100644 (file)
@@ -201,7 +201,7 @@ static int block_total_cycles_pct_entry(struct perf_hpp_fmt *fmt,
        double ratio = 0.0;
 
        if (block_fmt->total_cycles)
-               ratio = (double)bi->cycles / (double)block_fmt->total_cycles;
+               ratio = (double)bi->cycles_aggr / (double)block_fmt->total_cycles;
 
        return color_pct(hpp, block_fmt->width, 100.0 * ratio);
 }
@@ -216,9 +216,9 @@ static int64_t block_total_cycles_pct_sort(struct perf_hpp_fmt *fmt,
        double l, r;
 
        if (block_fmt->total_cycles) {
-               l = ((double)bi_l->cycles /
+               l = ((double)bi_l->cycles_aggr /
                        (double)block_fmt->total_cycles) * 100000.0;
-               r = ((double)bi_r->cycles /
+               r = ((double)bi_r->cycles_aggr /
                        (double)block_fmt->total_cycles) * 100000.0;
                return (int64_t)l - (int64_t)r;
        }
index f29af4fc3d09390c90aa53c7e5f5434fb979e342..8fca4779ae6a8e909f63592ff71102e900561bfe 100644 (file)
@@ -35,7 +35,7 @@ void perf_data__close_dir(struct perf_data *data)
 int perf_data__create_dir(struct perf_data *data, int nr)
 {
        struct perf_data_file *files = NULL;
-       int i, ret = -1;
+       int i, ret;
 
        if (WARN_ON(!data->is_dir))
                return -EINVAL;
@@ -51,7 +51,8 @@ int perf_data__create_dir(struct perf_data *data, int nr)
        for (i = 0; i < nr; i++) {
                struct perf_data_file *file = &files[i];
 
-               if (asprintf(&file->path, "%s/data.%d", data->path, i) < 0)
+               ret = asprintf(&file->path, "%s/data.%d", data->path, i);
+               if (ret < 0)
                        goto out_err;
 
                ret = open(file->path, O_RDWR|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR);
index fbc40a2c17d4dca0658b9b0c129eb4253e64a2bc..8af693d9678cefe06ffa322e4913437fb04beb36 100644 (file)
@@ -840,15 +840,18 @@ out:
 int maps__clone(struct thread *thread, struct maps *parent)
 {
        struct maps *maps = thread->maps;
-       int err = -ENOMEM;
+       int err;
        struct map *map;
 
        down_read(&parent->lock);
 
        maps__for_each_entry(parent, map) {
                struct map *new = map__clone(map);
-               if (new == NULL)
+
+               if (new == NULL) {
+                       err = -ENOMEM;
                        goto out_unlock;
+               }
 
                err = unwind__prepare_access(maps, new, NULL);
                if (err)
index 582feb88eca34d0d47baaa1bef749d27c485cef7..ab940c508ef0cbfd6bcbc5856f9ea627f5d898ee 100644 (file)
@@ -15,7 +15,7 @@ struct process_cmd_struct {
        int arg;
 };
 
-static const char *version_str = "v1.8";
+static const char *version_str = "v1.9";
 static const int supported_api_ver = 1;
 static struct isst_if_platform_info isst_platform_info;
 static char *progname;
@@ -381,6 +381,18 @@ static void set_cpu_online_offline(int cpu, int state)
        close(fd);
 }
 
+static void force_all_cpus_online(void)
+{
+       int i;
+
+       fprintf(stderr, "Forcing all CPUs online\n");
+
+       for (i = 0; i < topo_max_cpus; ++i)
+               set_cpu_online_offline(i, 1);
+
+       unlink("/var/run/isst_cpu_topology.dat");
+}
+
 #define MAX_PACKAGE_COUNT 8
 #define MAX_DIE_PER_PACKAGE 2
 static void for_each_online_package_in_set(void (*callback)(int, void *, void *,
@@ -959,6 +971,10 @@ static void isst_print_extended_platform_info(void)
                fprintf(outf, "Intel(R) SST-BF (feature base-freq) is not supported\n");
 
        ret = isst_read_pm_config(i, &cp_state, &cp_cap);
+       if (ret) {
+               fprintf(outf, "Intel(R) SST-CP (feature core-power) status is unknown\n");
+               return;
+       }
        if (cp_cap)
                fprintf(outf, "Intel(R) SST-CP (feature core-power) is supported\n");
        else
@@ -2763,6 +2779,7 @@ static void usage(void)
        printf("\t[-f|--format] : output format [json|text]. Default: text\n");
        printf("\t[-h|--help] : Print help\n");
        printf("\t[-i|--info] : Print platform information\n");
+       printf("\t[-a|--all-cpus-online] : Force online every CPU in the system\n");
        printf("\t[-o|--out] : Output file\n");
        printf("\t\t\tDefault : stderr\n");
        printf("\t[-p|--pause] : Delay between two mail box commands in milliseconds\n");
@@ -2791,7 +2808,6 @@ static void usage(void)
 static void print_version(void)
 {
        fprintf(outf, "Version %s\n", version_str);
-       fprintf(outf, "Build date %s time %s\n", __DATE__, __TIME__);
        exit(0);
 }
 
@@ -2800,11 +2816,12 @@ static void cmdline(int argc, char **argv)
        const char *pathname = "/dev/isst_interface";
        char *ptr;
        FILE *fp;
-       int opt;
+       int opt, force_cpus_online = 0;
        int option_index = 0;
        int ret;
 
        static struct option long_options[] = {
+               { "all-cpus-online", no_argument, 0, 'a' },
                { "cpu", required_argument, 0, 'c' },
                { "debug", no_argument, 0, 'd' },
                { "format", required_argument, 0, 'f' },
@@ -2840,9 +2857,12 @@ static void cmdline(int argc, char **argv)
        }
 
        progname = argv[0];
-       while ((opt = getopt_long_only(argc, argv, "+c:df:hio:v", long_options,
+       while ((opt = getopt_long_only(argc, argv, "+c:df:hio:va", long_options,
                                       &option_index)) != -1) {
                switch (opt) {
+               case 'a':
+                       force_cpus_online = 1;
+                       break;
                case 'c':
                        parse_cpu_command(optarg);
                        break;
@@ -2892,6 +2912,8 @@ static void cmdline(int argc, char **argv)
                exit(0);
        }
        set_max_cpu_num();
+       if (force_cpus_online)
+               force_all_cpus_online();
        store_cpu_topology();
        set_cpu_present_cpu_mask();
        set_cpu_target_cpu_mask();
index 8e54ce47648e28f0ffc0c297e6fd0e7655f77f71..3bf1820c0da1184a2bcaf6df94c30c8725b6a95a 100644 (file)
@@ -25,10 +25,14 @@ static void printcpulist(int str_len, char *str, int mask_size,
                        index = snprintf(&str[curr_index],
                                         str_len - curr_index, ",");
                        curr_index += index;
+                       if (curr_index >= str_len)
+                               break;
                }
                index = snprintf(&str[curr_index], str_len - curr_index, "%d",
                                 i);
                curr_index += index;
+               if (curr_index >= str_len)
+                       break;
                first = 0;
        }
 }
@@ -64,10 +68,14 @@ static void printcpumask(int str_len, char *str, int mask_size,
                index = snprintf(&str[curr_index], str_len - curr_index, "%08x",
                                 mask[i]);
                curr_index += index;
+               if (curr_index >= str_len)
+                       break;
                if (i) {
                        strncat(&str[curr_index], ",", str_len - curr_index);
                        curr_index++;
                }
+               if (curr_index >= str_len)
+                       break;
        }
 
        free(mask);
@@ -185,7 +193,7 @@ static void _isst_pbf_display_information(int cpu, FILE *outf, int level,
                                          int disp_level)
 {
        char header[256];
-       char value[256];
+       char value[512];
 
        snprintf(header, sizeof(header), "speed-select-base-freq-properties");
        format_and_print(outf, disp_level, header, NULL);
@@ -349,7 +357,7 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
                                   struct isst_pkg_ctdp *pkg_dev)
 {
        char header[256];
-       char value[256];
+       char value[512];
        static int level;
        int i;
 
index a7c4f0772e5342e2b95af302a633cb37e503bfd3..5939615265f1845c7067d2c17422707e98985fdf 100644 (file)
@@ -2449,7 +2449,7 @@ dump_knl_turbo_ratio_limits(void)
        fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n",
                base_cpu, msr);
 
-       /**
+       /*
         * Turbo encoding in KNL is as follows:
         * [0] -- Reserved
         * [7:1] -- Base value of number of active cores of bucket 1.
index 2c9d012797a7f1e4acb7d7c090e0b59885846d96..ced910fb40198c38abbd85832f3482c77024c35f 100644 (file)
@@ -4,7 +4,7 @@
 ARCH ?= $(shell uname -m 2>/dev/null || echo not)
 
 ifneq (,$(filter $(ARCH),aarch64 arm64))
-ARM64_SUBTARGETS ?= tags signal pauth fp mte
+ARM64_SUBTARGETS ?= tags signal pauth fp mte bti
 else
 ARM64_SUBTARGETS :=
 endif
diff --git a/tools/testing/selftests/arm64/bti/.gitignore b/tools/testing/selftests/arm64/bti/.gitignore
new file mode 100644 (file)
index 0000000..73869fa
--- /dev/null
@@ -0,0 +1,2 @@
+btitest
+nobtitest
diff --git a/tools/testing/selftests/arm64/bti/Makefile b/tools/testing/selftests/arm64/bti/Makefile
new file mode 100644 (file)
index 0000000..73e013c
--- /dev/null
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0
+
+TEST_GEN_PROGS := btitest nobtitest
+
+PROGS := $(patsubst %,gen/%,$(TEST_GEN_PROGS))
+
+# These tests are built as freestanding binaries since otherwise BTI
+# support in ld.so is required which is not currently widespread; when
+# it is available it will still be useful to test this separately as the
+# cases for statically linked and dynamically lined binaries are
+# slightly different.
+
+CFLAGS_NOBTI = -DBTI=0
+CFLAGS_BTI = -mbranch-protection=standard -DBTI=1
+
+CFLAGS_COMMON = -ffreestanding -Wall -Wextra $(CFLAGS)
+
+BTI_CC_COMMAND = $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -c -o $@ $<
+NOBTI_CC_COMMAND = $(CC) $(CFLAGS_NOBTI) $(CFLAGS_COMMON) -c -o $@ $<
+
+%-bti.o: %.c
+       $(BTI_CC_COMMAND)
+
+%-bti.o: %.S
+       $(BTI_CC_COMMAND)
+
+%-nobti.o: %.c
+       $(NOBTI_CC_COMMAND)
+
+%-nobti.o: %.S
+       $(NOBTI_CC_COMMAND)
+
+BTI_OBJS =                                      \
+       test-bti.o                           \
+       signal-bti.o                            \
+       start-bti.o                             \
+       syscall-bti.o                           \
+       system-bti.o                            \
+       teststubs-bti.o                         \
+       trampoline-bti.o
+gen/btitest: $(BTI_OBJS)
+       $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -o $@ $^
+
+NOBTI_OBJS =                                    \
+       test-nobti.o                         \
+       signal-nobti.o                          \
+       start-nobti.o                           \
+       syscall-nobti.o                         \
+       system-nobti.o                          \
+       teststubs-nobti.o                       \
+       trampoline-nobti.o
+gen/nobtitest: $(NOBTI_OBJS)
+       $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -o $@ $^
+
+# Including KSFT lib.mk here will also mangle the TEST_GEN_PROGS list
+# to account for any OUTPUT target-dirs optionally provided by
+# the toplevel makefile
+include ../../lib.mk
+
+$(TEST_GEN_PROGS): $(PROGS)
+       cp $(PROGS) $(OUTPUT)/
diff --git a/tools/testing/selftests/arm64/bti/assembler.h b/tools/testing/selftests/arm64/bti/assembler.h
new file mode 100644 (file)
index 0000000..04e7b72
--- /dev/null
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019  Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef ASSEMBLER_H
+#define ASSEMBLER_H
+
+#define NT_GNU_PROPERTY_TYPE_0 5
+#define GNU_PROPERTY_AARCH64_FEATURE_1_AND     0xc0000000
+
+/* Bits for GNU_PROPERTY_AARCH64_FEATURE_1_BTI */
+#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI     (1U << 0)
+#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC     (1U << 1)
+
+
+.macro startfn name:req
+       .globl \name
+\name:
+       .macro endfn
+               .size \name, . - \name
+               .type \name, @function
+               .purgem endfn
+       .endm
+.endm
+
+.macro emit_aarch64_feature_1_and
+       .pushsection .note.gnu.property, "a"
+       .align  3
+       .long   2f - 1f
+       .long   6f - 3f
+       .long   NT_GNU_PROPERTY_TYPE_0
+1:     .string "GNU"
+2:
+       .align  3
+3:     .long   GNU_PROPERTY_AARCH64_FEATURE_1_AND
+       .long   5f - 4f
+4:
+#if BTI
+       .long   GNU_PROPERTY_AARCH64_FEATURE_1_PAC | \
+               GNU_PROPERTY_AARCH64_FEATURE_1_BTI
+#else
+       .long   0
+#endif
+5:
+       .align  3
+6:
+       .popsection
+.endm
+
+.macro paciasp
+       hint    0x19
+.endm
+
+.macro autiasp
+       hint    0x1d
+.endm
+
+.macro __bti_
+       hint    0x20
+.endm
+
+.macro __bti_c
+       hint    0x22
+.endm
+
+.macro __bti_j
+       hint    0x24
+.endm
+
+.macro __bti_jc
+       hint    0x26
+.endm
+
+.macro bti what=
+       __bti_\what
+.endm
+
+#endif /* ! ASSEMBLER_H */
diff --git a/tools/testing/selftests/arm64/bti/btitest.h b/tools/testing/selftests/arm64/bti/btitest.h
new file mode 100644 (file)
index 0000000..2aff9b1
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019  Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef BTITEST_H
+#define BTITEST_H
+
+/* Trampolines for calling the test stubs: */
+void call_using_br_x0(void (*)(void));
+void call_using_br_x16(void (*)(void));
+void call_using_blr(void (*)(void));
+
+/* Test stubs: */
+void nohint_func(void);
+void bti_none_func(void);
+void bti_c_func(void);
+void bti_j_func(void);
+void bti_jc_func(void);
+void paciasp_func(void);
+
+#endif /* !BTITEST_H */
diff --git a/tools/testing/selftests/arm64/bti/compiler.h b/tools/testing/selftests/arm64/bti/compiler.h
new file mode 100644 (file)
index 0000000..ebb6204
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019  Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef COMPILER_H
+#define COMPILER_H
+
+#define __always_unused __attribute__((__unused__))
+#define __noreturn __attribute__((__noreturn__))
+#define __unreachable() __builtin_unreachable()
+
+/* curse(e) has value e, but the compiler cannot assume so */
+#define curse(e) ({                            \
+       __typeof__(e) __curse_e = (e);          \
+       asm ("" : "+r" (__curse_e));            \
+       __curse_e;                              \
+})
+
+#endif /* ! COMPILER_H */
diff --git a/tools/testing/selftests/arm64/bti/gen/.gitignore b/tools/testing/selftests/arm64/bti/gen/.gitignore
new file mode 100644 (file)
index 0000000..73869fa
--- /dev/null
@@ -0,0 +1,2 @@
+btitest
+nobtitest
diff --git a/tools/testing/selftests/arm64/bti/signal.c b/tools/testing/selftests/arm64/bti/signal.c
new file mode 100644 (file)
index 0000000..f3fd29b
--- /dev/null
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019  Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "system.h"
+#include "signal.h"
+
+int sigemptyset(sigset_t *s)
+{
+       unsigned int i;
+
+       for (i = 0; i < _NSIG_WORDS; ++i)
+               s->sig[i] = 0;
+
+       return 0;
+}
+
+int sigaddset(sigset_t *s, int n)
+{
+       if (n < 1 || n > _NSIG)
+               return -EINVAL;
+
+       s->sig[(n - 1) / _NSIG_BPW] |= 1UL << (n - 1) % _NSIG_BPW;
+       return 0;
+}
+
+int sigaction(int n, struct sigaction *sa, const struct sigaction *old)
+{
+       return syscall(__NR_rt_sigaction, n, sa, old, sizeof(sa->sa_mask));
+}
+
+int sigprocmask(int how, const sigset_t *mask, sigset_t *old)
+{
+       return syscall(__NR_rt_sigprocmask, how, mask, old, sizeof(*mask));
+}
diff --git a/tools/testing/selftests/arm64/bti/signal.h b/tools/testing/selftests/arm64/bti/signal.h
new file mode 100644 (file)
index 0000000..103457d
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019  Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef SIGNAL_H
+#define SIGNAL_H
+
+#include <linux/signal.h>
+
+#include "system.h"
+
+typedef __sighandler_t sighandler_t;
+
+int sigemptyset(sigset_t *s);
+int sigaddset(sigset_t *s, int n);
+int sigaction(int n, struct sigaction *sa, const struct sigaction *old);
+int sigprocmask(int how, const sigset_t *mask, sigset_t *old);
+
+#endif /* ! SIGNAL_H */
diff --git a/tools/testing/selftests/arm64/bti/start.S b/tools/testing/selftests/arm64/bti/start.S
new file mode 100644 (file)
index 0000000..831f952
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019  Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "assembler.h"
+
+startfn _start
+       mov     x0, sp
+       b       start
+endfn
+
+emit_aarch64_feature_1_and
diff --git a/tools/testing/selftests/arm64/bti/syscall.S b/tools/testing/selftests/arm64/bti/syscall.S
new file mode 100644 (file)
index 0000000..8dde8b6
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019  Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "assembler.h"
+
+startfn syscall
+       bti     c
+       mov     w8, w0
+       mov     x0, x1
+       mov     x1, x2
+       mov     x2, x3
+       mov     x3, x4
+       mov     x4, x5
+       mov     x5, x6
+       mov     x6, x7
+       svc     #0
+       ret
+endfn
+
+emit_aarch64_feature_1_and
diff --git a/tools/testing/selftests/arm64/bti/system.c b/tools/testing/selftests/arm64/bti/system.c
new file mode 100644 (file)
index 0000000..6385d8d
--- /dev/null
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019  Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "system.h"
+
+#include <asm/unistd.h>
+
+#include "compiler.h"
+
+void __noreturn exit(int n)
+{
+       syscall(__NR_exit, n);
+       __unreachable();
+}
+
+ssize_t write(int fd, const void *buf, size_t size)
+{
+       return syscall(__NR_write, fd, buf, size);
+}
diff --git a/tools/testing/selftests/arm64/bti/system.h b/tools/testing/selftests/arm64/bti/system.h
new file mode 100644 (file)
index 0000000..aca1185
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019  Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#ifndef SYSTEM_H
+#define SYSTEM_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+
+typedef __kernel_size_t size_t;
+typedef __kernel_ssize_t ssize_t;
+
+#include <linux/errno.h>
+#include <asm/hwcap.h>
+#include <asm/ptrace.h>
+#include <asm/unistd.h>
+
+#include "compiler.h"
+
+long syscall(int nr, ...);
+
+void __noreturn exit(int n);
+ssize_t write(int fd, const void *buf, size_t size);
+
+#endif /* ! SYSTEM_H */
diff --git a/tools/testing/selftests/arm64/bti/test.c b/tools/testing/selftests/arm64/bti/test.c
new file mode 100644 (file)
index 0000000..656b049
--- /dev/null
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019,2021  Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "system.h"
+
+#include <linux/errno.h>
+#include <linux/auxvec.h>
+#include <linux/signal.h>
+#include <asm/sigcontext.h>
+#include <asm/ucontext.h>
+
+typedef struct ucontext ucontext_t;
+
+#include "btitest.h"
+#include "compiler.h"
+#include "signal.h"
+
+#define EXPECTED_TESTS 18
+
+static volatile unsigned int test_num = 1;
+static unsigned int test_passed;
+static unsigned int test_failed;
+static unsigned int test_skipped;
+
+static void fdputs(int fd, const char *str)
+{
+       size_t len = 0;
+       const char *p = str;
+
+       while (*p++)
+               ++len;
+
+       write(fd, str, len);
+}
+
+static void putstr(const char *str)
+{
+       fdputs(1, str);
+}
+
+static void putnum(unsigned int num)
+{
+       char c;
+
+       if (num / 10)
+               putnum(num / 10);
+
+       c = '0' + (num % 10);
+       write(1, &c, 1);
+}
+
+#define puttestname(test_name, trampoline_name) do {   \
+       putstr(test_name);                              \
+       putstr("/");                                    \
+       putstr(trampoline_name);                        \
+} while (0)
+
+void print_summary(void)
+{
+       putstr("# Totals: pass:");
+       putnum(test_passed);
+       putstr(" fail:");
+       putnum(test_failed);
+       putstr(" xfail:0 xpass:0 skip:");
+       putnum(test_skipped);
+       putstr(" error:0\n");
+}
+
+static const char *volatile current_test_name;
+static const char *volatile current_trampoline_name;
+static volatile int sigill_expected, sigill_received;
+
+static void handler(int n, siginfo_t *si __always_unused,
+                   void *uc_ __always_unused)
+{
+       ucontext_t *uc = uc_;
+
+       putstr("# \t[SIGILL in ");
+       puttestname(current_test_name, current_trampoline_name);
+       putstr(", BTYPE=");
+       write(1, &"00011011"[((uc->uc_mcontext.pstate & PSR_BTYPE_MASK)
+                             >> PSR_BTYPE_SHIFT) * 2], 2);
+       if (!sigill_expected) {
+               putstr("]\n");
+               putstr("not ok ");
+               putnum(test_num);
+               putstr(" ");
+               puttestname(current_test_name, current_trampoline_name);
+               putstr("(unexpected SIGILL)\n");
+               print_summary();
+               exit(128 + n);
+       }
+
+       putstr(" (expected)]\n");
+       sigill_received = 1;
+       /* zap BTYPE so that resuming the faulting code will work */
+       uc->uc_mcontext.pstate &= ~PSR_BTYPE_MASK;
+}
+
+static int skip_all;
+
+static void __do_test(void (*trampoline)(void (*)(void)),
+                     void (*fn)(void),
+                     const char *trampoline_name,
+                     const char *name,
+                     int expect_sigill)
+{
+       if (skip_all) {
+               test_skipped++;
+               putstr("ok ");
+               putnum(test_num);
+               putstr(" ");
+               puttestname(name, trampoline_name);
+               putstr(" # SKIP\n");
+
+               return;
+       }
+
+       /* Branch Target exceptions should only happen in BTI binaries: */
+       if (!BTI)
+               expect_sigill = 0;
+
+       sigill_expected = expect_sigill;
+       sigill_received = 0;
+       current_test_name = name;
+       current_trampoline_name = trampoline_name;
+
+       trampoline(fn);
+
+       if (expect_sigill && !sigill_received) {
+               putstr("not ok ");
+               test_failed++;
+       } else {
+               putstr("ok ");
+               test_passed++;
+       }
+       putnum(test_num++);
+       putstr(" ");
+       puttestname(name, trampoline_name);
+       putstr("\n");
+}
+
+#define do_test(expect_sigill_br_x0,                                   \
+               expect_sigill_br_x16,                                   \
+               expect_sigill_blr,                                      \
+               name)                                                   \
+do {                                                                   \
+       __do_test(call_using_br_x0, name, "call_using_br_x0", #name,    \
+                 expect_sigill_br_x0);                                 \
+       __do_test(call_using_br_x16, name, "call_using_br_x16", #name,  \
+                 expect_sigill_br_x16);                                \
+       __do_test(call_using_blr, name, "call_using_blr", #name,        \
+                 expect_sigill_blr);                                   \
+} while (0)
+
+void start(int *argcp)
+{
+       struct sigaction sa;
+       void *const *p;
+       const struct auxv_entry {
+               unsigned long type;
+               unsigned long val;
+       } *auxv;
+       unsigned long hwcap = 0, hwcap2 = 0;
+
+       putstr("TAP version 13\n");
+       putstr("1..");
+       putnum(EXPECTED_TESTS);
+       putstr("\n");
+
+       /* Gross hack for finding AT_HWCAP2 from the initial process stack: */
+       p = (void *const *)argcp + 1 + *argcp + 1; /* start of environment */
+       /* step over environment */
+       while (*p++)
+               ;
+       for (auxv = (const struct auxv_entry *)p; auxv->type != AT_NULL; ++auxv) {
+               switch (auxv->type) {
+               case AT_HWCAP:
+                       hwcap = auxv->val;
+                       break;
+               case AT_HWCAP2:
+                       hwcap2 = auxv->val;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       if (hwcap & HWCAP_PACA)
+               putstr("# HWCAP_PACA present\n");
+       else
+               putstr("# HWCAP_PACA not present\n");
+
+       if (hwcap2 & HWCAP2_BTI) {
+               putstr("# HWCAP2_BTI present\n");
+               if (!(hwcap & HWCAP_PACA))
+                       putstr("# Bad hardware?  Expect problems.\n");
+       } else {
+               putstr("# HWCAP2_BTI not present\n");
+               skip_all = 1;
+       }
+
+       putstr("# Test binary");
+       if (!BTI)
+               putstr(" not");
+       putstr(" built for BTI\n");
+
+       sa.sa_handler = (sighandler_t)(void *)handler;
+       sa.sa_flags = SA_SIGINFO;
+       sigemptyset(&sa.sa_mask);
+       sigaction(SIGILL, &sa, NULL);
+       sigaddset(&sa.sa_mask, SIGILL);
+       sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);
+
+       do_test(1, 1, 1, nohint_func);
+       do_test(1, 1, 1, bti_none_func);
+       do_test(1, 0, 0, bti_c_func);
+       do_test(0, 0, 1, bti_j_func);
+       do_test(0, 0, 0, bti_jc_func);
+       do_test(1, 0, 0, paciasp_func);
+
+       print_summary();
+
+       if (test_num - 1 != EXPECTED_TESTS)
+               putstr("# WARNING - EXPECTED TEST COUNT WRONG\n");
+
+       if (test_failed)
+               exit(1);
+       else
+               exit(0);
+}
diff --git a/tools/testing/selftests/arm64/bti/teststubs.S b/tools/testing/selftests/arm64/bti/teststubs.S
new file mode 100644 (file)
index 0000000..b62c8c3
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019  Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "assembler.h"
+
+startfn bti_none_func
+       bti
+       ret
+endfn
+
+startfn bti_c_func
+       bti     c
+       ret
+endfn
+
+startfn bti_j_func
+       bti     j
+       ret
+endfn
+
+startfn bti_jc_func
+       bti     jc
+       ret
+endfn
+
+startfn paciasp_func
+       paciasp
+       autiasp
+       ret
+endfn
+
+startfn nohint_func
+       ret
+endfn
+
+emit_aarch64_feature_1_and
diff --git a/tools/testing/selftests/arm64/bti/trampoline.S b/tools/testing/selftests/arm64/bti/trampoline.S
new file mode 100644 (file)
index 0000000..09beb3f
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019  Arm Limited
+ * Original author: Dave Martin <Dave.Martin@arm.com>
+ */
+
+#include "assembler.h"
+
+startfn call_using_br_x0
+       bti     c
+       br      x0
+endfn
+
+startfn call_using_br_x16
+       bti     c
+       mov     x16, x0
+       br      x16
+endfn
+
+startfn call_using_blr
+       paciasp
+       stp     x29, x30, [sp, #-16]!
+       blr     x0
+       ldp     x29, x30, [sp], #16
+       autiasp
+       ret
+endfn
+
+emit_aarch64_feature_1_and
index 0b3af552632a62123bada07b2b508478eb9ac762..409e3e53d00af61d1a11b7018e6afdf706476019 100644 (file)
@@ -1,14 +1,18 @@
 # SPDX-License-Identifier: GPL-2.0
 # Copyright (C) 2020 ARM Limited
 
-CFLAGS += -std=gnu99 -I. -lpthread
+# preserve CC value from top level Makefile
+ifeq ($(CC),cc)
+CC := $(CROSS_COMPILE)gcc
+endif
+
+CFLAGS += -std=gnu99 -I. -pthread
+LDFLAGS += -pthread
 SRCS := $(filter-out mte_common_util.c,$(wildcard *.c))
 PROGS := $(patsubst %.c,%,$(SRCS))
 
 #Add mte compiler option
-ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep gcc),)
 CFLAGS += -march=armv8.5-a+memtag
-endif
 
 #check if the compiler works well
 mte_cc_support := $(shell if ($(CC) $(CFLAGS) -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi)
@@ -19,11 +23,14 @@ TEST_GEN_PROGS := $(PROGS)
 
 # Get Kernel headers installed and use them.
 KSFT_KHDR_INSTALL := 1
+else
+    $(warning compiler "$(CC)" does not support the ARMv8.5 MTE extension.)
+    $(warning test program "mte" will not be created.)
 endif
 
 # Include KSFT lib.mk.
 include ../../lib.mk
 
 ifeq ($(mte_cc_support),1)
-$(TEST_GEN_PROGS): mte_common_util.c mte_common_util.h mte_helper.S
+$(TEST_GEN_PROGS): mte_common_util.c mte_helper.S
 endif
index 3b23c4d61d38885bd8ee98a2c6f7c1a6748cdb4e..88c74bc46d4fce66d6f5c1ba9cdb386bf0d8c6da 100644 (file)
@@ -33,7 +33,10 @@ static unsigned long read_sysfs(char *str)
                ksft_print_msg("ERR: missing %s\n", str);
                return 0;
        }
-       fscanf(f, "%lu", &val);
+       if (fscanf(f, "%lu", &val) != 1) {
+               ksft_print_msg("ERR: parsing %s\n", str);
+               val = 0;
+       }
        fclose(f);
        return val;
 }
index 4bfa80f2a8c3e1743bea5e8867e28e432888e040..1de7a0abd0ae391d3d3c117eca425b437be9b01c 100644 (file)
@@ -33,7 +33,8 @@ static int check_usermem_access_fault(int mem_type, int mode, int mapping)
        if (fd == -1)
                return KSFT_FAIL;
        for (i = 0; i < len; i++)
-               write(fd, &val, sizeof(val));
+               if (write(fd, &val, sizeof(val)) != sizeof(val))
+                       return KSFT_FAIL;
        lseek(fd, 0, 0);
        ptr = mte_allocate_memory(len, mem_type, mapping, true);
        if (check_allocated_memory(ptr, len, mem_type, true) != KSFT_PASS) {
index 39f8908988eab0d032382bc551df6bf754691a4f..f50ac31920d134bd904ce09b846ff7f3b0eecf24 100644 (file)
@@ -181,10 +181,17 @@ void *mte_allocate_file_memory(size_t size, int mem_type, int mapping, bool tags
        }
        /* Initialize the file for mappable size */
        lseek(fd, 0, SEEK_SET);
-       for (index = INIT_BUFFER_SIZE; index < size; index += INIT_BUFFER_SIZE)
-               write(fd, buffer, INIT_BUFFER_SIZE);
+       for (index = INIT_BUFFER_SIZE; index < size; index += INIT_BUFFER_SIZE) {
+               if (write(fd, buffer, INIT_BUFFER_SIZE) != INIT_BUFFER_SIZE) {
+                       perror("initialising buffer");
+                       return NULL;
+               }
+       }
        index -= INIT_BUFFER_SIZE;
-       write(fd, buffer, size - index);
+       if (write(fd, buffer, size - index) != size - index) {
+               perror("initialising buffer");
+               return NULL;
+       }
        return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, fd);
 }
 
@@ -202,9 +209,15 @@ void *mte_allocate_file_memory_tag_range(size_t size, int mem_type, int mapping,
        /* Initialize the file for mappable size */
        lseek(fd, 0, SEEK_SET);
        for (index = INIT_BUFFER_SIZE; index < map_size; index += INIT_BUFFER_SIZE)
-               write(fd, buffer, INIT_BUFFER_SIZE);
+               if (write(fd, buffer, INIT_BUFFER_SIZE) != INIT_BUFFER_SIZE) {
+                       perror("initialising buffer");
+                       return NULL;
+               }
        index -= INIT_BUFFER_SIZE;
-       write(fd, buffer, map_size - index);
+       if (write(fd, buffer, map_size - index) != map_size - index) {
+               perror("initialising buffer");
+               return NULL;
+       }
        return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
                                           range_after, true, fd);
 }
@@ -271,29 +284,20 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask)
 
        en |= (incl_mask << PR_MTE_TAG_SHIFT);
        /* Enable address tagging ABI, mte error reporting mode and tag inclusion mask. */
-       if (!prctl(PR_SET_TAGGED_ADDR_CTRL, en, 0, 0, 0) == 0) {
+       if (prctl(PR_SET_TAGGED_ADDR_CTRL, en, 0, 0, 0) != 0) {
                ksft_print_msg("FAIL:prctl PR_SET_TAGGED_ADDR_CTRL for mte mode\n");
                return -EINVAL;
        }
        return 0;
 }
 
-#define ID_AA64PFR1_MTE_SHIFT          8
-#define ID_AA64PFR1_MTE                        2
-
 int mte_default_setup(void)
 {
-       unsigned long hwcaps = getauxval(AT_HWCAP);
+       unsigned long hwcaps2 = getauxval(AT_HWCAP2);
        unsigned long en = 0;
        int ret;
 
-       if (!(hwcaps & HWCAP_CPUID)) {
-               ksft_print_msg("FAIL: CPUID registers unavailable\n");
-               return KSFT_FAIL;
-       }
-       /* Read ID_AA64PFR1_EL1 register */
-       asm volatile("mrs %0, id_aa64pfr1_el1" : "=r"(hwcaps) : : "memory");
-       if (((hwcaps >> ID_AA64PFR1_MTE_SHIFT) & MT_TAG_MASK) != ID_AA64PFR1_MTE) {
+       if (!(hwcaps2 & HWCAP2_MTE)) {
                ksft_print_msg("FAIL: MTE features unavailable\n");
                return KSFT_SKIP;
        }
@@ -333,6 +337,7 @@ int create_temp_file(void)
        /* Create a file in the tmpfs filesystem */
        fd = mkstemp(&filename[0]);
        if (fd == -1) {
+               perror(filename);
                ksft_print_msg("FAIL: Unable to open temporary file\n");
                return 0;
        }
index 37c5494a0381bff8d8aa841618a1a8e997ae5c5c..e25917f0460251d4ffdd8e0d9744b3b927b1c83d 100644 (file)
@@ -6,6 +6,7 @@
 #include <test_progs.h>
 #include "bpf_dctcp.skel.h"
 #include "bpf_cubic.skel.h"
+#include "bpf_tcp_nogpl.skel.h"
 
 #define min(a, b) ((a) < (b) ? (a) : (b))
 
@@ -227,10 +228,53 @@ static void test_dctcp(void)
        bpf_dctcp__destroy(dctcp_skel);
 }
 
+static char *err_str;
+static bool found;
+
+static int libbpf_debug_print(enum libbpf_print_level level,
+                             const char *format, va_list args)
+{
+       char *log_buf;
+
+       if (level != LIBBPF_WARN ||
+           strcmp(format, "libbpf: \n%s\n")) {
+               vprintf(format, args);
+               return 0;
+       }
+
+       log_buf = va_arg(args, char *);
+       if (!log_buf)
+               goto out;
+       if (err_str && strstr(log_buf, err_str) != NULL)
+               found = true;
+out:
+       printf(format, log_buf);
+       return 0;
+}
+
+static void test_invalid_license(void)
+{
+       libbpf_print_fn_t old_print_fn;
+       struct bpf_tcp_nogpl *skel;
+
+       err_str = "struct ops programs must have a GPL compatible license";
+       found = false;
+       old_print_fn = libbpf_set_print(libbpf_debug_print);
+
+       skel = bpf_tcp_nogpl__open_and_load();
+       ASSERT_NULL(skel, "bpf_tcp_nogpl");
+       ASSERT_EQ(found, true, "expected_err_msg");
+
+       bpf_tcp_nogpl__destroy(skel);
+       libbpf_set_print(old_print_fn);
+}
+
 void test_bpf_tcp_ca(void)
 {
        if (test__start_subtest("dctcp"))
                test_dctcp();
        if (test__start_subtest("cubic"))
                test_cubic();
+       if (test__start_subtest("invalid_license"))
+               test_invalid_license();
 }
diff --git a/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c b/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c
new file mode 100644 (file)
index 0000000..2ecd833
--- /dev/null
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tcp_helpers.h"
+
+char _license[] SEC("license") = "X";
+
+void BPF_STRUCT_OPS(nogpltcp_init, struct sock *sk)
+{
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops bpf_nogpltcp = {
+       .init           = (void *)nogpltcp_init,
+       .name           = "bpf_nogpltcp",
+};
index 57ed67b8607465c97c1ee6020b06373043dc7e34..8a1caf46ffbc37800612d6e4a447733cd473b380 100644 (file)
        },
        .fixup_map_hash_8b = { 3 },
        /* not actually fully unbounded, but the bound is very high */
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root",
-       .result_unpriv = REJECT,
        .errstr = "value -4294967168 makes map_value pointer be out of bounds",
        .result = REJECT,
 },
        BPF_EXIT_INSN(),
        },
        .fixup_map_hash_8b = { 3 },
-       /* not actually fully unbounded, but the bound is very high */
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root",
-       .result_unpriv = REJECT,
        .errstr = "value -4294967168 makes map_value pointer be out of bounds",
        .result = REJECT,
 },
index c162498a64fc6a0644903bde7255304690af66b7..91869aea6d6414acfb34bd9caa52968c2e7f0303 100644 (file)
@@ -6,7 +6,7 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "R0 tried to subtract pointer from scalar",
        .result = REJECT,
 },
@@ -21,7 +21,7 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 1,
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "R0 tried to subtract pointer from scalar",
        .result = REJECT,
 },
 {
        "check deducing bounds from const, 4",
        .insns = {
+               BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
                BPF_MOV64_IMM(BPF_REG_0, 0),
                BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
                BPF_EXIT_INSN(),
                BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
                BPF_EXIT_INSN(),
-               BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+               BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_0),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R6 has pointer with unsupported alu operation",
        .result_unpriv = REJECT,
        .result = ACCEPT,
 },
@@ -61,7 +62,7 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "R0 tried to subtract pointer from scalar",
        .result = REJECT,
 },
@@ -74,7 +75,7 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "R0 tried to subtract pointer from scalar",
        .result = REJECT,
 },
@@ -88,7 +89,7 @@
                            offsetof(struct __sk_buff, mark)),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "dereference of modified ctx ptr",
        .result = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
                            offsetof(struct __sk_buff, mark)),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "dereference of modified ctx ptr",
        .result = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "R0 tried to subtract pointer from scalar",
        .result = REJECT,
 },
index 9baca7a75c42ae8db9918745824b4b944a05911a..c2aa6f26738b4181d8e944916a04d09cb0def051 100644 (file)
@@ -19,7 +19,6 @@
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
@@ -43,7 +42,6 @@
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
@@ -69,7 +67,6 @@
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
@@ -94,7 +91,6 @@
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 4 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
-       .result_unpriv = REJECT,
 },
index 6f610cfddae53b1099c905e9ff7b345e3485cb09..1f82021429bf2394fa08f331747e008949862c38 100644 (file)
@@ -76,7 +76,7 @@
        },
        .fixup_map_hash_16b = { 4 },
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .result = ACCEPT,
 },
 {
@@ -94,6 +94,6 @@
        },
        .fixup_map_hash_16b = { 4 },
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R0 has pointer with unsupported alu operation",
        .result = ACCEPT,
 },
index 3e32400c4b44b5590166f69a073b9276c89e1ed4..bd436df5cc3266af3df04bc8ce284c94edd1a8d6 100644 (file)
        BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
        BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
        .result_unpriv = REJECT,
        .result = ACCEPT,
 },
index feb91266db39a09cf78f21d34666fd129b0684fb..e5913fd3b9030d41f944a1fd0babdd629d31293e 100644 (file)
@@ -21,8 +21,6 @@
        .fixup_map_hash_16b = { 5 },
        .fixup_map_array_48b = { 8 },
        .result = ACCEPT,
-       .result_unpriv = REJECT,
-       .errstr_unpriv = "R1 tried to add from different maps",
        .retval = 1,
 },
 {
        .fixup_map_array_48b = { 1 },
        .result = ACCEPT,
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R2 tried to add from different pointers or scalars",
+       .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
        .retval = 0,
 },
 {
        .fixup_map_array_48b = { 1 },
        .result = ACCEPT,
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R2 tried to add from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
        .retval = 0,
 },
 {
index 5ebc1aec7923b41b3d8697957883d1c358ffd53d..0e393cb5f42de42d9d93a4b954b32b16881bb7e6 100644 (file)
@@ -95,7 +95,7 @@ static bool test_fw_in_ns(const char *fw_name, const char *sys_path, bool block_
                }
                if (block_fw_in_parent_ns)
                        umount("/lib/firmware");
-               return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
+               return WEXITSTATUS(status) == EXIT_SUCCESS;
        }
 
        if (unshare(CLONE_NEWNS) != 0) {
index f26212605b6b7592b1b255e11e6e165b9e975705..d4b0be857deba00b2b51ed49e9b2fafed348e698 100644 (file)
@@ -1,2 +1,3 @@
 *.sh
 !run.sh
+!stack-entropy.sh
index 1bcc9ee990eb92db6051f3c28519fe568e25e1f4..c71109ceeb2d2538666f093c23d595c0d26754d0 100644 (file)
@@ -5,6 +5,7 @@ include ../lib.mk
 
 # NOTE: $(OUTPUT) won't get default value if used before lib.mk
 TEST_FILES := tests.txt
+TEST_PROGS := stack-entropy.sh
 TEST_GEN_PROGS = $(patsubst %,$(OUTPUT)/%.sh,$(shell awk '{print $$1}' tests.txt | sed -e 's/\#//'))
 all: $(TEST_GEN_PROGS)
 
diff --git a/tools/testing/selftests/lkdtm/stack-entropy.sh b/tools/testing/selftests/lkdtm/stack-entropy.sh
new file mode 100755 (executable)
index 0000000..b1b8a50
--- /dev/null
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Measure kernel stack entropy by sampling via LKDTM's REPORT_STACK test.
+set -e
+samples="${1:-1000}"
+
+# Capture dmesg continuously since it may fill up depending on sample size.
+log=$(mktemp -t stack-entropy-XXXXXX)
+dmesg --follow >"$log" & pid=$!
+report=-1
+for i in $(seq 1 $samples); do
+        echo "REPORT_STACK" >/sys/kernel/debug/provoke-crash/DIRECT
+       if [ -t 1 ]; then
+               percent=$(( 100 * $i / $samples ))
+               if [ "$percent" -ne "$report" ]; then
+                       /bin/echo -en "$percent%\r"
+                       report="$percent"
+               fi
+       fi
+done
+kill "$pid"
+
+# Count unique offsets since last run.
+seen=$(tac "$log" | grep -m1 -B"$samples"0 'Starting stack offset' | \
+       grep 'Stack offset' | awk '{print $NF}' | sort | uniq -c | wc -l)
+bits=$(echo "obase=2; $seen" | bc | wc -L)
+echo "Bits of stack entropy: $bits"
+rm -f "$log"
+
+# We would expect any functional stack randomization to be at least 5 bits.
+if [ "$bits" -lt 5 ]; then
+       exit 1
+else
+       exit 0
+fi
index 0ccb1dda099ae9558d7cb8b2579092f1128f0a5d..eb307ca37bfa69df1acd7cccad1435fed0cf166e 100755 (executable)
@@ -657,10 +657,21 @@ test_ecn_decap()
 {
        # In accordance with INET_ECN_decapsulate()
        __test_ecn_decap 00 00 0x00
+       __test_ecn_decap 00 01 0x00
+       __test_ecn_decap 00 02 0x00
+       # 00 03 is tested in test_ecn_decap_error()
+       __test_ecn_decap 01 00 0x01
        __test_ecn_decap 01 01 0x01
-       __test_ecn_decap 02 01 0x01
+       __test_ecn_decap 01 02 0x01
        __test_ecn_decap 01 03 0x03
+       __test_ecn_decap 02 00 0x02
+       __test_ecn_decap 02 01 0x01
+       __test_ecn_decap 02 02 0x02
        __test_ecn_decap 02 03 0x03
+       __test_ecn_decap 03 00 0x03
+       __test_ecn_decap 03 01 0x03
+       __test_ecn_decap 03 02 0x03
+       __test_ecn_decap 03 03 0x03
        test_ecn_decap_error
 }
 
index 592c1ccf4576d316c81b1abc76c7accd0a0e468d..0bd73428d2f3ebb8787130df22627abfbfc07ca7 100644 (file)
@@ -14,7 +14,7 @@
 #define __aligned(x) __attribute__((__aligned__(x)))
 #define __packed __attribute__((packed))
 
-#include "../../../../arch/x86/kernel/cpu/sgx/arch.h"
+#include "../../../../arch/x86/include/asm/sgx.h"
 #include "../../../../arch/x86/include/asm/enclu.h"
 #include "../../../../arch/x86/include/uapi/asm/sgx.h"
 
index 9d43b75aaa55323428eae29cb02e6588a3d2241f..f441ac34b4d4454f918af8003c659c582036b6e0 100644 (file)
@@ -45,19 +45,19 @@ static bool encl_map_bin(const char *path, struct encl *encl)
 
        fd = open(path, O_RDONLY);
        if (fd == -1)  {
-               perror("open()");
+               perror("enclave executable open()");
                return false;
        }
 
        ret = stat(path, &sb);
        if (ret) {
-               perror("stat()");
+               perror("enclave executable stat()");
                goto err;
        }
 
        bin = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
        if (bin == MAP_FAILED) {
-               perror("mmap()");
+               perror("enclave executable mmap()");
                goto err;
        }
 
@@ -90,8 +90,7 @@ static bool encl_ioc_create(struct encl *encl)
        ioc.src = (unsigned long)secs;
        rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_CREATE, &ioc);
        if (rc) {
-               fprintf(stderr, "SGX_IOC_ENCLAVE_CREATE failed: errno=%d\n",
-                       errno);
+               perror("SGX_IOC_ENCLAVE_CREATE failed");
                munmap((void *)secs->base, encl->encl_size);
                return false;
        }
@@ -116,31 +115,72 @@ static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
 
        rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_ADD_PAGES, &ioc);
        if (rc < 0) {
-               fprintf(stderr, "SGX_IOC_ENCLAVE_ADD_PAGES failed: errno=%d.\n",
-                       errno);
+               perror("SGX_IOC_ENCLAVE_ADD_PAGES failed");
                return false;
        }
 
        return true;
 }
 
+
+
 bool encl_load(const char *path, struct encl *encl)
 {
+       const char device_path[] = "/dev/sgx_enclave";
        Elf64_Phdr *phdr_tbl;
        off_t src_offset;
        Elf64_Ehdr *ehdr;
+       struct stat sb;
+       void *ptr;
        int i, j;
        int ret;
+       int fd = -1;
 
        memset(encl, 0, sizeof(*encl));
 
-       ret = open("/dev/sgx_enclave", O_RDWR);
-       if (ret < 0) {
-               fprintf(stderr, "Unable to open /dev/sgx_enclave\n");
+       fd = open(device_path, O_RDWR);
+       if (fd < 0) {
+               perror("Unable to open /dev/sgx_enclave");
+               goto err;
+       }
+
+       ret = stat(device_path, &sb);
+       if (ret) {
+               perror("device file stat()");
+               goto err;
+       }
+
+       /*
+        * This just checks if the /dev file has these permission
+        * bits set.  It does not check that the current user is
+        * the owner or in the owning group.
+        */
+       if (!(sb.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
+               fprintf(stderr, "no execute permissions on device file %s\n", device_path);
+               goto err;
+       }
+
+       ptr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
+       if (ptr == (void *)-1) {
+               perror("mmap for read");
+               goto err;
+       }
+       munmap(ptr, PAGE_SIZE);
+
+#define ERR_MSG \
+"mmap() succeeded for PROT_READ, but failed for PROT_EXEC.\n" \
+" Check that current user has execute permissions on %s and \n" \
+" that /dev does not have noexec set: mount | grep \"/dev .*noexec\"\n" \
+" If so, remount it executable: mount -o remount,exec /dev\n\n"
+
+       ptr = mmap(NULL, PAGE_SIZE, PROT_EXEC, MAP_SHARED, fd, 0);
+       if (ptr == (void *)-1) {
+               fprintf(stderr, ERR_MSG, device_path);
                goto err;
        }
+       munmap(ptr, PAGE_SIZE);
 
-       encl->fd = ret;
+       encl->fd = fd;
 
        if (!encl_map_bin(path, encl))
                goto err;
@@ -217,6 +257,8 @@ bool encl_load(const char *path, struct encl *encl)
        return true;
 
 err:
+       if (fd != -1)
+               close(fd);
        encl_delete(encl);
        return false;
 }
@@ -229,7 +271,7 @@ static bool encl_map_area(struct encl *encl)
        area = mmap(NULL, encl_size * 2, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (area == MAP_FAILED) {
-               perror("mmap");
+               perror("reservation mmap()");
                return false;
        }
 
@@ -268,8 +310,7 @@ bool encl_build(struct encl *encl)
        ioc.sigstruct = (uint64_t)&encl->sigstruct;
        ret = ioctl(encl->fd, SGX_IOC_ENCLAVE_INIT, &ioc);
        if (ret) {
-               fprintf(stderr, "SGX_IOC_ENCLAVE_INIT failed: errno=%d\n",
-                       errno);
+               perror("SGX_IOC_ENCLAVE_INIT failed");
                return false;
        }
 
index 724cec700926b8a7b03f4b81cfdadb0dad24e00d..d304a4044eb9c69b69b3a0865fe26227ef22f5be 100644 (file)
@@ -15,6 +15,7 @@
 #include <sys/stat.h>
 #include <sys/time.h>
 #include <sys/types.h>
+#include <sys/auxv.h>
 #include "defines.h"
 #include "main.h"
 #include "../kselftest.h"
@@ -28,24 +29,6 @@ struct vdso_symtab {
        Elf64_Word *elf_hashtab;
 };
 
-static void *vdso_get_base_addr(char *envp[])
-{
-       Elf64_auxv_t *auxv;
-       int i;
-
-       for (i = 0; envp[i]; i++)
-               ;
-
-       auxv = (Elf64_auxv_t *)&envp[i + 1];
-
-       for (i = 0; auxv[i].a_type != AT_NULL; i++) {
-               if (auxv[i].a_type == AT_SYSINFO_EHDR)
-                       return (void *)auxv[i].a_un.a_val;
-       }
-
-       return NULL;
-}
-
 static Elf64_Dyn *vdso_get_dyntab(void *addr)
 {
        Elf64_Ehdr *ehdr = addr;
@@ -162,7 +145,7 @@ static int user_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r
        return 0;
 }
 
-int main(int argc, char *argv[], char *envp[])
+int main(int argc, char *argv[])
 {
        struct sgx_enclave_run run;
        struct vdso_symtab symtab;
@@ -195,7 +178,7 @@ int main(int argc, char *argv[], char *envp[])
                addr = mmap((void *)encl.encl_base + seg->offset, seg->size,
                            seg->prot, MAP_SHARED | MAP_FIXED, encl.fd, 0);
                if (addr == MAP_FAILED) {
-                       fprintf(stderr, "mmap() failed, errno=%d.\n", errno);
+                       perror("mmap() segment failed");
                        exit(KSFT_FAIL);
                }
        }
@@ -203,7 +186,8 @@ int main(int argc, char *argv[], char *envp[])
        memset(&run, 0, sizeof(run));
        run.tcs = encl.encl_base;
 
-       addr = vdso_get_base_addr(envp);
+       /* Get vDSO base address */
+       addr = (void *)getauxval(AT_SYSINFO_EHDR);
        if (!addr)
                goto err;
 
index bfc974b4572d55eb2c2db684a3307dcb66fbe0b7..ef8eb3604595eb9a2b56e5e48c61673dc2c9d0fd 100644 (file)
@@ -3,7 +3,7 @@
  *             (C) Copyright IBM 2012
  *             Licensed under the GPLv2
  *
- *  NOTE: This is a meta-test which quickly changes the clocksourc and
+ *  NOTE: This is a meta-test which quickly changes the clocksource and
  *  then uses other tests to detect problems. Thus this test requires
  *  that the inconsistency-check and nanosleep tests be present in the
  *  same directory it is run from.
@@ -134,7 +134,7 @@ int main(int argv, char **argc)
                return -1;
        }
 
-       /* Check everything is sane before we start switching asyncrhonously */
+       /* Check everything is sane before we start switching asynchronously */
        for (i = 0; i < count; i++) {
                printf("Validating clocksource %s\n", clocksource_list[i]);
                if (change_clocksource(clocksource_list[i])) {
index 19e46ed5dfb5915540404e95acabc7430ba08505..23eb398c8140ae0a9efb637fcc8ecaf331f47a23 100644 (file)
@@ -5,7 +5,7 @@
  *              Licensed under the GPLv2
  *
  *  This test signals the kernel to insert a leap second
- *  every day at midnight GMT. This allows for stessing the
+ *  every day at midnight GMT. This allows for stressing the
  *  kernel's leap-second behavior, as well as how well applications
  *  handle the leap-second discontinuity.
  *
index dc80728ed1915551bf83c1ea855450d0ccc31c8a..f70802c5dd0d67e16596db582ffb6850575dd4fa 100644 (file)
@@ -4,10 +4,10 @@
  *              (C) Copyright 2013, 2015 Linaro Limited
  *              Licensed under the GPL
  *
- * This test demonstrates leapsecond deadlock that is possibe
+ * This test demonstrates leapsecond deadlock that is possible
  * on kernels from 2.6.26 to 3.3.
  *
- * WARNING: THIS WILL LIKELY HARDHANG SYSTEMS AND MAY LOSE DATA
+ * WARNING: THIS WILL LIKELY HARD HANG SYSTEMS AND MAY LOSE DATA
  * RUN AT YOUR OWN RISK!
  *  To build:
  *     $ gcc leapcrash.c -o leapcrash -lrt
index cf3e48919874b3a1a23f8a10bb1e2fce5f9dfadd..80aed4bf06fba35cf2885eb81d85a3358b1fada9 100644 (file)
@@ -76,7 +76,7 @@ void checklist(struct timespec *list, int size)
 
 /* The shared thread shares a global list
  * that each thread fills while holding the lock.
- * This stresses clock syncronization across cpus.
+ * This stresses clock synchronization across cpus.
  */
 void *shared_thread(void *arg)
 {
index a71d92da8f46684eefb9ac43550a183ea19460f9..f3f56e681e9fbeaddeda911873c7cfba40363c9b 100644 (file)
@@ -45,3 +45,5 @@ call64_from_32:
        ret
 
 .size call64_from_32, .-call64_from_32
+
+.section .note.GNU-stack,"",%progbits